├── .gitignore
├── .imgs
│   ├── complete_3_mesh.png
│   ├── complete_3_pc.png
│   ├── only_2_mesh.png
│   └── only_2_pc.png
├── DCoG
│   ├── DCoGModel.py
│   ├── __init__.py
│   ├── init_handpose.npy
│   └── init_quat.npy
├── GrainGrasp.py
├── HandAnnotation
│   ├── colored_fingers.ply
│   ├── colored_tips.ply
│   ├── colors.json
│   ├── finger_index.json
│   ├── get_index.py
│   └── tip_index.json
├── PointCVAE
│   ├── __init__.py
│   ├── inference.py
│   ├── model.pth
│   ├── network
│   │   ├── __init__.py
│   │   ├── __pycache__
│   │   │   ├── CVAE.cpython-39.pyc
│   │   │   ├── PointCVAENet.cpython-39.pyc
│   │   │   ├── __init__.cpython-39.pyc
│   │   │   ├── cave.cpython-39.pyc
│   │   │   ├── point_cvae_net.cpython-39.pyc
│   │   │   └── pointnet_encoder.cpython-39.pyc
│   │   ├── cave.py
│   │   ├── point_cvae_net.py
│   │   └── pointnet_encoder.py
│   ├── obman_dataset.py
│   └── train.py
├── README.md
├── SupNet
│   ├── __init__.py
│   ├── __pycache__
│   │   ├── __init__.cpython-39.pyc
│   │   ├── inference.cpython-39.pyc
│   │   ├── load.cpython-39.pyc
│   │   ├── obman_dataset.cpython-39.pyc
│   │   ├── pointnet_encoder.cpython-39.pyc
│   │   └── supnet.cpython-39.pyc
│   ├── inference.py
│   ├── model.pth
│   ├── network
│   │   ├── __pycache__
│   │   │   ├── pointnet_encoder.cpython-39.pyc
│   │   │   └── supnet.cpython-39.pyc
│   │   ├── pointnet_encoder.py
│   │   └── supnet.py
│   ├── obman_dataset.py
│   └── train.py
├── __init__.py
├── config.json
├── config.py
├── dataprocess.py
├── mano
│   ├── __init__.py
│   ├── joints_info.py
│   ├── lbs.py
│   ├── model.py
│   └── utils.py
├── run_complete.py
├── run_only_opt.py
├── sample
│   ├── 1
│   │   ├── hand_pc.npy
│   │   ├── obj_mesh.obj
│   │   └── obj_pc.npy
│   ├── 2
│   │   ├── hand_pc.npy
│   │   ├── obj_mesh.obj
│   │   └── obj_pc.npy
│   ├── 3
│   │   ├── hand_pc.npy
│   │   ├── obj_mesh.obj
│   │   └── obj_pc.npy
│   ├── 4
│   │   ├── hand_pc.npy
│   │   ├── obj_mesh.obj
│   │   └── obj_pc.npy
│   ├── 5
│   │   ├── hand_pc.npy
│   │   ├── obj_mesh.obj
│   │   └── obj_pc.npy
│   ├── 6
│   │   ├── hand_pc.npy
│   │   ├── obj_mesh.obj
│   │   └── obj_pc.npy
│   ├── 7
│   │   ├── hand_pc.npy
│   │   ├── obj_mesh.obj
│   │   └── obj_pc.npy
│   ├── 8
│   │   ├── hand_pc.npy
│   │   ├── obj_mesh.obj
│   │   └── obj_pc.npy
│   ├── 9
│   │   ├── hand_pc.npy
│   │   ├── obj_mesh.obj
│   │   └── obj_pc.npy
│   └── 10
│       ├── hand_pc.npy
│       ├── obj_mesh.obj
│       └── obj_pc.npy
└── utils
    ├── __init__.py
    ├── __pycache__
    │   ├── __init__.cpython-39.pyc
    │   ├── annotate.cpython-39.pyc
    │   ├── energy.cpython-39.pyc
    │   ├── load_obman.cpython-39.pyc
    │   ├── loss.cpython-39.pyc
    │   ├── tools.cpython-39.pyc
    │   ├── utils.cpython-39.pyc
    │   ├── utils_annotate.cpython-39.pyc
    │   ├── utils_cls.cpython-39.pyc
    │   ├── utils_loss.cpython-39.pyc
    │   └── vis.cpython-39.pyc
    ├── annotate.py
    ├── load_obman.py
    ├── loss.py
    ├── tools.py
    └── vis.py
/.gitignore:
--------------------------------------------------------------------------------
1 | logs/
2 | *.pyc
3 | .vscode
4 | *.tmp
5 |
--------------------------------------------------------------------------------
/.imgs/complete_3_mesh.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/.imgs/complete_3_mesh.png
--------------------------------------------------------------------------------
/.imgs/complete_3_pc.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/.imgs/complete_3_pc.png
--------------------------------------------------------------------------------
/.imgs/only_2_mesh.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/.imgs/only_2_mesh.png
--------------------------------------------------------------------------------
/.imgs/only_2_pc.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/.imgs/only_2_pc.png
--------------------------------------------------------------------------------
/DCoG/DCoGModel.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 |
4 | sys.path.append(os.getcwd())
5 | import numpy as np
6 | import torch
7 | import torch.nn.functional as F
8 | from pytorch3d.structures import Meshes
9 | from pytorch3d.ops.knn import knn_points
10 | import mano
11 | from attrdict import AttrDict
12 | from pytorch3d import transforms
13 | from utils import tools
14 | from SupNet import load_model as load_supnet_model
15 |
16 |
17 | class DCoGModel:
18 | def __init__(
19 | self,
20 | mano_path,
21 | init_handpose_path,
22 | init_quat_path,
23 | finger_index_path,
24 | tip_index_path,
25 | supnet_path,
26 | init_move_finger_idx=3,
27 | weights=None,
28 | device="cuda",
29 | ):
30 | self.device = device
31 | self.rh_mano, self.rh_faces = self.load_mano(mano_path)
32 | self.init_handpose = torch.tensor(np.load(init_handpose_path)).to(self.device)
33 | self.init_quat = torch.tensor(np.load(init_quat_path)).to(self.device)
34 | self.init_handpose = self.init_handpose.repeat(self.init_quat.shape[0], 1)
35 | self.rh_faces = self.rh_faces.repeat(self.init_quat.shape[0], 1, 1)
36 | self.finger_index = tools.fingerName2fingerId(tools.readJson(finger_index_path))
37 | self.tip_index = tools.fingerName2fingerId(tools.readJson(tip_index_path))
38 |
39 | self.hand_normal = None
40 | sup_net = load_supnet_model(supnet_path, requires_grad=False)
41 | self.sup_net = sup_net.eval().to(device)
42 | self.init_move_finger_idx = init_move_finger_idx
43 | if weights is None:
44 | weights = AttrDict(
45 | {"w_dis": 0.5, "w_dct": 0.8, "w_dcf": 0.6, "w_net": 0.6, "w_pen": 10}
46 | )
47 | self.weights = weights
48 |
49 | def run(self, obj_pc, obj_cls, epochs=300, select_finger_idx=[1, 2, 3, 4, 5]):
50 | """
51 | obj_pc: [N, 3]
52 | obj_cls: [N]
53 | """
54 | record_hand_pc = []
55 | concat_center = self.get_center_contactmaps(obj_cls, obj_pc)
56 | init_tran = self.get_init_translation(concat_center)
57 | init_tran = init_tran * torch.tensor([[1.2, 1.2, 1.2]]).to(self.device)
58 | init_tran = torch.autograd.Variable(init_tran, requires_grad=True)
59 | quat_rt = torch.autograd.Variable(
60 | torch.Tensor([[1, 0, 0, 0, 0, 0, 0]]).repeat(init_tran.shape[0], 1).to(self.device),
61 | requires_grad=True,
62 | )
63 | init_quat = torch.autograd.Variable(self.init_quat, requires_grad=True)
64 | handpose = torch.autograd.Variable(self.init_handpose, requires_grad=True)
65 | optimizer = torch.optim.Adam([handpose, quat_rt, init_quat, init_tran], lr=0.01)
66 |
67 | for epoch in range(epochs):
68 | optimizer.zero_grad()
69 | q_rot, tran = quat_rt[:, :4], quat_rt[:, 4:]
70 | q_rot = transforms.quaternion_multiply(q_rot, init_quat)
71 | tran = tran + init_tran
72 | m_rot = transforms.quaternion_to_matrix(q_rot)
73 | hand_pc_opt = self.get_hand_pc(handpose, m_rot, tran)
74 | record_hand_pc.append(hand_pc_opt.clone().detach().cpu())
75 | mesh_p3d = Meshes(verts=hand_pc_opt, faces=self.rh_faces)
76 | self.hand_normal = mesh_p3d.verts_normals_packed().view(-1, 778, 3)
77 | E_dis = self.cal_Edis(obj_pc, obj_cls, hand_pc_opt, select_finger_idx)
78 | E_dct = self.cal_Edct(obj_pc, obj_cls, hand_pc_opt)
79 | E_dcf = self.cal_Edcf(obj_pc, hand_pc_opt)
80 | E_net = self.cal_Enet(obj_pc, hand_pc_opt)
81 | E_pen = self.cal_Epen(obj_pc, hand_pc_opt)
82 | E = (
83 | (epoch + 1) * self.weights.w_dis * E_dis
84 | + max(0, epochs - epoch) * (self.weights.w_dct * E_dct + self.weights.w_dcf * E_dcf)
85 | + self.weights.w_net * E_net
86 | + self.weights.w_pen * E_pen
87 | )
88 | E.backward()
89 | optimizer.step()
90 | hand_pc_opt = self.get_hand_pc(handpose, m_rot, tran)
91 | record_hand_pc.append(hand_pc_opt.clone().detach().cpu())
92 | result = AttrDict()
93 | result.record_hand_pc = torch.stack(record_hand_pc) # [epochs+1, B, 778, 3]
94 | result.hand_pc = hand_pc_opt.detach().cpu()
95 | result.handpose = handpose.detach().cpu()
96 | result.rot = m_rot.detach().cpu()
97 | result.translation = tran.detach().cpu()
98 |
99 | return result
100 |
101 | def get_idx_minEpen(self, obj_pc, hand_pose, threshold=0.0):
102 | self.hand_normal = None # recalculate hand normal
103 |         E_pen = self.cal_Epen(obj_pc, hand_pose.to(self.device), return_batch=True)
104 | re_E_pen = E_pen.tolist()
105 | E_pen[E_pen <= threshold] = torch.inf
106 | min_idx = E_pen.argmin().item()
107 | return re_E_pen, min_idx
108 |
109 | def load_mano(self, mano_model_path):
110 | with torch.no_grad():
111 | rh_mano = mano.load(
112 | model_path=mano_model_path,
113 | model_type="mano",
114 | use_pca=True,
115 | num_pca_comps=45,
116 | batch_size=6,
117 | flat_hand_mean=True,
118 | ).to(self.device)
119 | rh_faces = torch.tensor(rh_mano.faces.astype(int)).unsqueeze(0).to(self.device)
120 | return rh_mano, rh_faces
121 |
122 | def get_hand_pc(self, pose, m_rot, tran=None):
123 | hand_pc = self.rh_mano(hand_pose=pose).vertices
124 |         if tran is None:
125 | return torch.bmm(hand_pc, m_rot)
126 | else:
127 | return torch.bmm(hand_pc, m_rot) + tran.reshape(-1, 1, 3)
128 |
129 | def get_center_contactmaps(self, obj_cls, obj_pc):
130 | concat_center = torch.zeros((5, 3)).to(obj_pc.device)
131 | for i in range(1, 6):
132 | concat_center[i - 1] = obj_pc[obj_cls == i].mean(dim=0)
133 | return concat_center
134 |
135 | def get_init_translation(self, concat_center, init_move_finger_idx=-1):
136 | if init_move_finger_idx == -1:
137 | init_move_finger_idx = self.init_move_finger_idx
138 | hand_pc = self.rh_mano(hand_pose=self.init_handpose).vertices
139 | m_rot = transforms.quaternion_to_matrix(self.init_quat)
140 | hand_pc = torch.bmm(hand_pc, m_rot)
141 | select_finger_index = self.finger_index[init_move_finger_idx]
142 | concat_center = concat_center[init_move_finger_idx].repeat(6, 1)
143 | tran = concat_center - hand_pc[:, select_finger_index].mean(dim=1, keepdim=False)
144 | return tran
145 |
146 | def cal_Edis(self, obj_pc, obj_cls, hand_pc, select_idx=[1, 2, 3, 4, 5]):
147 | E = 0
148 | for fingerId, ft_idx in self.tip_index.items():
149 | if fingerId in select_idx:
150 | obj_idx_pc = obj_pc[obj_cls == fingerId].repeat(hand_pc.shape[0], 1, 1)
151 | if obj_idx_pc.shape[1] == 0:
152 | continue
153 | e = knn_points(hand_pc[:, ft_idx], obj_idx_pc, K=1).dists
154 | e = torch.dropout(e, p=0.2, train=True).sum(dim=0).mean()
155 | E += e
156 | return E * 500
157 |
158 | def cal_Edct(self, obj_pc, obj_cls, hand_pc):
159 | if self.hand_normal is None:
160 | mesh = Meshes(verts=hand_pc, faces=self.rh_faces[: hand_pc.shape[0]])
161 | self.hand_normal = mesh.verts_normals_packed().view(-1, 778, 3).to(self.device)
162 |
163 | E = 0
164 | for fingerId, ft_idx in self.tip_index.items():
165 | tip_idx_pc = hand_pc[:, ft_idx]
166 | obj_idx_pc = obj_pc[obj_cls == fingerId].repeat(hand_pc.shape[0], 1, 1)
167 | if obj_idx_pc.shape[1] == 0:
168 | continue
169 | _, _, nn = knn_points(tip_idx_pc, obj_idx_pc, K=1, return_nn=True)
170 | idxtip2obj_normal = F.normalize(nn.squeeze(-2) - tip_idx_pc, dim=2)
171 | idxtip_normal = F.normalize(self.hand_normal[:, ft_idx], dim=2)
172 | e = torch.square(idxtip2obj_normal - idxtip_normal)
173 | e = torch.dropout(e, p=0.2, train=True).sum(dim=0).mean()
174 | E += e
175 |
176 | return E * 0.5
177 |
178 | def cal_Edcf(self, obj_pc, hand_pc):
179 | if self.hand_normal is None:
180 | mesh = Meshes(verts=hand_pc, faces=self.rh_faces[: hand_pc.shape[0]])
181 | self.hand_normal = mesh.verts_normals_packed().view(-1, 778, 3).to(self.device)
182 |
183 | finger_index_all = []
184 | for _, ft_idx in self.finger_index.items():
185 | finger_index_all.extend(ft_idx)
186 | finger_pc = hand_pc[:, finger_index_all]
187 | finger_normal = F.normalize(self.hand_normal[:, finger_index_all], dim=2)
188 | _, _, nn = knn_points(
189 | finger_pc,
190 | obj_pc.repeat(hand_pc.shape[0], 1, 1).contiguous(),
191 | return_nn=True,
192 | )
193 | finger2obj_normal = F.normalize(nn.squeeze(-2) - finger_pc, dim=2)
194 | E = torch.square(finger2obj_normal - finger_normal)
195 | E = torch.dropout(E, p=0.2, train=True).sum(dim=0).mean()
196 | return E
197 |
198 | def cal_Enet(self, obj_pc, hand_pc):
199 | obj_pc = obj_pc.T
200 | obj_pc = obj_pc.repeat(hand_pc.shape[0], 1, 1)
201 | net_pred, _ = self.sup_net(obj_pc, hand_pc)
202 | # E = torch.nn.functional.cross_entropy(
203 | # net_pred,
204 | # torch.ones((net_pred.shape[0]), dtype=torch.long).to(net_pred.device),
205 | # reduction="sum",
206 | # )
207 | net_pred_softmax = torch.softmax(net_pred, 1)
208 | E = -torch.log(net_pred_softmax[:, 1]).sum()
209 | return E / obj_pc.shape[0]
210 |
211 |     def cal_Epen(self, obj_pc, hand_pc, return_batch=False):
212 |         """
213 |         Get the object points that penetrate the hand and their distance to the
214 |         nearest hand vertex.
215 |         :param obj_pc: [N, 3] object point cloud (repeated over the batch internally)
216 |         :param hand_pc: [B, 778, 3] hand vertices
217 |         :param return_batch: if True, return the penetration energy per batch element
218 |         :return: inter-penetration loss
219 |         """
220 | if self.hand_normal is None:
221 | mesh = Meshes(verts=hand_pc, faces=self.rh_faces[: hand_pc.shape[0]])
222 | self.hand_normal = mesh.verts_normals_packed().view(-1, 778, 3).to(self.device)
223 | obj_pc = obj_pc.repeat(hand_pc.shape[0], 1, 1)
224 | B = hand_pc.size(0)
225 | nn_dist, nn_idx, _ = knn_points(obj_pc, hand_pc)
226 | nn_idx = nn_idx.repeat(1, 1, 3)
227 | hand_idx_pc = hand_pc.gather(dim=1, index=nn_idx)
228 | obj2hand_normal = hand_idx_pc - obj_pc
229 | hand_idx_normal = self.hand_normal.gather(dim=1, index=nn_idx)
230 | interior = (obj2hand_normal * hand_idx_normal).sum(
231 | dim=-1
232 | ) > 0 # interior as true, exterior as false
233 | E = nn_dist.squeeze(-1) * interior * 1e4
234 |         if return_batch:
235 | return E.sum(dim=1) / B
236 | else:
237 | return E.sum() / B
238 |
239 | def cal_Edc_Edis(
240 | self,
241 | obj_pc,
242 | obj_cls,
243 | hand_pc,
244 | select_idx=[1, 2, 3, 4, 5],
245 | weight=[0.5, 0.8, 0.6],
246 | ):
247 | E = 0
248 | mesh = Meshes(verts=hand_pc, faces=self.rh_faces)
249 | self.hand_normal = mesh.verts_normals_packed().view(-1, 778, 3)
250 | obj_pc = obj_pc.repeat(hand_pc.shape[0], 1, 1).contiguous()
251 | finger_index_all = []
252 | for fingerId, ft_idx in self.tip_index.items():
253 | finger_index_all.extend(self.finger_index[fingerId])
254 | # E_dct
255 | tip_idx_pc = hand_pc[:, ft_idx]
256 | obj_idx_pc = obj_pc[:, obj_cls == fingerId]
257 | if obj_idx_pc.shape[1] == 0:
258 | continue
259 | dists, _, nn = knn_points(tip_idx_pc, obj_idx_pc, K=1, return_nn=True)
260 | tipidx2obj_normal = F.normalize(nn.squeeze(-2) - tip_idx_pc, dim=2)
261 | tip_idx_normal = F.normalize(self.hand_normal[:, ft_idx], dim=2)
262 | e_dct = torch.square(tipidx2obj_normal - tip_idx_normal)
263 | e_dct = torch.dropout(e_dct, p=0.2, train=True).sum(dim=0).mean() * 0.5
264 | E += e_dct * weight[0]
265 | # E_dis
266 | if fingerId in select_idx:
267 | e_dis = torch.dropout(dists, p=0.2, train=True).sum(dim=0).mean() * 500
268 | E += e_dis * weight[1]
269 | # E_dcf
270 | finger_pc = hand_pc[:, finger_index_all]
271 | finger_normal = F.normalize(self.hand_normal[:, finger_index_all], dim=2)
272 | _, _, nn = knn_points(finger_pc, obj_pc, return_nn=True)
273 | finger2obj_normal = F.normalize(nn.squeeze(-2) - finger_pc, dim=2)
274 | e_dcf = torch.square(finger2obj_normal - finger_normal)
275 | e_dcf = torch.dropout(e_dcf, p=0.2, train=True).sum(dim=0).mean()
276 | E += e_dcf * weight[2]
277 | return E
278 |
--------------------------------------------------------------------------------
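Note on the objective optimized in ```run``` above: each epoch combines the five energies with a simple schedule in which the fingertip-distance term is ramped up while the two contact-direction terms are annealed away,

$$E_t = (t+1)\,w_{dis}E_{dis} + \max(0,\;T-t)\,\bigl(w_{dct}E_{dct} + w_{dcf}E_{dcf}\bigr) + w_{net}E_{net} + w_{pen}E_{pen},$$

where $t$ is the current epoch and $T$ is the total number of epochs (this is read directly from lines 82-87 of the file).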
/DCoG/__init__.py:
--------------------------------------------------------------------------------
1 | from .DCoGModel import DCoGModel
2 |
--------------------------------------------------------------------------------
/DCoG/init_handpose.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/DCoG/init_handpose.npy
--------------------------------------------------------------------------------
/DCoG/init_quat.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/DCoG/init_quat.npy
--------------------------------------------------------------------------------
/GrainGrasp.py:
--------------------------------------------------------------------------------
1 | import time
2 | import torch
3 | import numpy as np
4 | import open3d as o3d
5 | from utils import annotate
6 | from utils import vis
7 | from utils import tools
8 | from utils import Load_obman
9 | from attrdict import AttrDict
10 | from DCoG import DCoGModel
11 | from PointCVAE import load_model as load_cvae_model
12 | from PointCVAE import inference as cvae_inference
13 | from config import cfgs
14 |
15 |
16 | class GrainGrasp:
17 | def __init__(self, dcog_config, cvae_path=None, device="cuda"):
18 | """
19 |         if cvae_path is None, the CVAE model will not be loaded
20 | """
21 | self.device = device
22 | self.dcog_model = DCoGModel(
23 | dcog_config.mano_path,
24 | dcog_config.init_handpose_path,
25 | dcog_config.init_quat_path,
26 | dcog_config.finger_index_path,
27 | dcog_config.tip_index_path,
28 | dcog_config.supnet_path,
29 | dcog_config.init_move_finger_idx,
30 | dcog_config.weights,
31 | device=self.device,
32 | )
33 | print("-----------------DCoG Model loaded successfully-----------------")
34 | if cvae_path is not None:
35 | self.cvae_model = load_cvae_model(cvae_path, requires_grad=False)
36 | self.cvae_model = self.cvae_model.eval().to(self.device)
37 | print("-----------------CVAE Model loaded successfully-----------------")
38 | else:
39 | self.cvae_model = None
40 |
41 | def inference_complete(self, obj_pc, epochs=300, select_finger_idx=[1, 2, 3, 4, 5], threshold=0.1):
42 | """
43 | obj_pc: Tensor, (N, 3)
44 | return: result, AttrDict
45 | """
46 | obj_pc = obj_pc.to(self.device)
47 | if self.cvae_model is None:
48 |             raise RuntimeError("You should load the CVAE model if you want to run this function.")
49 |
50 | obj_cls = cvae_inference(self.cvae_model, obj_pc)
51 | result = self.dcog_model.run(obj_pc, obj_cls, epochs, select_finger_idx)
52 | E_pen, min_idx = self.dcog_model.get_idx_minEpen(obj_pc, result.hand_pc, threshold)
53 | result.obj_cls = obj_cls.cpu().detach()
54 | result.E_pen = E_pen
55 | result.min_idx = min_idx
56 | result.min_idx_hand_pc = result.hand_pc[min_idx]
57 | result.min_idx_record_hand_pc = result.record_hand_pc[:, min_idx]
58 |
59 | return result
60 |
61 | def inference_only_opt(self, obj_pc, obj_cls=None, hand_pc=None, K=50, epochs=300, select_finger_idx=[1, 2, 3, 4, 5], threshold=0.1):
62 | """
63 | obj_pc: Tensor, (N, 3)
64 |         obj_cls: Tensor, (N,)
65 | hand_pc: Tensor, (M, 3)
66 | K: int, the number of the nearest neighbors
67 | return: result, AttrDict
68 | """
69 | obj_pc = obj_pc.to(self.device)
70 | if obj_cls is None:
71 | if hand_pc is None:
72 |                 raise RuntimeError("If you don't have 'obj_cls', you should at least provide 'hand_pc'.")
73 | print("'obj_cls' will be generated by the annotation method with K = {}".format(K))
74 | obj_cls, _ = annotate.get_obj_cls_and_colors(hand_pc, obj_pc, K=K, device=self.device)
75 | obj_cls = obj_cls.squeeze()
76 |
77 | obj_cls = obj_cls.to(self.device)
78 |
79 | result = self.dcog_model.run(obj_pc, obj_cls, epochs, select_finger_idx)
80 | E_pen, min_idx = self.dcog_model.get_idx_minEpen(obj_pc, result.hand_pc, threshold)
81 | result.obj_cls = obj_cls.cpu().detach()
82 | result.E_pen = E_pen
83 | result.min_idx = min_idx
84 | result.min_idx_hand_pc = result.hand_pc[min_idx]
85 | result.min_idx_record_hand_pc = result.record_hand_pc[:, min_idx]
86 |
87 | return result
88 |
89 |
90 | if __name__ == "__main__":
91 | pass
92 |
--------------------------------------------------------------------------------
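A minimal usage sketch of the ```GrainGrasp``` class above (the ```cfgs.dcog_config``` attribute name is an assumption for illustration; see ```run_complete.py``` for the reference entry point):

```python
import numpy as np
import torch
from GrainGrasp import GrainGrasp
from config import cfgs  # assumption: cfgs exposes a dcog_config with the fields used in __init__ above

# One of the bundled samples; any (N, 3) object point cloud works.
obj_pc = torch.tensor(np.load("sample/3/obj_pc.npy"), dtype=torch.float32)

grain_grasp = GrainGrasp(cfgs.dcog_config, cvae_path="PointCVAE/model.pth", device="cuda")
result = grain_grasp.inference_complete(obj_pc, epochs=300)

print(result.min_idx, result.E_pen)   # index of the grasp with the lowest penetration energy, and all energies
best_hand = result.min_idx_hand_pc    # (778, 3) vertices of the selected MANO hand
```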
/HandAnnotation/colors.json:
--------------------------------------------------------------------------------
1 | {"thumb": [1.0, 0.0, 0.0], "index": [0.0, 1.0, 0.0], "middle": [0.0, 0.0, 1.0], "ring": [1.0, 1.0, 0.0], "pinky": [1.0, 0.0, 1.0]}
--------------------------------------------------------------------------------
/HandAnnotation/finger_index.json:
--------------------------------------------------------------------------------
1 | {"thumb": [267, 31, 700, 699, 704, 714, 739, 740, 741, 743, 753, 754, 755, 756, 757, 760, 762, 763, 125, 768, 767], "index": [139, 280, 281, 165, 166, 170, 171, 301, 47, 48, 46, 49, 194, 195, 328, 329, 330, 332, 340, 341, 342, 343, 344, 347, 349, 350, 354, 355, 237, 238], "middle": [396, 397, 398, 402, 403, 410, 413, 435, 436, 438, 439, 440, 441, 442, 452, 453, 454, 455, 456, 459, 461, 462, 466, 467, 357, 358, 356, 359, 370, 371, 372, 373, 375, 376, 374, 379, 378, 380, 385, 386, 387], "ring": [524, 549, 550, 551, 553, 563, 564, 565, 566, 567, 568, 569, 570, 571, 572, 573, 577, 578, 579, 468, 469, 471, 470, 485, 486, 484, 487, 488, 489, 496, 497, 503, 502, 506, 507, 510, 513, 514], "pinky": [582, 614, 681, 682, 683, 684, 687, 689, 690, 625, 596, 630, 631, 601, 602, 607, 641]}
--------------------------------------------------------------------------------
/HandAnnotation/get_index.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 |
4 | sys.path.append(os.getcwd())
5 | import numpy as np
6 | import open3d as o3d
7 | import mano
8 | import torch
9 | import json
10 | import trimesh
11 | import os
12 |
13 | # args
14 | vis_pointcloud = False
15 | vis_mesh = True
16 | save = True
17 | root_path = "HandAnnotation"
18 |
19 |
20 | with torch.no_grad():
21 | rh_mano = mano.load(
22 | model_path="mano/models/MANO_RIGHT.pkl",
23 | model_type="mano",
24 | use_pca=True,
25 | num_pca_comps=45,
26 | batch_size=1,
27 | flat_hand_mean=True,
28 | )
29 | mano_vertices = rh_mano().vertices[0].numpy()
30 | mano_faces = rh_mano.faces
31 |
32 |
33 | def index_of_rgb(points_color, RGB):
34 | R, G, B = RGB
35 | id0 = np.argwhere(points_color[:, 0] == R).flatten().tolist()
36 | id1 = np.argwhere(points_color[:, 1] == G).flatten().tolist()
37 | id2 = np.argwhere(points_color[:, 2] == B).flatten().tolist()
38 | return list(set(id0).intersection(set(id1), set(id2)))
39 |
40 |
41 | def get_blender2mano(blender_faces):
42 | blender2mano = dict()
43 | for i in range(blender_faces.shape[0]):
44 | for j in range(3):
45 | blender2mano[blender_faces[i][j]] = mano_faces[i][j]
46 | return blender2mano
47 |
48 |
49 | def get_finger_colors(path="colors.json"):
50 | with open(path, "r") as f:
51 | finger_colors = json.loads(f.read())
52 | return finger_colors
53 |
54 |
55 | def get_finger_index(blender_colors, finger_colors, blender2mano):
56 | finger_index = dict()
57 | for key in finger_colors:
58 | blender_index = index_of_rgb(blender_colors, finger_colors[key])
59 | finger_index[key] = list(map(lambda x: int(blender2mano[x]), blender_index))
60 | return finger_index
61 |
62 |
63 | def get_save_index_path(root_path, annotation_mesh_path):
64 | if "tip" in annotation_mesh_path:
65 | save_index_path = os.path.join(root_path, "tips_index")
66 | elif "finger" in annotation_mesh_path:
67 | save_index_path = os.path.join(root_path, "fingers_index")
68 | else:
69 | raise ValueError("annotation_mesh_path should contain tip or finger")
70 | return save_index_path
71 |
72 |
73 | if __name__ == "__main__":
74 |
75 | annotation_mesh_path = os.path.join(
76 | root_path, "colored_fingers.ply"
77 | ) # colored_tips or colored_fingers
78 | annotation_colors_path = os.path.join(root_path, "colors.json")
79 | save_index_path = get_save_index_path(root_path, annotation_mesh_path)
80 |
81 | colored_mesh = o3d.io.read_triangle_mesh(annotation_mesh_path)
82 | # colored_hand from blender, vertices index is disorder with mano.
83 | blender_faces = np.asarray(colored_mesh.triangles)
84 | blender2mano = get_blender2mano(blender_faces)
85 | # print(blender2mano)
86 |
87 | blender_colors = np.asarray(colored_mesh.vertex_colors)
88 | finger_colors = get_finger_colors(annotation_colors_path)
89 | finger_index = get_finger_index(blender_colors, finger_colors, blender2mano)
90 |
91 | if vis_mesh or vis_pointcloud:
92 | mano_colors = np.zeros_like(mano_vertices)
93 | for key in finger_colors:
94 | mano_colors[finger_index[key]] = finger_colors[key]
95 |
96 | if vis_pointcloud:
97 | pcd = o3d.geometry.PointCloud()
98 | pcd.points = o3d.utility.Vector3dVector(mano_vertices)
99 | pcd.colors = o3d.utility.Vector3dVector(mano_colors)
100 | o3d.visualization.draw_geometries([pcd])
101 |
102 | if vis_mesh:
103 | mesh = o3d.geometry.TriangleMesh()
104 | mesh.vertices = o3d.utility.Vector3dVector(mano_vertices)
105 | mesh.vertex_colors = o3d.utility.Vector3dVector(mano_colors)
106 | mesh.triangles = o3d.utility.Vector3iVector(mano_faces)
107 | o3d.visualization.draw_geometries([mesh])
108 |
109 | if save:
110 | save_index_path += ".json"
111 | with open(save_index_path, "w") as f:
112 | f.write(json.dumps(finger_index))
113 | print("have saved to {}".format(save_index_path))
114 |
--------------------------------------------------------------------------------
/HandAnnotation/tip_index.json:
--------------------------------------------------------------------------------
1 | {"thumb": [740, 743, 756, 760, 762, 763, 768, 767, 739], "index": [328, 329, 332, 343, 347, 349, 350, 354, 355], "middle": [455, 459, 461, 462, 466, 435, 436, 467, 438, 439, 442], "ring": [549, 550, 553, 566, 569, 570, 571, 572, 573, 577, 578], "pinky": [687, 689, 690, 683]}
--------------------------------------------------------------------------------
/PointCVAE/__init__.py:
--------------------------------------------------------------------------------
1 | from .network.cave import VAE
2 | from .network.pointnet_encoder import PointNetEncoder
3 | from .network.point_cvae_net import PointCVAENet
4 | from .obman_dataset import obman
5 |
6 | from .inference import load_model, inference
7 |
--------------------------------------------------------------------------------
/PointCVAE/inference.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 |
4 | sys.path.append(os.getcwd())
5 | import torch
6 | import numpy as np
7 | from PointCVAE import PointCVAENet
8 | from utils import annotate
9 |
10 |
11 | def load_model(model_path, requires_grad=False):
12 | model = PointCVAENet([1024, 512, 256], 1024, [512, 256, 128, 64, 6], 6, 64)
13 | param = torch.load(model_path)
14 |     # Build the state dict for inference: strip any DataParallel "module." prefix
15 |     # from the keys and skip the training-only decoder / reconstruction weights.
16 |     weights_dict = {}
21 | for k, v in param["network"].items():
22 | if "cls_decoder" in k:
23 | continue
24 | elif "recon" in k:
25 | continue
26 | else:
27 | new_k = k.replace("module.", "") if "module" in k else k
28 | weights_dict[new_k] = v
29 | model.load_state_dict(weights_dict)
30 | for param in model.parameters():
31 | param.requires_grad = requires_grad
32 | return model
33 |
34 |
35 | def inference(model, obj_pc):
36 | # obj_pc : [B, 3, N]
37 | input_dim = obj_pc.dim()
38 | if input_dim == 2:
39 | obj_pc = obj_pc.unsqueeze(0)
40 | if obj_pc.shape[1] != 3:
41 | obj_pc = obj_pc.transpose(2, 1).contiguous()
42 | with torch.no_grad():
43 | _, _, cls_pred = model(obj_pc.detach())
44 | cls_pred = cls_pred.max(dim=1, keepdim=False)[1]
45 | if input_dim == 2:
46 | cls_pred = cls_pred.squeeze(0)
47 | return cls_pred.detach()
48 |
49 |
50 | if __name__ == "__main__":
51 | model_path = "PointCVAE/model_best_val.pth"
52 | model = load_model(model_path)
53 | model.eval()
54 |
55 | save_root = "pre_results"
56 | K = 50
57 | mode = "train"
58 | idx = 50
59 | obj_pc = np.load("Data/processed/{}/{}/obj_pc/{}.npy".format(K, mode, idx))
60 | obj_pc_cls = np.load("Data/processed/{}/{}/obj_cls/{}.npy".format(K, mode, idx))
61 | hand_pc = np.load("Data/processed/{}/{}/hand_pc/{}.npy".format(K, mode, idx))
62 | obj_pc = torch.Tensor(obj_pc).unsqueeze_(0)
63 | hand_pc = torch.Tensor(hand_pc)
64 | obj_pc_cls = torch.Tensor(obj_pc_cls)
65 |
66 | cls_pred = inference(model, obj_pc)
67 | obj_pc_ = obj_pc.transpose(2, 1).detach()
68 | obj_colors = annotate.get_obj_colors(cls_pred)
69 | obj_colors_true = annotate.get_obj_colors(obj_pc_cls)
70 | pcd_hand = annotate.get_o3d_pcd(hand_pc, vis=False)
71 | pcd_obj = annotate.get_o3d_pcd(obj_pc_, obj_colors, vis=False)
72 | pcd_obj_true = annotate.get_o3d_pcd(obj_pc_, obj_colors_true, vis=False)
73 | annotate.vis_HandObject(pcd_hand, pcd_obj, window_name="pred_cls")
74 |
75 | if not os.path.exists(save_root):
76 | os.makedirs(save_root)
77 | np.save(os.path.join(save_root, "{}.npy".format(idx)), cls_pred.cpu().numpy())
78 |
--------------------------------------------------------------------------------
/PointCVAE/model.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/PointCVAE/model.pth
--------------------------------------------------------------------------------
/PointCVAE/network/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/PointCVAE/network/__init__.py
--------------------------------------------------------------------------------
/PointCVAE/network/__pycache__/CVAE.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/PointCVAE/network/__pycache__/CVAE.cpython-39.pyc
--------------------------------------------------------------------------------
/PointCVAE/network/__pycache__/PointCVAENet.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/PointCVAE/network/__pycache__/PointCVAENet.cpython-39.pyc
--------------------------------------------------------------------------------
/PointCVAE/network/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/PointCVAE/network/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/PointCVAE/network/__pycache__/cave.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/PointCVAE/network/__pycache__/cave.cpython-39.pyc
--------------------------------------------------------------------------------
/PointCVAE/network/__pycache__/point_cvae_net.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/PointCVAE/network/__pycache__/point_cvae_net.cpython-39.pyc
--------------------------------------------------------------------------------
/PointCVAE/network/__pycache__/pointnet_encoder.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/PointCVAE/network/__pycache__/pointnet_encoder.cpython-39.pyc
--------------------------------------------------------------------------------
/PointCVAE/network/cave.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 |
5 | class VAE(nn.Module):
6 |
7 | def __init__(
8 | self,
9 | encoder_layer_sizes,
10 | latent_size,
11 | decoder_layer_sizes,
12 | conditional=True,
13 | condition_size=1088,
14 | ):
15 | super().__init__()
16 |
17 | if conditional:
18 | assert condition_size > 0
19 |
20 | assert type(encoder_layer_sizes) == list
21 | assert type(latent_size) == int
22 | assert type(decoder_layer_sizes) == list
23 |
24 | self.latent_size = latent_size
25 |
26 | self.encoder = Encoder(
27 | encoder_layer_sizes, latent_size, conditional, condition_size
28 | )
29 | self.decoder = Decoder(
30 | decoder_layer_sizes, latent_size, conditional, condition_size
31 | )
32 |
33 | def forward(self, x, c=None):
34 |
35 | batch_size = x.size(0)
36 |
37 | means, log_var = self.encoder(x, c)
38 |
39 | std = torch.exp(0.5 * log_var)
40 | eps = torch.randn([batch_size, self.latent_size], device=means.device)
41 | z = eps * std + means
42 |
43 | recon_x = self.decoder(z, c) # (B,latent_size),(B,1088,N)
44 |
45 | return recon_x, means, log_var, z
46 |
47 | def inference(self, n=1, c=None):
48 | # batch_size = n
49 | z = torch.randn([n, self.latent_size], device=c.device)
50 | recon_x = self.decoder(z, c)
51 |
52 | return recon_x
53 |
54 |
55 | class Encoder(nn.Module):
56 |
57 | def __init__(self, layer_sizes, latent_size, conditional, condition_size):
58 |
59 | super().__init__()
60 |
61 | self.conditional = conditional
62 | if self.conditional:
63 | layer_sizes[0] += condition_size # emb_dim+576
64 |
65 | self.MLP = nn.Sequential()
66 |
67 | for i, (in_size, out_size) in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):
68 | self.MLP.add_module(
69 | name="L{:d}".format(i), module=nn.Conv1d(in_size, out_size, 1)
70 | )
71 | self.MLP.add_module(name="B{:d}".format(i), module=nn.BatchNorm1d(out_size))
72 | self.MLP.add_module(name="A{:d}".format(i), module=nn.ReLU())
73 |
74 | self.linear_means = nn.Linear(layer_sizes[-1], latent_size)
75 | self.linear_log_var = nn.Linear(layer_sizes[-1], latent_size)
76 | # print('encoder', self.MLP)
77 |
78 | def forward(self, x, c=None):
79 |
80 | if self.conditional:
81 | x = torch.cat((x, c), dim=1) # [B, emb_dim+576,N]
82 |
83 | x = self.MLP(x)
84 | x = torch.max(x, 2, keepdim=True)[0].squeeze(-1)
85 | means = self.linear_means(x)
86 | log_vars = self.linear_log_var(x)
87 |
88 | return means, log_vars
89 |
90 |
91 | class Decoder(nn.Module):
92 |
93 | def __init__(self, layer_sizes, latent_size, conditional, condition_size):
94 | super().__init__()
95 |
96 | self.MLP = nn.Sequential()
97 |
98 | self.conditional = conditional
99 | if self.conditional:
100 | input_size = latent_size + condition_size
101 | else:
102 | input_size = latent_size
103 |
104 | for i, (in_size, out_size) in enumerate(
105 | zip([input_size] + layer_sizes[:-1], layer_sizes)
106 | ):
107 | self.MLP.add_module(
108 | name="L{:d}".format(i), module=nn.Conv1d(in_size, out_size, 1)
109 | )
110 | if i + 1 < len(layer_sizes):
111 |
112 | self.MLP.add_module(name="A{:d}".format(i), module=nn.ReLU())
113 |
114 | def forward(self, z, c):
115 |
116 | N = c.shape[-1]
117 | if self.conditional:
118 | z = z.unsqueeze(-1).repeat(1, 1, N)
119 | z = torch.cat((z, c), dim=1)
120 | x = self.MLP(z)
121 |
122 | return x
123 |
124 |
125 | if __name__ == "__main__":
126 | pass
127 |
--------------------------------------------------------------------------------
/PointCVAE/network/point_cvae_net.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.parallel
4 | import torch.utils.data
5 | from PointCVAE import PointNetEncoder
6 | from PointCVAE import VAE
7 |
8 |
9 | class PointCVAENet(nn.Module):
10 | def __init__(
11 | self,
12 | cvae_encoder_sizes=[512, 512, 256],
13 | cvae_latent_size=1024,
14 | cvae_decoder_sizes=[512, 256, 128, 64, 6],
15 | cls_num=6,
16 | emb_dim=128,
17 | ):
18 | super(PointCVAENet, self).__init__()
19 | self.cvae_encoder_sizes = cvae_encoder_sizes
20 | self.cvae_encoder_sizes[0] = emb_dim
21 | self.cvae_latent_size = cvae_latent_size
22 | self.cvae_decoder_sizes = cvae_decoder_sizes
23 | self.cvae_decoder_sizes[0] = cvae_latent_size
24 | self.cvae_decoder_sizes[-1] = cls_num
25 | self.cvae_condition_size = 576
26 | self.cls_num = cls_num
27 | self.emb_dim = emb_dim
28 |
29 | self.cls_embedding = nn.Embedding(cls_num, emb_dim)
30 | self.obj_encoder = PointNetEncoder(
31 | global_feat=False, feature_transform=False, channel=3
32 | )
33 |
34 | self.cvae = VAE(
35 | encoder_layer_sizes=self.cvae_encoder_sizes,
36 | latent_size=self.cvae_latent_size,
37 | decoder_layer_sizes=self.cvae_decoder_sizes,
38 | condition_size=self.cvae_condition_size,
39 | )
40 |
41 | def forward(self, obj_pc, obj_cls=None):
42 | """
43 | :param obj_pc: [B, 3, N]
44 | :return: reconstructed object class
45 | """
46 | if len(obj_pc.shape) == 2:
47 | obj_pc = obj_pc.unsqueeze(0)
48 |
49 | x_feature, rot, tran, _ = self.obj_encoder(obj_pc)
50 | if self.training:
51 | obj_cls_emb = (
52 | self.cls_embedding(obj_cls).permute(0, 2, 1).contiguous()
53 | ) # [B,N]->[B,emb_dim,N]
54 | obj_cls_pred, means, log_var, z = self.cvae(obj_cls_emb, x_feature)
55 | return rot, tran, obj_cls_pred, means, log_var, z
56 | else:
57 | # inference
58 | obj_cls_pred = self.cvae.inference(obj_pc.shape[0], x_feature)
59 | return rot, tran, obj_cls_pred
60 |
61 |
62 | if __name__ == "__main__":
63 | pass
64 |
--------------------------------------------------------------------------------
/PointCVAE/network/pointnet_encoder.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.parallel
4 | import torch.utils.data
5 | from torch.autograd import Variable
6 | import numpy as np
7 | import torch.nn.functional as F
8 |
9 |
10 | class STN3d(nn.Module):
11 | def __init__(self, channel):
12 | super(STN3d, self).__init__()
13 | self.conv1 = torch.nn.Conv1d(channel, 64, 1)
14 | self.conv2 = torch.nn.Conv1d(64, 128, 1)
15 | self.conv3 = torch.nn.Conv1d(128, 1024, 1)
16 | self.fc1 = nn.Linear(1024, 512)
17 | self.fc2 = nn.Linear(512, 256)
18 | self.fc3 = nn.Linear(256, 12)
19 | self.relu = nn.ReLU()
20 |
21 | self.bn1 = nn.BatchNorm1d(64)
22 | self.bn2 = nn.BatchNorm1d(128)
23 | self.bn3 = nn.BatchNorm1d(1024)
24 | self.bn4 = nn.BatchNorm1d(512)
25 | self.bn5 = nn.BatchNorm1d(256)
26 |
27 | def forward(self, x):
28 | batchsize = x.size()[0]
29 | x = F.relu(self.bn1(self.conv1(x)))
30 | x = F.relu(self.bn2(self.conv2(x)))
31 | x = F.relu(self.bn3(self.conv3(x)))
32 | x = torch.max(x, 2, keepdim=False)[0]
33 | # x = x.view(-1, 1024)
34 | x = F.relu(self.bn4(self.fc1(x)))
35 | x = F.relu(self.bn5(self.fc2(x)))
36 | x = self.fc3(x)
37 |
38 | iden = (
39 | Variable(
40 | torch.from_numpy(
41 | np.array([1, 0, 0, 0, 1, 0, 0, 0, 1]).astype(np.float32)
42 | )
43 | )
44 | .view(1, 9)
45 | .repeat(batchsize, 1)
46 | )
47 | if x.is_cuda:
48 | iden = iden.cuda()
49 | # print(x.shape)
50 | x, y = x[:, :9] + iden, x[:, 9:]
51 | x = x.view(-1, 3, 3)
52 | y = y.view(-1, 1, 3)
53 | return x, y
54 |
55 |
56 | class STNkd(nn.Module):
57 | def __init__(self, k=64):
58 | super(STNkd, self).__init__()
59 | self.conv1 = torch.nn.Conv1d(k, 64, 1)
60 | self.conv2 = torch.nn.Conv1d(64, 128, 1)
61 | self.conv3 = torch.nn.Conv1d(128, 1024, 1)
62 | self.fc1 = nn.Linear(1024, 512)
63 | self.fc2 = nn.Linear(512, 256)
64 | self.fc3 = nn.Linear(256, k * k)
65 | self.relu = nn.ReLU()
66 |
67 | self.bn1 = nn.BatchNorm1d(64)
68 | self.bn2 = nn.BatchNorm1d(128)
69 | self.bn3 = nn.BatchNorm1d(1024)
70 | self.bn4 = nn.BatchNorm1d(512)
71 | self.bn5 = nn.BatchNorm1d(256)
72 |
73 | self.k = k
74 |
75 | def forward(self, x):
76 | batchsize = x.size()[0]
77 | x = F.relu(self.bn1(self.conv1(x)))
78 | x = F.relu(self.bn2(self.conv2(x)))
79 | x = F.relu(self.bn3(self.conv3(x)))
80 | x = torch.max(x, 2, keepdim=True)[0]
81 | x = x.view(-1, 1024)
82 |         # print(x)
83 | x = F.relu(self.bn4(self.fc1(x)))
84 | x = F.relu(self.bn5(self.fc2(x)))
85 | x = self.fc3(x)
86 |
87 | iden = (
88 | Variable(torch.from_numpy(np.eye(self.k).flatten().astype(np.float32)))
89 | .view(1, self.k * self.k)
90 | .repeat(batchsize, 1)
91 | )
92 | if x.is_cuda:
93 | iden = iden.cuda()
94 | x = x + iden
95 | x = x.view(-1, self.k, self.k)
96 | return x
97 |
98 |
99 | class PointNetEncoder(nn.Module):
100 | def __init__(self, global_feat=True, feature_transform=False, channel=3):
101 | super(PointNetEncoder, self).__init__()
102 | self.stn = STN3d(channel)
103 | self.conv1 = torch.nn.Conv1d(channel, 64, 1)
104 | self.conv2 = torch.nn.Conv1d(64, 128, 1)
105 | self.conv3 = torch.nn.Conv1d(128, 512, 1)
106 | self.bn1 = nn.BatchNorm1d(64)
107 | self.bn2 = nn.BatchNorm1d(128)
108 | self.bn3 = nn.BatchNorm1d(512)
109 | self.global_feat = global_feat
110 | self.feature_transform = feature_transform
111 | if self.feature_transform:
112 | self.fstn = STNkd(k=64)
113 |
114 | def forward(self, x):
115 | B, D, N = x.size()
116 | rot, tran = self.stn(x)
117 | x = x.transpose(2, 1).contiguous()
118 | if D > 3:
119 | x, feature = x[..., :3], x[..., 3:] # x.split(3,dim=2)
120 | x = torch.bmm(x, rot) + tran
121 | if D > 3:
122 | x = torch.cat([x, feature], dim=2)
123 | x = x.transpose(2, 1).contiguous()
124 | x = F.relu(self.bn1(self.conv1(x)))
125 |
126 | if self.feature_transform:
127 | trans_feat = self.fstn(x)
128 | x = x.transpose(2, 1)
129 | x = torch.bmm(x, trans_feat)
130 | x = x.transpose(2, 1).contiguous()
131 | else:
132 | trans_feat = None
133 | pointfeat = x
134 |
135 | x = F.relu(self.bn2(self.conv2(x)))
136 | x = self.bn3(self.conv3(x))
137 |         x = torch.max(x, 2, keepdim=True)[0]  # [B, 512, N] -> [B, 512, 1]
138 |         if self.global_feat:
139 |             return x.view(-1, 512), rot, tran, trans_feat
140 |         else:
141 |             x = x.repeat(1, 1, N)
142 |             return torch.cat([x, pointfeat], 1), rot, tran, trans_feat  # (B, 576, N)
143 |
144 |
145 | if __name__ == "__main__":
146 | pass
147 |
--------------------------------------------------------------------------------
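A quick shape check for the encoder above (a sketch; run from the repository root so the ```PointCVAE``` package imports resolve):

```python
import torch
from PointCVAE import PointNetEncoder

enc = PointNetEncoder(global_feat=False, feature_transform=False, channel=3).eval()
x = torch.randn(2, 3, 1024)      # [B, 3, N] object point clouds
feat, rot, tran, _ = enc(x)
print(feat.shape)                # torch.Size([2, 576, 1024]) -> per-point features used as the CVAE condition
print(rot.shape, tran.shape)     # torch.Size([2, 3, 3]) torch.Size([2, 1, 3]) predicted rotation and translation
```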
/PointCVAE/obman_dataset.py:
--------------------------------------------------------------------------------
1 | from torch.utils.data import Dataset
2 | import torch
3 | import os
4 | import numpy as np
5 |
6 |
7 | class obman(Dataset):
8 | def __init__(self, obj_pc_path, obj_cls_path):
9 | self.obj_pc_path = obj_pc_path
10 | self.obj_cls_path = obj_cls_path
11 | self.file_list = os.listdir(self.obj_pc_path)
12 |
13 | def __len__(self):
14 | return len(self.file_list)
15 |
16 | def __getitem__(self, idx):
17 | # obj_pc
18 | obj_pc = np.load(os.path.join(self.obj_pc_path, self.file_list[idx]))
19 | obj_cls = np.load(os.path.join(self.obj_cls_path, self.file_list[idx]))
20 | obj_pc = torch.tensor(obj_pc, dtype=torch.float32)
21 | obj_cls = torch.tensor(obj_cls, dtype=torch.long)
22 | return (obj_pc, obj_cls)
23 |
--------------------------------------------------------------------------------
/PointCVAE/train.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 |
4 | sys.path.append(os.getcwd())
5 | import time
6 | import torch
7 | from torch.optim import lr_scheduler
8 | from torch.utils.data import DataLoader
9 | from collections import defaultdict
10 | from PointCVAE import obman
11 | from PointCVAE import PointCVAENet
12 | import numpy as np
13 | import random
14 | from utils import tools
15 | from utils.loss import CVAE_loss, transform_loss
16 | from config import cfgs
17 |
18 |
19 | def train(cfg, epoch, model, train_loader, device, optimizer, log_root):
20 | since = time.time()
21 | logs = defaultdict(list)
22 | w_cross, w_dice, w_kd, w_rot = cfg.loss_weight
23 | model.train()
24 | for batch_idx, (obj_pc, obj_cls) in enumerate(train_loader):
25 | obj_pc, obj_cls = obj_pc.to(device), obj_cls.to(device)
26 | optimizer.zero_grad()
27 | rot, tran, obj_cls_pred, means, log_var, z = model(obj_pc, obj_cls)
28 | recon_cls_loss, KLD_loss = CVAE_loss(obj_cls_pred, obj_cls, means, log_var, w_cross, w_dice, "train")
29 | rot_loss = transform_loss(rot)
30 | loss = recon_cls_loss + w_kd * KLD_loss + w_rot * rot_loss # rot_loss from training pointnet
31 | loss.backward()
32 | optimizer.step()
33 | logs["loss"].append(loss.item())
34 | logs["recon_cls_loss"].append(recon_cls_loss.item())
35 | logs["KLD_loss"].append(KLD_loss.item())
36 | logs["rot_loss"].append(rot_loss.item())
37 | if batch_idx % cfg.print_every == 0 or batch_idx == len(train_loader) - 1:
38 | print(
39 | "Train Epoch {:02d}/{:02d}, Batch {:04d}/{:d}, Total Loss {:9.5f}, Cls Loss {:9.5f}, KLD_loss {:9.5f}, rot_loss {:9.5f}".format(
40 | epoch,
41 | cfg.epochs,
42 | batch_idx,
43 | len(train_loader) - 1,
44 | loss.item(),
45 | recon_cls_loss.item(),
46 | w_kd * KLD_loss.item(),
47 | w_rot * rot_loss.item(),
48 | )
49 | )
50 |
51 | time_elapsed = time.time() - since
52 |     out_str = "Epoch: {:02d}/{:02d}, train, time {:.0f}m, Mean Total Loss {:9.5f}, Cls Loss {:9.5f}, KLD_loss {:9.5f}, rot_loss {:9.5f}".format(
53 | epoch,
54 | cfg.epochs,
55 | time_elapsed // 60,
56 | sum(logs["loss"]) / len(logs["loss"]),
57 | sum(logs["recon_cls_loss"]) / len(logs["recon_cls_loss"]),
58 | sum(logs["KLD_loss"]) / len(logs["KLD_loss"]),
59 | sum(logs["rot_loss"]) / len(logs["rot_loss"]),
60 | )
61 | with open(log_root, "a") as f:
62 | f.write(out_str + "\n")
63 |
64 |
65 | def val(cfg, epoch, model, val_loader, device, log_root, checkpoint_root, best_val_loss, mode="val"):
66 | # validation
67 | w_cross, w_dice, w_kd, w_rot = cfg.loss_weight
68 | total_recon_cls_loss = 0.0
69 | model.eval()
70 | with torch.no_grad():
71 | for batch_idx, (obj_pc, obj_cls) in enumerate(val_loader):
72 | obj_pc, obj_cls = obj_pc.to(device), obj_cls.to(device)
73 |             _, _, obj_cls_pred = model(obj_pc, obj_cls)  # [B, cls_num, N] per-point class logits
74 | recon_cls_loss, _ = CVAE_loss(obj_cls_pred, obj_cls, None, None, w_cross, w_dice, "test")
75 | total_recon_cls_loss += recon_cls_loss.item()
76 | mean_recon_cls_loss = total_recon_cls_loss / len(val_loader)
77 |     if mean_recon_cls_loss < best_val_loss:
78 |         best_val_loss = mean_recon_cls_loss
79 |         save_name = os.path.join(checkpoint_root, "model_best_{}.pth".format(str(best_val_loss)))
80 |         torch.save({"network": model.state_dict(), "epoch": epoch}, save_name)
81 |
82 |     out_str = "Epoch: {:02d}/{:02d}, {}, Best Recon cls Loss: {:9.5f}".format(epoch, cfg.epochs, mode, best_val_loss)
83 | print(out_str)
84 | with open(log_root, "a") as f:
85 | f.write(out_str + "\n")
86 | return best_val_loss
87 |
88 |
89 | if __name__ == "__main__":
90 | # config
91 | cfg = cfgs.cvae_config
92 | cfg.K = cfgs.obman_config.K
93 | del cfgs
94 | # log file
95 | local_time = time.localtime(time.time())
96 | time_str = str(local_time[1]) + "_" + str(local_time[2]) + "_" + str(local_time[3])
97 | model_info = "W{}".format(str(cfg.loss_weight))
98 | save_root = os.path.join("logs", cfg.model_type, time_str + "_" + model_info)
99 | tools.check_dir(save_root)
100 | log_root = save_root + "/log.txt"
101 | log_file = open(log_root, "w+")
102 | log_file.write(str(cfg) + "\n")
103 | log_file.write("weights for recon_cls_loss, KLD_loss, rot_loss are {}".format(str(cfg.loss_weight)) + "\n")
104 | log_file.close()
105 |
106 | # seed
107 | torch.manual_seed(cfg.seed)
108 | if torch.cuda.is_available():
109 | torch.cuda.manual_seed(cfg.seed)
110 | np.random.seed(cfg.seed)
111 | random.seed(cfg.seed)
112 |
113 | # device
114 | use_cuda = cfg.use_cuda and torch.cuda.is_available()
115 | device = torch.device("cuda" if use_cuda else "cpu")
116 | print("using device", device)
117 | device_num = 1
118 |
119 | # network
120 | model = PointCVAENet(
121 | cvae_encoder_sizes=list(cfg.encoder_layer_sizes),
122 | cvae_latent_size=cfg.latent_size,
123 | cvae_decoder_sizes=list(cfg.decoder_layer_sizes),
124 | cls_num=cfg.cls_num,
125 | emb_dim=cfg.emb_dim,
126 | ).to(device)
127 |
128 | # multi-gpu
129 | if device == torch.device("cuda"):
130 | torch.backends.cudnn.benchmark = True
131 | device_ids = range(torch.cuda.device_count())
132 | print("using {} cuda".format(len(device_ids)))
133 | if len(device_ids) > 1:
134 | model = torch.nn.DataParallel(model)
135 | device_num = len(device_ids)
136 |
137 | # dataset
138 | if "Train" in cfg.train_mode:
139 | obj_pc_path = "Data/processed/{}/{}/obj_pc".format(cfg.K, "train")
140 | obj_cls_path = "Data/processed/{}/{}/obj_cls".format(cfg.K, "train")
141 | train_dataset = obman(obj_pc_path, obj_cls_path)
142 | train_loader = DataLoader(dataset=train_dataset, batch_size=cfg.batch_size, shuffle=True, num_workers=cfg.dataloader_workers)
143 | if "Val" in cfg.train_mode:
144 | obj_pc_path = "Data/processed/{}/{}/obj_pc".format(cfg.K, "val")
145 | obj_cls_path = "Data/processed/{}/{}/obj_cls".format(cfg.K, "val")
146 | val_dataset = obman(obj_pc_path, obj_cls_path)
147 | val_loader = DataLoader(dataset=val_dataset, batch_size=cfg.batch_size, shuffle=False, num_workers=cfg.dataloader_workers)
148 | if "Test" in cfg.train_mode:
149 | obj_pc_path = "Data/processed/{}/{}/obj_pc".format(cfg.K, "test")
150 | obj_cls_path = "Data/processed/{}/{}/obj_cls".format(cfg.K, "test")
151 | eval_dataset = obman(obj_pc_path, obj_cls_path)
152 | eval_loader = DataLoader(dataset=eval_dataset, batch_size=cfg.batch_size, shuffle=False, num_workers=cfg.dataloader_workers)
153 |
154 | # optimizer
155 | optimizer = torch.optim.Adam(model.parameters(), lr=cfg.learning_rate)
156 | scheduler = lr_scheduler.MultiStepLR(
157 | optimizer,
158 | milestones=[round(cfg.epochs * x) for x in [0.3, 0.6, 0.8, 0.9]],
159 | gamma=0.5,
160 | )
161 |
162 | best_val_loss = float("inf")
163 | best_eval_loss = float("inf")
164 | for epoch in range(1, cfg.epochs + 1):
165 |         print("Begin Train epoch={}".format(epoch))
166 | if "Train" in cfg.train_mode:
167 | train(cfg, epoch, model, train_loader, device, optimizer, log_root)
168 | scheduler.step()
169 | if "Val" in cfg.train_mode or "Test" in cfg.train_mode:
170 | print("Begin Val epoch={}".format(epoch))
171 | best_val_loss = val(cfg, epoch, model, val_loader, device, log_root, save_root, best_val_loss, "val")
172 | if "Test" in cfg.train_mode:
173 | best_val_loss = val(cfg, epoch, model, eval_loader, device, log_root, save_root, best_val_loss, "test")
174 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # GrainGrasp
2 | ### [GrainGrasp: Dexterous Grasp Generation with Fine-grained Contact Guidance](https://arxiv.org/abs/2405.09310)
3 |
4 | ## Requirements:
5 | The ```Python``` version for this project is ```3.9.18```.
6 | We used the following packages:
7 | ```
8 | numpy==1.23.0
9 | pytorch==2.0.1+cu118
10 | open3d==0.17.0
11 | trimesh==4.1.7
12 | attrdict==2.0.1
13 | mano
14 | pytorch3d==0.7.5
15 | ```
16 |
17 | ```mano```: The version of ```MANO``` we use is slightly different from [the one provided by Omid Taheri](https://github.com/otaheri/MANO), so please use the version included in this project. Download the MANO model files from the [MANO](http://mano.is.tue.mpg.de/) website, then put ```MANO_RIGHT.pkl``` into ```mano/models/``` in this project directory. This path can be customized [here](config.json#L6).
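To quickly check that the model file is in place, here is a minimal sketch mirroring how [HandAnnotation/get_index.py](HandAnnotation/get_index.py) loads it (run from the repository root):

```Python
import mano

# Assumes MANO_RIGHT.pkl has been placed in mano/models/ as described above.
rh_mano = mano.load(
    model_path="mano/models/MANO_RIGHT.pkl",
    model_type="mano",
    use_pca=True,
    num_pca_comps=45,
    batch_size=1,
    flat_hand_mean=True,
)
print(rh_mano().vertices.shape)  # expected: torch.Size([1, 778, 3])
```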
18 |
19 | ```pytorch3d```: Installation instructions for pytorch3d can be found in [pytorch3d](https://github.com/facebookresearch/pytorch3d).
20 |
21 | ## Run Example
22 | ```Python
23 | python run_complete.py -i=3 -s=1234
24 | ```
25 | You may see results similar to the following:
26 |
27 | 
28 |
29 |
30 | ```Python
31 | python run_only_opt.py -i=2 -s=134
32 | ```
33 | You may see results similar to the following:
34 |
35 | 
36 |
37 | **Note: Due to the randomness, different results may be generated. The images are for reference only.**
38 |
39 |
40 | ## Training Code
41 | If you intend to retrain the model, please download the [obman dataset](https://www.di.ens.fr/willow/research/obman/data/) and [ShapeNetCore.v2](https://shapenet.org/), and then set their paths in the code.
42 |
43 | Place the decompressed ```obman``` directory and the compressed
44 | ```ShapeNetCore.v2.zip``` file in the ```Data``` directory (you need to create it manually). These paths can be customized [here](config.json#L26-L27).
45 |
46 | The data processing part can be found in [dataprocess.py](dataprocess.py). We recommend incorporating the point cloud sampling steps into the training process rather than performing them once before training; better training strategies will generate better results. A possible end-to-end sequence is sketched below.
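A sketch of one possible sequence, assuming each script is run from the repository root (they call ```sys.path.append(os.getcwd())```) and that ```dataprocess.py``` is invoked directly as a script:

```Python
python dataprocess.py      # build the processed point clouds under Data/processed/
python PointCVAE/train.py  # train the point-cloud CVAE
python SupNet/train.py     # train the grasp support network (SupNet)
```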
47 |
48 | ## Configurations
49 | The configurations for the experiments can be found and modified in [config.json](config.json).
50 |
51 |
52 |
53 |
54 | ## Citation
55 |
56 | ```
57 | @INPROCEEDINGS{zhao2024graingrasp,
58 | author={Zhao, Fuqiang and Tsetserukou, Dzmitry and Liu, Qian},
59 | booktitle={2024 IEEE International Conference on Robotics and Automation (ICRA)},
60 | title={GrainGrasp: Dexterous Grasp Generation with Fine-grained Contact Guidance},
61 | year={2024},
62 | volume={},
63 | number={},
64 | pages={6470-6476},
65 | doi={10.1109/ICRA57147.2024.10610035}}
66 | ```
67 |
--------------------------------------------------------------------------------
/SupNet/__init__.py:
--------------------------------------------------------------------------------
1 | from .network.pointnet_encoder import PointNetEncoder
2 | from .network.supnet import SupNet
3 | from .inference import load_model
4 | from .obman_dataset import obman
5 |
--------------------------------------------------------------------------------
/SupNet/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/SupNet/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/SupNet/__pycache__/inference.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/SupNet/__pycache__/inference.cpython-39.pyc
--------------------------------------------------------------------------------
/SupNet/__pycache__/load.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/SupNet/__pycache__/load.cpython-39.pyc
--------------------------------------------------------------------------------
/SupNet/__pycache__/obman_dataset.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/SupNet/__pycache__/obman_dataset.cpython-39.pyc
--------------------------------------------------------------------------------
/SupNet/__pycache__/pointnet_encoder.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/SupNet/__pycache__/pointnet_encoder.cpython-39.pyc
--------------------------------------------------------------------------------
/SupNet/__pycache__/supnet.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/SupNet/__pycache__/supnet.cpython-39.pyc
--------------------------------------------------------------------------------
/SupNet/inference.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 |
4 | sys.path.append(os.getcwd())
5 | from SupNet import SupNet
6 | from utils.loss import *
7 |
8 |
9 | def load_model(model_path, requires_grad=False):
10 | param = torch.load(model_path)
11 | weights_dict = {}
12 | for k, v in param["network"].items():
13 | new_k = k.replace("module.", "") if "module" in k else k
14 | weights_dict[new_k] = v
15 | model = SupNet()
16 | model.load_state_dict(weights_dict)
17 | for param in model.parameters():
18 | param.requires_grad = requires_grad
19 | return model
20 |
21 |
22 | if __name__ == "__main__":
23 |
24 | model_path = "SupNet/model.pth"
25 | model = load_model(model_path)
26 | print(model)
27 |
--------------------------------------------------------------------------------
/SupNet/model.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/SupNet/model.pth
--------------------------------------------------------------------------------
/SupNet/network/__pycache__/pointnet_encoder.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/SupNet/network/__pycache__/pointnet_encoder.cpython-39.pyc
--------------------------------------------------------------------------------
/SupNet/network/__pycache__/supnet.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/SupNet/network/__pycache__/supnet.cpython-39.pyc
--------------------------------------------------------------------------------
/SupNet/network/pointnet_encoder.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.parallel
4 | import torch.utils.data
5 | from torch.autograd import Variable
6 | import numpy as np
7 | import torch.nn.functional as F
8 | from pytorch3d import transforms
9 |
10 |
11 | class STN3d(nn.Module):
12 | def __init__(self, channel):
13 | super(STN3d, self).__init__()
14 | self.conv1 = torch.nn.Conv1d(channel, 64, 1)
15 | self.conv2 = torch.nn.Conv1d(64, 128, 1)
16 | self.conv3 = torch.nn.Conv1d(128, 1024, 1)
17 | self.fc1 = nn.Linear(1024, 512)
18 | self.fc2 = nn.Linear(512, 256)
19 | self.fc3 = nn.Linear(256, 7)
20 | self.relu = nn.ReLU()
21 |
22 | self.bn1 = nn.BatchNorm1d(64)
23 | self.bn2 = nn.BatchNorm1d(128)
24 | self.bn3 = nn.BatchNorm1d(1024)
25 | self.bn4 = nn.BatchNorm1d(512)
26 | self.bn5 = nn.BatchNorm1d(256)
27 |
28 | def forward(self, x):
29 | batchsize = x.size()[0]
30 | x = F.relu(self.bn1(self.conv1(x)))
31 | x = F.relu(self.bn2(self.conv2(x)))
32 | x = F.relu(self.bn3(self.conv3(x)))
33 | x = torch.max(x, 2, keepdim=False)[0]
34 | # x = x.view(-1, 1024)
35 | x = F.relu(self.bn4(self.fc1(x)))
36 | x = F.relu(self.bn5(self.fc2(x)))
37 | x = self.fc3(x)
38 |
39 | iden = (
40 | Variable(torch.from_numpy(np.array([1, 0, 0, 0]).astype(np.float32)))
41 | .view(1, 4)
42 | .repeat(batchsize, 1)
43 | )
44 | if x.is_cuda:
45 | iden = iden.cuda()
46 | # print(x.shape)
47 | x, y = x[:, :4] + iden, x[:, 4:]
48 | y = y.view(batchsize, 1, 3) * 0.05
49 | # y = 0.01*torch.randn((batchsize,1,3)).to(x.device)
50 | return x, y
51 |
52 |
53 | class STNkd(nn.Module):
54 | def __init__(self, k=64):
55 | super(STNkd, self).__init__()
56 | self.conv1 = torch.nn.Conv1d(k, 64, 1)
57 | self.conv2 = torch.nn.Conv1d(64, 128, 1)
58 | self.conv3 = torch.nn.Conv1d(128, 1024, 1)
59 | self.fc1 = nn.Linear(1024, 512)
60 | self.fc2 = nn.Linear(512, 256)
61 | self.fc3 = nn.Linear(256, k * k)
62 | self.relu = nn.ReLU()
63 |
64 | self.bn1 = nn.BatchNorm1d(64)
65 | self.bn2 = nn.BatchNorm1d(128)
66 | self.bn3 = nn.BatchNorm1d(1024)
67 | self.bn4 = nn.BatchNorm1d(512)
68 | self.bn5 = nn.BatchNorm1d(256)
69 |
70 | self.k = k
71 |
72 | def forward(self, x):
73 | batchsize = x.size()[0]
74 | x = F.relu(self.bn1(self.conv1(x)))
75 | x = F.relu(self.bn2(self.conv2(x)))
76 | x = F.relu(self.bn3(self.conv3(x)))
77 | x = torch.max(x, 2, keepdim=True)[0]
78 | x = x.view(-1, 1024)
79 |
80 | x = F.relu(self.bn4(self.fc1(x)))
81 | x = F.relu(self.bn5(self.fc2(x)))
82 | x = self.fc3(x)
83 |
84 | iden = (
85 | Variable(torch.from_numpy(np.eye(self.k).flatten().astype(np.float32)))
86 | .view(1, self.k * self.k)
87 | .repeat(batchsize, 1)
88 | )
89 | if x.is_cuda:
90 | iden = iden.cuda()
91 | x = x + iden
92 | x = x.view(-1, self.k, self.k)
93 | return x
94 |
95 |
96 | class PointNetEncoder(nn.Module):
97 | def __init__(self, global_feat=True, feature_transform=False, channel=3):
98 | super(PointNetEncoder, self).__init__()
99 | self.stn = STN3d(channel)
100 | self.conv1 = torch.nn.Conv1d(channel, 64, 1)
101 | self.conv2 = torch.nn.Conv1d(64, 128, 1)
102 | self.conv3 = torch.nn.Conv1d(128, 1024, 1)
103 | self.bn1 = nn.BatchNorm1d(64)
104 | self.bn2 = nn.BatchNorm1d(128)
105 | self.bn3 = nn.BatchNorm1d(1024)
106 | self.global_feat = global_feat
107 | self.feature_transform = feature_transform
108 | if self.feature_transform:
109 | self.fstn = STNkd(k=64)
110 |
111 | def forward(self, x, stn=True):
112 | B, D, N = x.size()
113 | if stn:
114 | quat, tran = self.stn(x)
115 | rot = transforms.quaternion_to_matrix(quat)
116 | x = x.transpose(2, 1).contiguous()
117 | if D > 3:
118 | x, feature = x[..., :3], x[..., 3:]
119 | x = torch.bmm(x, rot) + tran
120 | if D > 3:
121 | x = torch.cat([x, feature], dim=2)
122 | x = x.transpose(2, 1).contiguous()
123 | else:
124 |             quat, rot, tran = None, None, None
125 | x = F.relu(self.bn1(self.conv1(x)))
126 |
127 | if self.feature_transform:
128 | trans_feat = self.fstn(x)
129 | x = x.transpose(2, 1)
130 | x = torch.bmm(x, trans_feat)
131 | x = x.transpose(2, 1).contiguous()
132 | else:
133 | trans_feat = None
134 | pointfeat = x
135 |
136 | x = F.relu(self.bn2(self.conv2(x)))
137 | x = self.bn3(self.conv3(x))
138 | x = torch.max(x, 2, keepdim=True)[0] # B,1024,N->B,1024,1
139 | if self.global_feat:
140 | return x.view(-1, 1024), quat, tran, trans_feat
141 | else:
142 | x = x.repeat(1, 1, N)
143 | return torch.cat([x, pointfeat], 1), quat, tran, trans_feat # (B,1088,N)
144 |
145 |
146 | if __name__ == "__main__":
147 | pass
148 |
--------------------------------------------------------------------------------
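A small shape-check sketch for PointNetEncoder (not from the repository); the batch size, point count, and eval mode are arbitrary choices for illustration, and it assumes the repository root is on the import path and pytorch3d is installed.

import torch
from SupNet.network.pointnet_encoder import PointNetEncoder

encoder = PointNetEncoder(global_feat=True, feature_transform=False, channel=3).eval()
points = torch.rand(2, 3, 1024)                      # [B, C, N] point cloud
with torch.no_grad():
    feat, quat, tran, trans_feat = encoder(points)   # stn=True by default
print(feat.shape, quat.shape, tran.shape)            # [2, 1024], [2, 4], [2, 1, 3]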
/SupNet/network/supnet.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.parallel
4 | import torch.utils.data
5 | from SupNet import PointNetEncoder
6 |
7 |
8 | class SupNet(nn.Module):
9 | def __init__(self):
10 | super(SupNet, self).__init__()
11 | self.cls_embedding = nn.Embedding(2, 128)
12 | self.ho_encoder = PointNetEncoder(
13 | global_feat=True, feature_transform=False, channel=3 + 128
14 | )
15 |
16 | self.decoder = nn.Sequential(
17 | nn.Linear(1024, 512),
18 | nn.BatchNorm1d(512),
19 | nn.ReLU(),
20 | nn.Linear(512, 256),
21 | nn.BatchNorm1d(256),
22 | nn.ReLU(),
23 | nn.Linear(256, 2),
24 | )
25 | for m in self.decoder.modules():
26 | if isinstance(m, nn.Linear):
27 | nn.init.xavier_normal_(m.weight)
28 |
29 | def forward(self, obj_pc, hand_pc):
30 | """
31 | :param obj_pc: [B, 3, N]
32 | :param hand_pc: [B, 778, 3]
33 |         :return: classification logits [B, 2] (grasp plausibility) and the predicted quaternion [B, 4]
34 | """
35 | B, _, N = obj_pc.shape
36 | obj_cls = torch.zeros(B, N).to(torch.long).to(obj_pc.device)
37 | hand_cls = torch.zeros(B, 778).to(torch.long).to(hand_pc.device)
38 | obj_emb = self.cls_embedding(obj_cls).transpose(1, 2) # B*e*3000
39 | hand_emb = self.cls_embedding(hand_cls) # B*778*e
40 | obj_feature = torch.cat((obj_pc, obj_emb), dim=1)
41 | hand_feature = torch.cat((hand_pc, hand_emb), dim=2).transpose(
42 | 1, 2
43 | ) # B*(3+e)*778
44 |         # concatenate object and hand point features along the point dimension
45 | ho_feature, quat, _, _ = self.ho_encoder(
46 | torch.cat((obj_feature, hand_feature), dim=2), stn=True
47 | )
48 | cls = self.decoder(ho_feature)
49 | return cls, quat
50 |
51 |
52 | if __name__ == "__main__":
53 | # import time
54 | device = "cuda"
55 |
--------------------------------------------------------------------------------
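The `__main__` block above is left unfinished; a minimal sketch of the smoke test it presumably intended (device, batch size, and point count are arbitrary, and the shapes follow the forward docstring):

import torch
from SupNet import SupNet

device = "cuda" if torch.cuda.is_available() else "cpu"
model = SupNet().to(device).eval()
obj_pc = torch.rand(2, 3, 3000, device=device)    # [B, 3, N]
hand_pc = torch.rand(2, 778, 3, device=device)    # [B, 778, 3]
with torch.no_grad():
    cls_logits, quat = model(obj_pc, hand_pc)
print(cls_logits.shape, quat.shape)               # [2, 2], [2, 4]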
/SupNet/obman_dataset.py:
--------------------------------------------------------------------------------
1 | from torch.utils.data import Dataset
2 | import torch
3 | from pytorch3d import transforms
4 | import numpy as np
5 | import os
6 |
7 |
8 | class obman(Dataset):
9 | def __init__(self, obj_pc_path, hand_pc_path):
10 |
11 | self.obj_pc_path = obj_pc_path
12 | self.hand_pc_path = hand_pc_path
13 | self.file_list = os.listdir(self.obj_pc_path)
14 |
15 | def __len__(self):
16 | return len(self.file_list)
17 |
18 | def __getitem__(self, idx):
19 |
20 | obj_pc = np.load(os.path.join(self.obj_pc_path, self.file_list[idx]))
21 | hand_pc = np.load(os.path.join(self.hand_pc_path, self.file_list[idx]))
22 | obj_pc = torch.tensor(obj_pc, dtype=torch.float32) # 3*3000
23 | hand_pc = torch.tensor(hand_pc, dtype=torch.float32)
24 |
25 | if np.random.rand() >= 0.5:
26 | return (obj_pc, hand_pc, torch.tensor(1))
27 | else:
28 | if np.random.rand() > 0.5:
29 | hand_pc = hand_pc.mm(transforms.random_rotation())
30 | else:
31 | hand_pc = hand_pc + 0.05 * torch.randn((1, 3))
32 | return (obj_pc, hand_pc, torch.tensor(0))
33 |
--------------------------------------------------------------------------------
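The dataset returns (obj_pc, hand_pc, label) triples: label 1 marks the original hand/object pair, label 0 a perturbed one (the hand is either randomly rotated or randomly translated). A hedged DataLoader sketch, assuming the processed directories produced by dataprocess.py exist under the default K=50 layout used in train.py:

from torch.utils.data import DataLoader
from SupNet import obman

dataset = obman("Data/processed/50/train/obj_pc", "Data/processed/50/train/hand_pc")
loader = DataLoader(dataset, batch_size=4, shuffle=True)
obj_pc, hand_pc, label = next(iter(loader))
print(obj_pc.shape, hand_pc.shape, label.shape)   # [4, 3, 3000], [4, 778, 3], [4]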
/SupNet/train.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 |
4 | sys.path.append(os.getcwd())
5 | import time
6 | import torch
7 | from torch.optim import lr_scheduler
8 | from torch.utils.data import DataLoader
9 | from collections import defaultdict
10 | from SupNet import obman
11 | from SupNet import SupNet
12 | import numpy as np
13 | import random
14 | from config import cfgs
15 |
16 |
17 | def train(cfg, epoch, model, train_loader, device, optimizer, log_root):
18 | since = time.time()
19 | logs = defaultdict(list)
20 | model.train()
21 |
22 | for batch_idx, (obj_pc, hand_pc, true_cls) in enumerate(train_loader):
23 | obj_pc, hand_pc, true_cls = (
24 | obj_pc.to(device),
25 | hand_pc.to(device),
26 | true_cls.to(device),
27 | )
28 | optimizer.zero_grad()
29 |
30 | pred_cls, quat = model(obj_pc, hand_pc)
31 | rot_loss = torch.square(quat.norm(dim=1) - 1).sum() / obj_pc.shape[0]
32 | cls_loss = torch.nn.functional.cross_entropy(pred_cls, true_cls)
33 | loss = rot_loss + cls_loss
34 | loss.backward()
35 | optimizer.step()
36 | acc = (pred_cls.max(1)[1] == true_cls).sum() / true_cls.shape[0]
37 | logs["loss"].append(loss.item())
38 | logs["rot_loss"].append(rot_loss.item())
39 | logs["cls_loss"].append(cls_loss.item())
40 | logs["acc"].append(acc.item())
41 | if batch_idx % cfg.print_every == 0 or batch_idx == len(train_loader) - 1:
42 | print(
43 |                 "Train Epoch {:02d}/{:02d}, Batch {:04d}/{:d}, Total Loss {:9.5f}, rot loss {:9.5f}, cls loss {:9.5f}, acc {:9.5f}".format(
44 | epoch,
45 | cfg.epochs,
46 | batch_idx,
47 | len(train_loader) - 1,
48 | loss.item(),
49 | rot_loss.item(),
50 | cls_loss.item(),
51 | acc.item(),
52 | )
53 | )
54 |
55 | time_elapsed = time.time() - since
56 |     out_str = "Epoch: {:02d}/{:02d}, train, time {:.0f}m, Mean Total Loss {:9.5f}, rot loss {:9.5f}, cls loss {:9.5f}, acc {:9.5f}".format(
57 | epoch,
58 | cfg.epochs,
59 | time_elapsed // 60,
60 | sum(logs["loss"]) / len(logs["loss"]),
61 | sum(logs["rot_loss"]) / len(logs["rot_loss"]),
62 | sum(logs["cls_loss"]) / len(logs["cls_loss"]),
63 | sum(logs["acc"]) / len(logs["acc"]),
64 | )
65 | with open(log_root, "a") as f:
66 | f.write(out_str + "\n")
67 |
68 |
69 | def val(cfg, epoch, model, val_loader, device, log_root, checkpoint_root, best_val_acc, mode="val"):
70 | # validation
71 | total_loss, total_rot_loss, total_cls_loss = 0.0, 0.0, 0.0
72 | acc_num, total_num = 0, 0
73 | model.eval()
74 | with torch.no_grad():
75 | for batch_idx, (obj_pc, hand_pc, true_cls) in enumerate(val_loader):
76 | # obj_pc, hand_param, obj_cmap = obj_pc.to(device), hand_param.to(device), obj_cmap.to(device)
77 | obj_pc, hand_pc, true_cls = (
78 | obj_pc.to(device),
79 | hand_pc.to(device),
80 | true_cls.to(device),
81 | )
82 |             # no optimizer step during validation; gradients are disabled by torch.no_grad()
83 | pred_cls, quat = model(obj_pc, hand_pc) # recon [B,61] mano params
84 | rot_loss = torch.square(quat.norm(dim=1) - 1).sum() / obj_pc.shape[0]
85 | cls_loss = torch.nn.functional.cross_entropy(pred_cls, true_cls)
86 | loss = rot_loss + cls_loss
87 | acc_num += (pred_cls.max(1)[1] == true_cls).sum()
88 | total_num += true_cls.shape[0]
89 | total_loss += loss.item()
90 | total_rot_loss += rot_loss.item()
91 | total_cls_loss += cls_loss.item()
92 |
93 | mean_loss = total_loss / len(val_loader)
94 | mean_rot_loss = total_rot_loss / len(val_loader)
95 | mean_cls_loss = total_cls_loss / len(val_loader)
96 | acc = (acc_num / total_num).item()
97 | if acc > best_val_acc:
98 | best_val_acc = acc
99 | save_name = os.path.join(checkpoint_root, "model_best_{}.pth".format(str(acc)))
100 | torch.save({"network": model.state_dict(), "epoch": epoch}, save_name)
101 |
102 |     out_str = "Epoch: {:02d}/{:02d}, {}, mean_loss {:9.5f}, mean_rot_loss {:9.5f}, mean_cls_loss {:9.5f}, acc {:9.5f}, Best Acc: {:9.5f}".format(
103 | epoch, cfg.epochs, mode, mean_loss, mean_rot_loss, mean_cls_loss, acc, best_val_acc
104 | )
105 | print(out_str)
106 | with open(log_root, "a") as f:
107 | f.write(out_str + "\n")
108 |
109 | return max(best_val_acc, acc)
110 |
111 |
112 | if __name__ == "__main__":
113 | cfg = cfgs.supnet_config
114 | cfg.K = cfgs.obman_config.K
115 | del cfgs
116 |
117 | local_time = time.localtime(time.time())
118 | time_str = str(local_time[1]) + "_" + str(local_time[2]) + "_" + str(local_time[3])
119 | save_root = os.path.join("logs", cfg.model_type, time_str)
120 | if not os.path.exists(save_root):
121 | os.makedirs(save_root)
122 | log_root = save_root + "/log.txt"
123 | log_file = open(log_root, "w+")
124 | log_file.write(str(cfg) + "\n")
125 | log_file.close()
126 |
127 | # seed
128 | torch.manual_seed(cfg.seed)
129 | if torch.cuda.is_available():
130 | torch.cuda.manual_seed(cfg.seed)
131 | np.random.seed(cfg.seed)
132 | random.seed(cfg.seed)
133 |
134 | # device
135 | use_cuda = cfg.use_cuda and torch.cuda.is_available()
136 | device = torch.device("cuda" if use_cuda else "cpu")
137 | print("using device", device)
138 | device_num = 1
139 |
140 | # network
141 | model = SupNet().to(device)
142 | # multi-gpu
143 | if device == torch.device("cuda"):
144 | torch.backends.cudnn.benchmark = True
145 | device_ids = range(torch.cuda.device_count())
146 |         print("using {} CUDA device(s)".format(len(device_ids)))
147 | if len(device_ids) > 1:
148 | model = torch.nn.DataParallel(model)
149 | device_num = len(device_ids)
150 |
151 | # dataset
152 | if "Train" in cfg.train_mode:
153 | obj_pc_path = "Data/processed/{}/{}/obj_pc".format(cfg.K, "train")
154 | hand_pc_path = "Data/processed/{}/{}/hand_pc".format(cfg.K, "train")
155 | train_dataset = obman(obj_pc_path, hand_pc_path)
156 | train_loader = DataLoader(
157 | dataset=train_dataset, batch_size=cfg.batch_size, shuffle=True, num_workers=cfg.dataloader_workers
158 | )
159 | if "Val" in cfg.train_mode:
160 | obj_pc_path = "Data/processed/{}/{}/obj_pc".format(cfg.K, "val")
161 | hand_pc_path = "Data/processed/{}/{}/hand_pc".format(cfg.K, "val")
162 | val_dataset = obman(obj_pc_path, hand_pc_path)
163 | val_loader = DataLoader(
164 | dataset=val_dataset, batch_size=cfg.batch_size, shuffle=False, num_workers=cfg.dataloader_workers
165 | )
166 | if "Test" in cfg.train_mode:
167 | obj_pc_path = "Data/processed/{}/{}/obj_pc".format(cfg.K, "test")
168 | hand_pc_path = "Data/processed/{}/{}/hand_pc".format(cfg.K, "test")
169 | eval_dataset = obman(obj_pc_path, hand_pc_path)
170 | eval_loader = DataLoader(
171 | dataset=eval_dataset, batch_size=cfg.batch_size, shuffle=False, num_workers=cfg.dataloader_workers
172 | )
173 |
174 | # optimizer
175 | optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=cfg.learning_rate)
176 | scheduler = lr_scheduler.MultiStepLR(
177 | optimizer,
178 | milestones=[round(cfg.epochs * x) for x in [0.3, 0.6, 0.8, 0.9]],
179 | gamma=0.5,
180 | )
181 |
182 | best_val_acc = 0
183 | best_eval_loss = float("inf")
184 | for epoch in range(1, cfg.epochs + 1):
185 |         print("Begin Train epoch={}".format(epoch))
186 | if "Train" in cfg.train_mode:
187 | train(cfg, epoch, model, train_loader, device, optimizer, log_root)
188 | scheduler.step()
189 | if "Val" in cfg.train_mode:
190 | print("Begin Val epoch={}".format(epoch))
191 | best_val_acc = val(cfg, epoch, model, val_loader, device, log_root, save_root, best_val_acc, "val")
192 | if "Test" in cfg.train_mode:
193 | print("Begin Test epoch={}".format(epoch))
194 | best_val_acc = val(cfg, epoch, model, eval_loader, device, log_root, save_root, best_val_acc, "test")
195 |
--------------------------------------------------------------------------------
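The script is driven entirely by supnet_config (and obman_config.K) from config.json and expects to be launched from the repository root, since all dataset paths are relative. For reference, with the configured 100 epochs the MultiStepLR milestones above resolve to epochs 30, 60, 80, and 90, with the learning rate halved at each; a quick arithmetic check:

# quick check of the MultiStepLR milestones used above (epochs = 100 in config.json)
epochs = 100
milestones = [round(epochs * x) for x in [0.3, 0.6, 0.8, 0.9]]
print(milestones)   # [30, 60, 80, 90]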
/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/__init__.py
--------------------------------------------------------------------------------
/config.json:
--------------------------------------------------------------------------------
1 | {
2 |
3 | "cvae_model_path": "PointCVAE/model.pth",
4 | "dcog_config":
5 | {
6 | "mano_path": "mano/models/MANO_RIGHT.pkl",
7 | "init_handpose_path": "DCoG/init_handpose.npy",
8 | "init_quat_path": "DCoG/init_quat.npy",
9 | "finger_index_path": "HandAnnotation/finger_index.json",
10 | "tip_index_path": "HandAnnotation/tip_index.json",
11 | "supnet_path": "SupNet/model.pth",
12 | "init_move_finger_idx":3,
13 | "weights":
14 | {
15 | "w_dis": 0.5,
16 | "w_dct": 0.8,
17 | "w_dcf": 0.6,
18 | "w_net": 0.6,
19 | "w_pen": 10.0
20 | }
21 | },
22 |
23 | "obman_config":
24 | {
25 | "mode": "test",
26 | "shapeNet_path": "Data/ShapeNetCore.v2.zip",
27 | "obman_path": "Data/obman",
28 | "K":50,
29 | "sample_points_num": 3000,
30 | "save_path": "Data/processed"
31 | },
32 |
33 | "cvae_config":
34 | {
35 | "seed": 0,
36 | "epochs": 200,
37 | "batch_size": 128,
38 | "learning_rate": 0.0005,
39 | "print_every": 1,
40 | "use_cuda": 1,
41 | "save_interval": 5,
42 | "dataloader_workers": 10,
43 | "train_mode": "TrainVal",
44 | "encoder_layer_sizes": [512, 512, 256],
45 | "decoder_layer_sizes": [512, 256, 128, 64, 6],
46 | "latent_size": 1024,
47 | "cls_num": 6,
48 | "emb_dim": 64,
49 | "model_type": "point_cvae_net",
50 | "loss_weight": [0.5, 0.9, 0.01, 0.4]
51 | },
52 |
53 | "supnet_config":
54 | {
55 | "seed": 0,
56 | "epochs": 100,
57 | "batch_size": 128,
58 | "learning_rate": 0.0001,
59 | "print_every": 20,
60 | "use_cuda": 1,
61 | "dataloader_workers": 10,
62 | "train_mode": "TrainVal",
63 | "model_type": "supervision_net"
64 | }
65 |
66 |
67 | }
68 |
--------------------------------------------------------------------------------
/config.py:
--------------------------------------------------------------------------------
1 | import json
2 | from attrdict import AttrDict
3 |
4 | with open("config.json") as f:
5 |     cfgs = AttrDict(json.load(f))
6 |
--------------------------------------------------------------------------------
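config.py wraps the JSON in an AttrDict so every nested key in config.json is reachable via attribute access. A small illustrative sketch (the printed values are the ones in config.json above; it assumes the attrdict package is installed and config.json is in the working directory):

from config import cfgs

print(cfgs.cvae_model_path)                  # "PointCVAE/model.pth"
print(cfgs.dcog_config.weights.w_pen)        # 10.0
print(cfgs.obman_config.sample_points_num)   # 3000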
/dataprocess.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 | from utils import annotate, tools, Load_obman
4 | import argparse
5 | import os
6 | from config import cfgs
7 |
8 |
9 | class ObmanProcess:
10 | def __init__(self, cfg):
11 | self.mode = cfg.mode
12 | self.obman_path = cfg.obman_path
13 | self.shapeNet_path = cfg.shapeNet_path
14 | self.save_path = cfg.save_path
15 | self.start = cfg.start
16 | self.end = cfg.end
17 | self.sample_points_num = cfg.sample_points_num
18 | self.K = cfg.K
19 | self.load_obman = Load_obman(self.shapeNet_path, self.obman_path, self.mode)
20 |
21 | def process(self, save=True):
22 | actual_number_files = len(self.load_obman.pklNameList)
23 | if self.end == -1 or self.end > actual_number_files - 1:
24 | self.end = actual_number_files - 1
25 |
26 | if save:
27 | tools.check_dir(self.save_path)
28 | hand_pc_path = os.path.join(self.save_path, "hand_pc")
29 | hand_param_path = os.path.join(self.save_path, "hand_param")
30 | obj_pc_path = os.path.join(self.save_path, "obj_pc")
31 | obj_cls_path = os.path.join(self.save_path, "obj_cls")
32 | obj_mesh_path = os.path.join(self.save_path, "obj_mesh")
33 | tools.check_dir(hand_pc_path)
34 | tools.check_dir(obj_pc_path)
35 | tools.check_dir(obj_cls_path)
36 | tools.check_dir(hand_param_path)
37 | tools.check_dir(obj_mesh_path)
38 |
39 | print("Processing total {} files".format(self.end - self.start + 1))
40 | for idx in range(self.start, self.end + 1):
41 | print("Processing {}/{}".format(idx, self.end))
42 | meta = self.load_obman.get_meta(idx)
43 | hand_pc = self.load_obman.get_hand_pc(meta)
44 | hand_pose = self.load_obman.get_hand_pose(meta)
45 | obj_mesh = self.load_obman.get_obj_mesh(meta)
46 | obj_pc = tools.pc_sample(obj_mesh, self.sample_points_num)
47 | obj_cls, _ = annotate.get_obj_cls_and_colors(torch.Tensor(hand_pc), torch.Tensor(obj_pc), K=self.K)
48 | obj_cls = obj_cls.squeeze().cpu().detach().numpy()
49 |             if not save: continue  # skip writing results to disk when save=False
50 | hand_pc_idx_path = os.path.join(hand_pc_path, "{}.npy".format(idx))
51 | np.save(hand_pc_idx_path, hand_pc)
52 | hand_param_idx_path = os.path.join(hand_param_path, "{}.npy".format(idx))
53 | np.save(hand_param_idx_path, hand_pose)
54 | obj_pc_idx_path = os.path.join(obj_pc_path, "{}.npy".format(idx))
55 | np.save(obj_pc_idx_path, obj_pc.T) # [3, 3000]
56 | obj_cls_idx_path = os.path.join(obj_cls_path, "{}.npy".format(idx))
57 | np.save(obj_cls_idx_path, obj_cls)
58 | obj_mesh_idx_path = os.path.join(obj_mesh_path, "{}.obj".format(idx))
59 | obj_mesh.export(obj_mesh_idx_path)
60 | print("Saved idx = {} in directory {}".format(idx, self.save_path))
61 |
62 |
63 | if __name__ == "__main__":
64 | cfg = cfgs.obman_config
65 | del cfgs
66 | parser = argparse.ArgumentParser(description="Process data")
67 | parser.add_argument("--start", "-s", type=int, default=0, help="Start index")
68 | parser.add_argument("--end", "-e", type=int, default=-1, help="End index, -1 means the last index")
69 | args = parser.parse_args()
70 | cfg.save_path = os.path.join(cfg.save_path, str(cfg.K), cfg.mode)
71 | cfg.start = args.start
72 | cfg.end = args.end
73 | obman_process = ObmanProcess(cfg)
74 | obman_process.process()
75 |
--------------------------------------------------------------------------------
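A hedged sketch of driving ObmanProcess programmatically instead of via the command line; it mirrors the `__main__` block above and assumes the ObMan and ShapeNet data referenced in obman_config have been downloaded to the paths given in config.json.

import os
from config import cfgs
from dataprocess import ObmanProcess

cfg = cfgs.obman_config
cfg.save_path = os.path.join(cfg.save_path, str(cfg.K), cfg.mode)   # e.g. Data/processed/50/test
cfg.start, cfg.end = 0, 9                                           # only the first ten samples
ObmanProcess(cfg).process(save=True)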
/mano/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
4 | # holder of all proprietary rights on this computer program.
5 | # You can only use this computer program if you have closed
6 | # a license agreement with MPG or you get the right to use the computer
7 | # program from someone who is authorized to grant you that right.
8 | # Any use of the computer program without a valid license is prohibited and
9 | # liable to prosecution.
10 | #
11 | # Copyright©2019 Max-Planck-Gesellschaft zur Förderung
12 | # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
13 | # for Intelligent Systems. All rights reserved.
14 | #
15 | # Contact: ps-license@tuebingen.mpg.de
16 |
17 | from .model import load, MANO
18 |
--------------------------------------------------------------------------------
/mano/joints_info.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
4 | # holder of all proprietary rights on this computer program.
5 | # You can only use this computer program if you have closed
6 | # a license agreement with MPG or you get the right to use the computer
7 | # program from someone who is authorized to grant you that right.
8 | # Any use of the computer program without a valid license is prohibited and
9 | # liable to prosecution.
10 | #
11 | # Copyright©2019 Max-Planck-Gesellschaft zur Förderung
12 | # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
13 | # for Intelligent Systems. All rights reserved.
14 | #
15 | # Contact: ps-license@tuebingen.mpg.de
16 |
17 | TIP_IDS = {
18 | 'mano': {
19 | 'thumb': 744,
20 | 'index': 320,
21 | 'middle': 443,
22 | 'ring': 554,
23 | 'pinky': 671,
24 | }
25 | }
26 |
27 | JOINT_NAMES = [
28 | 'wrist',
29 | 'index1',
30 | 'index2',
31 | 'index3',
32 | 'middle1',
33 | 'middle2',
34 | 'middle3',
35 | 'pinky1',
36 | 'pinky2',
37 | 'pinky3',
38 | 'ring1',
39 | 'ring2',
40 | 'ring3',
41 | 'thumb1',
42 | 'thumb2',
43 | 'thumb3',
44 | 'thumb_tip',
45 | 'index_tip',
46 | 'middle_tip',
47 | 'ring_tip',
48 | 'pinky_tip',
49 | ]
50 |
--------------------------------------------------------------------------------
/mano/lbs.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
4 | # holder of all proprietary rights on this computer program.
5 | # You can only use this computer program if you have closed
6 | # a license agreement with MPG or you get the right to use the computer
7 | # program from someone who is authorized to grant you that right.
8 | # Any use of the computer program without a valid license is prohibited and
9 | # liable to prosecution.
10 | #
11 | # Copyright©2019 Max-Planck-Gesellschaft zur Förderung
12 | # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
13 | # for Intelligent Systems. All rights reserved.
14 | #
15 | # Contact: ps-license@tuebingen.mpg.de
16 |
17 | # If you use this code in a research publication please consider citing the following:
18 | #
19 | # Expressive Body Capture: 3D Hands, Face, and Body from a Single Image
20 | #
21 | # Code Developed by:
22 | # Vassilis Choutas
23 | # For the original and better implementation please refer to : https://github.com/vchoutas/smplx
24 |
25 | from __future__ import absolute_import
26 | from __future__ import print_function
27 | from __future__ import division
28 |
29 | import torch
30 | import torch.nn.functional as F
31 |
32 |
33 | def lbs(betas, pose, v_template, shapedirs, posedirs, J_regressor, parents,
34 | lbs_weights, pose2rot=True, dtype=torch.float32):
35 | ''' Performs Linear Blend Skinning with the given shape and pose parameters
36 |
37 | Parameters
38 | ----------
39 | betas : torch.tensor BxNB
40 | The tensor of shape parameters
41 | pose : torch.tensor Bx(J + 1) * 3
42 | The pose parameters in axis-angle format
43 | v_template torch.tensor BxVx3
44 | The template mesh that will be deformed
45 | shapedirs : torch.tensor 1xNB
46 | The tensor of PCA shape displacements
47 | posedirs : torch.tensor Px(V * 3)
48 | The pose PCA coefficients
49 | J_regressor : torch.tensor JxV
50 | The regressor array that is used to calculate the joints from
51 | the position of the vertices
52 | parents: torch.tensor J
53 | The array that describes the kinematic tree for the model
54 | lbs_weights: torch.tensor N x V x (J + 1)
55 | The linear blend skinning weights that represent how much the
56 | rotation matrix of each part affects each vertex
57 | pose2rot: bool, optional
58 | Flag on whether to convert the input pose tensor to rotation
59 | matrices. The default value is True. If False, then the pose tensor
60 | should already contain rotation matrices and have a size of
61 | Bx(J + 1)x9
62 | dtype: torch.dtype, optional
63 |
64 | Returns
65 | -------
66 | verts: torch.tensor BxVx3
67 | The vertices of the mesh after applying the shape and pose
68 | displacements.
69 | joints: torch.tensor BxJx3
70 | The joints of the model
71 | '''
72 |
73 | batch_size = max(betas.shape[0], pose.shape[0])
74 | device, dtype = betas.device, betas.dtype
75 |
76 | # Add shape contribution
77 | v_shaped = v_template + blend_shapes(betas, shapedirs)
78 |
79 | # Get the joints
80 | # NxJx3 array
81 | J = vertices2joints(J_regressor, v_shaped)
82 |
83 | # 3. Add pose blend shapes
84 | # N x J x 3 x 3
85 | ident = torch.eye(3, dtype=dtype, device=device)
86 | if pose2rot:
87 | rot_mats = batch_rodrigues(pose.view(-1, 3)).view(
88 | [batch_size, -1, 3, 3])
89 |
90 | pose_feature = (rot_mats[:, 1:, :, :] - ident).view([batch_size, -1])
91 | # (N x P) x (P, V * 3) -> N x V x 3
92 | pose_offsets = torch.matmul(
93 | pose_feature, posedirs).view(batch_size, -1, 3)
94 | else:
95 | pose_feature = pose[:, 1:].view(batch_size, -1, 3, 3) - ident
96 | rot_mats = pose.view(batch_size, -1, 3, 3)
97 |
98 | pose_offsets = torch.matmul(pose_feature.view(batch_size, -1),
99 | posedirs).view(batch_size, -1, 3)
100 |
101 | v_posed = pose_offsets + v_shaped
102 | # 4. Get the global joint location
103 | J_transformed, A = batch_rigid_transform(rot_mats, J, parents, dtype=dtype)
104 |
105 | # 5. Do skinning:
106 | # W is N x V x (J + 1)
107 | W = lbs_weights.unsqueeze(dim=0).expand([batch_size, -1, -1])
108 | # (N x V x (J + 1)) x (N x (J + 1) x 16)
109 | num_joints = J_regressor.shape[0]
110 | T = torch.matmul(W, A.view(batch_size, num_joints, 16)) \
111 | .view(batch_size, -1, 4, 4)
112 |
113 | homogen_coord = torch.ones([batch_size, v_posed.shape[1], 1],
114 | dtype=dtype, device=device)
115 | v_posed_homo = torch.cat([v_posed, homogen_coord], dim=2)
116 | v_homo = torch.matmul(T, torch.unsqueeze(v_posed_homo, dim=-1))
117 |
118 | verts = v_homo[:, :, :3, 0]
119 |
120 | return verts, J_transformed
121 |
122 |
123 | def vertices2joints(J_regressor, vertices):
124 | ''' Calculates the 3D joint locations from the vertices
125 |
126 | Parameters
127 | ----------
128 | J_regressor : torch.tensor JxV
129 | The regressor array that is used to calculate the joints from the
130 | position of the vertices
131 | vertices : torch.tensor BxVx3
132 | The tensor of mesh vertices
133 |
134 | Returns
135 | -------
136 | torch.tensor BxJx3
137 | The location of the joints
138 | '''
139 |
140 | return torch.einsum('bik,ji->bjk', [vertices, J_regressor])
141 |
142 |
143 | def blend_shapes(betas, shape_disps):
144 | ''' Calculates the per vertex displacement due to the blend shapes
145 |
146 |
147 | Parameters
148 | ----------
149 | betas : torch.tensor Bx(num_betas)
150 | Blend shape coefficients
151 | shape_disps: torch.tensor Vx3x(num_betas)
152 | Blend shapes
153 |
154 | Returns
155 | -------
156 | torch.tensor BxVx3
157 | The per-vertex displacement due to shape deformation
158 | '''
159 |
160 | # Displacement[b, m, k] = sum_{l} betas[b, l] * shape_disps[m, k, l]
161 | # i.e. Multiply each shape displacement by its corresponding beta and
162 | # then sum them.
163 | blend_shape = torch.einsum('bl,mkl->bmk', [betas, shape_disps])
164 | return blend_shape
165 |
166 |
167 | def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):
168 | ''' Calculates the rotation matrices for a batch of rotation vectors
169 | Parameters
170 | ----------
171 | rot_vecs: torch.tensor Nx3
172 | array of N axis-angle vectors
173 | Returns
174 | -------
175 | R: torch.tensor Nx3x3
176 | The rotation matrices for the given axis-angle parameters
177 | '''
178 |
179 | batch_size = rot_vecs.shape[0]
180 | device, dtype = rot_vecs.device, rot_vecs.dtype
181 |
182 | angle = torch.norm(rot_vecs + 1e-8, dim=1, keepdim=True)
183 | rot_dir = rot_vecs / angle
184 |
185 | cos = torch.unsqueeze(torch.cos(angle), dim=1)
186 | sin = torch.unsqueeze(torch.sin(angle), dim=1)
187 |
188 | # Bx1 arrays
189 | rx, ry, rz = torch.split(rot_dir, 1, dim=1)
190 | K = torch.zeros((batch_size, 3, 3), dtype=dtype, device=device)
191 |
192 | zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device)
193 | K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \
194 | .view((batch_size, 3, 3))
195 |
196 | ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0)
197 | rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K)
198 | return rot_mat
199 |
200 |
201 | def transform_mat(R, t):
202 | ''' Creates a batch of transformation matrices
203 | Args:
204 | - R: Bx3x3 array of a batch of rotation matrices
205 | - t: Bx3x1 array of a batch of translation vectors
206 | Returns:
207 | - T: Bx4x4 Transformation matrix
208 | '''
209 | # No padding left or right, only add an extra row
210 | return torch.cat([F.pad(R, [0, 0, 0, 1]),
211 | F.pad(t, [0, 0, 0, 1], value=1)], dim=2)
212 |
213 |
214 | def batch_rigid_transform(rot_mats, joints, parents, dtype=torch.float32):
215 | """
216 | Applies a batch of rigid transformations to the joints
217 |
218 | Parameters
219 | ----------
220 | rot_mats : torch.tensor BxNx3x3
221 | Tensor of rotation matrices
222 | joints : torch.tensor BxNx3
223 | Locations of joints
224 | parents : torch.tensor BxN
225 | The kinematic tree of each object
226 | dtype : torch.dtype, optional:
227 | The data type of the created tensors, the default is torch.float32
228 |
229 | Returns
230 | -------
231 | posed_joints : torch.tensor BxNx3
232 | The locations of the joints after applying the pose rotations
233 | rel_transforms : torch.tensor BxNx4x4
234 | The relative (with respect to the root joint) rigid transformations
235 | for all the joints
236 | """
237 |
238 | joints = torch.unsqueeze(joints, dim=-1)
239 |
240 | rel_joints = joints.clone()
241 | rel_joints[:, 1:] -= joints[:, parents[1:]]
242 |
243 | transforms_mat = transform_mat(
244 | rot_mats.reshape(-1, 3, 3),
245 | rel_joints.reshape(-1, 3, 1)).reshape(-1, joints.shape[1], 4, 4)
246 |
247 | transform_chain = [transforms_mat[:, 0]]
248 | for i in range(1, parents.shape[0]):
249 | # Subtract the joint location at the rest pose
250 | # No need for rotation, since it's identity when at rest
251 | curr_res = torch.matmul(transform_chain[parents[i]],
252 | transforms_mat[:, i])
253 | transform_chain.append(curr_res)
254 |
255 | transforms = torch.stack(transform_chain, dim=1)
256 |
257 | # The last column of the transformations contains the posed joints
258 | posed_joints = transforms[:, :, :3, 3]
259 |
260 | joints_homogen = F.pad(joints, [0, 0, 0, 1])
261 |
262 | rel_transforms = transforms - F.pad(
263 | torch.matmul(transforms, joints_homogen), [3, 0, 0, 0, 0, 0, 0, 0])
264 |
265 | return posed_joints, rel_transforms
266 |
--------------------------------------------------------------------------------
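For orientation, the two standard formulas these helpers implement, restated explicitly as background (not code from the repository): batch_rodrigues applies Rodrigues' rotation formula, and lbs blends the per-joint rigid transforms with the skinning weights.

% Rodrigues' rotation formula (batch_rodrigues), for an axis-angle vector r:
R = I + \sin\theta\,[\mathbf{n}]_\times + (1 - \cos\theta)\,[\mathbf{n}]_\times^{2},
\qquad \theta = \lVert \mathbf{r} \rVert,\quad \mathbf{n} = \mathbf{r}/\theta,
% where [n]_x is the skew-symmetric cross-product matrix (the tensor K built in the code).

% Linear blend skinning (lbs): each posed template vertex \tilde{v}_i, in homogeneous
% coordinates, is transformed by a weighted sum of the relative joint transforms T_j:
\mathbf{v}_i' = \Big(\sum_{j} w_{ij}\, T_j\Big)\, \tilde{\mathbf{v}}_i .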
/mano/model.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
4 | # holder of all proprietary rights on this computer program.
5 | # You can only use this computer program if you have closed
6 | # a license agreement with MPG or you get the right to use the computer
7 | # program from someone who is authorized to grant you that right.
8 | # Any use of the computer program without a valid license is prohibited and
9 | # liable to prosecution.
10 | #
11 | # Copyright©2019 Max-Planck-Gesellschaft zur Förderung
12 | # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
13 | # for Intelligent Systems. All rights reserved.
14 | #
15 | # Contact: ps-license@tuebingen.mpg.de
16 |
17 | from __future__ import absolute_import
18 | from __future__ import print_function
19 | from __future__ import division
20 |
21 | import os
22 | import os.path as osp
23 |
24 | try:
25 | import cPickle as pickle
26 | except ImportError:
27 | import pickle
28 |
29 | import numpy as np
30 |
31 | from collections import namedtuple
32 |
33 | import torch
34 | import torch.nn as nn
35 |
36 | from .lbs import lbs
37 | from .utils import Struct, to_np, to_tensor
38 | from .utils import Mesh, points2sphere, colors
39 | from .joints_info import TIP_IDS
40 |
41 | ModelOutput = namedtuple(
42 | "ModelOutput",
43 | [
44 | "vertices",
45 | "joints",
46 | "full_pose",
47 | "betas",
48 | "transl",
49 | "global_orient",
50 | "hand_pose",
51 | ],
52 | )
53 | ModelOutput.__new__.__defaults__ = (None,) * len(ModelOutput._fields)
54 |
55 |
56 | def load(model_path, is_rhand=True, **kwargs):
57 | """load MANO model from a path
58 |
59 | Parameters
60 | ----------
61 | model_path: str
62 | Either the path to the model you wish to load or a folder,
63 |         where each subfolder contains the different model types, i.e.:
64 | model_path:
65 | |
66 | |-- mano
67 | |-- MANO_RIGHT
68 | |-- MANO_LEFT
69 |     is_rhand: bool, optional
70 |         When model_path is the mano folder, this parameter specifies whether
71 |         the right-hand or the left-hand model should be loaded
72 | **kwargs: dict
73 | Keyword arguments
74 |
75 | Returns
76 | -------
77 | hand_model: nn.Module
78 | The PyTorch module that implements the corresponding hand model
79 | """
80 |
81 | return MANO(model_path, is_rhand, **kwargs)
82 |
83 |
84 | class MANO(nn.Module):
85 | # The hand joints are replaced by MANO
86 | NUM_BODY_JOINTS = 1
87 | NUM_HAND_JOINTS = 15
88 | NUM_JOINTS = NUM_BODY_JOINTS + NUM_HAND_JOINTS
89 | NUM_BETAS = 10
90 |
91 | def __init__(
92 | self,
93 | model_path,
94 | is_rhand=True,
95 | data_struct=None,
96 | create_betas=True,
97 | betas=None,
98 | create_global_orient=True,
99 | global_orient=None,
100 | create_transl=True,
101 | transl=None,
102 | create_hand_pose=True,
103 | hand_pose=None,
104 | use_pca=True,
105 | num_pca_comps=6,
106 | flat_hand_mean=False,
107 | batch_size=1,
108 | joint_mapper=None,
109 | v_template=None,
110 | dtype=torch.float32,
111 | vertex_ids=None,
112 | use_compressed=True,
113 | ext="pkl",
114 | **kwargs
115 | ):
116 | """MANO model constructor
117 |
118 | Parameters
119 | ----------
120 | model_path: str
121 | The path to the folder or to the file where the model
122 | parameters are stored
123 |         data_struct: Struct
124 | A struct object. If given, then the parameters of the model are
125 | read from the object. Otherwise, the model tries to read the
126 | parameters from the given `model_path`. (default = None)
127 | create_hand_pose: bool, optional
128 | Flag for creating a member variable for the pose of the
129 | hand. (default = True)
130 | hand_pose: torch.tensor, optional, BxP
131 | The default value for the left hand pose member variable.
132 | (default = None)
133 | num_pca_comps: int, optional
134 | The number of PCA components to use for each hand.
135 | (default = 6)
136 | flat_hand_mean: bool, optional
137 |             If True, the hand pose is initialized to a flat (open) hand; otherwise to the mean pose from the data.
138 | batch_size: int, optional
139 | The batch size used for creating the member variables
140 | dtype: torch.dtype, optional
141 | The data type for the created variables
142 | vertex_ids: dict, optional
143 | A dictionary containing the indices of the extra vertices that
144 | will be selected
145 | """
146 |
147 | self.num_pca_comps = num_pca_comps
148 | # If no data structure is passed, then load the data from the given
149 | # model folder
150 | if data_struct is None:
151 | # Load the model
152 | if osp.isdir(model_path):
153 | model_fn = "MANO_{}.{ext}".format(
154 | "RIGHT" if is_rhand else "LEFT", ext=ext
155 | )
156 | mano_path = os.path.join(model_path, model_fn)
157 | else:
158 | mano_path = model_path
159 | self.is_rhand = (
160 | True if "RIGHT" in os.path.basename(model_path) else False
161 | )
162 | assert osp.exists(mano_path), "Path {} does not exist!".format(mano_path)
163 |
164 | if ext == "pkl":
165 | with open(mano_path, "rb") as mano_file:
166 | model_data = pickle.load(mano_file, encoding="latin1")
167 | elif ext == "npz":
168 | model_data = np.load(mano_path, allow_pickle=True)
169 | else:
170 | raise ValueError("Unknown extension: {}".format(ext))
171 | data_struct = Struct(**model_data)
172 |
173 | self.tip_ids = TIP_IDS["mano"]
174 |
175 | super(MANO, self).__init__()
176 |
177 | self.batch_size = batch_size
178 | self.dtype = dtype
179 | self.joint_mapper = joint_mapper
180 |
181 | self.faces = data_struct.f
182 | self.register_buffer(
183 | "faces_tensor",
184 | to_tensor(to_np(self.faces, dtype=np.int64), dtype=torch.long),
185 | )
186 |
187 | if create_betas:
188 | if betas is None:
189 | default_betas = torch.zeros([batch_size, self.NUM_BETAS], dtype=dtype)
190 | else:
191 | if "torch.Tensor" in str(type(betas)):
192 | default_betas = betas.clone().detach()
193 | else:
194 | default_betas = torch.tensor(betas, dtype=dtype)
195 |
196 | self.register_parameter(
197 | "betas", nn.Parameter(default_betas, requires_grad=True)
198 | )
199 |
200 | if create_global_orient:
201 | if global_orient is None:
202 | default_global_orient = torch.zeros([batch_size, 3], dtype=dtype)
203 | else:
204 | if torch.is_tensor(global_orient):
205 | default_global_orient = global_orient.clone().detach()
206 | else:
207 | default_global_orient = torch.tensor(global_orient, dtype=dtype)
208 |
209 | global_orient = nn.Parameter(default_global_orient, requires_grad=True)
210 | self.register_parameter("global_orient", global_orient)
211 |
212 | if create_transl:
213 | if transl is None:
214 | default_transl = torch.zeros(
215 | [batch_size, 3], dtype=dtype, requires_grad=True
216 | )
217 | else:
218 | default_transl = torch.tensor(transl, dtype=dtype)
219 | self.register_parameter(
220 | "transl", nn.Parameter(default_transl, requires_grad=True)
221 | )
222 |
223 | if v_template is None:
224 | v_template = data_struct.v_template
225 | if not torch.is_tensor(v_template):
226 | v_template = to_tensor(to_np(v_template), dtype=dtype)
227 | # The vertices of the template model
228 | self.register_buffer("v_template", to_tensor(v_template, dtype=dtype))
229 |
230 | # The shape components
231 | shapedirs = data_struct.shapedirs
232 | # The shape components
233 | self.register_buffer("shapedirs", to_tensor(to_np(shapedirs), dtype=dtype))
234 |
235 | j_regressor = to_tensor(to_np(data_struct.J_regressor), dtype=dtype)
236 | self.register_buffer("J_regressor", j_regressor)
237 |
238 |         # Pose blend shape basis, stored as V x 3 x P and reshaped below to P x (V * 3)
239 |         num_pose_basis = data_struct.posedirs.shape[-1]
240 |         # posedirs becomes [P, V * 3] after the reshape and transpose
241 | posedirs = np.reshape(data_struct.posedirs, [-1, num_pose_basis]).T
242 | self.register_buffer("posedirs", to_tensor(to_np(posedirs), dtype=dtype))
243 |
244 | # indices of parents for each joints
245 | parents = to_tensor(to_np(data_struct.kintree_table[0])).long()
246 | parents[0] = -1
247 | self.register_buffer("parents", parents)
248 |
249 | self.register_buffer(
250 | "lbs_weights", to_tensor(to_np(data_struct.weights), dtype=dtype)
251 | )
252 |
253 | self.use_pca = use_pca
254 | self.num_pca_comps = num_pca_comps
255 | # if self.num_pca_comps == 45:
256 | # self.use_pca = False
257 | self.flat_hand_mean = flat_hand_mean
258 |
259 | hand_components = data_struct.hands_components[:num_pca_comps]
260 |
261 | self.np_hand_components = hand_components
262 |
263 | if self.use_pca:
264 | self.register_buffer(
265 | "hand_components", torch.tensor(hand_components, dtype=dtype)
266 | )
267 |
268 | if self.flat_hand_mean:
269 | hand_mean = np.zeros_like(data_struct.hands_mean)
270 | else:
271 | hand_mean = data_struct.hands_mean
272 |
273 | self.register_buffer("hand_mean", to_tensor(hand_mean, dtype=self.dtype))
274 |
275 | # Create the buffers for the pose of the left hand
276 | hand_pose_dim = num_pca_comps if use_pca else 3 * self.NUM_HAND_JOINTS
277 | if create_hand_pose:
278 | if hand_pose is None:
279 | default_hand_pose = torch.zeros(
280 | [batch_size, hand_pose_dim], dtype=dtype
281 | )
282 | else:
283 | default_hand_pose = torch.tensor(hand_pose, dtype=dtype)
284 |
285 | hand_pose_param = nn.Parameter(default_hand_pose, requires_grad=True)
286 | self.register_parameter("hand_pose", hand_pose_param)
287 |
288 | # Create the buffer for the mean pose.
289 | pose_mean = self.create_mean_pose(data_struct, flat_hand_mean=flat_hand_mean)
290 | pose_mean_tensor = pose_mean.clone().to(dtype)
291 | self.register_buffer("pose_mean", pose_mean_tensor)
292 |
293 | def create_mean_pose(self, data_struct, flat_hand_mean=False):
294 | # Create the array for the mean pose. If flat_hand is false, then use
295 | # the mean that is given by the data, rather than the flat open hand
296 | global_orient_mean = torch.zeros([3], dtype=self.dtype)
297 |
298 | pose_mean = torch.cat([global_orient_mean, self.hand_mean], dim=0)
299 | return pose_mean
300 |
301 | def get_num_verts(self):
302 | return self.v_template.shape[0]
303 |
304 | def get_num_faces(self):
305 | return self.faces.shape[0]
306 |
307 | def extra_repr(self):
308 | msg = "Number of betas: {}".format(self.NUM_BETAS)
309 | if self.use_pca:
310 | msg += "\nNumber of PCA components: {}".format(self.num_pca_comps)
311 | msg += "\nFlat hand mean: {}".format(self.flat_hand_mean)
312 | return msg
313 |
314 | def add_joints(self, vertices, joints, joint_ids=None):
315 |
316 | dev = vertices.device
317 | if joint_ids is None:
318 | joint_ids = to_tensor(list(self.tip_ids.values()), dtype=torch.long).to(dev)
319 | extra_joints = torch.index_select(vertices, 1, joint_ids)
320 | joints = torch.cat([joints, extra_joints], dim=1)
321 |
322 | return joints
323 |
324 | def forward(
325 | self,
326 | betas=None,
327 | global_orient=None,
328 | hand_pose=None,
329 | transl=None,
330 | return_verts=True,
331 | return_tips=False,
332 | return_full_pose=False,
333 | pose2rot=True,
334 | **kwargs
335 | ):
336 | """ """
337 | # If no shape and pose parameters are passed along, then use the
338 | # ones from the module
339 | global_orient = (
340 | global_orient if global_orient is not None else self.global_orient
341 | )
342 | betas = betas if betas is not None else self.betas
343 | hand_pose = hand_pose if hand_pose is not None else self.hand_pose
344 |
345 | apply_trans = transl is not None or hasattr(self, "transl")
346 | if transl is None:
347 | if hasattr(self, "transl"):
348 | transl = self.transl
349 |
350 | if self.use_pca:
351 | hand_pose = torch.einsum("bi,ij->bj", [hand_pose, self.hand_components])
352 |
353 | full_pose = torch.cat([global_orient, hand_pose], dim=1)
354 | full_pose += self.pose_mean
355 |
356 | if return_verts:
357 | vertices, joints = lbs(
358 | betas,
359 | full_pose,
360 | self.v_template,
361 | self.shapedirs,
362 | self.posedirs,
363 | self.J_regressor,
364 | self.parents,
365 | self.lbs_weights,
366 | pose2rot=pose2rot,
367 | dtype=self.dtype,
368 | )
369 |
370 | # Add any extra joints that might be needed
371 | if return_tips:
372 | joints = self.add_joints(vertices, joints)
373 |
374 | if self.joint_mapper is not None:
375 | joints = self.joint_mapper(joints)
376 |
377 | if apply_trans:
378 | joints = joints + transl.unsqueeze(dim=1)
379 | vertices = vertices + transl.unsqueeze(dim=1)
380 |
381 | output = ModelOutput(
382 | vertices=vertices if return_verts else None,
383 | joints=joints if return_verts else None,
384 | betas=betas,
385 | global_orient=global_orient,
386 | hand_pose=hand_pose,
387 | full_pose=full_pose if return_full_pose else None,
388 | )
389 |
390 | return output
391 |
392 | def hand_meshes(self, output, vc=colors["skin"]):
393 |
394 | vertices = to_np(output.vertices)
395 | if vertices.ndim < 3:
396 | vertices = vertices.reshape(-1, 778, 3)
397 |
398 | meshes = []
399 | for v in vertices:
400 | hand_mesh = Mesh(vertices=v, faces=self.faces, vc=vc)
401 | meshes.append(hand_mesh)
402 |
403 | return meshes
404 |
405 | def joint_meshes(self, output, radius=0.002, vc=colors["green"]):
406 |
407 | joints = to_np(output.joints)
408 | if joints.ndim < 3:
409 | joints = joints.reshape(1, -1, 3)
410 |
411 | meshes = []
412 | for j in joints:
413 | joint_mesh = Mesh(vertices=j, radius=radius, vc=vc)
414 | meshes.append(joint_mesh)
415 |
416 | return meshes
417 |
--------------------------------------------------------------------------------
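A hedged sketch of loading and posing the model; it assumes the separately licensed MANO_RIGHT.pkl has been placed at the path config.json expects (mano/models/MANO_RIGHT.pkl), together with whatever is needed to unpickle it. The zero parameters simply produce the mean hand; shapes follow the class constants above (778 vertices, 16 joints plus 5 tips).

import torch
from mano import load

rh_model = load("mano/models/MANO_RIGHT.pkl", is_rhand=True, use_pca=True,
                num_pca_comps=6, batch_size=1, flat_hand_mean=False)
output = rh_model(betas=torch.zeros(1, 10),         # shape coefficients
                  global_orient=torch.zeros(1, 3),  # root rotation, axis-angle
                  hand_pose=torch.zeros(1, 6),      # PCA pose coefficients
                  transl=torch.zeros(1, 3),
                  return_tips=True)
print(output.vertices.shape, output.joints.shape)   # [1, 778, 3], [1, 21, 3]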
/mano/utils.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
4 | # holder of all proprietary rights on this computer program.
5 | # You can only use this computer program if you have closed
6 | # a license agreement with MPG or you get the right to use the computer
7 | # program from someone who is authorized to grant you that right.
8 | # Any use of the computer program without a valid license is prohibited and
9 | # liable to prosecution.
10 | #
11 | # Copyright©2019 Max-Planck-Gesellschaft zur Förderung
12 | # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
13 | # for Intelligent Systems. All rights reserved.
14 | #
15 | # Contact: ps-license@tuebingen.mpg.de
16 |
17 | from __future__ import print_function
18 | from __future__ import absolute_import
19 | from __future__ import division
20 |
21 | import numpy as np
22 | import torch
23 | import trimesh
24 |
25 |
26 | class Struct(object):
27 | def __init__(self, **kwargs):
28 | for key, val in kwargs.items():
29 | setattr(self, key, val)
30 |
31 |
32 | def to_tensor(array, dtype=torch.float32):
33 | if not torch.is_tensor(array):
34 | array = torch.tensor(array)
35 | return array.to(dtype)
36 |
37 |
38 | def to_np(array, dtype=np.float32):
39 | if 'scipy.sparse' in str(type(array)):
40 | array = np.array(array.todense())
41 | elif 'chumpy' in str(type(array)):
42 | array = np.array(array)
43 | elif torch.is_tensor(array):
44 | array = array.detach().cpu().numpy()
45 | return array.astype(dtype)
46 |
47 |
48 | def rot_mat_to_euler(rot_mats):
49 |     # Converts a rotation matrix to Euler angles
50 |     # Careful with extreme cases of Euler angles like [0.0, pi, 0.0]
51 |
52 | sy = torch.sqrt(rot_mats[:, 0, 0] * rot_mats[:, 0, 0] +
53 | rot_mats[:, 1, 0] * rot_mats[:, 1, 0])
54 | return torch.atan2(-rot_mats[:, 2, 0], sy)
55 |
56 |
57 | ## vis tools
58 |
59 |
60 | class Mesh(trimesh.Trimesh):
61 |
62 | def __init__(self,
63 | filename=None,
64 | vertices=None,
65 | faces=None,
66 | vc=None,
67 | fc=None,
68 | vscale=None,
69 | radius=.002,
70 | process = False,
71 | visual = None,
72 | wireframe=False,
73 | smooth = False,
74 | **kwargs):
75 |
76 | self.wireframe = wireframe
77 | self.smooth = smooth
78 |
79 | if filename is not None:
80 | mesh = trimesh.load(filename, process = process)
81 | vertices = mesh.vertices
82 | faces= mesh.faces
83 | visual = mesh.visual
84 | if vscale is not None:
85 | vertices = vertices*vscale
86 |
87 | if faces is None:
88 | mesh = points2sphere(vertices, radius=radius)
89 | vertices = mesh.vertices
90 | faces = mesh.faces
91 | visual = mesh.visual
92 |
93 | super(Mesh, self).__init__(vertices=vertices, faces=faces, process=process, visual=visual)
94 |
95 | if vc is not None:
96 | self.set_vertex_colors(vc)
97 | if fc is not None:
98 | self.set_face_colors(fc)
99 |
100 | def rotate_vertices(self,rxyz):
101 | visual = self.visual
102 | self.vertices[:] = np.array(self.vertices@rxyz.T)
103 | self.visual = visual
104 | return self
105 |
106 | def colors_like(self,color, array, ids):
107 |
108 | color = np.array(color)
109 |
110 | if color.max() <= 1.:
111 | color = color * 255
112 |         color = color.astype(np.uint8)
113 |
114 | n_color = color.shape[0]
115 | n_ids = ids.shape[0]
116 |
117 | new_color = np.array(array)
118 | if n_color <= 4:
119 | new_color[ids, :n_color] = np.repeat(color[np.newaxis], n_ids, axis=0)
120 | else:
121 | new_color[ids, :] = color
122 |
123 | return new_color
124 |
125 | def set_vertex_colors(self,vc, vertex_ids = None):
126 |
127 | all_ids = np.arange(self.vertices.shape[0])
128 | if vertex_ids is None:
129 | vertex_ids = all_ids
130 |
131 | vertex_ids = all_ids[vertex_ids]
132 | new_vc = self.colors_like(vc, self.visual.vertex_colors, vertex_ids)
133 | self.visual.vertex_colors[:] = new_vc
134 |
135 | def set_face_colors(self,fc, face_ids = None):
136 |
137 | if face_ids is None:
138 | face_ids = np.arange(self.faces.shape[0])
139 |
140 | new_fc = self.colors_like(fc, self.visual.face_colors, face_ids)
141 | self.visual.face_colors[:] = new_fc
142 |
143 | @staticmethod
144 | def concatenate_meshes(meshes):
145 | return trimesh.util.concatenate(meshes)
146 |
147 | def points2sphere(points, radius = .001, vc = [0., 0., 1.], count = [5,5]):
148 |
149 | points = points.reshape(-1,3)
150 | n_points = points.shape[0]
151 |
152 | spheres = []
153 | for p in range(n_points):
154 | sphs = trimesh.creation.uv_sphere(radius=radius, count = count)
155 | sphs.apply_translation(points[p])
156 | sphs = Mesh(vertices=sphs.vertices, faces=sphs.faces, vc=vc)
157 |
158 | spheres.append(sphs)
159 |
160 | spheres = Mesh.concatenate_meshes(spheres)
161 | return spheres
162 |
163 | colors = {
164 | 'pink': [1.00, 0.75, 0.80],
165 | 'skin': [0.96, 0.75, 0.69],
166 | 'purple': [0.63, 0.13, 0.94],
167 | 'red': [1.0, 0.0, 0.0],
168 | 'green': [.0, 1., .0],
169 | 'yellow': [1., 1., 0],
170 | 'brown': [1.00, 0.25, 0.25],
171 | 'blue': [.0, .0, 1.],
172 | 'white': [1., 1., 1.],
173 | 'orange': [1.00, 0.65, 0.00],
174 | 'grey': [0.75, 0.75, 0.75],
175 | 'black': [0., 0., 0.],
176 | }
177 |
--------------------------------------------------------------------------------
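A small illustrative sketch of the visualization helpers above (the vertex values are arbitrary; when no faces are given, Mesh internally falls back to points2sphere, so the result is a ball per point):

import numpy as np
from mano.utils import Mesh, colors

points = np.random.rand(5, 3) * 0.1                               # five arbitrary 3D points
balls = Mesh(vertices=points, radius=0.002, vc=colors["green"])   # one small sphere per point
print(balls.vertices.shape, balls.faces.shape)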
/run_complete.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import torch
4 | import trimesh
5 | import argparse
6 | import numpy as np
7 | import open3d as o3d
8 |
9 | from utils import annotate
10 | from utils import vis
11 | from utils import tools
12 | from config import cfgs
13 | from GrainGrasp import GrainGrasp
14 |
15 |
16 | if __name__ == "__main__":
17 | device = "cuda" if torch.cuda.is_available() else "cpu"
18 | parge = argparse.ArgumentParser(description="GrainGrasp")
19 | parge.add_argument("--idx", "-i", type=int, default=1, help="The idx of the object")
20 | parge.add_argument("--epochs", "-e", type=int, default=300, help="The epochs of the optimization")
21 | parge.add_argument("--threshold", "-t", type=float, default=0.0)
22 | parge.add_argument("--select", "-s", type=str, default="12345")
23 | parge.add_argument("--vis_pc", "-vp", type=bool, default=True)
24 | parge.add_argument("--vis_mesh", "-vm", type=bool, default=True)
25 | parge.add_argument("--vis_process", "-vprocess", type=bool, default=False)
26 | args = parge.parse_args()
27 | select_finger_idx = list(map(lambda x: int(x), args.select))
28 | sample_points_num = cfgs.obman_config.sample_points_num
29 | obj_path = os.path.join("sample", str(args.idx), "obj_mesh.obj")
30 | obj_mesh = trimesh.load_mesh(obj_path)
31 |     # obj_pc can be sampled directly from obj_mesh, but this requires the CVAE to generalize better
32 |     obj_pc = tools.pc_sample(obj_mesh, sample_points_num)
33 |     # alternatively, obj_pc can be loaded from the pre-sampled file (which overrides the sampled one)
34 |     obj_pc_path = os.path.join("sample", str(args.idx), "obj_pc.npy")  # [3, 3000]
35 | obj_pc = np.load(obj_pc_path).T
36 | obj_pc = torch.Tensor(obj_pc)
37 |
38 | # load the GrainGrasp model
39 | grain_grasp = GrainGrasp(cfgs.dcog_config, cfgs.cvae_model_path, device)
40 | time_start = time.time()
41 | result = grain_grasp.inference_complete(
42 | obj_pc,
43 | epochs=args.epochs,
44 | select_finger_idx=select_finger_idx,
45 | threshold=args.threshold,
46 | )
47 | print("The running time is {:.2f}s".format(time.time() - time_start))
48 | print("The Epen is ", result.E_pen)
49 | print("The min_idx is ", result.min_idx)
50 | hand_pc_final = result.min_idx_hand_pc
51 | hand_face = grain_grasp.dcog_model.rh_faces[0].cpu()
52 | hand_color = annotate.get_finger_colors(hand_pc_final)
53 | hand_mesh_o3d = vis.get_o3d_mesh(hand_pc_final, hand_face, [0, 0.8, 1], hand_color)
54 | obj_colors_true = annotate.get_obj_colors(result.obj_cls.cpu())
55 | obj_pcd = vis.get_o3d_pcd(obj_pc.cpu().detach(), obj_colors_true)
56 | obj_mesh_o3d = vis.trimesh2o3d(obj_mesh)
57 |
58 | if args.vis_pc:
59 | vis.vis_HandObject([hand_mesh_o3d], [obj_pcd])
60 | if args.vis_mesh:
61 | vis.vis_HandObject([hand_mesh_o3d], [obj_mesh_o3d])
62 |
63 | # vis the process of the optimization
64 | if args.vis_process:
65 | record_hand_pc = result.min_idx_record_hand_pc
66 |         record_handmesh_o3d = vis.get_o3d_mesh(record_hand_pc[0], hand_face, [0, 0.8, 1], hand_color)
67 | vis.vis_GraspProcess(record_handmesh_o3d, record_hand_pc[1:], obj_mesh_o3d)
68 |
--------------------------------------------------------------------------------
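One caveat on the flags above: argparse's type=bool converts any non-empty string to True, so passing --vis_pc False still enables the visualization (only an empty string would yield False). A hypothetical helper, not part of the repository, that scripts like this could register instead:

# hypothetical str2bool helper for the --vis_* flags; argparse's type=bool
# treats every non-empty string (including "False") as True
import argparse

def str2bool(value):
    if isinstance(value, bool):
        return value
    if value.lower() in ("yes", "true", "t", "1"):
        return True
    if value.lower() in ("no", "false", "f", "0"):
        return False
    raise argparse.ArgumentTypeError("expected a boolean value, got {!r}".format(value))

# e.g. parser.add_argument("--vis_pc", "-vp", type=str2bool, default=True)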
/run_only_opt.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import torch
4 | import trimesh
5 | import argparse
6 | import numpy as np
7 | import open3d as o3d
8 | from utils import annotate
9 | from utils import vis
10 | from utils import tools
11 | from utils import Load_obman
12 | from config import cfgs
13 | from GrainGrasp import GrainGrasp
14 |
15 |
16 | if __name__ == "__main__":
17 | device = "cuda" if torch.cuda.is_available() else "cpu"
18 | parge = argparse.ArgumentParser(description="GrainGrasp")
19 | parge.add_argument("--idx", "-i", type=int, default=1, help="The idx of the object")
20 | parge.add_argument("--epochs", "-e", type=int, default=300, help="The epochs of the optimization")
21 | parge.add_argument("--K", "-k", type=int, default=50)
22 | parge.add_argument("--threshold", "-t", type=float, default=0.0)
23 | parge.add_argument("--select", "-s", type=str, default="12345")
24 | parge.add_argument("--vis_pc", "-vp", type=bool, default=True)
25 | parge.add_argument("--vis_mesh", "-vm", type=bool, default=True)
26 | parge.add_argument("--vis_process", "-vprocess", type=bool, default=False)
27 | args = parge.parse_args()
28 | select_finger_idx = list(map(lambda x: int(x), args.select))
29 | sample_points_num = cfgs.obman_config.sample_points_num
30 | obj_path = os.path.join("sample", str(args.idx), "obj_mesh.obj")
31 | obj_mesh = trimesh.load_mesh(obj_path)
32 | obj_pc = tools.pc_sample(obj_mesh, sample_points_num)
33 |     hand_pc_path = os.path.join("sample", str(args.idx), "hand_pc.npy")  # [778, 3]
34 | hand_pc = np.load(hand_pc_path)
35 | obj_pc = torch.Tensor(obj_pc)
36 | hand_pc = torch.Tensor(hand_pc)
37 |
38 | # load the GrainGrasp model
39 | grain_grasp = GrainGrasp(cfgs.dcog_config, None, device)
40 | time_start = time.time()
41 | result = grain_grasp.inference_only_opt(
42 | obj_pc,
43 | hand_pc=hand_pc,
44 | K=args.K,
45 | epochs=args.epochs,
46 | select_finger_idx=select_finger_idx,
47 | threshold=args.threshold,
48 | )
49 | print("The running time is {:.2f}s".format(time.time() - time_start))
50 | print("The Epen is ", result.E_pen)
51 | print("The min_idx is ", result.min_idx)
52 | hand_pc_final = result.min_idx_hand_pc
53 | hand_face = grain_grasp.dcog_model.rh_faces[0].cpu()
54 | hand_color = annotate.get_finger_colors(hand_pc_final)
55 | hand_mesh_o3d = vis.get_o3d_mesh(hand_pc_final, hand_face, [0, 0.8, 1], hand_color)
56 | obj_colors_true = annotate.get_obj_colors(result.obj_cls.cpu())
57 | obj_pcd = vis.get_o3d_pcd(obj_pc.cpu().detach(), obj_colors_true)
58 | obj_mesh_o3d = vis.trimesh2o3d(obj_mesh)
59 |
60 | if args.vis_pc:
61 | vis.vis_HandObject([hand_mesh_o3d], [obj_pcd])
62 | if args.vis_mesh:
63 | vis.vis_HandObject([hand_mesh_o3d], [obj_mesh_o3d])
64 |
65 | # vis the process of the optimization
66 | if args.vis_process:
67 | record_hand_pc = result.min_idx_record_hand_pc
68 |         record_handmesh_o3d = vis.get_o3d_mesh(record_hand_pc[0], hand_face, [0, 0.8, 1], hand_color)
69 | vis.vis_GraspProcess(record_handmesh_o3d, record_hand_pc[1:], obj_mesh_o3d)
70 |
--------------------------------------------------------------------------------
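The optimization-only variant above differs from the complete pipeline in two ways: it needs an initial hand point cloud plus a neighbour count K, and it does not need the CVAE checkpoint (the model path is passed as None). A minimal sketch of that call pattern, using the same sample layout:

    import torch
    import trimesh
    import numpy as np
    from utils import tools
    from config import cfgs
    from GrainGrasp import GrainGrasp

    device = "cuda" if torch.cuda.is_available() else "cpu"
    obj_mesh = trimesh.load_mesh("sample/2/obj_mesh.obj")
    obj_pc = torch.Tensor(tools.pc_sample(obj_mesh, cfgs.obman_config.sample_points_num))
    hand_pc = torch.Tensor(np.load("sample/2/hand_pc.npy"))
    grain_grasp = GrainGrasp(cfgs.dcog_config, None, device)  # no CVAE model path in this mode
    result = grain_grasp.inference_only_opt(
        obj_pc, hand_pc=hand_pc, K=50, epochs=300, select_finger_idx=[1, 2, 3, 4, 5], threshold=0.0
    )

The equivalent command-line run is python run_only_opt.py -i 2 -k 50 -e 300 -s 12345 -t 0.0, where the -s string lists the fingers taking part in the optimization.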
/sample/1/hand_pc.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/sample/1/hand_pc.npy
--------------------------------------------------------------------------------
/sample/1/obj_mesh.obj:
--------------------------------------------------------------------------------
1 | # https://github.com/mikedh/trimesh
2 | v -0.06859610 0.02018738 -0.57817162
3 | v -0.05849841 0.03514206 -0.57914564
4 | v -0.03658018 -0.00438640 -0.66943218
5 | v -0.04250719 0.00458622 -0.67068730
6 | v -0.04797419 0.01654424 -0.68695221
7 | v -0.04394774 0.01077028 -0.68741865
8 | v -0.03605417 -0.00345612 -0.67640533
9 | v -0.03398134 -0.00621598 -0.67751782
10 | v -0.03204258 -0.00857354 -0.67947658
11 | v -0.03584982 0.00284016 -0.70346636
12 | v -0.03223368 -0.00073013 -0.71051310
13 | v -0.06114957 0.05294357 -0.63628233
14 | v -0.06580917 0.04987430 -0.59573142
15 | v -0.05763243 0.05117672 -0.65013502
16 | v -0.06428047 0.05745116 -0.63599283
17 | v -0.04742727 0.02403928 -0.60001424
18 | v -0.06016726 0.04639021 -0.61528623
19 | v -0.04604825 0.02978038 -0.63184511
20 | v -0.02868030 0.00244995 -0.62390840
21 | v -0.02939689 -0.00520198 -0.70918462
22 | v -0.02392990 -0.01715999 -0.69291970
23 | v -0.04003707 0.03062545 -0.67137899
24 | v -0.01888273 0.00727182 -0.70247786
25 | v -0.05122101 0.02326284 -0.57406631
26 | v -0.03344728 -0.00860895 -0.67090319
27 | v -0.04167812 0.00529953 -0.67858846
28 | v -0.04489714 0.00872440 -0.67332768
29 | v -0.04772715 0.01491442 -0.68174694
30 | v -0.04370182 0.00957813 -0.68400252
31 | v -0.03812019 -0.00048149 -0.67621497
32 | v -0.03037044 -0.01036752 -0.68214817
33 | v -0.02907863 -0.01147599 -0.68535060
34 | v -0.03378385 -0.00013437 -0.70365683
35 | v -0.03186568 -0.00312071 -0.70291208
36 | v -0.06490621 0.04630586 -0.58650698
37 | v -0.06309771 0.05509426 -0.63341837
38 | v -0.05226251 0.04319381 -0.64959838
39 | v -0.04516903 0.03123731 -0.64309842
40 | v -0.03998751 0.02097031 -0.63205930
41 | v -0.03566883 0.01046859 -0.61488013
42 | v -0.02450208 -0.01764143 -0.68751124
43 | v -0.05493336 0.04819705 -0.65410289
44 | v -0.03048492 -0.01226609 -0.67367105
45 | v -0.04292766 0.00771207 -0.68099057
46 | v -0.04003838 0.00250483 -0.67695950
47 | v -0.07894689 0.03765763 -0.58775214
48 | v -0.04667756 0.01224714 -0.67709997
49 | v -0.02789522 -0.01510815 -0.67754737
50 | v -0.02825512 -0.01182357 -0.68886532
51 | v -0.03022591 -0.00591539 -0.70128334
52 | v -0.03698728 0.01204040 -0.61341906
53 | v -0.04507635 0.02921437 -0.63535396
54 | v -0.04394950 0.02696888 -0.63290119
55 | v -0.04026571 0.02222933 -0.63555616
56 | v -0.03630794 0.01120858 -0.61408189
57 | v -0.03508150 0.00983377 -0.61579929
58 | v -0.08852894 0.05675196 -0.60860898
59 | v -0.02585464 -0.01694154 -0.68226783
60 | v -0.02820224 -0.01019401 -0.69586905
61 | v -0.04754338 0.04003878 -0.66496686
62 | v -0.02795624 -0.01138624 -0.69245304
63 | v -0.02897632 -0.00832804 -0.69888111
64 | v -0.04621602 0.03548059 -0.65422770
65 | v -0.03341467 0.00997111 -0.62636356
66 | v -0.04310763 0.02789249 -0.64174205
67 | v -0.04450778 0.02847313 -0.63572385
68 | v -0.04357994 0.02668336 -0.63394694
69 | v -0.07596002 0.06900473 -0.61332359
70 | v -0.04194342 0.02408146 -0.63308978
71 | v -0.04060363 0.02306640 -0.63696334
72 | v -0.03455627 0.00931526 -0.61682304
73 | v -0.05188106 0.04283236 -0.65040390
74 | v -0.04252719 0.02677688 -0.64064710
75 | v -0.04478534 0.02974461 -0.63927567
76 | v -0.04457076 0.02877557 -0.63658695
77 | v -0.04399484 0.02739006 -0.63435735
78 | v -0.04204871 0.02447974 -0.63409227
79 | v -0.04081102 0.02315306 -0.63607458
80 | v -0.06470342 0.01404248 -0.57631342
81 | v -0.04052888 0.02317808 -0.63787011
82 | v -0.03410242 0.00892223 -0.61793337
83 | v -0.03895587 0.02194968 -0.64226757
84 | v -0.03322809 0.00942477 -0.62524133
85 | v -0.05146054 0.04240015 -0.65115353
86 | v -0.04674294 0.03627797 -0.65433800
87 | v -0.04200498 0.02602541 -0.64069685
88 | v -0.04305100 0.02747399 -0.64036468
89 | v -0.04428871 0.02880066 -0.63838214
90 | v -0.04431137 0.02800059 -0.63496334
91 | v -0.09177773 0.06638187 -0.62863009
92 | v -0.04309473 0.02592833 -0.63376010
93 | v -0.04257250 0.02517673 -0.63380951
94 | v -0.04155900 0.02388461 -0.63458852
95 | v -0.04113677 0.02343202 -0.63526477
96 | v -0.04059196 0.02348058 -0.63873286
97 | v -0.03372833 0.00866197 -0.61910998
98 | v -0.03965705 0.02209932 -0.63867460
99 | v -0.04112690 0.02502833 -0.64187397
100 | v -0.03313657 0.00900198 -0.62405560
101 | v -0.05100567 0.04190211 -0.65183916
102 | v -0.07856021 0.07774219 -0.63357437
103 | v -0.04728442 0.03707286 -0.65435084
104 | v -0.04151977 0.02527038 -0.64051000
105 | v -0.04354071 0.02806912 -0.63986842
106 | v -0.04449608 0.02888734 -0.63749361
107 | v -0.04396273 0.02852144 -0.63919229
108 | v -0.04078829 0.02395304 -0.63949349
109 | v -0.03344029 0.00853875 -0.62033258
110 | v -0.04110474 0.02456348 -0.64009960
111 | v -0.03314169 0.00871020 -0.62282765
112 | v -0.05052093 0.04134311 -0.65245382
113 | v -0.07575096 0.03204993 -0.58391778
114 | v -0.06781678 0.05371096 -0.59942854
115 | v -0.04783478 0.03785718 -0.65426694
116 | v -0.03324360 0.00855498 -0.62157932
117 | v -0.05001140 0.04072897 -0.65299102
118 | v -0.04838850 0.03862286 -0.65408643
119 | v -0.04948256 0.04006636 -0.65344514
120 | v -0.04893966 0.03936184 -0.65381173
121 | v -0.06346957 0.04570101 -0.59264481
122 | v -0.06084101 0.04126780 -0.59022551
123 | v -0.07025548 0.05642511 -0.59593339
124 | v -0.08442754 0.04796610 -0.59716664
125 | v -0.07805337 0.07529787 -0.62658576
126 | v -0.08622338 0.05786125 -0.62699374
127 | v -0.05135592 0.02950384 -0.69023454
128 | v -0.06945538 0.05714018 -0.60366797
129 | v -0.07069482 0.06009890 -0.60837170
130 | v -0.07229000 0.02620841 -0.58071420
131 | v -0.05949782 0.03910890 -0.58942608
132 | v -0.00510671 -0.05175013 -0.66392617
133 | v -0.06772370 0.05151538 -0.59097813
134 | v -0.07438886 0.06520211 -0.60714746
135 | v -0.08666141 0.05257119 -0.60265932
136 | v -0.07718454 0.07237827 -0.61981893
137 | v -0.06740932 0.02037012 -0.58604198
138 | v -0.03244591 0.00272922 -0.72343422
139 | v -0.07151247 0.06253302 -0.61345360
140 | v -0.04085169 0.01790756 -0.70567673
141 | v -0.06182343 0.04083447 -0.58255307
142 | v -0.04773570 0.03197964 -0.68963116
143 | v -0.06682462 0.05290631 -0.60207976
144 | v -0.03236447 0.00979148 -0.63192759
145 | v -0.05797165 0.03665590 -0.58851775
146 | v 0.00837570 -0.04252976 -0.66167906
147 | v -0.07248262 0.06099858 -0.60133668
148 | v -0.03939811 -0.01055324 -0.62722108
149 | v -0.04400876 0.01449218 -0.70232431
150 | v -0.07178077 0.06384753 -0.61723755
151 | v -0.04610369 0.02370557 -0.69795581
152 | v -0.09109900 0.06368946 -0.62165494
153 | v -0.04248347 0.02618137 -0.69735243
154 | v -0.01896349 0.01194958 -0.72118711
155 | v -0.06819648 0.05564004 -0.60506583
156 | v -0.06434444 0.05016828 -0.60572586
157 | v -0.05395563 0.03248944 -0.59551751
158 | v -0.05491450 0.03195052 -0.58755314
159 | v -0.05172571 0.02723808 -0.58734939
160 | v -0.05821217 0.02693789 -0.66817284
161 | v -0.04604942 0.01632566 -0.69760375
162 | v -0.04141906 0.01165012 -0.70620064
163 | v -0.05495555 0.02927038 -0.57630971
164 | v -0.07189306 0.06439737 -0.61881990
165 | v -0.04294060 0.03086672 -0.65494818
166 | v -0.03723147 0.02038336 -0.70507335
167 | v -0.07132372 0.06629141 -0.63000753
168 | v -0.06571630 0.05290201 -0.60871192
169 | v -0.05224912 0.03060551 -0.59802624
170 | v -0.06241808 0.04782997 -0.60768922
171 | v -0.04817450 0.02341830 -0.59298291
172 | v 0.00249829 -0.03349264 -0.66349644
173 | v -0.04282546 0.01086023 -0.69452132
174 | v -0.08184804 0.04297949 -0.59218226
175 | v -0.04153362 0.00975164 -0.69772340
176 | v -0.03845680 0.00799319 -0.70896872
177 | v -0.07182976 0.06565788 -0.62437185
178 | v -0.06765928 0.06224600 -0.63539460
179 | v -0.06359395 0.05017315 -0.61024869
180 | v -0.05021012 0.02841779 -0.60128338
181 | v -0.05273074 0.03004856 -0.59285134
182 | v -0.06070473 0.04593850 -0.61020799
183 | v -0.04300347 0.01785623 -0.60118616
184 | v -0.02522642 -0.01286311 -0.70277172
185 | v -0.09001261 0.06046940 -0.61496051
186 | v -0.02449171 -0.00369666 -0.62381861
187 | v -0.01905948 0.00946595 -0.71042033
188 | v -0.03967030 0.00011417 -0.66935858
189 | v -0.04740190 0.01702547 -0.69236044
190 | v -0.04364886 0.01120760 -0.69100637
191 | v -0.03986140 0.00795757 -0.70039511
192 | v -0.03792270 0.00560011 -0.70235398
193 | v -0.03532393 0.00377062 -0.71043951
194 | v -0.06251664 0.05393633 -0.63215357
195 | v -0.06232335 0.05635533 -0.64323892
196 | v -0.09204262 0.06852182 -0.63582148
197 | v -0.04889712 0.02696829 -0.60321361
198 | v -0.05102423 0.02816464 -0.59536007
199 | v -0.06188061 0.04828168 -0.61276746
200 | v -0.05899139 0.04404703 -0.61272676
201 | v -0.04874027 0.02548879 -0.59808401
202 | v -0.02700684 -0.00934036 -0.70654401
203 | v -0.02417683 -0.01553039 -0.69812475
204 | v -0.02274189 -0.00383899 -0.63373331
205 | v -0.04604949 0.03926194 -0.67074250
206 | v -0.00158755 -0.02719262 -0.66483218
207 | f 1 112 129
208 | f 129 112 1
209 | f 140 1 112
210 | f 151 112 1
211 | f 140 162 1
212 | f 173 140 112
213 | f 184 112 151
214 | f 151 1 195
215 | f 162 2 140
216 | f 140 2 162
217 | f 140 13 162
218 | f 162 24 1
219 | f 173 35 140
220 | f 112 173 46
221 | f 46 173 112
222 | f 57 173 112
223 | f 57 112 184
224 | f 151 68 184
225 | f 195 1 79
226 | f 195 90 151
227 | f 151 90 195
228 | f 101 151 195
229 | f 13 140 113
230 | f 162 13 120
231 | f 162 121 24
232 | f 1 24 79
233 | f 122 35 173
234 | f 35 68 140
235 | f 173 57 123
236 | f 184 68 57
237 | f 124 68 151
238 | f 195 79 125
239 | f 101 124 151
240 | f 195 126 101
241 | f 140 127 113
242 | f 127 13 113
243 | f 13 128 120
244 | f 162 120 121
245 | f 24 121 130
246 | f 24 131 79
247 | f 35 132 122
248 | f 122 132 35
249 | f 133 35 122
250 | f 122 173 123
251 | f 35 133 68
252 | f 140 68 124
253 | f 57 134 123
254 | f 123 134 57
255 | f 57 122 123
256 | f 68 133 57
257 | f 124 68 135
258 | f 135 68 124
259 | f 125 79 136
260 | f 195 125 137
261 | f 138 124 101
262 | f 126 195 139
263 | f 101 126 141
264 | f 140 124 127
265 | f 13 127 128
266 | f 120 128 138
267 | f 120 142 121
268 | f 143 130 121
269 | f 24 130 144
270 | f 131 24 145
271 | f 131 136 79
272 | f 133 122 146
273 | f 146 122 133
274 | f 57 133 122
275 | f 147 125 136
276 | f 137 125 148
277 | f 137 139 195
278 | f 128 124 138
279 | f 138 101 149
280 | f 126 139 150
281 | f 126 152 141
282 | f 101 141 153
283 | f 127 124 128
284 | f 120 138 154
285 | f 142 120 154
286 | f 121 142 155
287 | f 130 143 156
288 | f 121 155 143
289 | f 130 156 144
290 | f 24 144 157
291 | f 24 158 145
292 | f 153 131 145
293 | f 136 131 147
294 | f 125 147 159
295 | f 148 125 160
296 | f 137 148 161
297 | f 139 137 153
298 | f 149 101 163
299 | f 149 164 138
300 | f 165 150 139
301 | f 152 126 150
302 | f 152 153 141
303 | f 166 101 153
304 | f 154 138 167
305 | f 167 142 154
306 | f 142 167 155
307 | f 156 143 168
308 | f 143 155 169
309 | f 144 156 157
310 | f 24 157 158
311 | f 145 158 170
312 | f 131 153 137
313 | f 145 171 153
314 | f 147 131 159
315 | f 160 125 159
316 | f 160 172 148
317 | f 148 172 160
318 | f 148 174 161
319 | f 161 174 148
320 | f 137 161 175
321 | f 139 153 165
322 | f 163 101 176
323 | f 164 149 163
324 | f 167 138 164
325 | f 150 165 152
326 | f 165 153 152
327 | f 176 101 166
328 | f 166 153 177
329 | f 167 178 155
330 | f 168 143 179
331 | f 168 180 156
332 | f 169 155 178
333 | f 143 169 181
334 | f 157 156 158
335 | f 158 180 170
336 | f 145 170 182
337 | f 137 183 131
338 | f 145 185 171
339 | f 186 153 171
340 | f 159 131 187
341 | f 160 159 188
342 | f 160 189 172
343 | f 172 189 160
344 | f 148 172 174
345 | f 174 172 148
346 | f 161 174 190
347 | f 190 174 161
348 | f 161 191 175
349 | f 175 191 161
350 | f 137 175 192
351 | f 193 163 176
352 | f 164 163 193
353 | f 167 164 178
354 | f 193 176 166
355 | f 177 153 194
356 | f 193 166 177
357 | f 179 143 196
358 | f 168 179 197
359 | f 180 168 197
360 | f 180 158 156
361 | f 198 169 178
362 | f 169 198 181
363 | f 143 181 199
364 | f 170 180 197
365 | f 170 200 182
366 | f 145 182 185
367 | f 137 201 183
368 | f 202 131 183
369 | f 185 203 171
370 | f 204 153 186
371 | f 171 205 186
372 | f 187 131 3
373 | f 159 187 4
374 | f 188 159 5
375 | f 188 6 160
376 | f 160 6 188
377 | f 160 6 189
378 | f 189 6 160
379 | f 189 7 172
380 | f 172 8 174
381 | f 174 9 190
382 | f 161 190 191
383 | f 191 190 161
384 | f 175 191 10
385 | f 10 191 175
386 | f 175 10 192
387 | f 192 10 175
388 | f 137 192 11
389 | f 164 193 12
390 | f 178 164 198
391 | f 194 153 14
392 | f 194 15 177
393 | f 193 177 15
394 | f 196 143 16
395 | f 196 200 179
396 | f 197 179 170
397 | f 17 181 198
398 | f 181 17 199
399 | f 143 199 18
400 | f 200 170 179
401 | f 182 200 16
402 | f 185 182 19
403 | f 137 20 201
404 | f 201 202 183
405 | f 183 202 201
406 | f 21 131 202
407 | f 203 185 22
408 | f 171 203 205
409 | f 14 153 204
410 | f 22 204 186
411 | f 23 186 205
412 | f 3 131 25
413 | f 26 187 3
414 | f 3 187 26
415 | f 5 4 187
416 | f 187 4 5
417 | f 159 4 27
418 | f 5 159 28
419 | f 5 187 188
420 | f 188 187 5
421 | f 188 29 6
422 | f 6 29 188
423 | f 6 30 189
424 | f 189 30 7
425 | f 172 7 8
426 | f 174 8 9
427 | f 190 9 31
428 | f 190 31 191
429 | f 191 32 10
430 | f 192 10 33
431 | f 33 10 192
432 | f 192 34 11
433 | f 11 34 192
434 | f 137 11 20
435 | f 12 193 36
436 | f 164 12 194
437 | f 198 164 17
438 | f 37 194 14
439 | f 15 194 12
440 | f 193 15 36
441 | f 16 143 182
442 | f 200 196 16
443 | f 17 164 199
444 | f 199 38 18
445 | f 143 18 39
446 | f 182 40 19
447 | f 185 19 204
448 | f 20 202 201
449 | f 201 202 20
450 | f 41 131 21
451 | f 20 21 202
452 | f 202 21 20
453 | f 204 22 185
454 | f 23 203 22
455 | f 203 23 205
456 | f 14 204 42
457 | f 22 186 23
458 | f 25 131 43
459 | f 30 3 25
460 | f 25 3 30
461 | f 187 26 44
462 | f 44 26 187
463 | f 26 3 45
464 | f 45 3 26
465 | f 28 4 5
466 | f 5 4 28
467 | f 28 27 4
468 | f 4 27 28
469 | f 159 27 47
470 | f 28 159 47
471 | f 188 187 44
472 | f 44 187 188
473 | f 188 44 29
474 | f 29 44 188
475 | f 29 45 6
476 | f 6 45 30
477 | f 30 25 7
478 | f 7 25 30
479 | f 7 25 8
480 | f 8 25 7
481 | f 8 43 9
482 | f 9 43 8
483 | f 9 48 31
484 | f 31 48 9
485 | f 191 31 32
486 | f 10 32 49
487 | f 10 49 33
488 | f 192 33 34
489 | f 34 33 192
490 | f 11 34 50
491 | f 50 34 11
492 | f 11 21 20
493 | f 20 21 11
494 | f 12 36 15
495 | f 164 194 37
496 | f 37 14 42
497 | f 182 143 51
498 | f 199 164 38
499 | f 18 38 52
500 | f 39 18 53
501 | f 143 39 54
502 | f 40 182 55
503 | f 19 40 56
504 | f 143 204 19
505 | f 58 131 41
506 | f 59 41 21
507 | f 21 41 59
508 | f 42 204 60
509 | f 43 131 48
510 | f 8 25 43
511 | f 43 25 8
512 | f 45 3 30
513 | f 30 3 45
514 | f 26 29 44
515 | f 29 26 45
516 | f 27 28 47
517 | f 47 28 27
518 | f 9 43 48
519 | f 48 43 9
520 | f 31 48 32
521 | f 32 48 31
522 | f 32 58 49
523 | f 49 58 32
524 | f 33 49 61
525 | f 33 61 34
526 | f 34 59 50
527 | f 11 50 62
528 | f 62 50 11
529 | f 62 21 11
530 | f 11 21 62
531 | f 164 37 63
532 | f 60 37 42
533 | f 51 143 64
534 | f 55 182 51
535 | f 38 164 65
536 | f 52 38 66
537 | f 53 18 52
538 | f 39 53 67
539 | f 54 39 69
540 | f 143 54 70
541 | f 55 64 40
542 | f 40 64 56
543 | f 19 56 71
544 | f 164 204 143
545 | f 19 64 143
546 | f 48 131 58
547 | f 49 58 41
548 | f 41 58 49
549 | f 61 41 59
550 | f 59 41 61
551 | f 59 21 62
552 | f 62 21 59
553 | f 60 204 164
554 | f 32 48 58
555 | f 58 48 32
556 | f 49 41 61
557 | f 61 41 49
558 | f 34 61 59
559 | f 50 59 62
560 | f 72 63 37
561 | f 164 63 60
562 | f 37 60 72
563 | f 64 55 51
564 | f 65 164 73
565 | f 65 74 38
566 | f 66 38 75
567 | f 52 66 53
568 | f 67 53 76
569 | f 39 67 69
570 | f 69 77 54
571 | f 54 78 70
572 | f 143 70 80
573 | f 56 64 71
574 | f 19 71 81
575 | f 143 82 164
576 | f 19 83 64
577 | f 63 72 84
578 | f 85 60 63
579 | f 72 60 84
580 | f 73 164 86
581 | f 87 65 73
582 | f 88 74 65
583 | f 75 38 74
584 | f 67 66 75
585 | f 53 66 89
586 | f 76 53 89
587 | f 76 66 67
588 | f 69 67 91
589 | f 77 69 92
590 | f 54 77 93
591 | f 54 94 78
592 | f 78 73 70
593 | f 70 86 80
594 | f 143 80 95
595 | f 71 64 81
596 | f 19 81 96
597 | f 143 97 82
598 | f 98 164 82
599 | f 19 99 83
600 | f 99 64 83
601 | f 63 84 100
602 | f 102 60 85
603 | f 63 102 85
604 | f 84 60 100
605 | f 86 164 103
606 | f 70 73 86
607 | f 104 65 87
608 | f 78 87 73
609 | f 74 88 105
610 | f 88 65 106
611 | f 75 74 105
612 | f 67 75 91
613 | f 66 76 89
614 | f 69 91 92
615 | f 92 88 77
616 | f 77 106 93
617 | f 54 93 94
618 | f 94 87 78
619 | f 80 86 103
620 | f 80 103 95
621 | f 143 95 107
622 | f 81 64 96
623 | f 19 96 108
624 | f 143 107 97
625 | f 82 97 98
626 | f 109 164 98
627 | f 19 110 99
628 | f 110 64 99
629 | f 63 100 111
630 | f 114 60 102
631 | f 63 114 102
632 | f 100 60 111
633 | f 103 164 109
634 | f 106 65 104
635 | f 94 104 87
636 | f 92 105 88
637 | f 77 88 106
638 | f 91 75 105
639 | f 91 105 92
640 | f 93 106 104
641 | f 93 104 94
642 | f 95 103 109
643 | f 95 109 107
644 | f 96 64 108
645 | f 19 108 115
646 | f 97 107 98
647 | f 109 98 107
648 | f 19 115 110
649 | f 115 64 110
650 | f 63 111 116
651 | f 117 60 114
652 | f 63 117 114
653 | f 111 60 116
654 | f 108 64 115
655 | f 63 116 118
656 | f 119 60 117
657 | f 63 119 117
658 | f 116 60 118
659 | f 63 118 119
660 | f 118 60 119
661 | f 112 1 140
662 | f 1 112 151
663 | f 1 162 140
664 | f 112 140 173
665 | f 151 112 184
666 | f 195 1 151
667 | f 162 13 140
668 | f 1 24 162
669 | f 140 35 173
670 | f 112 173 57
671 | f 184 112 57
672 | f 184 68 151
673 | f 79 1 195
674 | f 195 151 101
675 | f 113 140 13
676 | f 120 13 162
677 | f 24 121 162
678 | f 79 24 1
679 | f 173 35 122
680 | f 140 68 35
681 | f 123 57 173
682 | f 57 68 184
683 | f 151 68 124
684 | f 125 79 195
685 | f 151 124 101
686 | f 101 126 195
687 | f 113 127 140
688 | f 113 13 127
689 | f 120 128 13
690 | f 121 120 162
691 | f 130 121 24
692 | f 79 131 24
693 | f 122 35 133
694 | f 123 173 122
695 | f 68 133 35
696 | f 124 68 140
697 | f 123 122 57
698 | f 57 133 68
699 | f 136 79 125
700 | f 137 125 195
701 | f 101 124 138
702 | f 139 195 126
703 | f 141 126 101
704 | f 127 124 140
705 | f 128 127 13
706 | f 138 128 120
707 | f 121 142 120
708 | f 121 130 143
709 | f 144 130 24
710 | f 145 24 131
711 | f 79 136 131
712 | f 122 133 57
713 | f 136 125 147
714 | f 148 125 137
715 | f 195 139 137
716 | f 138 124 128
717 | f 149 101 138
718 | f 150 139 126
719 | f 141 152 126
720 | f 153 141 101
721 | f 128 124 127
722 | f 154 138 120
723 | f 154 120 142
724 | f 155 142 121
725 | f 156 143 130
726 | f 143 155 121
727 | f 144 156 130
728 | f 157 144 24
729 | f 145 158 24
730 | f 145 131 153
731 | f 147 131 136
732 | f 159 147 125
733 | f 160 125 148
734 | f 161 148 137
735 | f 153 137 139
736 | f 163 101 149
737 | f 138 164 149
738 | f 139 150 165
739 | f 150 126 152
740 | f 141 153 152
741 | f 153 101 166
742 | f 167 138 154
743 | f 154 142 167
744 | f 155 167 142
745 | f 168 143 156
746 | f 169 155 143
747 | f 157 156 144
748 | f 158 157 24
749 | f 170 158 145
750 | f 137 153 131
751 | f 153 171 145
752 | f 159 131 147
753 | f 159 125 160
754 | f 175 161 137
755 | f 165 153 139
756 | f 176 101 163
757 | f 163 149 164
758 | f 164 138 167
759 | f 152 165 150
760 | f 152 153 165
761 | f 166 101 176
762 | f 177 153 166
763 | f 155 178 167
764 | f 179 143 168
765 | f 156 180 168
766 | f 178 155 169
767 | f 181 169 143
768 | f 158 156 157
769 | f 170 180 158
770 | f 182 170 145
771 | f 131 183 137
772 | f 171 185 145
773 | f 171 153 186
774 | f 187 131 159
775 | f 188 159 160
776 | f 192 175 137
777 | f 176 163 193
778 | f 193 163 164
779 | f 178 164 167
780 | f 166 176 193
781 | f 194 153 177
782 | f 177 166 193
783 | f 196 143 179
784 | f 197 179 168
785 | f 197 168 180
786 | f 156 158 180
787 | f 178 169 198
788 | f 181 198 169
789 | f 199 181 143
790 | f 197 180 170
791 | f 182 200 170
792 | f 185 182 145
793 | f 183 201 137
794 | f 183 131 202
795 | f 186 153 204
796 | f 3 131 187
797 | f 4 187 159
798 | f 5 159 188
799 | f 11 192 137
800 | f 12 193 164
801 | f 198 164 178
802 | f 14 153 194
803 | f 177 15 194
804 | f 15 177 193
805 | f 16 143 196
806 | f 179 200 196
807 | f 170 179 197
808 | f 198 181 17
809 | f 199 17 181
810 | f 18 199 143
811 | f 179 170 200
812 | f 16 200 182
813 | f 19 182 185
814 | f 201 20 137
815 | f 202 131 21
816 | f 204 153 14
817 | f 25 131 3
818 | f 27 4 159
819 | f 28 159 5
820 | f 20 11 137
821 | f 36 193 12
822 | f 194 12 164
823 | f 17 164 198
824 | f 14 194 37
825 | f 12 194 15
826 | f 36 15 193
827 | f 182 143 16
828 | f 16 196 200
829 | f 199 164 17
830 | f 18 38 199
831 | f 39 18 143
832 | f 19 40 182
833 | f 204 19 185
834 | f 21 131 41
835 | f 42 204 14
836 | f 43 131 25
837 | f 47 27 159
838 | f 47 159 28
839 | f 15 36 12
840 | f 37 194 164
841 | f 42 14 37
842 | f 51 143 182
843 | f 38 164 199
844 | f 52 38 18
845 | f 53 18 39
846 | f 54 39 143
847 | f 55 182 40
848 | f 56 40 19
849 | f 19 204 143
850 | f 41 131 58
851 | f 60 204 42
852 | f 48 131 43
853 | f 63 37 164
854 | f 42 37 60
855 | f 64 143 51
856 | f 51 182 55
857 | f 65 164 38
858 | f 66 38 52
859 | f 67 53 39
860 | f 70 54 143
861 | f 71 56 19
862 | f 143 204 164
863 | f 143 64 19
864 | f 58 131 48
865 | f 164 204 60
866 | f 60 63 164
867 | f 72 60 37
868 | f 73 164 65
869 | f 75 38 66
870 | f 53 66 52
871 | f 76 53 67
872 | f 69 67 39
873 | f 54 77 69
874 | f 70 78 54
875 | f 80 70 143
876 | f 81 71 19
877 | f 164 82 143
878 | f 64 83 19
879 | f 63 60 85
880 | f 84 60 72
881 | f 86 164 73
882 | f 73 65 87
883 | f 65 74 88
884 | f 74 38 75
885 | f 89 66 53
886 | f 89 53 76
887 | f 91 67 69
888 | f 92 69 77
889 | f 93 77 54
890 | f 78 94 54
891 | f 95 80 143
892 | f 96 81 19
893 | f 82 97 143
894 | f 82 164 98
895 | f 83 99 19
896 | f 85 60 102
897 | f 100 60 84
898 | f 103 164 86
899 | f 87 65 104
900 | f 105 88 74
901 | f 106 65 88
902 | f 105 74 75
903 | f 92 91 69
904 | f 94 93 54
905 | f 107 95 143
906 | f 108 96 19
907 | f 97 107 143
908 | f 98 164 109
909 | f 99 110 19
910 | f 102 60 114
911 | f 111 60 100
912 | f 109 164 103
913 | f 104 65 106
914 | f 115 108 19
915 | f 98 107 97
916 | f 107 98 109
917 | f 110 115 19
918 | f 114 60 117
919 | f 116 60 111
920 | f 117 60 119
921 | f 118 60 116
922 | f 119 60 118
923 | f 171 203 185
924 | f 186 205 171
925 | f 22 185 203
926 | f 205 203 171
927 | f 186 204 22
928 | f 205 186 23
929 | f 185 22 204
930 | f 23 186 22
931 | f 52 18 53
932 | f 69 39 54
933 | f 40 64 55
934 | f 56 64 40
935 | f 37 63 72
936 | f 51 55 64
937 | f 38 74 65
938 | f 71 64 56
939 | f 84 72 63
940 | f 75 66 67
941 | f 67 66 76
942 | f 70 73 78
943 | f 80 86 70
944 | f 81 64 71
945 | f 83 64 99
946 | f 100 84 63
947 | f 85 102 63
948 | f 86 73 70
949 | f 73 87 78
950 | f 91 75 67
951 | f 89 76 66
952 | f 77 88 92
953 | f 93 106 77
954 | f 78 87 94
955 | f 103 86 80
956 | f 95 103 80
957 | f 96 64 81
958 | f 98 97 82
959 | f 99 64 110
960 | f 111 100 63
961 | f 102 114 63
962 | f 87 104 94
963 | f 88 105 92
964 | f 106 88 77
965 | f 105 75 91
966 | f 92 105 91
967 | f 104 106 93
968 | f 94 104 93
969 | f 109 103 95
970 | f 107 109 95
971 | f 108 64 96
972 | f 110 64 115
973 | f 116 111 63
974 | f 114 117 63
975 | f 115 64 108
976 | f 118 116 63
977 | f 117 119 63
978 | f 119 118 63
979 | f 172 7 189
980 | f 174 8 172
981 | f 190 9 174
982 | f 189 30 6
983 | f 7 30 189
984 | f 8 7 172
985 | f 9 8 174
986 | f 31 9 190
987 | f 191 31 190
988 | f 10 32 191
989 | f 6 45 29
990 | f 30 45 6
991 | f 32 31 191
992 | f 49 32 10
993 | f 33 49 10
994 | f 44 29 26
995 | f 45 26 29
996 | f 61 49 33
997 | f 34 61 33
998 | f 50 59 34
999 | f 59 61 34
1000 | f 62 59 50
1001 | f 22 203 23
1002 | f 205 23 203
1003 |
1004 |
--------------------------------------------------------------------------------
/sample/1/obj_pc.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/sample/1/obj_pc.npy
--------------------------------------------------------------------------------
/sample/10/hand_pc.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/sample/10/hand_pc.npy
--------------------------------------------------------------------------------
/sample/10/obj_mesh.obj:
--------------------------------------------------------------------------------
1 | # https://github.com/mikedh/trimesh
2 | v 0.08772740 -0.01949496 -0.65831003
3 | v 0.01572647 0.00115952 -0.56294781
4 | v 0.06681306 -0.00701846 -0.66988796
5 | v 0.06551843 -0.00697651 -0.67017956
6 | v 0.06253814 -0.00708608 -0.67083837
7 | v 0.06059365 -0.00796841 -0.66735048
8 | v 0.05967317 -0.01411331 -0.66716954
9 | v 0.06148906 -0.01672934 -0.66659920
10 | v 0.06363484 -0.01781584 -0.66991824
11 | v 0.06728650 -0.01683940 -0.66658635
12 | v 0.06861527 -0.00796844 -0.66684781
13 | v 0.06750946 -0.00725531 -0.66713977
14 | v 0.06227375 -0.01454217 -0.59483643
15 | v 0.06805580 -0.00740914 -0.66958579
16 | v 0.06425953 -0.00728672 -0.67044134
17 | v 0.06312220 -0.00792761 -0.67065495
18 | v 0.05978427 -0.00935060 -0.66744411
19 | v 0.06041715 -0.01554528 -0.66691312
20 | v 0.06527903 -0.01726156 -0.66958597
21 | v 0.06403626 -0.01687089 -0.66988798
22 | v 0.06657360 -0.01730336 -0.66929421
23 | v 0.06602756 -0.01714945 -0.66684813
24 | v 0.06812130 -0.01243295 -0.66667735
25 | v 0.08089832 0.00985444 -0.66834168
26 | v 0.06706318 -0.01423885 -0.66679994
27 | v 0.06797518 -0.01100363 -0.66679983
28 | v 0.06916126 -0.00812219 -0.66929388
29 | v 0.06626694 -0.00686424 -0.66744188
30 | v 0.06371318 -0.00713289 -0.66799532
31 | v 0.06218437 -0.00885539 -0.67080584
32 | v 0.06141302 -0.00819927 -0.67101945
33 | v 0.05934534 -0.01090651 -0.66744420
34 | v 0.06123652 -0.01577615 -0.67058209
35 | v 0.05930749 -0.01253131 -0.66735037
36 | v 0.04102978 -0.00596968 -0.67624953
37 | v 0.06230843 -0.01696021 -0.67026817
38 | v 0.06293073 -0.01615769 -0.67017972
39 | v 0.06645518 -0.01458154 -0.66691427
40 | v 0.06813807 -0.01170889 -0.66671911
41 | v 0.06792570 -0.01312671 -0.66667721
42 | v 0.06756504 -0.01374260 -0.66671906
43 | v 0.06497208 -0.00682269 -0.66773354
44 | v 0.06163807 -0.00870172 -0.66835983
45 | v 0.06060359 -0.00958114 -0.67111324
46 | v 0.06016474 -0.01113737 -0.67111333
47 | v 0.02706954 -0.02229168 -0.55893912
48 | v 0.06049266 -0.01434414 -0.67083832
49 | v 0.06348992 -0.01671706 -0.66744196
50 | v 0.06238439 -0.01600386 -0.66773370
51 | v 0.06578197 -0.01474735 -0.66705399
52 | v 0.06764360 -0.01036525 -0.66691411
53 | v 0.06716569 -0.00983754 -0.66705388
54 | v 0.06590990 -0.00924728 -0.66737154
55 | v 0.06521748 -0.00922467 -0.66752771
56 | v 0.06257617 -0.00777370 -0.66820887
57 | v 0.06096354 -0.00985338 -0.66843813
58 | v 0.05994958 -0.01483393 -0.59095096
59 | v 0.06150958 -0.01000729 -0.67088420
60 | v 0.06012691 -0.01276216 -0.67101967
61 | v 0.06203723 -0.01517070 -0.67044143
62 | v 0.06473269 -0.01710773 -0.66713995
63 | v 0.06149119 -0.01501679 -0.66799536
64 | v 0.06508944 -0.01472510 -0.66721003
65 | v 0.06454434 -0.00939063 -0.66766759
66 | v 0.06657458 -0.00945598 -0.66720993
67 | v 0.06393613 -0.00973338 -0.66778163
68 | v 0.06343463 -0.01022953 -0.66786277
69 | v 0.06237348 -0.01515511 -0.59931262
70 | v 0.06286142 -0.01226329 -0.66786259
71 | v 0.06114399 -0.01130437 -0.67088430
72 | v 0.06141737 -0.01397719 -0.67065503
73 | v 0.06335604 -0.01360689 -0.66766756
74 | v 0.06059795 -0.01115046 -0.66843823
75 | v 0.06307361 -0.01084551 -0.66790437
76 | v 0.06287822 -0.01153922 -0.66790452
77 | v 0.06302428 -0.01296855 -0.66778171
78 | v 0.06111252 -0.01265855 -0.67080595
79 | v 0.06087103 -0.01382336 -0.66820901
80 | v 0.07871153 0.00616057 -0.66419335
81 | v 0.06442506 -0.01451616 -0.66737174
82 | v 0.06383376 -0.01413464 -0.66752766
83 | v 0.06056628 -0.01250453 -0.66835990
84 | v 0.05089445 -0.00784628 -0.66986809
85 | v 0.04844247 -0.01361772 -0.67005241
86 | v 0.04690148 -0.00943098 -0.67066013
87 | v 0.05243560 -0.01203299 -0.66926034
88 | v 0.06682825 -0.03682318 -0.59417639
89 | v 0.06472933 -0.03898412 -0.59261041
90 | v 0.06714412 -0.03834073 -0.59430225
91 | v 0.07964496 0.00589743 -0.66837241
92 | v 0.06441309 -0.03746667 -0.59248431
93 | v 0.06458776 -0.04014229 -0.59325292
94 | v 0.06458129 -0.03990108 -0.59855363
95 | v 0.06318142 -0.02096442 -0.59335003
96 | v 0.06216650 -0.04054447 -0.59686180
97 | v 0.06697886 -0.03969296 -0.59505238
98 | v 0.06549940 -0.04059377 -0.59750682
99 | v 0.06386888 -0.03862508 -0.59908522
100 | v 0.06076681 -0.02160776 -0.59165832
101 | v 0.06145409 -0.03926847 -0.59739339
102 | v 0.04834025 -0.02582864 -0.67336731
103 | v 0.06456406 -0.04033635 -0.59336055
104 | v 0.06308448 -0.04123719 -0.59581518
105 | v 0.05422730 -0.02558937 -0.60250652
106 | v 0.05664191 -0.02494603 -0.60419822
107 | v 0.06396215 -0.04116096 -0.59453346
108 | v 0.06637677 -0.04051762 -0.59622517
109 | v 0.05805791 -0.04257645 -0.59229338
110 | v 0.05955494 -0.04124028 -0.59503212
111 | v 0.06047274 -0.04193305 -0.59398537
112 | v 0.05714015 -0.04188367 -0.59334029
113 | v 0.06619987 -0.01182487 -0.55085882
114 | v 0.02633147 -0.02764878 -0.55876680
115 | v 0.06197606 -0.04083815 -0.59142326
116 | v 0.05893546 -0.04250025 -0.59101185
117 | v 0.05970267 -0.04032341 -0.58908896
118 | v 0.05884238 -0.03996432 -0.59556374
119 | v 0.06195248 -0.04103218 -0.59153071
120 | v 0.06211762 -0.03967998 -0.59078076
121 | v 0.06135042 -0.04185682 -0.59270365
122 | v 0.05953771 -0.04167556 -0.58983903
123 | v 0.05938674 -0.03880587 -0.58896279
124 | v 0.05642768 -0.04060769 -0.59387155
125 | v 0.04557088 -0.02068149 -0.59477195
126 | v 0.06180184 -0.03816240 -0.59065456
127 | v 0.05161556 -0.02628523 -0.60067671
128 | v 0.05815507 -0.02230363 -0.58982851
129 | v 0.05574025 -0.02294703 -0.58813652
130 | v 0.04920073 -0.02692863 -0.59898472
131 | v 0.05602391 -0.01595199 -0.58869794
132 | v 0.09760955 -0.02189925 -0.55016830
133 | v 0.08054614 -0.02635686 -0.54778370
134 | v 0.08485427 -0.02529773 -0.54123182
135 | v 0.08837327 -0.02427142 -0.55326747
136 | v 0.08612387 -0.04781437 -0.54798260
137 | v 0.10318727 -0.04335676 -0.55036720
138 | v 0.09330142 -0.02295838 -0.55672018
139 | v 0.08576171 -0.02496724 -0.55143778
140 | v 0.06449269 -0.02057038 -0.59396135
141 | v 0.09043199 -0.04675524 -0.54143072
142 | v 0.09133944 -0.04642475 -0.55163668
143 | v 0.09887915 -0.04441589 -0.55691908
144 | v 0.05771630 -0.03713843 -0.59609635
145 | v 0.09395099 -0.04572894 -0.55346636
146 | v 0.06032804 -0.03644257 -0.59792616
147 | v 0.05849524 -0.04013500 -0.59612412
148 | v 0.06022056 -0.04219148 -0.59444359
149 | v 0.06283227 -0.04149562 -0.59627324
150 | v 0.06110698 -0.03943913 -0.59795393
151 | v 0.06459248 -0.02118331 -0.59843785
152 | v 0.05923464 -0.04145338 -0.59557013
153 | v 0.06184638 -0.04075752 -0.59739994
154 | v 0.06022190 -0.01650851 -0.60318055
155 | v 0.07066904 -0.01446788 -0.66469139
156 | v 0.05022351 -0.03438033 -0.66574901
157 | v 0.04450560 -0.00804955 -0.67534240
158 | v 0.02869602 -0.02671039 -0.55829780
159 | v 0.06383533 -0.01276326 -0.55132782
160 | v 0.04567036 -0.02129450 -0.59924851
161 | v 0.04772216 -0.01932817 -0.59090407
162 | v 0.05154855 -0.01759705 -0.58868082
163 | v 0.05824294 -0.02198018 -0.58782333
164 | v 0.06216858 -0.02086212 -0.59007619
165 | v 0.06244090 -0.02253671 -0.60230579
166 | v 0.05639585 -0.01823972 -0.60540375
167 | v 0.06985961 -0.01584974 -0.66478518
168 | v 0.07110773 -0.01291169 -0.66469133
169 | v 0.05070498 -0.02489020 -0.67289844
170 | v 0.09009187 -0.01855642 -0.65784088
171 | v 0.05258802 -0.03344195 -0.66527986
172 | v 0.05143959 -0.02915786 -0.66806159
173 | v 0.04788962 -0.02732263 -0.59837336
174 | v 0.04778989 -0.02670969 -0.59389718
175 | v 0.05376752 -0.02362509 -0.58780605
176 | v 0.05021364 -0.02703091 -0.60225887
177 | v 0.05861485 -0.02426791 -0.60452898
178 | v 0.06873449 -0.01696294 -0.66496626
179 | v 0.07148844 -0.01469874 -0.66836053
180 | v 0.07114544 -0.01128710 -0.66478502
181 | v 0.05559498 0.01698363 -0.55503980
182 | v 0.04357223 -0.00778672 -0.67116318
183 | v 0.04799452 -0.02100291 -0.60313367
184 | v 0.04994147 -0.02535629 -0.59002924
185 | v 0.05413943 -0.02591282 -0.60451170
186 | v 0.05192011 -0.01988470 -0.60538653
187 | v 0.07067898 -0.01608061 -0.66845415
188 | v 0.07192713 -0.01314255 -0.66836046
189 | v 0.07078000 -0.00970503 -0.66496631
190 | v 0.06459338 -0.00571432 -0.66659921
191 | v 0.06737020 -0.01773178 -0.66522268
192 | v 0.06220885 -0.00834455 -0.55196913
193 | v 0.06955389 -0.01719380 -0.66863539
194 | v 0.07058249 -0.01427274 -0.66858956
195 | v 0.07097955 -0.01162148 -0.66866782
196 | v 0.07196487 -0.01151794 -0.66845432
197 | v 0.07003599 -0.00827307 -0.66522257
198 | v 0.06614711 -0.00576451 -0.66624878
199 | v 0.06308302 -0.00608638 -0.66691281
200 | v 0.04682874 -0.00941043 -0.67033392
201 | v 0.06585964 -0.01810406 -0.66553667
202 | v 0.06818960 -0.01796263 -0.66889181
203 | v 0.08820884 -0.01000468 -0.66545946
204 | v 0.06990762 -0.01542449 -0.66866776
205 | v 0.07094773 -0.01297558 -0.66858952
206 | v 0.07067472 -0.01030283 -0.66881890
207 | v 0.07085552 -0.00850389 -0.66889151
208 | v 0.07159940 -0.00993589 -0.66863544
209 | v 0.06896424 -0.00708897 -0.66553646
210 | v 0.06763781 -0.00623317 -0.66588654
211 | v 0.06541276 -0.00594535 -0.67026803
212 | v 0.06390242 -0.00631724 -0.67058195
213 | v 0.06171874 -0.00685522 -0.66716923
214 | v 0.08657875 -0.01521093 -0.66109147
215 | v 0.05082151 -0.00782562 -0.66954191
216 | v 0.04836977 -0.01359733 -0.66972620
217 | v 0.06430623 -0.01805395 -0.66588687
218 | v 0.06896986 -0.01635242 -0.66881882
219 | v 0.07040169 -0.01282167 -0.66614345
220 | v 0.07003610 -0.01411875 -0.66614354
221 | v 0.06978361 -0.00731983 -0.66920543
222 | v 0.07012869 -0.01014893 -0.66637282
223 | v 0.07043320 -0.01146765 -0.66622180
224 | v 0.06696654 -0.00599536 -0.66991808
225 | v 0.08584426 -0.01094308 -0.66592830
226 | v 0.05236289 -0.01201260 -0.66893413
227 | v 0.06281544 -0.01758498 -0.66624911
228 | v 0.06512560 -0.01828482 -0.66955585
229 | v 0.06667904 -0.01833492 -0.66920580
230 | v 0.06783254 -0.01699331 -0.66903242
231 | v 0.06842348 -0.01619843 -0.66637280
232 | v 0.06936128 -0.01527066 -0.66622175
233 | v 0.07005453 -0.00910941 -0.66903239
234 | v 0.06950849 -0.00895550 -0.66658632
235 | v 0.06845719 -0.00646404 -0.66955552
236 | f 1 112 158
237 | f 112 1 169
238 | f 158 112 1
239 | f 169 1 112
240 | f 180 158 112
241 | f 112 158 180
242 | f 191 1 158
243 | f 158 1 191
244 | f 1 202 169
245 | f 169 202 1
246 | f 169 180 112
247 | f 112 180 169
248 | f 158 180 191
249 | f 191 180 158
250 | f 1 191 213
251 | f 213 191 1
252 | f 202 1 224
253 | f 224 1 202
254 | f 180 169 202
255 | f 202 169 180
256 | f 191 180 2
257 | f 2 180 191
258 | f 191 13 213
259 | f 213 13 191
260 | f 1 213 224
261 | f 224 213 1
262 | f 224 24 202
263 | f 202 24 224
264 | f 180 202 24
265 | f 24 202 180
266 | f 180 35 2
267 | f 2 35 180
268 | f 191 2 46
269 | f 46 2 191
270 | f 13 191 57
271 | f 57 191 13
272 | f 213 13 68
273 | f 68 13 213
274 | f 224 213 79
275 | f 79 213 224
276 | f 24 224 90
277 | f 90 224 24
278 | f 35 180 24
279 | f 24 180 35
280 | f 101 2 35
281 | f 35 2 101
282 | f 46 2 113
283 | f 113 2 46
284 | f 46 124 191
285 | f 191 124 46
286 | f 57 191 130
287 | f 130 191 57
288 | f 57 139 13
289 | f 13 139 57
290 | f 13 150 68
291 | f 68 150 13
292 | f 213 68 153
293 | f 153 68 213
294 | f 213 154 79
295 | f 79 154 213
296 | f 224 79 90
297 | f 90 79 224
298 | f 24 90 35
299 | f 35 90 24
300 | f 155 2 101
301 | f 101 2 155
302 | f 35 156 101
303 | f 101 156 35
304 | f 2 155 113
305 | f 113 155 2
306 | f 113 157 46
307 | f 46 157 113
308 | f 124 46 159
309 | f 159 46 124
310 | f 191 124 160
311 | f 160 124 191
312 | f 130 191 161
313 | f 161 191 130
314 | f 162 57 130
315 | f 130 57 162
316 | f 139 57 163
317 | f 163 57 139
318 | f 150 13 139
319 | f 139 13 150
320 | f 164 68 150
321 | f 150 68 164
322 | f 68 164 153
323 | f 153 164 68
324 | f 213 153 165
325 | f 165 153 213
326 | f 154 213 166
327 | f 166 213 154
328 | f 79 154 167
329 | f 167 154 79
330 | f 79 156 90
331 | f 35 90 156
332 | f 156 90 35
333 | f 168 155 101
334 | f 101 155 168
335 | f 101 156 168
336 | f 168 156 101
337 | f 155 157 113
338 | f 113 157 155
339 | f 170 46 157
340 | f 157 46 170
341 | f 159 46 171
342 | f 171 46 159
343 | f 172 124 159
344 | f 159 124 172
345 | f 173 160 124
346 | f 124 160 173
347 | f 191 160 161
348 | f 161 160 191
349 | f 174 130 161
350 | f 161 130 174
351 | f 57 162 163
352 | f 163 162 57
353 | f 130 174 162
354 | f 162 174 130
355 | f 163 173 139
356 | f 139 173 163
357 | f 139 172 150
358 | f 150 172 139
359 | f 150 175 164
360 | f 164 175 150
361 | f 176 153 164
362 | f 164 153 176
363 | f 153 176 165
364 | f 165 176 153
365 | f 213 165 171
366 | f 171 165 213
367 | f 166 213 177
368 | f 177 213 166
369 | f 154 178 166
370 | f 154 167 178
371 | f 79 167 179
372 | f 179 167 79
373 | f 156 79 181
374 | f 155 168 170
375 | f 170 168 155
376 | f 181 168 156
377 | f 156 168 181
378 | f 157 155 170
379 | f 170 155 157
380 | f 46 170 171
381 | f 171 170 46
382 | f 159 171 182
383 | f 182 171 159
384 | f 124 172 173
385 | f 173 172 124
386 | f 159 175 172
387 | f 172 175 159
388 | f 160 173 183
389 | f 183 173 160
390 | f 183 161 160
391 | f 160 161 183
392 | f 161 183 174
393 | f 174 183 161
394 | f 162 183 163
395 | f 163 183 162
396 | f 183 162 174
397 | f 174 162 183
398 | f 163 183 173
399 | f 173 183 163
400 | f 139 173 172
401 | f 172 173 139
402 | f 150 172 175
403 | f 175 172 150
404 | f 164 175 184
405 | f 184 175 164
406 | f 164 184 176
407 | f 176 184 164
408 | f 184 165 176
409 | f 176 165 184
410 | f 185 171 165
411 | f 165 171 185
412 | f 177 213 171
413 | f 171 213 177
414 | f 166 186 177
415 | f 186 166 178
416 | f 187 178 167
417 | f 167 179 187
418 | f 79 179 188
419 | f 188 179 79
420 | f 79 189 181
421 | f 181 189 79
422 | f 171 170 168
423 | f 168 170 171
424 | f 171 168 181
425 | f 181 168 171
426 | f 182 171 185
427 | f 185 171 182
428 | f 175 159 182
429 | f 182 159 175
430 | f 175 185 184
431 | f 184 185 175
432 | f 165 184 185
433 | f 185 184 165
434 | f 177 171 190
435 | f 190 171 177
436 | f 192 177 186
437 | f 186 178 193
438 | f 178 187 194
439 | f 195 187 179
440 | f 179 188 195
441 | f 196 79 188
442 | f 188 79 196
443 | f 189 79 197
444 | f 197 79 189
445 | f 181 189 198
446 | f 198 189 181
447 | f 181 199 171
448 | f 171 199 181
449 | f 185 175 182
450 | f 182 175 185
451 | f 190 171 200
452 | f 200 171 190
453 | f 190 177 201
454 | f 192 201 177
455 | f 192 186 203
456 | f 193 178 204
457 | f 203 186 193
458 | f 194 187 205
459 | f 204 178 194
460 | f 187 195 206
461 | f 207 195 188
462 | f 208 79 196
463 | f 196 79 208
464 | f 188 196 207
465 | f 197 79 209
466 | f 209 79 197
467 | f 189 210 197
468 | f 198 211 189
469 | f 181 198 212
470 | f 212 198 181
471 | f 181 214 199
472 | f 199 214 181
473 | f 215 171 199
474 | f 199 171 215
475 | f 200 171 216
476 | f 216 171 200
477 | f 190 201 200
478 | f 201 192 217
479 | f 217 192 203
480 | f 204 218 193
481 | f 193 219 203
482 | f 205 187 220
483 | f 205 221 194
484 | f 194 222 204
485 | f 207 206 195
486 | f 220 187 206
487 | f 209 79 208
488 | f 208 79 209
489 | f 196 208 206
490 | f 206 207 196
491 | f 197 223 209
492 | f 223 197 210
493 | f 210 189 211
494 | f 198 212 211
495 | f 181 212 214
496 | f 214 212 181
497 | f 225 171 215
498 | f 215 171 225
499 | f 216 171 226
500 | f 226 171 216
501 | f 216 200 227
502 | f 228 200 201
503 | f 229 201 217
504 | f 217 203 230
505 | f 219 193 218
506 | f 218 204 222
507 | f 231 203 219
508 | f 205 220 232
509 | f 222 194 221
510 | f 221 205 233
511 | f 220 206 208
512 | f 208 209 220
513 | f 234 209 223
514 | f 210 3 223
515 | f 211 4 210
516 | f 5 211 212
517 | f 214 212 6
518 | f 6 212 214
519 | f 7 171 225
520 | f 225 171 7
521 | f 226 171 8
522 | f 8 171 226
523 | f 226 216 9
524 | f 228 227 200
525 | f 227 9 216
526 | f 228 201 229
527 | f 229 217 10
528 | f 231 230 203
529 | f 230 10 217
530 | f 219 218 11
531 | f 218 222 233
532 | f 231 219 12
533 | f 234 232 220
534 | f 232 233 205
535 | f 221 233 222
536 | f 234 220 209
537 | f 223 14 234
538 | f 3 14 223
539 | f 4 3 210
540 | f 15 4 211
541 | f 5 16 211
542 | f 212 6 5
543 | f 214 6 17
544 | f 17 6 214
545 | f 18 171 7
546 | f 7 171 18
547 | f 214 7 225
548 | f 225 7 214
549 | f 8 171 18
550 | f 18 171 8
551 | f 226 9 8
552 | f 227 228 19
553 | f 9 227 20
554 | f 21 228 229
555 | f 10 22 229
556 | f 230 231 23
557 | f 10 230 25
558 | f 11 218 233
559 | f 12 219 11
560 | f 26 231 12
561 | f 27 232 234
562 | f 233 232 11
563 | f 14 27 234
564 | f 14 3 12
565 | f 3 4 28
566 | f 15 29 4
567 | f 16 15 211
568 | f 30 16 5
569 | f 31 5 6
570 | f 6 17 31
571 | f 214 17 32
572 | f 32 17 214
573 | f 18 33 7
574 | f 214 34 7
575 | f 7 34 214
576 | f 8 36 18
577 | f 36 8 9
578 | f 19 228 21
579 | f 20 227 19
580 | f 37 9 20
581 | f 21 229 22
582 | f 22 10 38
583 | f 23 231 39
584 | f 40 230 23
585 | f 25 230 41
586 | f 38 10 25
587 | f 11 27 12
588 | f 39 231 26
589 | f 12 28 26
590 | f 27 11 232
591 | f 14 12 27
592 | f 28 12 3
593 | f 42 28 4
594 | f 42 4 29
595 | f 15 16 29
596 | f 30 43 16
597 | f 31 30 5
598 | f 44 31 17
599 | f 32 45 17
600 | f 214 32 34
601 | f 34 32 214
602 | f 47 7 33
603 | f 33 18 36
604 | f 7 47 34
605 | f 36 9 37
606 | f 21 22 19
607 | f 20 19 48
608 | f 37 20 49
609 | f 50 22 38
610 | f 23 39 51
611 | f 41 230 40
612 | f 40 23 52
613 | f 25 41 53
614 | f 38 25 54
615 | f 26 51 39
616 | f 51 26 28
617 | f 42 52 28
618 | f 29 54 42
619 | f 55 29 16
620 | f 55 16 43
621 | f 43 30 56
622 | f 58 30 31
623 | f 44 58 31
624 | f 44 17 45
625 | f 45 32 59
626 | f 34 59 32
627 | f 47 33 59
628 | f 33 36 45
629 | f 59 34 47
630 | f 60 36 37
631 | f 61 19 22
632 | f 61 48 19
633 | f 48 49 20
634 | f 49 62 37
635 | f 63 22 50
636 | f 50 38 64
637 | f 52 23 51
638 | f 41 40 65
639 | f 65 40 52
640 | f 53 41 65
641 | f 54 25 53
642 | f 64 38 54
643 | f 52 51 28
644 | f 65 52 42
645 | f 54 53 42
646 | f 64 54 29
647 | f 55 66 29
648 | f 43 67 55
649 | f 58 56 30
650 | f 56 69 43
651 | f 70 58 44
652 | f 45 70 44
653 | f 59 33 45
654 | f 36 71 45
655 | f 71 36 60
656 | f 60 37 62
657 | f 61 22 63
658 | f 48 61 72
659 | f 49 48 56
660 | f 62 49 73
661 | f 63 50 66
662 | f 66 50 64
663 | f 53 65 42
664 | f 66 64 29
665 | f 67 66 55
666 | f 74 67 43
667 | f 58 70 56
668 | f 69 75 43
669 | f 76 69 56
670 | f 77 70 45
671 | f 71 77 45
672 | f 71 60 78
673 | f 62 78 60
674 | f 80 61 63
675 | f 72 61 81
676 | f 76 48 72
677 | f 48 76 56
678 | f 73 49 56
679 | f 82 62 73
680 | f 67 63 66
681 | f 74 80 67
682 | f 75 74 43
683 | f 73 56 70
684 | f 69 72 75
685 | f 76 72 69
686 | f 70 77 73
687 | f 77 71 82
688 | f 78 82 71
689 | f 78 62 82
690 | f 81 61 80
691 | f 80 63 67
692 | f 72 81 75
693 | f 82 73 77
694 | f 81 80 74
695 | f 75 81 74
696 | f 90 156 79
697 | f 181 79 156
698 | f 83 199 214
699 | f 214 199 83
700 | f 199 84 215
701 | f 215 84 199
702 | f 199 83 85
703 | f 85 83 199
704 | f 214 86 83
705 | f 83 86 214
706 | f 215 86 225
707 | f 225 86 215
708 | f 84 199 85
709 | f 85 199 84
710 | f 86 215 84
711 | f 84 215 86
712 | f 83 84 85
713 | f 85 84 83
714 | f 86 214 225
715 | f 225 214 86
716 | f 84 83 86
717 | f 86 83 84
718 | f 166 178 154
719 | f 178 167 154
720 | f 177 186 166
721 | f 178 166 186
722 | f 167 178 187
723 | f 187 179 167
724 | f 186 177 192
725 | f 193 178 186
726 | f 194 187 178
727 | f 179 187 195
728 | f 195 188 179
729 | f 201 177 190
730 | f 177 201 192
731 | f 203 186 192
732 | f 204 178 193
733 | f 193 186 203
734 | f 205 187 194
735 | f 194 178 204
736 | f 206 195 187
737 | f 188 195 207
738 | f 207 196 188
739 | f 197 210 189
740 | f 189 211 198
741 | f 200 201 190
742 | f 217 192 201
743 | f 203 192 217
744 | f 193 218 204
745 | f 203 219 193
746 | f 220 187 205
747 | f 194 221 205
748 | f 204 222 194
749 | f 195 206 207
750 | f 206 187 220
751 | f 206 208 196
752 | f 196 207 206
753 | f 209 223 197
754 | f 210 197 223
755 | f 211 189 210
756 | f 211 212 198
757 | f 227 200 216
758 | f 201 200 228
759 | f 217 201 229
760 | f 230 203 217
761 | f 218 193 219
762 | f 222 204 218
763 | f 219 203 231
764 | f 232 220 205
765 | f 221 194 222
766 | f 233 205 221
767 | f 208 206 220
768 | f 220 209 208
769 | f 223 209 234
770 | f 223 3 210
771 | f 210 4 211
772 | f 212 211 5
773 | f 9 216 226
774 | f 200 227 228
775 | f 216 9 227
776 | f 229 201 228
777 | f 10 217 229
778 | f 203 230 231
779 | f 217 10 230
780 | f 11 218 219
781 | f 233 222 218
782 | f 12 219 231
783 | f 220 232 234
784 | f 205 233 232
785 | f 222 233 221
786 | f 209 220 234
787 | f 234 14 223
788 | f 223 14 3
789 | f 210 3 4
790 | f 211 4 15
791 | f 211 16 5
792 | f 5 6 212
793 | f 8 9 226
794 | f 19 228 227
795 | f 20 227 9
796 | f 229 228 21
797 | f 229 22 10
798 | f 23 231 230
799 | f 25 230 10
800 | f 233 218 11
801 | f 11 219 12
802 | f 12 231 26
803 | f 234 232 27
804 | f 11 232 233
805 | f 234 27 14
806 | f 12 3 14
807 | f 28 4 3
808 | f 4 29 15
809 | f 211 15 16
810 | f 5 16 30
811 | f 6 5 31
812 | f 31 17 6
813 | f 7 33 18
814 | f 18 36 8
815 | f 9 8 36
816 | f 21 228 19
817 | f 19 227 20
818 | f 20 9 37
819 | f 22 229 21
820 | f 38 10 22
821 | f 39 231 23
822 | f 23 230 40
823 | f 41 230 25
824 | f 25 10 38
825 | f 12 27 11
826 | f 26 231 39
827 | f 26 28 12
828 | f 232 11 27
829 | f 27 12 14
830 | f 3 12 28
831 | f 4 28 42
832 | f 29 4 42
833 | f 29 16 15
834 | f 16 43 30
835 | f 5 30 31
836 | f 17 31 44
837 | f 17 45 32
838 | f 33 7 47
839 | f 36 18 33
840 | f 34 47 7
841 | f 37 9 36
842 | f 19 22 21
843 | f 48 19 20
844 | f 49 20 37
845 | f 38 22 50
846 | f 40 230 41
847 | f 28 26 51
848 | f 28 52 42
849 | f 42 54 29
850 | f 16 29 55
851 | f 43 16 55
852 | f 56 30 43
853 | f 31 30 58
854 | f 31 58 44
855 | f 45 17 44
856 | f 59 32 45
857 | f 32 59 34
858 | f 59 33 47
859 | f 45 36 33
860 | f 47 34 59
861 | f 37 36 60
862 | f 22 19 61
863 | f 19 48 61
864 | f 20 49 48
865 | f 37 62 49
866 | f 50 22 63
867 | f 28 51 52
868 | f 42 52 65
869 | f 42 53 54
870 | f 29 54 64
871 | f 29 66 55
872 | f 55 67 43
873 | f 30 56 58
874 | f 43 69 56
875 | f 44 58 70
876 | f 44 70 45
877 | f 45 33 59
878 | f 45 71 36
879 | f 60 36 71
880 | f 62 37 60
881 | f 63 22 61
882 | f 72 61 48
883 | f 56 48 49
884 | f 73 49 62
885 | f 42 65 53
886 | f 29 64 66
887 | f 55 66 67
888 | f 43 67 74
889 | f 56 70 58
890 | f 43 75 69
891 | f 56 69 76
892 | f 45 70 77
893 | f 45 77 71
894 | f 78 60 71
895 | f 60 78 62
896 | f 63 61 80
897 | f 81 61 72
898 | f 72 48 76
899 | f 56 76 48
900 | f 56 49 73
901 | f 73 62 82
902 | f 43 74 75
903 | f 70 56 73
904 | f 73 77 70
905 | f 82 71 77
906 | f 71 82 78
907 | f 82 62 78
908 | f 80 61 81
909 | f 77 73 82
910 | f 51 39 23
911 | f 52 23 40
912 | f 53 41 25
913 | f 54 25 38
914 | f 39 51 26
915 | f 64 38 50
916 | f 51 23 52
917 | f 65 40 41
918 | f 52 40 65
919 | f 65 41 53
920 | f 53 25 54
921 | f 54 38 64
922 | f 66 50 63
923 | f 64 50 66
924 | f 66 63 67
925 | f 67 80 74
926 | f 75 72 69
927 | f 69 72 76
928 | f 67 63 80
929 | f 75 81 72
930 | f 74 80 81
931 | f 74 81 75
932 | f 87 88 89
933 | f 88 87 91
934 | f 92 89 88
935 | f 89 93 87
936 | f 94 91 87
937 | f 95 88 91
938 | f 89 92 96
939 | f 95 92 88
940 | f 89 97 93
941 | f 87 93 98
942 | f 91 94 99
943 | f 87 98 94
944 | f 95 91 100
945 | f 96 92 102
946 | f 96 97 89
947 | f 103 92 95
948 | f 103 93 97
949 | f 95 98 93
950 | f 104 94 99
951 | f 100 91 99
952 | f 94 98 105
953 | f 98 95 100
954 | f 103 102 92
955 | f 106 96 102
956 | f 97 96 107
957 | f 93 103 95
958 | f 107 103 97
959 | f 94 104 105
960 | f 100 99 104
961 | f 100 105 98
962 | f 102 103 106
963 | f 96 106 107
964 | f 103 107 106
965 | f 105 100 104
966 | f 89 88 87
967 | f 91 87 88
968 | f 88 89 92
969 | f 87 93 89
970 | f 87 91 94
971 | f 91 88 95
972 | f 96 92 89
973 | f 88 92 95
974 | f 93 97 89
975 | f 98 93 87
976 | f 99 94 91
977 | f 94 98 87
978 | f 100 91 95
979 | f 102 92 96
980 | f 89 97 96
981 | f 95 92 103
982 | f 97 93 103
983 | f 93 98 95
984 | f 99 94 104
985 | f 99 91 100
986 | f 105 98 94
987 | f 100 95 98
988 | f 92 102 103
989 | f 102 96 106
990 | f 107 96 97
991 | f 95 103 93
992 | f 97 103 107
993 | f 105 104 94
994 | f 104 99 100
995 | f 98 105 100
996 | f 106 103 102
997 | f 107 106 96
998 | f 106 107 103
999 | f 104 100 105
1000 | f 108 109 110
1001 | f 109 108 111
1002 | f 114 110 109
1003 | f 110 115 108
1004 | f 108 116 111
1005 | f 111 117 109
1006 | f 118 110 114
1007 | f 114 109 119
1008 | f 115 110 120
1009 | f 121 108 115
1010 | f 108 121 116
1011 | f 111 116 122
1012 | f 117 111 123
1013 | f 125 109 117
1014 | f 110 118 120
1015 | f 114 121 118
1016 | f 119 109 125
1017 | f 119 121 114
1018 | f 118 115 120
1019 | f 115 118 121
1020 | f 121 119 116
1021 | f 116 125 122
1022 | f 111 122 123
1023 | f 123 126 117
1024 | f 125 117 127
1025 | f 125 116 119
1026 | f 127 122 125
1027 | f 123 122 128
1028 | f 126 123 129
1029 | f 127 117 126
1030 | f 122 127 128
1031 | f 123 128 129
1032 | f 127 129 126
1033 | f 129 127 128
1034 | f 110 109 108
1035 | f 111 108 109
1036 | f 109 110 114
1037 | f 108 115 110
1038 | f 111 116 108
1039 | f 109 117 111
1040 | f 114 110 118
1041 | f 119 109 114
1042 | f 120 110 115
1043 | f 115 108 121
1044 | f 116 121 108
1045 | f 122 116 111
1046 | f 123 111 117
1047 | f 117 109 125
1048 | f 120 118 110
1049 | f 118 121 114
1050 | f 125 109 119
1051 | f 114 121 119
1052 | f 120 115 118
1053 | f 121 118 115
1054 | f 116 119 121
1055 | f 122 125 116
1056 | f 123 122 111
1057 | f 117 126 123
1058 | f 127 117 125
1059 | f 119 116 125
1060 | f 125 122 127
1061 | f 128 122 123
1062 | f 129 123 126
1063 | f 126 117 127
1064 | f 128 127 122
1065 | f 129 128 123
1066 | f 126 129 127
1067 | f 128 127 129
1068 | f 131 132 133
1069 | f 132 131 134
1070 | f 135 133 132
1071 | f 133 136 131
1072 | f 134 131 137
1073 | f 132 134 138
1074 | f 133 135 140
1075 | f 132 141 135
1076 | f 136 133 140
1077 | f 136 137 131
1078 | f 142 134 137
1079 | f 134 143 138
1080 | f 141 132 138
1081 | f 135 136 140
1082 | f 136 135 141
1083 | f 137 136 142
1084 | f 134 142 144
1085 | f 143 134 145
1086 | f 146 138 143
1087 | f 138 147 141
1088 | f 136 141 144
1089 | f 136 144 142
1090 | f 148 134 144
1091 | f 134 149 145
1092 | f 145 146 143
1093 | f 151 138 146
1094 | f 147 138 151
1095 | f 147 144 141
1096 | f 134 148 152
1097 | f 144 147 148
1098 | f 134 152 149
1099 | f 146 145 149
1100 | f 149 151 146
1101 | f 152 147 151
1102 | f 147 152 148
1103 | f 151 149 152
1104 | f 133 132 131
1105 | f 134 131 132
1106 | f 132 133 135
1107 | f 131 136 133
1108 | f 137 131 134
1109 | f 138 134 132
1110 | f 140 135 133
1111 | f 135 141 132
1112 | f 140 133 136
1113 | f 131 137 136
1114 | f 137 134 142
1115 | f 138 143 134
1116 | f 138 132 141
1117 | f 140 136 135
1118 | f 141 135 136
1119 | f 142 136 137
1120 | f 144 142 134
1121 | f 145 134 143
1122 | f 143 138 146
1123 | f 141 147 138
1124 | f 144 141 136
1125 | f 142 144 136
1126 | f 144 134 148
1127 | f 145 149 134
1128 | f 143 146 145
1129 | f 146 138 151
1130 | f 151 138 147
1131 | f 141 144 147
1132 | f 152 148 134
1133 | f 148 147 144
1134 | f 149 152 134
1135 | f 149 145 146
1136 | f 146 151 149
1137 | f 151 147 152
1138 | f 148 152 147
1139 | f 152 149 151
1140 |
1141 |
--------------------------------------------------------------------------------
/sample/10/obj_pc.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/sample/10/obj_pc.npy
--------------------------------------------------------------------------------
/sample/2/hand_pc.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/sample/2/hand_pc.npy
--------------------------------------------------------------------------------
/sample/2/obj_pc.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/sample/2/obj_pc.npy
--------------------------------------------------------------------------------
/sample/3/hand_pc.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/sample/3/hand_pc.npy
--------------------------------------------------------------------------------
/sample/3/obj_pc.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/sample/3/obj_pc.npy
--------------------------------------------------------------------------------
/sample/4/hand_pc.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/sample/4/hand_pc.npy
--------------------------------------------------------------------------------
/sample/4/obj_pc.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/sample/4/obj_pc.npy
--------------------------------------------------------------------------------
/sample/5/hand_pc.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/sample/5/hand_pc.npy
--------------------------------------------------------------------------------
/sample/5/obj_pc.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/sample/5/obj_pc.npy
--------------------------------------------------------------------------------
/sample/6/hand_pc.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/sample/6/hand_pc.npy
--------------------------------------------------------------------------------
/sample/6/obj_pc.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/sample/6/obj_pc.npy
--------------------------------------------------------------------------------
/sample/7/hand_pc.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/sample/7/hand_pc.npy
--------------------------------------------------------------------------------
/sample/7/obj_pc.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/sample/7/obj_pc.npy
--------------------------------------------------------------------------------
/sample/8/hand_pc.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/sample/8/hand_pc.npy
--------------------------------------------------------------------------------
/sample/8/obj_pc.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/sample/8/obj_pc.npy
--------------------------------------------------------------------------------
/sample/9/hand_pc.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/sample/9/hand_pc.npy
--------------------------------------------------------------------------------
/sample/9/obj_pc.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/sample/9/obj_pc.npy
--------------------------------------------------------------------------------
/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .load_obman import Load_obman
2 |
--------------------------------------------------------------------------------
/utils/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/utils/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/annotate.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/utils/__pycache__/annotate.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/energy.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/utils/__pycache__/energy.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/load_obman.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/utils/__pycache__/load_obman.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/loss.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/utils/__pycache__/loss.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/tools.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/utils/__pycache__/tools.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/utils.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/utils/__pycache__/utils.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/utils_annotate.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/utils/__pycache__/utils_annotate.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/utils_cls.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/utils/__pycache__/utils_cls.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/utils_loss.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/utils/__pycache__/utils_loss.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/vis.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wmtlab/GrainGrasp/537dcbaa3e61df03e47c0f591a3c2f789446085b/utils/__pycache__/vis.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/annotate.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 | from pytorch3d.ops import knn_points
4 |
5 | # device = "cuda" if torch.cuda.is_available() else "cpu"
6 |
7 | tip_index = {
8 | "thumb": [740, 743, 756, 760, 762, 763, 768, 767, 739],
9 | "index": [328, 329, 332, 343, 347, 349, 350, 354, 355],
10 | "middle": [455, 459, 461, 462, 466, 435, 436, 467, 438, 439, 442],
11 | "ring": [549, 550, 553, 566, 569, 570, 571, 572, 573, 577, 578],
12 | "pinky": [687, 689, 690, 683],
13 | }
14 |
15 | finger_colors = {
16 | "thumb": [1.0, 0.0, 0.0],
17 | "index": [0.0, 1.0, 0.0],
18 | "middle": [0.0, 0.0, 1.0],
19 | "ring": [1.0, 1.0, 0.0],
20 | "pinky": [1.0, 0.0, 1.0],
21 | }
22 |
23 | finger_cls = {"thumb": 1, "index": 2, "middle": 3, "ring": 4, "pinky": 5}
24 | finger_names = list(finger_cls.keys())
25 |
26 |
27 | def get_obj_cls_and_colors(hand_points, obj_points, K=50, input_finger_colors=None, device="cuda"):
28 | """
29 | :param hand_points: Tensor,(B,778,3)
30 | :param obj_points: Tensor,(B,N,3)
31 | :param K: Int, the number of nearest neighbors used in the kNN query between hand and object points.
32 | :return: obj_cls: Tensor-(B,N), colors: Tensor-(B,N,3)
33 | """
34 | input_finger_colors = finger_colors if input_finger_colors is None else input_finger_colors
35 |
36 | if len(hand_points.shape) == 2:
37 | hand_points = hand_points.unsqueeze(0)
38 | if len(obj_points.shape) == 2:
39 | obj_points = obj_points.unsqueeze(0)
40 | assert hand_points.shape[0] == obj_points.shape[0]
41 |
42 | obj_cls = np.zeros((*obj_points.shape[:2],), dtype=np.int32)
43 | colors = np.zeros_like(obj_points.cpu())
44 | _, idx_Batch, _ = knn_points(hand_points.to(device), obj_points.to(device), K=K) # cuda
45 | idx_Batch = idx_Batch.cpu().numpy()
46 |
47 | for b in range(hand_points.shape[0]):
48 | idx = idx_Batch[b] # (N, K)
49 | obj_index = dict()
50 | for i in range(len(finger_cls)):
51 | finger = finger_names[i]
52 | obj_index_finger = np.unique(idx[tip_index[finger]]) # (len(index),K)
53 | obj_index[finger] = obj_index_finger
54 | for j in range(i):
55 | inter_index = np.intersect1d(obj_index[finger_names[j]], obj_index_finger) # (?,)
56 | split = len(tip_index[finger])
57 | if len(inter_index) > 0:
58 |
59 | """Calculate the minimum distance between the current finger and
60 | another finger with intersection using these points, and assign
61 | them to the corresponding finger based on the smaller distance."""
62 | two_index = tip_index[finger] + tip_index[finger_names[j]]
63 | _, idx_min, _ = knn_points(
64 | obj_points[b : b + 1, inter_index].cpu(),
65 | hand_points[b : b + 1, two_index].cpu(),
66 | )
67 | idx_min = idx_min.numpy().reshape((-1,))
68 | now_index = idx_min < split
69 | past_index = np.logical_not(now_index)
70 | # Remove the current set from the previous set
71 | obj_index[finger_names[j]] = np.setdiff1d(obj_index[finger_names[j]], inter_index[now_index])
72 | # Remove the previous set from the current set
73 | obj_index[finger] = np.setdiff1d(obj_index[finger], inter_index[past_index])
74 |
75 | for finger in obj_index:
76 | index = obj_index[finger]
77 | obj_cls[b][index] = finger_cls[finger]
78 | colors[b][index] = input_finger_colors[finger]
79 |
80 | return torch.tensor(obj_cls), torch.tensor(colors)
81 |
82 |
83 | def get_finger_colors(hand_points, input_finger_colors=None):
84 | """
85 | Get colors of fingertips
86 | :param hand_points: (N,3) or (B,N,3)
87 | :return: colors: (N,3) or (B,N,3)
88 | """
89 | input_finger_colors = finger_colors if input_finger_colors is None else input_finger_colors
90 | colors = np.zeros_like(hand_points)
91 | for finger in input_finger_colors:
92 | colors[tip_index[finger]] = input_finger_colors[finger]
93 | return colors
94 |
95 |
96 | def get_obj_colors(obj_cls, cls_colors=None):
97 |
98 | cls_colors = finger_colors if cls_colors is None else cls_colors
99 | # if obj_cls.shape[-1] == 1:
100 | # obj_cls = obj_cls.reshape()
101 | # obj_cls = obj_cls.squeeze()
102 | # if len(obj_cls.shape) == 1:
103 | # obj_cls = obj_cls.reshape(1, -1)
104 | if obj_cls.shape[-1] == 1: # (B,N,1) or (N,1)
105 | obj_cls = obj_cls.reshape(*obj_cls.shape[:-1]) # (B,N) or (N,)
106 | obj_colors = np.zeros((*obj_cls.shape, 3)) # (B,N,3) or (N,3)
107 |
108 | for finger in tip_index:
109 | obj_colors[obj_cls == finger_cls[finger]] = cls_colors[finger]
110 | return obj_colors
111 |
--------------------------------------------------------------------------------
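A minimal usage sketch for utils/annotate.py (not part of the repository): the tensors below are random stand-ins for MANO hand vertices and an object point cloud, and device="cpu" is chosen only so the snippet runs without a GPU.

import torch
from utils import annotate

hand_pc = torch.rand(2, 778, 3)   # stand-in for MANO hand vertices (batch of 2)
obj_pc = torch.rand(2, 3000, 3)   # stand-in for object point clouds

# Label every object point with the closest fingertip: 0 = untouched, 1-5 = thumb..pinky
obj_cls, colors = annotate.get_obj_cls_and_colors(hand_pc, obj_pc, K=50, device="cpu")
print(obj_cls.shape, colors.shape)  # torch.Size([2, 3000]) torch.Size([2, 3000, 3])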
/utils/load_obman.py:
--------------------------------------------------------------------------------
1 | import os
2 | import zipfile
3 | import pickle
4 | import trimesh
5 | import numpy as np
6 | from utils import tools
7 |
8 |
9 | class Load_obman:
10 | def __init__(self, shapeNet_path, obman_path, mode):
11 | self.shapeNet_path = shapeNet_path
12 | self.obman_path = obman_path
13 | self.shapeNet_zip = zipfile.ZipFile(self.shapeNet_path)
14 | self.mode = mode
15 | self.meta_path = os.path.join(self.obman_path, self.mode, "meta")
16 | self.pklNameList = os.listdir(self.meta_path)
17 |
18 | def set_mode(self, mode):
19 | self.mode = mode
20 | self.meta_path = os.path.join(self.obman_path, self.mode, "meta")
21 | self.pklNameList = os.listdir(self.meta_path)
22 |
23 | def get_meta(self, idx):
24 | pkl_file = os.path.join(self.meta_path, self.pklNameList[idx])
25 | with open(pkl_file, "rb") as f: meta = pickle.load(f)
26 | return meta
27 |
28 | def get_obj_mesh(self, meta):
29 | obj_path_seg = meta["obj_path"].split("/")[5:]
30 | obj_mesh_path = "/".join(obj_path_seg)
31 | obj_mesh = tools.fast_load_obj(self.shapeNet_zip.read(obj_mesh_path))[0]
32 | obj_vertices, obj_faces = obj_mesh["vertices"], obj_mesh["faces"]
33 | obj_vertices = tools.vertices_transformation(obj_vertices, rt=meta["affine_transform"])
34 | obj_mesh = trimesh.Trimesh(vertices=obj_vertices, faces=obj_faces)
35 | return obj_mesh
36 |
37 | def get_hand_pc(self, meta):
38 | return meta["verts_3d"]
39 |
40 | def get_hand_pose(self, meta):
41 | return meta["hand_pose"]
42 |
--------------------------------------------------------------------------------
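A hypothetical usage sketch for Load_obman (the ShapeNet archive and ObMan directory paths below are placeholders, not paths shipped with the repository):

from utils.load_obman import Load_obman

loader = Load_obman(
    shapeNet_path="path/to/ShapeNetCore.v2.zip",  # placeholder: zipped ShapeNet models
    obman_path="path/to/obman",                   # placeholder: ObMan dataset root
    mode="train",
)
meta = loader.get_meta(0)               # metadata pickle of the first sample
obj_mesh = loader.get_obj_mesh(meta)    # trimesh.Trimesh transformed into the grasp frame
hand_pc = loader.get_hand_pc(meta)      # (778, 3) MANO hand vertices
hand_pose = loader.get_hand_pose(meta)  # MANO pose parameters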
/utils/loss.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn.functional as F
3 |
4 |
5 | class DiceLoss(torch.nn.Module):
6 | def __init__(self, n_classes, weight=None):
7 | super(DiceLoss, self).__init__()
8 | self.n_classes = n_classes
9 | if weight is None:
10 | self.weights = list(range(n_classes))  # default: class i is weighted by i, so class 0 is ignored
11 | else:
12 | self.weights = weight
13 |
14 | def forward(self, input, target):
15 | target = F.one_hot(target).transpose(2, 1)
16 | input = F.softmax(input, dim=1)
17 | total_dice_loss = 0
18 | for i in range(self.n_classes):
19 | numerator = 2 * torch.sum(input[:, i] * target[:, i])
20 | denominator = torch.sum(input[:, i]) + torch.sum(target[:, i])
21 | loss = 1 - numerator / denominator
22 | total_dice_loss += loss * self.weights[i]
23 |
24 | return total_dice_loss / self.n_classes
25 |
26 |
27 | dice_loss = DiceLoss(6)
28 |
29 |
30 | def CVAE_loss(recon_cls, cls, mean, log_var, w_cross, w_dice, mode="train"):
31 | """
32 | :param recon_x: reconstructed hand xyz [B,778,3]
33 | :param x: ground truth hand xyz [B,778,3]
34 | :param recon_cls: reconstructed cls [B,6,N]
35 | :param cls: ground truth hand xyz [B,N]
36 | :param mean: [B,z]
37 | :param log_var: [B,z]
38 | :return:
39 | """
40 | cross_loss = F.cross_entropy(
41 | recon_cls,
42 | cls,
43 | weight=torch.Tensor([0.05, 0.4, 0.4, 0.5, 0.6, 0.6]).to(cls.device),
44 | ).sum()
45 | recon_cls_loss = w_cross * cross_loss + w_dice * dice_loss(recon_cls, cls)
46 |
47 | if mode != "train":
48 | return recon_cls_loss, None
49 | else:
50 | # KLD loss
51 | KLD = -0.5 * torch.sum(1 + log_var - mean.pow(2) - log_var.exp()) / log_var.shape[0] * 2
52 | return recon_cls_loss, KLD
53 |
54 |
55 | def transform_loss(rot):
56 | d = rot.size()[1]
57 | I = torch.eye(d)[None, :, :]
58 | if rot.is_cuda:
59 | I = I.cuda()
60 | loss = torch.mean(torch.norm(torch.bmm(rot, rot.transpose(2, 1)) - I, dim=(1, 2)))
61 | return loss
62 |
--------------------------------------------------------------------------------
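A small sanity-check sketch for CVAE_loss (not from the repository): the shapes follow the docstring above, the values are random, and the 0.1 KLD weight is purely illustrative.

import torch
from utils.loss import CVAE_loss

B, N, z = 4, 3000, 64
recon_cls = torch.randn(B, 6, N)           # per-point logits for the 6 contact classes
cls = torch.randint(0, 6, (B, N))          # ground-truth per-point labels
mean, log_var = torch.randn(B, z), torch.randn(B, z)

recon_loss, kld = CVAE_loss(recon_cls, cls, mean, log_var, w_cross=1.0, w_dice=1.0)
total = recon_loss + 0.1 * kld             # illustrative weighting of the KLD term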
/utils/tools.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import json
3 | import open3d as o3d
4 | import trimesh
5 | import os
6 |
7 |
8 | def check_dir(dir_path):
9 | if not os.path.exists(dir_path):
10 | os.makedirs(dir_path)
11 |
12 |
13 | def readJson(file_path):
14 | with open(file_path, "r") as f:
15 | data = json.load(f)
16 | return data
17 |
18 |
19 | def fingerName2fingerId(dict_old):
20 | """
21 | change the old key str("thumb", "index", "middle", "ring", "pinky") to int(1, 2, 3, 4, 5)
22 | """
23 | dict_new = {}
24 | dict_new[1] = dict_old["thumb"]
25 | dict_new[2] = dict_old["index"]
26 | dict_new[3] = dict_old["middle"]
27 | dict_new[4] = dict_old["ring"]
28 | dict_new[5] = dict_old["pinky"]
29 | return dict_new
30 |
31 |
32 | def vertices_transformation(vertices, rt):
33 | """
34 | rt: 4x4 matrix [R|T]
35 | """
36 | p = np.matmul(rt[:3, 0:3], vertices.T) + rt[:3, 3].reshape(-1, 1)
37 | return p.T
38 |
39 |
40 | def vertices_rotation(vertices, rt):
41 | p = np.matmul(rt[:3, 0:3], vertices.T)
42 | return p.T
43 |
44 |
45 | def fast_load_obj(file_obj_text, **kwargs):
46 | """
47 | Adapted from: https://github.com/hwjiang1510/GraspTTA/blob/master/utils/utils.py#35
48 | Parameters:
49 | - file_obj_text: bytes holding the raw OBJ file content (e.g. the result of read() on a binary file handle).
50 | Returns:
51 | - meshes: a list of dicts, one per mesh, each with "vertices", "faces", and "metadata" keys.
52 | """
53 |
54 | # make sure text is utf-8 with only \n newlines
55 | text = file_obj_text.decode("utf-8")
56 | text = text.replace("\r\n", "\n").replace("\r", "\n") + " \n"
57 | meshes = []
58 |
59 | def append_mesh():
60 | # append kwargs for a trimesh constructor
61 | # to our list of meshes
62 | if len(current["f"]) > 0:
63 | # get vertices as clean numpy array
64 | vertices = np.array(current["v"], dtype=np.float64).reshape((-1, 3))
65 | # do the same for faces
66 | faces = np.array(current["f"], dtype=np.int64).reshape((-1, 3))
67 | # get keys and values of remap as numpy arrays
68 | # we are going to try to preserve the order as
69 | # much as possible by sorting by remap key
70 | keys, values = (
71 | np.array(list(remap.keys())),
72 | np.array(list(remap.values())),
73 | )
74 | # new order of vertices
75 | vert_order = values[keys.argsort()]
76 | # we need to mask to preserve index relationship
77 | # between faces and vertices
78 | face_order = np.zeros(len(vertices), dtype=np.int64)
79 | face_order[vert_order] = np.arange(len(vertices), dtype=np.int64)
80 | # apply the ordering and put into kwarg dict
81 | loaded = {
82 | "vertices": vertices[vert_order],
83 | "faces": face_order[faces],
84 | "metadata": {},
85 | }
86 | # build face groups information
87 | # faces didn't move around so we don't have to reindex
88 | if len(current["g"]) > 0:
89 | face_groups = np.zeros(len(current["f"]) // 3, dtype=np.int64)
90 | for idx, start_f in current["g"]:
91 | face_groups[start_f:] = idx
92 | loaded["metadata"]["face_groups"] = face_groups
93 | # we're done, append the loaded mesh kwarg dict
94 | meshes.append(loaded)
95 |
96 | attribs = {k: [] for k in ["v"]}
97 | current = {k: [] for k in ["v", "f", "g"]}
98 | # remap vertex indexes {str key: int index}
99 | remap = {}
100 | next_idx = 0
101 | group_idx = 0
102 | for line in text.split("\n"):
103 | line_split = line.strip().split()
104 | if len(line_split) < 2:
105 | continue
106 | if line_split[0] in attribs:
107 | # only "v" (vertex position) records are collected here,
108 | # since attribs contains just the "v" key; vt/vn lines are skipped
109 | # parse only the first 3 values, ignoring any per-vertex colors
110 | attribs[line_split[0]].append([float(x) for x in line_split[1:4]])
111 | elif line_split[0] == "f":
112 | # a face
113 | ft = line_split[1:]
114 | if len(ft) == 4:
115 | # hasty triangulation of quad
116 | ft = [ft[0], ft[1], ft[2], ft[2], ft[3], ft[0]]
117 | for f in ft:
118 | # loop through each vertex reference of a face
119 | # we are reshaping later into (n,3)
120 | if f not in remap:
121 | remap[f] = next_idx
122 | next_idx += 1
123 | # faces are "vertex index"/"vertex texture"/"vertex normal"
124 | # you are allowed to leave a value blank, which .split
125 | # will handle by nicely maintaining the index
126 | f_split = f.split("/")
127 | current["v"].append(attribs["v"][int(f_split[0]) - 1])
128 | current["f"].append(remap[f])
129 | elif line_split[0] == "o":
130 | # defining a new object
131 | append_mesh()
132 | # reset current to empty lists
133 | current = {k: [] for k in current.keys()}
134 | remap = {}
135 | next_idx = 0
136 | group_idx = 0
137 | elif line_split[0] == "g":
138 | # defining a new group
139 | group_idx += 1
140 | current["g"].append((group_idx, len(current["f"]) // 3))
141 | if next_idx > 0:
142 | append_mesh()
143 | return meshes
144 |
145 |
146 | def pc_normalize(pc):
147 | centroid = np.mean(pc, axis=0)
148 | pc = pc - centroid
149 | m = np.max(np.sqrt(np.sum(pc**2, axis=1)))
150 | pc = pc / m
151 | return pc
152 |
153 |
154 | def pc_sample(mesh, num_points=3000):
155 | sample_pc = trimesh.sample.sample_surface(mesh, num_points)[0]
156 | return sample_pc
157 |
--------------------------------------------------------------------------------
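An illustrative pipeline built on utils/tools.py (not part of the repository); "model.obj" is a placeholder path for any triangulated OBJ file:

import trimesh
from utils import tools

with open("model.obj", "rb") as f:                    # placeholder OBJ path
    mesh_dict = tools.fast_load_obj(f.read())[0]      # bytes in, list of mesh dicts out

mesh = trimesh.Trimesh(vertices=mesh_dict["vertices"], faces=mesh_dict["faces"])
obj_pc = tools.pc_sample(mesh, num_points=3000)       # (3000, 3) points sampled on the surface
obj_pc = tools.pc_normalize(obj_pc)                   # centered and scaled to the unit sphere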
/utils/vis.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import open3d as o3d
3 | import numpy as np
4 | import trimesh
5 | import time
6 |
7 |
8 | def vis_HandObject(hand, obj, window_name="HandObject"):
9 | """
10 | :param hand: list of open3d geometries, one hand per grasp
11 | :param obj: list of open3d geometries, one object per grasp (same length as hand)
12 | :return: None
13 | """
14 | assert len(obj) == len(hand)
15 |
16 | for i in range(len(obj)):
17 | o3d.visualization.draw_geometries([hand[i], obj[i]], window_name=window_name)
18 |
19 |
20 | def vis_GraspProcess(first_hand, hand_vertex_list, obj, time_sleep=0.05):
21 | """
22 | :param first_hand: o3d.geometry.TriangleMesh corresponding to the first hand mesh
23 | :param hand_vertex_list: list->[[N,3],[N,3],...] don't include the first_hand
24 | :param obj: o3d.geometry.TriangleMesh
25 | """
26 | hand = first_hand
27 | vis = o3d.visualization.Visualizer()
28 | vis.create_window(window_name="Grasp_Process", visible=True)
29 | vis.add_geometry(hand)
30 | vis.add_geometry(obj)
31 | vis.update_renderer()
32 | for i in range(len(hand_vertex_list)):
33 | hand.vertices = o3d.utility.Vector3dVector(hand_vertex_list[i])
34 | hand.compute_vertex_normals()
35 | vis.update_geometry(hand)
36 | vis.poll_events()
37 | vis.update_renderer()
38 | time.sleep(time_sleep)
39 | vis.run()
40 |
41 |
42 | def get_o3d_pcd(points, colors=None, vis=False):
43 | """
44 | :param points: (B,N,3) or (N,3)
45 | :param colors: Array or Tensor --->(B,N,3) or (N,3) or None
46 | :return pcd_batch: list->[pcd1,pcd2,...] or pcd
47 | """
48 |
49 | def get_o3d_pcd_single(point, color, vis=False):
50 | pcd = o3d.geometry.PointCloud()
51 | pcd.points = o3d.utility.Vector3dVector(point)
52 | pcd.colors = o3d.utility.Vector3dVector(color)
53 | if vis:
54 | o3d.visualization.draw_geometries([pcd], window_name="points")
55 | return pcd
56 |
57 | if colors is None:
58 | colors = np.zeros_like(points)
59 | else:
60 | assert points.shape == colors.shape
61 |
62 | if len(points.shape) == 2:
63 | return get_o3d_pcd_single(points, colors, vis)
64 | else:
65 | pcd_batch = []
66 | for i in range(points.shape[0]):
67 | pcd = get_o3d_pcd_single(points[i], colors[i], vis)
68 | pcd_batch.append(pcd)
69 | return pcd_batch
70 |
71 |
72 | def get_o3d_mesh(points, faces, paint_color=[0.3, 0.3, 0.3], vertex_colors=None):
73 | """
74 | :param points: (B,N,3) or (N,3)
75 | :param faces: (B,M,3) or (M,3)
76 | :param paint_color: list->[r,g,b]
77 | :param vertex_colors: (B,N,3) or (N,3) or None
78 | :return mesh_batch: list->[mesh1,mesh2,...] or mesh
79 | """
80 |
81 | def get_o3d_mesh_single(point, face, paint_color, vertex_colors):
82 | mesh = o3d.geometry.TriangleMesh()
83 | mesh.vertices = o3d.utility.Vector3dVector(point)
84 | mesh.triangles = o3d.utility.Vector3iVector(face)
85 | mesh.compute_vertex_normals()
86 | if vertex_colors is not None:
87 | vertex_colors = vertex_colors.reshape(-1, 3)
88 | vertex_colors[
89 | np.logical_and(
90 | vertex_colors[:, 0] == 0,
91 | vertex_colors[:, 1] == 0,
92 | vertex_colors[:, 2] == 0,
93 | )
94 | ] = np.array(paint_color)
95 | mesh.vertex_colors = o3d.utility.Vector3dVector(
96 | vertex_colors.reshape(-1, 3)
97 | )
98 | else:
99 | mesh.paint_uniform_color(paint_color)
100 | return mesh
101 |
102 | if vertex_colors is not None:
103 | assert len(points.shape) == len(faces.shape) == len(vertex_colors.shape)
104 | if len(points.shape) == 3:
105 | assert points.shape[0] == faces.shape[0] == vertex_colors.shape[0]
106 | else:
107 | return get_o3d_mesh_single(points, faces, paint_color, vertex_colors)
108 | else:
109 | assert len(points.shape) == len(faces.shape)
110 | if len(points.shape) == 3:
111 | assert points.shape[0] == faces.shape[0]
112 | else:
113 | return get_o3d_mesh_single(points, faces, paint_color, None)
114 |
115 | mesh_batch = []
116 | for i in range(points.shape[0]):
117 | if vertex_colors is None:
118 | mesh = get_o3d_mesh_single(points[i], faces[i], paint_color, None)
119 | else:
120 | mesh = get_o3d_mesh_single(
121 | points[i], faces[i], paint_color, vertex_colors[i]
122 | )
123 | mesh_batch.append(mesh)
124 | return mesh_batch
125 |
126 |
127 | def trimesh2o3d(tm_mesh, paint_color=[0.3, 0.3, 0.3], vertex_colors=None):
128 | mesh = tm_mesh.as_open3d
129 | mesh.compute_vertex_normals()
130 | if vertex_colors is not None:
131 | vertex_colors = vertex_colors.reshape(-1, 3)
132 | vertex_colors[
133 | np.logical_and(
134 | vertex_colors[:, 0] == 0,
135 | vertex_colors[:, 1] == 0,
136 | vertex_colors[:, 2] == 0,
137 | )
138 | ] = np.array(paint_color)
139 | mesh.vertex_colors = o3d.utility.Vector3dVector(vertex_colors.reshape(-1, 3))
140 | else:
141 | mesh.paint_uniform_color(paint_color)
142 | return mesh
143 |
--------------------------------------------------------------------------------
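A visualization sketch combining utils/vis.py with utils/annotate.py (not from the repository); the arrays below are random placeholders for real hand and object data:

import numpy as np
from utils import vis, annotate

obj_pc = np.random.rand(3000, 3)                    # placeholder object point cloud
obj_cls = np.random.randint(0, 6, (3000,))          # placeholder per-point contact labels
obj_colors = annotate.get_obj_colors(obj_cls)       # map class ids to fingertip colors
obj_pcd = vis.get_o3d_pcd(obj_pc, obj_colors)

hand_pc = np.random.rand(778, 3)                    # placeholder MANO vertices
hand_colors = annotate.get_finger_colors(hand_pc)   # color only the fingertip vertices
hand_pcd = vis.get_o3d_pcd(hand_pc, hand_colors)

vis.vis_HandObject([hand_pcd], [obj_pcd])           # opens one Open3D window per hand/object pair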