├── results
│   ├── q_dyn.npy
│   ├── p_3D_dyn.npy
│   └── q_iter_dyn.npy
├── requirements.txt
├── sample_data
│   ├── test.npy
│   ├── sample_dance.npy
│   ├── sample_dance2.npy
│   ├── sample_cam_params.npy
│   ├── sample_floor_position.npy
│   └── c_0_2D_interpolate_smooth_headHandled.npy
├── __pycache__
│   ├── networks.cpython-37.pyc
│   ├── lossFunctions.cpython-37.pyc
│   └── pipeline_util.cpython-37.pyc
├── Utils
│   ├── __pycache__
│   │   ├── misc.cpython-37.pyc
│   │   ├── phys.cpython-37.pyc
│   │   ├── angles.cpython-37.pyc
│   │   ├── phys2.cpython-37.pyc
│   │   ├── phys3.cpython-37.pyc
│   │   ├── contacts.cpython-37.pyc
│   │   ├── core_utils.cpython-37.pyc
│   │   ├── initializer.cpython-37.pyc
│   │   ├── openpose_util.cpython-37.pyc
│   │   └── posenpose_util.cpython-37.pyc
│   ├── openpose_util.py
│   ├── phys.py
│   ├── contacts.py
│   ├── angles.py
│   ├── initializer.py
│   ├── core_utils.py
│   └── misc.py
├── process_openpose.py
├── Visualizations
│   └── simple.py
├── README.md
├── pipeline_util.py
├── lossFunctions.py
├── networks.py
├── demo.py
└── URDF
    └── manual.urdf

--------------------------------------------------------------------------------
/results/q_dyn.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soshishimada/Neural_Physcap_Demo/HEAD/results/q_dyn.npy

--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
numpy==1.19.3
pybullet==3.2.1
cvxpy==1.1.18
cvxpylayers==0.1.5
scipy==1.7.3

--------------------------------------------------------------------------------
/results/p_3D_dyn.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soshishimada/Neural_Physcap_Demo/HEAD/results/p_3D_dyn.npy

--------------------------------------------------------------------------------
/sample_data/test.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soshishimada/Neural_Physcap_Demo/HEAD/sample_data/test.npy

--------------------------------------------------------------------------------
/results/q_iter_dyn.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soshishimada/Neural_Physcap_Demo/HEAD/results/q_iter_dyn.npy

--------------------------------------------------------------------------------
/sample_data/sample_dance.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soshishimada/Neural_Physcap_Demo/HEAD/sample_data/sample_dance.npy

--------------------------------------------------------------------------------
/sample_data/sample_dance2.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soshishimada/Neural_Physcap_Demo/HEAD/sample_data/sample_dance2.npy

--------------------------------------------------------------------------------
/__pycache__/networks.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soshishimada/Neural_Physcap_Demo/HEAD/__pycache__/networks.cpython-37.pyc

--------------------------------------------------------------------------------
/sample_data/sample_cam_params.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soshishimada/Neural_Physcap_Demo/HEAD/sample_data/sample_cam_params.npy
--------------------------------------------------------------------------------
/Utils/__pycache__/misc.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soshishimada/Neural_Physcap_Demo/HEAD/Utils/__pycache__/misc.cpython-37.pyc

--------------------------------------------------------------------------------
/Utils/__pycache__/phys.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soshishimada/Neural_Physcap_Demo/HEAD/Utils/__pycache__/phys.cpython-37.pyc

--------------------------------------------------------------------------------
/sample_data/sample_floor_position.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soshishimada/Neural_Physcap_Demo/HEAD/sample_data/sample_floor_position.npy

--------------------------------------------------------------------------------
/Utils/__pycache__/angles.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soshishimada/Neural_Physcap_Demo/HEAD/Utils/__pycache__/angles.cpython-37.pyc

--------------------------------------------------------------------------------
/Utils/__pycache__/phys2.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soshishimada/Neural_Physcap_Demo/HEAD/Utils/__pycache__/phys2.cpython-37.pyc

--------------------------------------------------------------------------------
/Utils/__pycache__/phys3.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soshishimada/Neural_Physcap_Demo/HEAD/Utils/__pycache__/phys3.cpython-37.pyc

--------------------------------------------------------------------------------
/__pycache__/lossFunctions.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soshishimada/Neural_Physcap_Demo/HEAD/__pycache__/lossFunctions.cpython-37.pyc

--------------------------------------------------------------------------------
/__pycache__/pipeline_util.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soshishimada/Neural_Physcap_Demo/HEAD/__pycache__/pipeline_util.cpython-37.pyc

--------------------------------------------------------------------------------
/Utils/__pycache__/contacts.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soshishimada/Neural_Physcap_Demo/HEAD/Utils/__pycache__/contacts.cpython-37.pyc

--------------------------------------------------------------------------------
/Utils/__pycache__/core_utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soshishimada/Neural_Physcap_Demo/HEAD/Utils/__pycache__/core_utils.cpython-37.pyc

--------------------------------------------------------------------------------
/Utils/__pycache__/initializer.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soshishimada/Neural_Physcap_Demo/HEAD/Utils/__pycache__/initializer.cpython-37.pyc
--------------------------------------------------------------------------------
/Utils/__pycache__/openpose_util.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soshishimada/Neural_Physcap_Demo/HEAD/Utils/__pycache__/openpose_util.cpython-37.pyc

--------------------------------------------------------------------------------
/Utils/__pycache__/posenpose_util.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soshishimada/Neural_Physcap_Demo/HEAD/Utils/__pycache__/posenpose_util.cpython-37.pyc

--------------------------------------------------------------------------------
/sample_data/c_0_2D_interpolate_smooth_headHandled.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soshishimada/Neural_Physcap_Demo/HEAD/sample_data/c_0_2D_interpolate_smooth_headHandled.npy

--------------------------------------------------------------------------------
/process_openpose.py:
--------------------------------------------------------------------------------
import Utils.openpose_util as opp
import numpy as np
import argparse

def process(input_path, interpolate, smoothing):
    head_keys = [14, 15, 16, 17]
    neck_key = [0]
    print('processing .....')
    p_2ds = opp.get_2ds_from_jsons(input_path)
    p_2ds = opp.handle_head_keypoints(p_2ds, neck_key, head_keys)
    if interpolate:
        p_2ds = opp.openpose_interpolate(p_2ds)
    if smoothing:
        p_2ds = opp.openpose_smoothing(p_2ds)
    print("Done")
    return p_2ds

def save_data(save_path, save_name, data):
    np.save(save_path + save_name, data)
    return 0

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='arguments for predictions')
    parser.add_argument('--input_path', default="")
    parser.add_argument('--save_path', default="./sample_data/")
    parser.add_argument('--save_file_name', default='test.npy')
    args = parser.parse_args()

    interpolate = 1
    smoothing = 1
    p_2ds = process(args.input_path, interpolate, smoothing)
    save_data(args.save_path, args.save_file_name, p_2ds)

--------------------------------------------------------------------------------
/Visualizations/simple.py:
--------------------------------------------------------------------------------
import numpy as np
import pybullet as p
import rbdl
import Utils.misc as ut
from Utils.initializer import InitializerConsistentHumanoid2
from Utils.angles import angle_util
import time
import copy
####
#### Initialization
####
id_simulator = p.connect(p.GUI)
p.configureDebugVisualizer(flag=p.COV_ENABLE_Y_AXIS_UP, enable=1)
p.configureDebugVisualizer(flag=p.COV_ENABLE_SHADOWS, enable=0)
target_joints = ["head", "neck", "left_knee", "left_ankle", "left_toe", "right_knee", "right_ankle", "right_toe", "left_shoulder", "left_elbow", "left_wrist", "right_shoulder", "right_elbow", "right_wrist"]
AU = angle_util()
ini = InitializerConsistentHumanoid2(1, target_joints)
rbdl2bullet = ini.get_rbdl2bullet()

####
#### Sample rotation and translation, for visualization purposes only (provide the correct R and T, if known, to see the results in a global frame).
####
RT = np.array([0.0753311, 0.007644453, -0.9971293, 117.6134, -0.03987908, -0.9991937, -0.004647529, 1435.971, -0.9963609, 0.0394145, 0.07557522, 4628.016, 0, 0, 0, 1]).reshape(4, 4)
R = RT[:3, :3].T
T = RT[:-1, 3:].reshape(3) / 1000

if __name__ == "__main__":
    urdf_base_path = "./URDF/"
    model0 = rbdl.loadModel((urdf_base_path + 'manual.urdf').encode(), floating_base=True)

    id_robot = p.loadURDF(urdf_base_path + 'manual.urdf', useFixedBase=False)
    _, _, jointIds, jointNames = ut.get_jointIds_Names(id_robot)
    q = copy.copy(np.load("./results/q_iter_dyn.npy", mmap_mode='r'))
    # Zero out selected articulation entries; the offset 6 skips the floating-base DoFs.
    q[:, 6 + 27] = 0
    q[:, 6 + 35] = 0
    q[:, 6 + 36] = 0
    q[:, 6 + 37] = 0
    q[:, 6 + 38] = 0
    count = 0
    while True:
        print(count)
        ut.visualization3D_multiple([id_robot], jointIds, rbdl2bullet, [q[count]], Rot=R, T=T, overlay=True)

        time.sleep(0.005)

        count += 1
        if count >= len(q):
            count = 0

--------------------------------------------------------------------------------
/Utils/openpose_util.py:
--------------------------------------------------------------------------------
import numpy as np
import copy
from scipy.interpolate import interp1d
from scipy.ndimage import gaussian_filter1d
import json
import os

def openpose_interpolate_realtime(p_2ds):
    for i in range(len(p_2ds)):
        # find frames that contain 0 (error).
        if not np.all(p_2ds[i]):
            # Provide the latest non-error joint position to the error joints.
            for j in range(len(p_2ds[i])):
                if p_2ds[i][j][0] == 0:
                    p_2ds[i][j][0] = copy.copy(p_2ds[i - 1][j][0])
                    p_2ds[i][j][1] = copy.copy(p_2ds[i - 1][j][1])
    return p_2ds

def find_start_end_of_errors(j_line):
    zero_in_flag = 0
    all_pairs = []
    for j in range(len(j_line)):
        if j_line[j] == 0:
            if not zero_in_flag:
                start_index = j - 1
                zero_in_flag = 1

        if j_line[j] != 0 and zero_in_flag:
            zero_in_flag = 0
            end_index = j
            all_pairs.append((start_index, end_index))
    return all_pairs

def apply_interpolations(all_pairs, j_line, p_2ds):
    for pair in all_pairs:
        f = interp1d([pair[0], pair[1]], [j_line[pair[0]], j_line[pair[1]]])
        x = np.linspace(pair[0], pair[1], num=(pair[1] - pair[0] - 1) + 2, endpoint=True)
        for k, inter_val in enumerate(f(x)[1:-1]):
            j_line[pair[0] + k + 1] = inter_val
    return j_line

def substitute_avg_values(heads_sub, necks_sub):
    for i in range(len(heads_sub)):
        if not np.all(heads_sub[i]):
            head_values = heads_sub[i]
            n_non_zero = np.count_nonzero(heads_sub[i])
            if n_non_zero != 0:
                avg_head = sum(head_values) / n_non_zero
                for j in range(len(head_values)):
                    if head_values[j] == 0:
                        head_values[j] = avg_head
            else:
                for j in range(len(head_values)):
                    head_values[j] = necks_sub[i]

            heads_sub[i] = head_values
    return heads_sub

def handle_head_keypoints(p_2ds, neck_key, head_keys):
    heads = p_2ds[:, head_keys]
    necks = p_2ds[:, neck_key]

    heads_u = substitute_avg_values(copy.copy(heads[:, :, 0]), copy.copy(necks[:, :, 0]))
    heads_v = substitute_avg_values(copy.copy(heads[:, :, 1]), copy.copy(necks[:, :, 1]))

    heads[:, :, 0] = heads_u
    heads[:, :, 1] = heads_v

    p_2ds[:, head_keys] = heads
    return p_2ds

def openpose_interpolate(p_2ds):
    p_2ds = p_2ds.reshape(len(p_2ds), -1).T
    for i, j_line in enumerate(p_2ds):
        # find joints that contain 0 (error) somewhere in the sequence.
        if not np.all(j_line):
            # find pairs of frame indices (start, end) around each error segment
            all_pairs = find_start_end_of_errors(j_line)

            # apply linear interpolation on each pair of indices
            p_2ds[i] = apply_interpolations(all_pairs, j_line, p_2ds)
    p_2ds = p_2ds.T
    p_2ds = p_2ds.reshape(len(p_2ds), -1, 2)
    return p_2ds

def openpose_smoothing(p_2ds):
    p_2ds = p_2ds.reshape(len(p_2ds), -1).T
    for i, l in enumerate(p_2ds):
        p_2ds[i] = gaussian_filter1d(l, 1.5)
    #print(p_2ds.shape)
    p_2ds = p_2ds.T
    p_2ds = p_2ds.reshape(len(p_2ds), -1, 2)
    return p_2ds

def get_2ds_from_jsons(data_path):
    json_files = os.listdir(data_path)
    json_files.sort()
    json_files = json_files[1:]
    all_2ds = []
    for i, json_file in enumerate(json_files):
        with open(data_path + json_file) as f:
            data = json.load(f)
            if len(data['people']) == 0:
                p_2ds = np.zeros(72)
            else:
                contents = data['people'][0]
                p_2ds = np.array(contents['pose_keypoints_2d'])[3:]
            all_2ds.append(p_2ds)
    all_2ds = np.array(all_2ds)
    all_2ds = all_2ds.reshape(len(all_2ds), -1, 3)[:, :, :-1]
    return all_2ds
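As a sanity check for the gap-filling logic above, here is a minimal sketch (not part of the repository; it assumes the repository root is on the Python path) that fills a single missed detection:

```python
# Minimal sketch: fill one zero-valued (missed) detection with openpose_interpolate.
import numpy as np
import Utils.openpose_util as opp

# 3 frames, 2 joints, (u, v); joint 0 is "lost" (all zeros) in frame 1.
p_2ds = np.array([
    [[100.0, 200.0], [50.0, 60.0]],
    [[  0.0,   0.0], [52.0, 61.0]],
    [[110.0, 210.0], [54.0, 62.0]],
])
filled = opp.openpose_interpolate(p_2ds.copy())
print(filled[1, 0])  # expected: [105., 205.], the linear midpoint of frames 0 and 2
```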
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Demo: "Neural PhysCap" Neural Monocular 3D Human Motion Capture with Physical Awareness
The implementation is based on **Neural Monocular 3D Human Motion Capture with Physical Awareness**
[SIGGRAPH '21](http://vcai.mpi-inf.mpg.de/projects/PhysAware/).

Authors: Soshi Shimada, Vladislav Golyanik, Weipeng Xu, Patrick Pérez and Christian Theobalt

## Dependencies
- Python 3.7
- Ubuntu 18.04 (The system should also run on other Ubuntu versions and Windows, but this has not been tested.)
- RBDL: Rigid Body Dynamics Library (https://rbdl.github.io/). Tested on V.2.6.0. (Important: set "RBDL_BUILD_ADDON_URDFREADER" to "ON" when you compile. Also, don't forget to add the compiled RBDL library to your Python path in order to use it; see the example below.)
- PyTorch 1.10.1
- For other Python packages, please check requirements.txt
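For example, one way to expose the compiled RBDL Python bindings is via PYTHONPATH (a sketch only; `/path/to/rbdl/build/python` is a placeholder for wherever your build put the bindings):

    export PYTHONPATH=$PYTHONPATH:/path/to/rbdl/build/python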
## Installation
- Download and install the Python-bound RBDL from https://github.com/rbdl/rbdl
- Install PyTorch 1.8.1 with GPU support (https://pytorch.org/) (other versions should also work, but have not been tested)
- Install the Python packages by:

      pip install -r requirements.txt

## How to Run
1) Download the pretrained model from [here](https://drive.google.com/file/d/1sMXJRvq1tTNuqCwrYjugMlM-kOv6s06Q/view?usp=sharing). Below, we assume all the pretrained networks are placed under "../pretrained_neuralPhys/".

2) We provide sample data under "sample_data". To run the code on our sample data, first go to the root directory (neuralphyscap_demo_release) and run:

       python demo.py --input_path sample_data/sample_dance.npy --net_path ../pretrained_neuralPhys/ --img_width 1280 --img_height 720

   The predictions will be saved under "./results/"

3) To visualize the predictions, run:

       python Visualizations/simple.py

## How to Run on Your Data
1. Run [OpenPose](https://github.com/CMU-Perceptual-Computing-Lab/openpose) and save the predictions.

2. Process your OpenPose data to be compatible with NeuralPhyscap:

       python process_openpose.py --input_path /PATH/TO/OPENPOSE/JSON/FILE --save_path /PATH/TO/SAVE --save_file_name YOUR_DATA.npy

   This will generate a ".npy" input file to run NeuralPhyscap. Say we name the npy file "YOUR_DATA.npy".

3. Run NeuralPhyscap on the generated npy file:

       python demo.py --input_path PATH/TO/YOUR_DATA.npy --net_path ../pretrained_neuralPhys/ --img_width IMAGE_WIDTH --img_height IMAGE_HEIGHT

   Replace IMAGE_WIDTH and IMAGE_HEIGHT with your own video width and height (integer values).

## License Terms
Permission is hereby granted, free of charge, to any person or company obtaining a copy of this software and associated documentation files (the "Software") from the copyright holders to use the Software for any non-commercial purpose. Publication, redistribution and (re)selling of the software, of modifications, extensions, and derivatives of it, and of other software containing portions of the licensed Software, are not permitted. The Copyright holder is permitted to publicly disclose and advertise the use of the software by any licensee.

Packaging or distributing parts or whole of the provided software (including code, models and data) as is or as part of other software is prohibited. Commercial use of parts or whole of the provided software (including code, models and data) is strictly prohibited. Using the provided software for promotion of a commercial entity or product, or in any other manner which directly or indirectly results in commercial gains is strictly prohibited.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
## Citation
If the code is used, the licensee is required to cite the following publication in any documentation
or publication that results from the work:
```
@article{
PhysAwareTOG2021,
author = {Shimada, Soshi and Golyanik, Vladislav and Xu, Weipeng and P\'{e}rez, Patrick and Theobalt, Christian},
title = {Neural Monocular 3D Human Motion Capture with Physical Awareness},
journal = {ACM Transactions on Graphics},
month = {aug},
volume = {40},
number = {4},
articleno = {83},
year = {2021},
publisher = {ACM},
keywords = {Monocular 3D Human Motion Capture, Physical Awareness, Global 3D, Physionical Approach}
}
```

--------------------------------------------------------------------------------
/pipeline_util.py:
--------------------------------------------------------------------------------
import torch
import numpy as np
import rbdl

class PyPerspectivieDivision(torch.autograd.Function):
    @staticmethod
    def forward(ctx, p_proj):

        ctx.save_for_backward(p_proj)
        batch_size, _, _ = p_proj.shape
        p_proj = p_proj.view(batch_size, -1, 3)

        p_proj /= p_proj[:, :, 2].view(batch_size, -1, 1).clone()
        p_proj = p_proj[:, :, :-1]
        p_proj = p_proj.reshape(batch_size, -1, 1)

        return p_proj

    @classmethod
    def backward(self, ctx, grad_output):
        p_proj, = ctx.saved_tensors

        p_proj = p_proj.view(p_proj.shape[0], -1, 3)
        batch_size, n_points, _ = p_proj.shape
        p_proj = p_proj.data.numpy()

        grads = np.array([[self.deriv_pd_comp(p_proj[i][j]) for j in range(n_points)] for i in range(batch_size)])
        grads = torch.tensor(grads).float()  #.cuda()

        final_grads = torch.zeros(batch_size, n_points * 2, n_points * 3)  #.cuda()
        for i in range(n_points):
            final_grads[:, i * 2:(i + 1) * 2, i * 3:(i + 1) * 3] = grads[:, i]
        final = torch.bmm(grad_output.view(batch_size, 1, -1), final_grads).view(batch_size, -1, 1)

        return final

    @staticmethod
    def deriv_pd_comp(coord):
        # Jacobian of the perspective division (x/z, y/z) w.r.t. (x, y, z)
        jacobi = [[1 / coord[2], 0, -coord[0] / (coord[2] ** 2)],
                  [0, 1 / coord[2], -coord[1] / (coord[2] ** 2)]]
        return np.array(jacobi)

class PyForwardKinematicsQuaternionHuman36M(torch.autograd.Function):
    def __init__(self, model_addresses, target_joint_ids, delta_t=0.0001):
        #self.model = model
        self.model_addresses = model_addresses

        self.qdot_size = model_addresses["1"].qdot_size
        self.target_joint_ids = target_joint_ids
        self.q = None
        self.jacobi_inits = np.array([np.zeros((3, self.qdot_size)) for i in range(len(target_joint_ids))])
        self.delta_t = delta_t

    # @staticmethod
    def forward(self, sub_ids, q):
        self.q = q.data.numpy()
        self.models = [self.model_addresses[str(int(x))] for x in sub_ids.numpy()]
        coords = torch.tensor([[rbdl.CalcBodyToBaseCoordinates(self.models[batch], self.q[batch].flatten().astype(float), i, np.zeros(3)) for i in self.target_joint_ids] for batch in range(len(self.q))]).view(len(self.q), -1).float()

        return coords

    # @staticmethod
    def backward(self, grad_coords):

        jacobis = np.zeros((len(self.q), len(self.target_joint_ids), 3, self.qdot_size))  # np.array([np.zeros((3, self.model.qdot_size)) for i in range(len(self.target_joint_ids))])
        for batch in range(len(self.q)):
            for i, id in enumerate(self.target_joint_ids):
                rbdl.CalcPointJacobian(self.models[batch], self.q[batch].flatten().astype(float), id, np.array([0., 0., 0.]), jacobis[batch][i])
        jacobis = torch.tensor(jacobis).view(len(self.q), -1, self.models[0].qdot_size).float()
        jacobis_root_ori = jacobis[:, :, 3:6]

        quat = np.concatenate((self.q[:, 3:6].flatten(), self.q[:, -1]), 0)
        quat_inv = np.array([-quat[0], -quat[1], -quat[2], quat[3]])

        jac_angVel_quat = self.get_jacobi_anglularVel_quaternion(quat_inv)
        jac_theta_angVelQuatForm = self.get_jacobi_theta_angVel(self.delta_t, order="xyzw")

        quat_jac = torch.FloatTensor(np.dot(jacobis_root_ori, np.dot(jac_theta_angVelQuatForm, jac_angVel_quat)))

        final_jac = torch.cat((jacobis[:, :, :3], quat_jac[:, :, :3], jacobis[:, :, 6:], quat_jac[:, :, -1].view(quat_jac.shape[0], quat_jac.shape[1], 1)), 2)  #.cuda()

        return None, torch.bmm(grad_coords.view(len(self.q), 1, -1), final_jac).view(len(self.q), -1)

class PyProjection(torch.autograd.Function):
    @staticmethod
    def forward(ctx, P_tensor, p_3D):
        #print(P_tensor.shape, p_3D.shape)
        N, _, = p_3D.shape
        p_3D = p_3D.view(N, -1, 3)  # Nx15x3
        ones = torch.ones((N, p_3D.shape[1], 1))  #.cuda() # Nx15x3
        p_3D = torch.cat((p_3D, ones), 2).view(N, -1)

        p_proj = torch.bmm(P_tensor, p_3D.view(p_3D.shape[0], p_3D.shape[1], 1))
        ctx.save_for_backward(P_tensor)

        return p_proj

    @staticmethod
    def backward(ctx, grad_output):
        P_tensor, = ctx.saved_tensors
        get_ids = [x for x in range(P_tensor.shape[2]) if (x + 1) % 4 != 0]
        P_tensor_deriv = P_tensor[:, :, get_ids]
        return None, torch.bmm(grad_output.view(len(grad_output), 1, -1), P_tensor_deriv).view(len(grad_output), -1).float()
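A minimal usage sketch for the perspective-division Function above (not part of the repository; it assumes the repository root is on the Python path, and that the RBDL bindings are importable since pipeline_util.py imports rbdl at module scope):

```python
# Minimal sketch: apply PyPerspectivieDivision to one batch of two homogeneous 3D points.
import torch
from pipeline_util import PyPerspectivieDivision

# Shape (batch=1, 6, 1): two points stacked as (x, y, z, x, y, z).
p_proj = torch.tensor([[[2.0], [4.0], [2.0],
                        [3.0], [6.0], [3.0]]])
p_2d = PyPerspectivieDivision.apply(p_proj)
print(p_2d.view(-1))  # expected: [1., 2., 1., 2.] (each point divided by its z)
```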
--------------------------------------------------------------------------------
/lossFunctions.py:
--------------------------------------------------------------------------------
import torch
from Utils.angles import angle_util

class LossFunctions:

    def __init__(self):
        self.AU = angle_util()

    def compute_contact_loss(self, feet, pre_feet, labels):
        n_b, _ = labels.shape
        #labels = torch.FloatTensor([[0, 1], [0, 0], [1, 1]])
        #feet = torch.arange(0, 36).view(n_b, 4, 3) - 18
        # pre_feet = 0.2 * (torch.arange(0, 36).view(n_b, 4, 3) - 18)
        residual_feet = (feet - pre_feet).pow(2).sum(2)

        residual_feet_l = residual_feet[:, :2]
        residual_feet_r = residual_feet[:, 2:]
        residual_final = torch.cat((residual_feet_l.sum(1).view(n_b, 1), residual_feet_r.sum(1).view(n_b, 1)), 1)
        loss = (labels * residual_final).mean()
        return loss

    def compute_trans_loss(self, q0, q_trans, frame_gt_qs):
        loss_trans = (q_trans - frame_gt_qs[:, :3]).pow(2).mean()
        loss_trans_smooth = 0.3 * (q_trans - q0[:, :3]).pow(2).mean()
        return loss_trans, loss_trans_smooth

    def compute_ori_loss(self, quat, frame_gt_rotMats):
        n_b, _ = quat.shape
        out_mat = self.AU.compute_rotation_matrix_from_quaternion(quat)
        out_mat = self.AU.get_44_rotation_matrix_from_33_rotation_matrix(out_mat)  # (batch*joint_num)*4*4
        loss_rootOri = self.AU.compute_rotation_matrix_loss(out_mat, frame_gt_rotMats.view(n_b, 4, 4))  # .mean()

        return loss_rootOri

    def compute_ori_loss_quat_quat(self, quat, quat2):
        n_b, _ = quat.shape
        out_mat = self.AU.compute_rotation_matrix_from_quaternion(quat)
        out_mat = self.AU.get_44_rotation_matrix_from_33_rotation_matrix(out_mat)  # (batch*joint_num)*4*4

        out_mat2 = self.AU.compute_rotation_matrix_from_quaternion(quat2)
        out_mat2 = self.AU.get_44_rotation_matrix_from_33_rotation_matrix(out_mat2)  # (batch*joint_num)*4*4

        loss_rootOri = self.AU.compute_rotation_matrix_loss(out_mat, out_mat2.view(n_b, 4, 4))  # .mean()

        return loss_rootOri

    def compute_smooth_ori_loss(self, quat, quat_pre):
        n_b, _ = quat.shape
        out_mat = self.AU.compute_rotation_matrix_from_quaternion(quat)
        out_mat = self.AU.get_44_rotation_matrix_from_33_rotation_matrix(out_mat)  # (batch*joint_num)*4*4

        out_mat_pre = self.AU.compute_rotation_matrix_from_quaternion(quat_pre)
        out_mat_pre = self.AU.get_44_rotation_matrix_from_33_rotation_matrix(out_mat_pre)  # (batch*joint_num)*4*4

        loss_rootOri = self.AU.compute_rotation_matrix_loss(out_mat, out_mat_pre)  # .mean()

        return loss_rootOri

    def compute_five_loss(self, out_quat, out_trans, q_art, p_3D_p, p_2D, gt_qs, gt_rotMats, gt_3Ds, gt_2Ds):
        n_b, _ = out_quat.shape
        out_mat = self.AU.compute_rotation_matrix_from_quaternion(out_quat)
        out_mat = self.AU.get_44_rotation_matrix_from_33_rotation_matrix(out_mat)  # (batch*joint_num)*4*4
        loss_rootOri = self.AU.compute_rotation_matrix_loss(out_mat, gt_rotMats.view(n_b, 4, 4)) / n_b

        loss_trans = (out_trans - gt_qs[:, :3]).pow(2).sum() / n_b
        loss_q = ((torch.sin(torch.squeeze(q_art)) - torch.sin(gt_qs[:, 7:])).pow(2).sum() + (torch.cos(torch.squeeze(q_art)) - torch.cos(gt_qs[:, 7:])).pow(2).sum()) / 2
        loss_q /= n_b
        loss3D_p = (p_3D_p - gt_3Ds.view(n_b, -1)).pow(2).sum() / n_b
        loss2D = (torch.squeeze(p_2D).view(n_b, -1, 2) - gt_2Ds).pow(2).sum() / n_b

        return loss_trans, loss_rootOri, loss_q, loss3D_p, loss2D

    def compute_three_loss(self, out_quat, out_trans, q_art, gt_qs, gt_rotMats):
        batch_size, _ = out_quat.shape
        out_mat = self.AU.compute_rotation_matrix_from_quaternion(out_quat)
        out_mat = self.AU.get_44_rotation_matrix_from_33_rotation_matrix(out_mat)  # (batch*joint_num)*4*4
        loss_rootOri = self.AU.compute_rotation_matrix_loss(out_mat, gt_rotMats.view(batch_size, 4, 4))
        loss_trans = (out_trans - gt_qs[:3]).pow(2).sum()
        loss_q = ((torch.sin(torch.squeeze(q_art)) - torch.sin(gt_qs[6:])).pow(2).sum() + (torch.cos(torch.squeeze(q_art)) - torch.cos(gt_qs[6:])).pow(2).sum()) / 2

        return loss_trans, loss_rootOri, loss_q

    def compute_q_p3d_p2d_loss(self, q_art, p_3D_p, p_2D, gt_qs, gt_3Ds, gt_2Ds):
        n_b, _ = q_art.shape

        loss_q = ((torch.sin(torch.squeeze(q_art)) - torch.sin(gt_qs[:, 7:])).pow(2) + (torch.cos(torch.squeeze(q_art)) - torch.cos(gt_qs[:, 7:])).pow(2)).mean()

        loss3D_p = (p_3D_p - gt_3Ds.view(n_b, -1)).pow(2).mean()  #/n_b

        loss2D = (torch.squeeze(p_2D) - gt_2Ds.view(n_b, -1)).pow(2).mean()  #/n_b
        return loss_q, loss3D_p, loss2D

    def compute_part_q_p3d_p2d_loss(self, q_art, p_3D_p, p_2D, gt_qs, gt_3Ds, gt_2Ds):
        n_b, _ = q_art.shape
        gt_qs_art = gt_qs[:, 7:]

        loss_q_right_leg = ((torch.sin(q_art[:, [12, 14]]) - torch.sin(gt_qs_art[:, [12, 14]])).pow(2) + (torch.cos(q_art[:, [12, 14]]) - torch.cos(gt_qs_art[:, [12, 14]])).pow(2)).mean()

        loss3D_p = (p_3D_p - gt_3Ds.view(n_b, -1)).pow(2).mean()  # /n_b

        loss2D = (torch.squeeze(p_2D) - gt_2Ds.view(n_b, -1)).pow(2).mean()  # /n_b
        return loss_q_right_leg, loss3D_p, loss2D

    def compute_q_p3d_p2d_more_hand_loss(self, q_art, p_3D_p, p_2D, gt_qs, gt_3Ds, gt_2Ds, weights):
        n_b, _ = q_art.shape

        loss_q = ((torch.sin(torch.squeeze(q_art)) - torch.sin(gt_qs[:, 7:])).pow(2) + (torch.cos(torch.squeeze(q_art)) - torch.cos(gt_qs[:, 7:])).pow(2)).mean()

        residual_3D = (p_3D_p - gt_3Ds.view(n_b, -1)).view(n_b, -1, 3)
        hand_loss = residual_3D[:, 11].detach().clone().pow(2).mean() + residual_3D[:, 14].detach().clone().pow(2).mean()
        residual_3D = weights * residual_3D
        loss3D_p = residual_3D.pow(2).mean()
        residual_2D = (torch.squeeze(p_2D) - gt_2Ds.view(n_b, -1)).view(n_b, -1, 2)
        residual_2D = weights * residual_2D
        loss2D = residual_2D.pow(2).mean()
        return loss_q, loss3D_p, loss2D, hand_loss

    def irregular_angle_loss(self, q_art, max_angles, min_angles):

        mask = torch.ones(q_art.shape).cuda()
        larger_labels = ((q_art > max_angles) * mask).detach()
        larger_loss = ((q_art - max_angles) * larger_labels).pow(2).mean()
        smaller_labels = ((q_art < min_angles) * mask).detach()
        smaller_loss = ((min_angles - q_art) * smaller_labels).pow(2).mean()
        return larger_loss + smaller_loss

    def compute_q_p3d_p2d_loss_self(self, q_art, p_3D_p, p_2D, q_ref, gt_3Ds, gt_2Ds):
        n_b, _ = q_art.shape

        loss_q = ((torch.sin(torch.squeeze(q_art)) - torch.sin(q_ref[:, 6:-1])).pow(2) + (torch.cos(torch.squeeze(q_art)) - torch.cos(q_ref[:, 6:-1])).pow(2)).mean()

        loss3D_p = (p_3D_p - gt_3Ds.view(n_b, -1)).pow(2).mean()

        loss2D = (torch.squeeze(p_2D) - gt_2Ds.view(n_b, -1)).pow(2).mean()
        return loss_q, loss3D_p, loss2D
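The commented-out lines in compute_contact_loss hint at its expected shapes; the following small sketch (not part of the repository) reproduces them with dummy tensors:

```python
# Minimal sketch: exercise compute_contact_loss with 3 samples and 4 foot
# markers (left toe/heel, right toe/heel), each with a 3D position.
import torch
from lossFunctions import LossFunctions

LF = LossFunctions()
labels = torch.FloatTensor([[0, 1], [0, 0], [1, 1]])  # per-sample (left, right) contact flags
feet = torch.arange(0, 36, dtype=torch.float32).view(3, 4, 3) - 18
pre_feet = 0.2 * (torch.arange(0, 36, dtype=torch.float32).view(3, 4, 3) - 18)
loss = LF.compute_contact_loss(feet, pre_feet, labels)
print(loss)  # penalizes motion of feet that are labelled as in contact
```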
--------------------------------------------------------------------------------
/Utils/phys.py:
--------------------------------------------------------------------------------
import torch
import rbdl
import numpy as np
from Utils.angles import angle_util
AU = angle_util()


class PyForwardKinematicsQuaternion(torch.autograd.Function):

    @staticmethod
    def forward(ctx, models, target_joint_ids, delta_t, sub_ids, q):
        ctx.delta_t = delta_t
        ctx.q = q.cpu().data.numpy()
        ctx.qdot_size = models[0].qdot_size
        ctx.target_joint_ids = target_joint_ids
        ctx.models = models

        coords = torch.tensor([[rbdl.CalcBodyToBaseCoordinates(models[batch], ctx.q[batch].flatten().astype(float), i, np.zeros(3)) for i in target_joint_ids] for batch in range(len(q))]).view(len(q), -1).float()
        coords = coords  #.cuda()
        return coords

    @staticmethod
    def backward(ctx, grad_coords):
        jacobis = np.zeros((len(ctx.q), len(ctx.target_joint_ids), 3, ctx.qdot_size))
        for batch in range(len(ctx.q)):
            for i, id in enumerate(ctx.target_joint_ids):
                rbdl.CalcPointJacobian(ctx.models[batch], ctx.q[batch].flatten().astype(float), id, np.array([0., 0., 0.]), jacobis[batch][i])
        jacobis = torch.tensor(jacobis).view(len(ctx.q), -1, ctx.models[0].qdot_size).float()
        jacobis_root_ori = jacobis[:, :, 3:6]

        quat = np.concatenate((ctx.q[:, 3:6].flatten(), ctx.q[:, -1]), 0)
        quat_inv = np.array([-quat[0], -quat[1], -quat[2], quat[3]])

        jac_angVel_quat = PyForwardKinematicsQuaternion.get_jacobi_anglularVel_quaternion(ctx.delta_t, quat_inv)
        jac_theta_angVelQuatForm = PyForwardKinematicsQuaternion.get_jacobi_theta_angVel(ctx.delta_t, order="xyzw")

        quat_jac = torch.FloatTensor(np.dot(jacobis_root_ori, np.dot(jac_theta_angVelQuatForm, jac_angVel_quat)))

        final_jac = torch.cat((jacobis[:, :, :3], quat_jac[:, :, :3], jacobis[:, :, 6:], quat_jac[:, :, -1].view(quat_jac.shape[0], quat_jac.shape[1], 1)), 2).cuda()
        return None, None, None, None, torch.bmm(grad_coords.view(len(ctx.q), 1, -1).cuda(), final_jac).view(len(ctx.q), -1)

    @staticmethod
    def get_jacobi_anglularVel_quaternion(delta_t, quat):
        x = quat[0]
        y = quat[1]
        z = quat[2]
        w = quat[3]
        jac = (2 / delta_t) * np.array([[w, z, -y, x],
                                        [-z, w, x, y],
                                        [y, -x, w, z],
                                        [-x, -y, -z, w]])

        return jac

    @staticmethod
    def get_jacobi_theta_angVel(delta_t, order):

        if order == "xyzw":
            jac = np.array([[delta_t, 0, 0, 0],
                            [0, delta_t, 0, 0],
                            [0, 0, delta_t, 0],
                            ])
        elif order == "wxyz":
            jac = np.array([[0, delta_t, 0, 0],
                            [0, 0, delta_t, 0],
                            [0, 0, 0, delta_t],
                            ])
        else:
            print('not supported order of the quaternion')
        return jac


class PyPDController(torch.autograd.Function):
    @staticmethod
    def forward(ctx, action, kp, kd, q0, qdot0):

        tau = kp * (action - q0.view(-1)) - kd * qdot0.view(-1)
        ctx.save_for_backward(kp, q0)

        return tau

    @staticmethod
    def backward(ctx, grad_output):

        kp, q0, = ctx.saved_tensors

        batch_size, dof, _ = q0.shape

        return torch.bmm(grad_output.view(batch_size, 1, dof), kp * torch.eye(dof).repeat(batch_size, 1, 1)), None, None, None, None

class PyForwardDynamics(torch.autograd.Function):
    @staticmethod
    def forward(ctx, tau, M_inv):
        N, w, h = M_inv.shape
        ctx.save_for_backward(M_inv)

        return torch.bmm(M_inv, tau.view(N, -1, 1)).view(N, w)

    @staticmethod
    def backward(ctx, grad_output):

        M_inv, = ctx.saved_tensors
        N, w, h = M_inv.shape
        grad_input = grad_output.clone()
        return torch.bmm(grad_input.view(N, 1, w), M_inv).view(N, w), None


class PyPoseUpdate(torch.autograd.Function):

    @staticmethod
    def forward(ctx, delta_t, q0, qdot0, qddot):
        ctx.save_for_backward(delta_t)

        qdot = qdot0 + delta_t * qddot  #torch.bmm(delta_t, qddot)
        print(q0.shape, delta_t.shape, qdot.shape)
        q = q0 + delta_t * qdot  #torch.bmm(delta_t, qdot)
        return qdot, q

    @staticmethod
    def backward(ctx, grad_qdot, grad_q):

        delta_t, = ctx.saved_tensors
        grad_q_input = grad_q.clone()
        return None, None, None, torch.bmm(grad_q_input.view(len(grad_q), 1, -1), delta_t).view(len(grad_q), -1, 1)

class PyProjection(torch.autograd.Function):
    @staticmethod
    def forward(ctx, P_tensor, p_3D):
        N, _, = p_3D.shape
        p_3D = p_3D.view(N, -1, 3)  # Nx15x3
        ones = torch.ones((N, p_3D.shape[1], 1))  #.cuda() # Nx15x3
        p_3D = torch.cat((p_3D, ones), 2).view(N, -1)

        p_proj = torch.bmm(P_tensor, p_3D.view(p_3D.shape[0], p_3D.shape[1], 1))
        ctx.save_for_backward(P_tensor)

        return p_proj

    @staticmethod
    def backward(ctx, grad_output):
        P_tensor, = ctx.saved_tensors
        get_ids = [x for x in range(P_tensor.shape[2]) if (x + 1) % 4 != 0]
        P_tensor_deriv = P_tensor[:, :, get_ids]
        return None, torch.bmm(grad_output.view(len(grad_output), 1, -1), P_tensor_deriv).view(len(grad_output), -1).float()

def get_P_tensor(N, target_joint_ids, P):
    P_tensor = torch.zeros(N, 3 * len(target_joint_ids), 3 * len(target_joint_ids))
    for i in range(int(P_tensor.shape[1] / 3)):
        P_tensor[:, i * 3:(i + 1) * 3, i * 3:(i + 1) * 3] = P
    return P_tensor.type(torch.FloatTensor)

class PyPerspectivieDivision(torch.autograd.Function):
    @staticmethod
    def forward(ctx, p_proj):

        ctx.save_for_backward(p_proj)
        batch_size, _, _ = p_proj.shape
        p_proj = p_proj.view(batch_size, -1, 3)

        p_proj /= p_proj[:, :, 2].view(batch_size, -1, 1).clone()
        p_proj = p_proj[:, :, :-1]
        p_proj = p_proj.reshape(batch_size, -1, 1)

        return p_proj

    @classmethod
    def backward(self, ctx, grad_output):
        p_proj, = ctx.saved_tensors

        p_proj = p_proj.view(p_proj.shape[0], -1, 3)
        batch_size, n_points, _ = p_proj.shape
        p_proj = p_proj.cpu().data.numpy()

        grads = np.array([[self.deriv_pd_comp(p_proj[i][j]) for j in range(n_points)] for i in range(batch_size)])
        grads = torch.tensor(grads).float()  #.cuda()

        final_grads = torch.zeros(batch_size, n_points * 2, n_points * 3)  #.cuda()
        for i in range(n_points):
            final_grads[:, i * 2:(i + 1) * 2, i * 3:(i + 1) * 3] = grads[:, i]
        final = torch.bmm(grad_output.view(batch_size, 1, -1), final_grads).view(batch_size, -1, 1)

        return final

    @staticmethod
    def deriv_pd_comp(coord):
        # Jacobian of the perspective division (x/z, y/z) w.r.t. (x, y, z)
        jacobi = [[1 / coord[2], 0, -coord[0] / (coord[2] ** 2)],
                  [0, 1 / coord[2], -coord[1] / (coord[2] ** 2)]]
        return np.array(jacobi)
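A minimal sketch (not part of the repository) of the forward-dynamics Function above, which maps joint torques through the inverse mass matrix, qddot = M⁻¹ τ. The identity M_inv below is a dummy stand-in for a real humanoid mass matrix, and importing Utils.phys requires the RBDL bindings to be importable:

```python
# Minimal sketch: run PyForwardDynamics on dummy torques and an identity M_inv.
import torch
from Utils.phys import PyForwardDynamics

batch, dof = 2, 4
tau = torch.randn(batch, dof, requires_grad=True)
M_inv = torch.eye(dof).expand(batch, dof, dof)   # dummy inverse mass matrix
qddot = PyForwardDynamics.apply(tau, M_inv)      # qddot = M_inv @ tau per batch item
qddot.sum().backward()                           # gradients flow back to tau
print(qddot.shape, tau.grad.shape)               # torch.Size([2, 4]) torch.Size([2, 4])
```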
--------------------------------------------------------------------------------
/Utils/contacts.py:
--------------------------------------------------------------------------------
import numpy as np
import torch
import rbdl

def get_mass_mat(model, q):
    n_b, _ = q.shape
    M_np = np.zeros((n_b, model.qdot_size, model.qdot_size))
    [rbdl.CompositeRigidBodyAlgorithm(model, q[i].astype(float), M_np[i]) for i in range(n_b)]
    return M_np

def get_contact_point_in_ankle_frame(model, q, rbdl_dic, contact_link_name, ankle_name):
    point_local = np.zeros(3)
    point_ankle_world = rbdl.CalcBodyToBaseCoordinates(model, q, rbdl_dic[ankle_name], point_local)
    point_contact_world = rbdl.CalcBodyToBaseCoordinates(model, q, rbdl_dic[contact_link_name], point_local)
    ankle_rot = rbdl.CalcBodyWorldOrientation(model, q, rbdl_dic[ankle_name])
    contact_in_ankle = np.dot(ankle_rot, point_contact_world - point_ankle_world)
    return contact_in_ankle

def mat_concatenate(mat):
    out = None
    for i in range(len(mat)):
        if i == 0:
            out = mat[i]
        else:
            out = np.concatenate((out, mat[i]), 1)
    return out

def cross2dot_b(v):
    n_b, _ = v.shape
    vx = v[:, 0].view(-1, 1)
    vy = v[:, 1].view(-1, 1)
    vz = v[:, 2].view(-1, 1)
    zeros = torch.zeros(n_b).view(-1, 1)  #.cuda()#.view(-1,1)
    mat_top = torch.cat((zeros, -vz, vy), 1).view(n_b, 1, 3)
    mat_mid = torch.cat((vz, zeros, -vx), 1).view(n_b, 1, 3)
    mat_bot = torch.cat((-vy, vx, zeros), 1).view(n_b, 1, 3)
    return torch.cat((mat_top, mat_mid, mat_bot), 1)

def cross2dot_b_cpu(v):
    n_b, _ = v.shape
    vx = v[:, 0].view(-1, 1)
    vy = v[:, 1].view(-1, 1)
    vz = v[:, 2].view(-1, 1)
    zeros = torch.zeros(n_b).view(-1, 1)
    mat_top = torch.cat((zeros, -vz, vy), 1).view(n_b, 1, 3)
    mat_mid = torch.cat((vz, zeros, -vx), 1).view(n_b, 1, 3)
    mat_bot = torch.cat((-vy, vx, zeros), 1).view(n_b, 1, 3)
    return torch.cat((mat_top, mat_mid, mat_bot), 1)

def c2d_func(v):
    # Per-vector NumPy counterpart of cross2dot_b: the 3x3 skew-symmetric
    # (cross-product) matrix of a single 3-vector.
    return np.array([[0, -v[2], v[1]],
                     [v[2], 0, -v[0]],
                     [-v[1], v[0], 0]])

def cross2dot_convert(vectors):
    out = np.array(list(map(c2d_func, vectors)))
    out = mat_concatenate(out)
    return out

def get_wrench(contact_in_ankle_frame):
    n_b, _ = contact_in_ankle_frame.shape

    G_tau = cross2dot_b(contact_in_ankle_frame)
    G_lin = torch.eye(3).view(1, 3, 3)  #.cuda()
    G_lin = G_lin.expand(n_b, -1, -1)

    return torch.cat((G_tau, G_lin), 1)

def get_wrench_cpu(contact_in_ankle_frame):
    n_b, _ = contact_in_ankle_frame.shape

    G_tau = cross2dot_b_cpu(contact_in_ankle_frame)
    G_lin = torch.eye(3).view(1, 3, 3)
    G_lin = G_lin.expand(n_b, -1, -1)

    return torch.cat((G_tau, G_lin), 1)

def get_contact_jacobis6D(model, q, ids):
    jacobis = np.zeros((len(q), len(ids), 6, model.qdot_size))
    for batch in range(len(q)):
        for i, id in enumerate(ids):
            rbdl.CalcPointJacobian6D(model, q[batch].flatten().astype(float), id, np.array([0., 0., 0.]), jacobis[batch][i])
    jacobis = torch.FloatTensor(jacobis).view(len(q), -1, model.qdot_size)  #.cuda()
    return jacobis

def get_contact_jacobis6D_cpu(model, q, ids):
    jacobis = np.zeros((len(q), len(ids), 6, model.qdot_size))
    for batch in range(len(q)):
        for i, id in enumerate(ids):
            rbdl.CalcPointJacobian6D(model, q[batch].flatten().astype(float), id, np.array([0., 0., 0.]), jacobis[batch][i])
    jacobis = torch.FloatTensor(jacobis).view(len(q), -1, model.qdot_size)
    return jacobis

def get_contact_wrench(model, qs, rbdl_dic, conF, con_labels):
    n_b, _ = conF.shape
    qs = qs.detach().cpu().numpy().astype(float)

    l_toe_in_ankle = torch.FloatTensor([get_contact_point_in_ankle_frame(model, q, rbdl_dic, "left_toe", "left_ankle") for q in qs])  #.cuda()
    l_heel_in_ankle = torch.FloatTensor([get_contact_point_in_ankle_frame(model, q, rbdl_dic, "left_heel", "left_ankle") for q in qs])  #.cuda()
    r_toe_in_ankle = torch.FloatTensor([get_contact_point_in_ankle_frame(model, q, rbdl_dic, "right_toe", "right_ankle") for q in qs])  #.cuda()
    r_heel_in_ankle = torch.FloatTensor([get_contact_point_in_ankle_frame(model, q, rbdl_dic, "right_heel", "right_ankle") for q in qs])  #.cuda()

    l_toe_tau_lin_mat = get_wrench(l_toe_in_ankle)
    l_heel_tau_lin_mat = get_wrench(l_heel_in_ankle)
    r_toe_tau_lin_mat = get_wrench(r_toe_in_ankle)
    r_heel_tau_lin_mat = get_wrench(r_heel_in_ankle)

    l_toe_conF = conF[:, :3].view(n_b, 3, 1)
    l_heel_conF = conF[:, 3:6].view(n_b, 3, 1)
    r_toe_conF = conF[:, 6:9].view(n_b, 3, 1)
    r_heel_conF = conF[:, 9:].view(n_b, 3, 1)

    l_toe_tau_lin_F = torch.bmm(l_toe_tau_lin_mat, l_toe_conF)
    l_heel_tau_lin_F = torch.bmm(l_heel_tau_lin_mat, l_heel_conF)
    r_toe_tau_lin_F = torch.bmm(r_toe_tau_lin_mat, r_toe_conF)
    r_heel_tau_lin_F = torch.bmm(r_heel_tau_lin_mat, r_heel_conF)

    #print(l_toe_tau_lin_F.shape, con_labels[:,0].shape)
    l_toe_tau_lin_F = con_labels[:, 0].view(n_b, 1) * l_toe_tau_lin_F.view(n_b, 6)
    l_heel_tau_lin_F = con_labels[:, 1].view(n_b, 1) * l_heel_tau_lin_F.view(n_b, 6)
    r_toe_tau_lin_F = con_labels[:, 2].view(n_b, 1) * r_toe_tau_lin_F.view(n_b, 6)
    r_heel_tau_lin_F = con_labels[:, 3].view(n_b, 1) * r_heel_tau_lin_F.view(n_b, 6)

    l_ankle_F = l_toe_tau_lin_F + l_heel_tau_lin_F
    r_ankle_F = r_toe_tau_lin_F + r_heel_tau_lin_F

    J = get_contact_jacobis6D(model, qs, [rbdl_dic["left_ankle"], rbdl_dic["right_ankle"]])  # ankles
    JT = torch.transpose(J, 1, 2)
    JT_l = JT[:, :, :6]
    JT_r = JT[:, :, 6:]

    gen_l_F = torch.bmm(JT_l, l_ankle_F.view(n_b, 6, 1))
    gen_r_F = torch.bmm(JT_r, r_ankle_F.view(n_b, 6, 1))
    return (gen_l_F + gen_r_F).view(n_b, model.qdot_size)


def get_contact_wrench_cpu(model, qs, rbdl_dic, conF, con_labels):
    n_b, _ = conF.shape
    qs = qs.numpy().astype(float)

    l_toe_in_ankle = torch.FloatTensor([get_contact_point_in_ankle_frame(model, q, rbdl_dic, "left_toe", "left_ankle") for q in qs])
    l_heel_in_ankle = torch.FloatTensor([get_contact_point_in_ankle_frame(model, q, rbdl_dic, "left_heel", "left_ankle") for q in qs])
    r_toe_in_ankle = torch.FloatTensor([get_contact_point_in_ankle_frame(model, q, rbdl_dic, "right_toe", "right_ankle") for q in qs])
    r_heel_in_ankle = torch.FloatTensor([get_contact_point_in_ankle_frame(model, q, rbdl_dic, "right_heel", "right_ankle") for q in qs])

    l_toe_tau_lin_mat = get_wrench_cpu(l_toe_in_ankle)
    l_heel_tau_lin_mat = get_wrench_cpu(l_heel_in_ankle)
    r_toe_tau_lin_mat = get_wrench_cpu(r_toe_in_ankle)
    r_heel_tau_lin_mat = get_wrench_cpu(r_heel_in_ankle)

    l_toe_conF = conF[:, :3].view(n_b, 3, 1)
    l_heel_conF = conF[:, 3:6].view(n_b, 3, 1)
    r_toe_conF = conF[:, 6:9].view(n_b, 3, 1)
    r_heel_conF = conF[:, 9:].view(n_b, 3, 1)

    l_toe_tau_lin_F = torch.bmm(l_toe_tau_lin_mat, l_toe_conF)
    l_heel_tau_lin_F = torch.bmm(l_heel_tau_lin_mat, l_heel_conF)
    r_toe_tau_lin_F = torch.bmm(r_toe_tau_lin_mat, r_toe_conF)
    r_heel_tau_lin_F = torch.bmm(r_heel_tau_lin_mat, r_heel_conF)

    l_toe_tau_lin_F = con_labels[:, 0].view(n_b, 1) * l_toe_tau_lin_F.view(n_b, 6)
    l_heel_tau_lin_F = con_labels[:, 1].view(n_b, 1) * l_heel_tau_lin_F.view(n_b, 6)
    r_toe_tau_lin_F = con_labels[:, 2].view(n_b, 1) * r_toe_tau_lin_F.view(n_b, 6)
    r_heel_tau_lin_F = con_labels[:, 3].view(n_b, 1) * r_heel_tau_lin_F.view(n_b, 6)

    l_ankle_F = l_toe_tau_lin_F + l_heel_tau_lin_F
    r_ankle_F = r_toe_tau_lin_F + r_heel_tau_lin_F

    J = get_contact_jacobis6D_cpu(model, qs, [rbdl_dic["left_ankle"], rbdl_dic["right_ankle"]])
    JT = torch.transpose(J, 1, 2)
    JT_l = JT[:, :, :6]
    JT_r = JT[:, :, 6:]
    gen_l_F = torch.bmm(JT_l, l_ankle_F.view(n_b, 6, 1))
    gen_r_F = torch.bmm(JT_r, r_ankle_F.view(n_b, 6, 1))

    return (gen_l_F + gen_r_F).view(n_b, model.qdot_size)
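A quick check (not part of the repository; importing Utils.contacts requires the RBDL bindings) that cross2dot_b_cpu builds the skew-symmetric matrix [v]× satisfying [v]× w = v × w:

```python
# Minimal sketch: verify the batched cross-product matrix against torch.cross.
import torch
from Utils.contacts import cross2dot_b_cpu

v = torch.randn(5, 3)
w = torch.randn(5, 3)
lhs = torch.bmm(cross2dot_b_cpu(v), w.view(5, 3, 1)).view(5, 3)
rhs = torch.cross(v, w, dim=1)
print(torch.allclose(lhs, rhs))  # True
```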
--------------------------------------------------------------------------------
/Utils/angles.py:
--------------------------------------------------------------------------------
import torch
import math

class angle_util():
    def pysinc(self, x):
        return torch.sin(math.pi * x) / (math.pi * x)

    def expmap_to_quaternion(self, e):
        """
        Convert axis-angle rotations (aka exponential maps) to quaternions.
        Stable formula from "Practical Parameterization of Rotations Using the Exponential Map".
        Expects a tensor of shape (*, 3), where * denotes any number of dimensions.
        Returns a tensor of shape (*, 4).
        """
        assert e.shape[-1] == 3
        original_shape = list(e.shape)
        original_shape[-1] = 4
        e = e.reshape(-1, 3)
        theta = torch.norm(e, dim=1).view(-1, 1)
        w = torch.cos(0.5 * theta).view(-1, 1)
        xyz = 0.5 * self.pysinc(0.5 * theta / math.pi) * e
        return torch.cat((w, xyz), 1).view(original_shape)

    def quat_shortest_path(self, quat):
        n_b = quat.size()[0]
        masks = torch.ones(n_b)  #.cuda()
        larger_id = torch.flatten(torch.nonzero((quat[:, 0] < 0) * masks).detach())
        quat[larger_id] = self.q_conj(quat[larger_id].view(larger_id.shape[0], 4))
        return quat

    def quat_shortest_path_cpu(self, quat):
        n_b = quat.size()[0]
        masks = torch.ones(n_b).float()  ##.cuda()
        larger_id = torch.flatten(torch.nonzero((quat[:, 0] < 0).float() * masks).detach())
        quat[larger_id] = self.q_conj(quat[larger_id].view(larger_id.shape[0], 4))
        return quat

    def qmul(self, q, r):
        """
        Multiply quaternion(s) q with quaternion(s) r.
        Expects two equally-sized tensors of shape (*, 4), where * denotes any number of dimensions.
        Returns q*r as a tensor of shape (*, 4).
        """
        assert q.shape[-1] == 4
        assert r.shape[-1] == 4

        original_shape = q.shape

        # Compute outer product
        terms = torch.bmm(r.view(-1, 4, 1), q.view(-1, 1, 4))

        w = terms[:, 0, 0] - terms[:, 1, 1] - terms[:, 2, 2] - terms[:, 3, 3]
        x = terms[:, 0, 1] + terms[:, 1, 0] - terms[:, 2, 3] + terms[:, 3, 2]
        y = terms[:, 0, 2] + terms[:, 1, 3] + terms[:, 2, 0] - terms[:, 3, 1]
        z = terms[:, 0, 3] - terms[:, 1, 2] + terms[:, 2, 1] + terms[:, 3, 0]
        return torch.stack((w, x, y, z), dim=1).view(original_shape)

    def target_correction_batch(self, error):
        masks = torch.ones(error.size()[0], error.size()[1], )  #.cuda()

        larger_id = torch.nonzero((error > math.pi) * masks).detach()
        smaller_id = torch.nonzero((error < - math.pi) * masks).detach()

        error[larger_id[:, 0], larger_id[:, 1]] = error[larger_id[:, 0], larger_id[:, 1]] - 2 * math.pi
        error[smaller_id[:, 0], smaller_id[:, 1]] = error[smaller_id[:, 0], smaller_id[:, 1]] + 2 * math.pi
        return error

    def target_correction_batch_cpu(self, error):
        masks = torch.ones(error.size()[0], error.size()[1], ).float()  ##.cuda()

        larger_id = torch.nonzero((error > math.pi).float() * masks).detach()
        smaller_id = torch.nonzero((error < - math.pi).float() * masks).detach()

        error[larger_id[:, 0], larger_id[:, 1]] = error[larger_id[:, 0], larger_id[:, 1]] - 2 * math.pi
        error[smaller_id[:, 0], smaller_id[:, 1]] = error[smaller_id[:, 0], smaller_id[:, 1]] + 2 * math.pi
        return error

    def angle_normalize_batch(self, q_all):

        q = q_all[:, 3:-1].clone()
        masks = torch.ones(q.size()[0], q.size()[1])  #.cuda()
        mod = torch.remainder(q, 2 * math.pi)
        larger_id = torch.nonzero((mod > math.pi) * masks).detach()  #.cuda()
        smaller_id = torch.nonzero((mod <= math.pi) * masks).detach()  #.cuda()
        q[larger_id[:, 0], larger_id[:, 1]] = mod[larger_id[:, 0], larger_id[:, 1]] - 2 * math.pi
        q[smaller_id[:, 0], smaller_id[:, 1]] = mod[smaller_id[:, 0], smaller_id[:, 1]]
        q_all[:, 3:-1] = q.clone()
        return q_all

    def angle_normalize_batch_cpu(self, q_all):

        q = q_all[:, 3:-1].clone()
        masks = torch.ones(q.size()[0], q.size()[1]).float()  ##.cuda()
        mod = torch.remainder(q, 2 * math.pi)
        larger_id = torch.nonzero((mod > math.pi).float() * masks).detach()  #.cuda()
        smaller_id = torch.nonzero((mod <= math.pi).float() * masks).detach()  #.cuda()
        q[larger_id[:, 0], larger_id[:, 1]] = mod[larger_id[:, 0], larger_id[:, 1]] - 2 * math.pi
        q[smaller_id[:, 0], smaller_id[:, 1]] = mod[smaller_id[:, 0], smaller_id[:, 1]]
        q_all[:, 3:-1] = q.clone()
        return q_all

    def angle_normalize_art_batch(self, q_art):

        #q=q_all[:,3:-1].clone()
        masks = torch.ones(q_art.size()[0], q_art.size()[1])  #.cuda()
        mod = torch.remainder(q_art, 2 * math.pi)
        larger_id = torch.nonzero((mod > math.pi) * masks).detach()  #.cuda()
        smaller_id = torch.nonzero((mod <= math.pi) * masks).detach()  #.cuda()
        q_art[larger_id[:, 0], larger_id[:, 1]] = mod[larger_id[:, 0], larger_id[:, 1]] - 2 * math.pi
        q_art[smaller_id[:, 0], smaller_id[:, 1]] = mod[smaller_id[:, 0], smaller_id[:, 1]]
        return q_art


    def angle_normalize_euler_batch(self, q_all):

        q = q_all[:, 3:].clone()
        masks = torch.ones(q.size()[0], q.size()[1])  #.cuda()
        mod = torch.remainder(q, 2 * math.pi)
        larger_id = torch.nonzero((mod > math.pi) * masks).detach()  #.cuda()
        smaller_id = torch.nonzero((mod <= math.pi) * masks).detach()  #.cuda()
        q[larger_id[:, 0], larger_id[:, 1]] = mod[larger_id[:, 0], larger_id[:, 1]] - 2 * math.pi
        q[smaller_id[:, 0], smaller_id[:, 1]] = mod[smaller_id[:, 0], smaller_id[:, 1]]
        q_all[:, 3:] = q.clone()
        return q_all

    def angle_normalize_batch_exp(self, q_all):

        q = q_all[:, 3:].clone()
        masks = torch.ones(q.size()[0], q.size()[1])  #.cuda()
        mod = torch.remainder(q, 2 * math.pi)
        larger_id = torch.nonzero((mod > math.pi) * masks).detach()  #.cuda()
        smaller_id = torch.nonzero((mod <= math.pi) * masks).detach()  #.cuda()
        q[larger_id[:, 0], larger_id[:, 1]] = mod[larger_id[:, 0], larger_id[:, 1]] - 2 * math.pi
        q[smaller_id[:, 0], smaller_id[:, 1]] = mod[smaller_id[:, 0], smaller_id[:, 1]]
        q_all[:, 3:] = q.clone()
        return q_all

    def angle_clean(self, q):
        mod = q % (2 * math.pi)
        if mod >= math.pi:
            return mod - 2 * math.pi
        else:
            return mod

    def normalize_vector_prep(self, v):
        batch = v.shape[0]
        v_mag = torch.sqrt(v.pow(2).sum(1))  # batch
        v_mag = torch.max(v_mag, torch.autograd.Variable(torch.FloatTensor([1e-8])))
        v_mag = v_mag.view(batch, 1).expand(batch, v.shape[1])
        v = v / v_mag
        return v

    def normalize_vector(self, v):
        batch = v.shape[0]
        v_mag = torch.sqrt(v.pow(2).sum(1))  # batch
        v_mag = torch.max(v_mag, torch.autograd.Variable(torch.FloatTensor([1e-8])))  #.cuda())
        v_mag = v_mag.view(batch, 1).expand(batch, v.shape[1])
        v = v / v_mag
        return v

    def quat_bullet2general(self, q):
        return torch.FloatTensor([q[3], q[0], q[1], q[2]])

    def quat_bullet2general_b(self, q):
        return torch.cat((q[:, 3].view(len(q), 1), q[:, 0].view(len(q), 1), q[:, 1].view(len(q), 1), q[:, 2].view(len(q), 1)), 1)

    def quat_bullet2general_b_s(self, q):
        B, T, C = q.shape
        return torch.cat((q[:, :, 3].view(B, T, 1), q[:, :, 0].view(B, T, 1), q[:, :, 1].view(B, T, 1), q[:, :, 2].view(B, T, 1)), 2)

    def q_conj(self, q):
        w = q[:, 0]
        x = -q[:, 1]
        y = -q[:, 2]
        z = -q[:, 3]
        return torch.stack((w, x, y, z), dim=1)
    # angvel_quat = np.array([0,qdot[3],qdot[4],qdot[5]])

    def get_angvel_quat(self, qdot):
        return torch.stack((torch.zeros(len(qdot)), qdot[:, 0], qdot[:, 1], qdot[:, 2]), dim=1)  #.cuda()

    def get_angvel_quat_cpu(self, qdot):
        return torch.stack((torch.zeros(len(qdot)), qdot[:, 0], qdot[:, 1], qdot[:, 2]), dim=1)

    def quat_conj(self, q):
        return torch.FloatTensor([q[0], -q[1], -q[2], -q[3]])

    def compute_rotation_matrix_from_quaternion(self, quaternion):

        batch = quaternion.shape[0]

        quat = self.normalize_vector(quaternion)

        qw = quat[..., 0].view(batch, 1)
        qx = quat[..., 1].view(batch, 1)
        qy = quat[..., 2].view(batch, 1)
        qz = quat[..., 3].view(batch, 1)

        # Unit quaternion rotation matrices computation
        xx = qx * qx
        yy = qy * qy
        zz = qz * qz
        xy = qx * qy
        xz = qx * qz
        yz = qy * qz
        xw = qx * qw
        yw = qy * qw
        zw = qz * qw

        row0 = torch.cat((1 - 2 * yy - 2 * zz, 2 * xy - 2 * zw, 2 * xz + 2 * yw), 1)  # batch*3
        row1 = torch.cat((2 * xy + 2 * zw, 1 - 2 * xx - 2 * zz, 2 * yz - 2 * xw), 1)  # batch*3
        row2 = torch.cat((2 * xz - 2 * yw, 2 * yz + 2 * xw, 1 - 2 * xx - 2 * yy), 1)  # batch*3

        matrix = torch.cat((row0.view(batch, 1, 3), row1.view(batch, 1, 3), row2.view(batch, 1, 3)), 1)  # batch*3*3

        return matrix  #.detach() # .numpy()

    def compute_rotation_matrix_from_quaternion_prep(self, quaternion):

        batch = quaternion.shape[0]

        quat = self.normalize_vector_prep(quaternion)

        qw = quat[..., 0].view(batch, 1)
        qx = quat[..., 1].view(batch, 1)
        qy = quat[..., 2].view(batch, 1)
        qz = quat[..., 3].view(batch, 1)

        # Unit quaternion rotation matrices computation
        xx = qx * qx
        yy = qy * qy
        zz = qz * qz
        xy = qx * qy
        xz = qx * qz
        yz = qy * qz
        xw = qx * qw
        yw = qy * qw
        zw = qz * qw

        row0 = torch.cat((1 - 2 * yy - 2 * zz, 2 * xy - 2 * zw, 2 * xz + 2 * yw), 1)  # batch*3
        row1 = torch.cat((2 * xy + 2 * zw, 1 - 2 * xx - 2 * zz, 2 * yz - 2 * xw), 1)  # batch*3
        row2 = torch.cat((2 * xz - 2 * yw, 2 * yz + 2 * xw, 1 - 2 * xx - 2 * yy), 1)  # batch*3

        matrix = torch.cat((row0.view(batch, 1, 3), row1.view(batch, 1, 3), row2.view(batch, 1, 3)), 1)  # batch*3*3

        return matrix  # .detach() # .numpy()

    def cross_product(self, u, v):
        batch = u.shape[0]
        i = u[:, 1] * v[:, 2] - u[:, 2] * v[:, 1]
        j = u[:, 2] * v[:, 0] - u[:, 0] * v[:, 2]
        k = u[:, 0] * v[:, 1] - u[:, 1] * v[:, 0]

        out = torch.cat((i.view(batch, 1), j.view(batch, 1), k.view(batch, 1)), 1)  # batch*3

        return out

    def compute_rotation_matrix_from_ortho6d(self, poses):
        x_raw = poses[:, 0:3]  # batch*3
        y_raw = poses[:, 3:6]  # batch*3

        x = self.normalize_vector(x_raw)  # batch*3
        z = self.cross_product(x, y_raw)  # batch*3
        z = self.normalize_vector(z)  # batch*3
        y = self.cross_product(z, x)  # batch*3

        x = x.view(-1, 3, 1)
        y = y.view(-1, 3, 1)
        z = z.view(-1, 3, 1)
        matrix = torch.cat((x, y, z), 2)  # batch*3*3
        return matrix

    def compute_rotation_matrix_loss(self, gt_rotation_matrix, predict_rotation_matrix):
        loss_function = torch.nn.MSELoss()
        loss = loss_function(predict_rotation_matrix, gt_rotation_matrix)
        return loss

    def get_44_rotation_matrix_from_33_rotation_matrix(self, m):
        batch = m.shape[0]

        row4 = torch.autograd.Variable(torch.zeros(batch, 1, 3))  #.cuda())

        m43 = torch.cat((m, row4), 1)  # batch*4,3

        col4 = torch.autograd.Variable(torch.zeros(batch, 4, 1))  #.cuda())
        col4[:, 3, 0] = col4[:, 3, 0] + 1

        out = torch.cat((m43, col4), 2)  # batch*4*4

        return out

    def get_44_rotation_matrix_from_33_rotation_matrix_prep(self, m):
        batch = m.shape[0]

        row4 = torch.autograd.Variable(torch.zeros(batch, 1, 3))

        m43 = torch.cat((m, row4), 1)  # batch*4,3

        col4 = torch.autograd.Variable(torch.zeros(batch, 4, 1))
        col4[:, 3, 0] = col4[:, 3, 0] + 1

        out = torch.cat((m43, col4), 2)  # batch*4*4
        out = out.view(4, 4).detach().cpu().numpy()

        return out

    def compute_euler_angles_from_rotation_matrices(self, rotation_matrices):
        batch = rotation_matrices.shape[0]
        R = rotation_matrices
        sy = torch.sqrt(R[:, 0, 0] * R[:, 0, 0] + R[:, 1, 0] * R[:, 1, 0])
        singular = sy < 1e-6
        singular = singular.float()

        x = torch.atan2(R[:, 2, 1], R[:, 2, 2])
        y = torch.atan2(-R[:, 2, 0], sy)
        z = torch.atan2(R[:, 1, 0], R[:, 0, 0])

        xs = torch.atan2(-R[:, 1, 2], R[:, 1, 1])
        ys = torch.atan2(-R[:, 2, 0], sy)
        zs = R[:, 1, 0] * 0

        out_euler = torch.autograd.Variable(torch.zeros(batch, 3))
        out_euler[:, 0] = x * (1 - singular) + xs * singular
        out_euler[:, 1] = y * (1 - singular) + ys * singular
        out_euler[:, 2] = z * (1 - singular) + zs * singular

        return out_euler

    def compute_quaternions_from_rotation_matrices(self, matrices):
        batch = matrices.shape[0]

        w = torch.sqrt(1.0 + matrices[:, 0, 0] + matrices[:, 1, 1] + matrices[:, 2, 2]) / 2.0
        w = torch.max(w, torch.autograd.Variable(torch.zeros(batch)) + 1e-8)  # batch
        w4 = 4.0 * w
        x = (matrices[:, 2, 1] - matrices[:, 1, 2]) / w4
        y = (matrices[:, 0, 2] - matrices[:, 2, 0]) / w4
        z = (matrices[:, 1, 0] - matrices[:, 0, 1]) / w4

        quats = torch.cat((w.view(batch, 1), x.view(batch, 1), y.view(batch, 1), z.view(batch, 1)), 1)

        return quats
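A short sanity check (not part of the repository) of the (w, x, y, z) quaternion-to-rotation-matrix conversion above, using a 90-degree rotation about the z-axis:

```python
# Minimal sketch: verify compute_rotation_matrix_from_quaternion on a known rotation.
import math
import torch
from Utils.angles import angle_util

AU = angle_util()
half = math.pi / 4  # half of the 90-degree rotation angle
quat = torch.tensor([[math.cos(half), 0.0, 0.0, math.sin(half)]])  # (w, x, y, z)
R = AU.compute_rotation_matrix_from_quaternion(quat)
print(R[0] @ torch.tensor([1.0, 0.0, 0.0]))  # ~[0, 1, 0]: the x-axis maps to the y-axis
```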
torch.autograd.Variable(torch.zeros(batch, 1, 3))#.cuda()) 284 | 285 | m43 = torch.cat((m, row4), 1) # batch*4,3 286 | 287 | col4 = torch.autograd.Variable(torch.zeros(batch, 4, 1))#.cuda()) 288 | col4[:, 3, 0] = col4[:, 3, 0] + 1 289 | 290 | out = torch.cat((m43, col4), 2) # batch*4*4 291 | 292 | return out 293 | 294 | 295 | def get_44_rotation_matrix_from_33_rotation_matrix_prep(self,m): 296 | batch = m.shape[0] 297 | 298 | row4 = torch.autograd.Variable(torch.zeros(batch, 1, 3)) 299 | 300 | m43 = torch.cat((m, row4), 1) # batch*4,3 301 | 302 | col4 = torch.autograd.Variable(torch.zeros(batch, 4, 1)) 303 | col4[:, 3, 0] = col4[:, 3, 0] + 1 304 | 305 | out = torch.cat((m43, col4), 2) # batch*4*4 306 | out = out.view(4, 4).detach().cpu().numpy() 307 | 308 | return out 309 | 310 | 311 | def compute_euler_angles_from_rotation_matrices(self,rotation_matrices): 312 | batch = rotation_matrices.shape[0] 313 | R = rotation_matrices 314 | sy = torch.sqrt(R[:, 0, 0] * R[:, 0, 0] + R[:, 1, 0] * R[:, 1, 0]) 315 | singular = sy < 1e-6 316 | singular = singular.float() 317 | 318 | x = torch.atan2(R[:, 2, 1], R[:, 2, 2]) 319 | y = torch.atan2(-R[:, 2, 0], sy) 320 | z = torch.atan2(R[:, 1, 0], R[:, 0, 0]) 321 | 322 | xs = torch.atan2(-R[:, 1, 2], R[:, 1, 1]) 323 | ys = torch.atan2(-R[:, 2, 0], sy) 324 | zs = R[:, 1, 0] * 0 325 | 326 | out_euler = torch.autograd.Variable(torch.zeros(batch, 3)) 327 | out_euler[:, 0] = x * (1 - singular) + xs * singular 328 | out_euler[:, 1] = y * (1 - singular) + ys * singular 329 | out_euler[:, 2] = z * (1 - singular) + zs * singular 330 | 331 | return out_euler 332 | 333 | 334 | def compute_quaternions_from_rotation_matrices(self,matrices): 335 | batch = matrices.shape[0] 336 | 337 | w = torch.sqrt(1.0 + matrices[:, 0, 0] + matrices[:, 1, 1] + matrices[:, 2, 2]) / 2.0 338 | w = torch.max(w, torch.autograd.Variable(torch.zeros(batch)) + 1e-8) # batch 339 | w4 = 4.0 * w 340 | x = (matrices[:, 2, 1] - matrices[:, 1, 2]) / w4 341 | y = (matrices[:, 0, 2] - matrices[:, 2, 0]) / w4 342 | z = (matrices[:, 1, 0] - matrices[:, 0, 1]) / w4 343 | 344 | quats = torch.cat((w.view(batch, 1), x.view(batch, 1), y.view(batch, 1), z.view(batch, 1)), 1) 345 | 346 | return quats 347 | 348 | -------------------------------------------------------------------------------- /Utils/initializer.py: -------------------------------------------------------------------------------- 1 | 2 | import pybullet as p 3 | import numpy as np 4 | import torch 5 | import math 6 | 7 | import rbdl 8 | class Initializer(): 9 | def __init__(self,batch_size,target_joints): 10 | self.rbdl_joint_dic = { 11 | "base": 1, "left_hip": 3, "left_knee": 6, "left_ankle": 7, "left_toe": 11, "left_heel": 12, "right_hip": 13, 12 | "right_knee": 16, "right_ankle": 17, "right_toe": 21, "right_heel": 22, "neck": 37, "left_clavicle": 23, "left_shoulder": 25, "left_elbow": 27, 13 | "left_wrist": 29, "right_clavicle": 30, "right_shoulder": 32, "right_elbow": 34, "right_wrist": 36 14 | } 15 | self.rbdl_marker_dic = { 16 | "base": 1, "left_hip": 4, "left_knee": 6, "left_ankle": 7, "left_toe": 11, "left_heel": 12, "right_hip": 15, 17 | "right_knee": 16, "right_ankle": 17, "right_toe": 21, "right_heel": 22, "neck": 37, "left_clavicle": 23, "left_shoulder": 25, "left_elbow": 27, 18 | "left_wrist": 29, "right_clavicle": 30, "right_shoulder": 32, "right_elbow": 34, "right_wrist": 36 19 | } 20 | self.target_joints = target_joints 21 | self.target_joint_ids = [self.rbdl_joint_dic[key] for key in self.target_joints] 22 | self.target_marker_ids 
= [self.rbdl_marker_dic[key] for key in self.target_joints] 23 | self.scaler= 2000 24 | self.K = np.array([752.6881, 0, 517.431, 0, 0, 752.8311, 500.631, 0, 0, 0, 1, 0, 0, 0, 0, 1]).reshape(4, 4)#[:3, :3] 25 | #self.RT = np.array([ -0.0121125, 0.0119637, -0.9998551, -708.5256/self.scaler, 0.07398777, -0.9971766, -0.0128279, 887.3783/self.scaler, -0.9971856, -0.07413242, 0.01119314, 3340.92/self.scaler, 0, 0, 0, 1 ]).reshape(4,4) 26 | self.RT = np.array([1, 0, 0, -0, 0, 1, 0, 0, 0, 0, 1,0, 0, 0, 0, 1 ]).reshape(4,4) 27 | self.batch_size=batch_size 28 | self.rbdl2bullet = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 20, 21, 22] 29 | self.bullet2rbdl = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 34, 35, 36, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33] 30 | 31 | def get_rbdl_dic(self): 32 | return self.rbdl_joint_dic 33 | 34 | def change_color_bullet(self,id_robot,color): 35 | for j in range(p.getNumJoints(id_robot)): 36 | p.changeVisualShape(id_robot, j, rgbaColor=color) 37 | return 0 38 | def remove_collision(self,robot1,robot2): 39 | for i in range(p.getNumJoints(robot1)): 40 | for j in range(p.getNumJoints(robot2)): 41 | p.setCollisionFilterPair(robot1, robot2, i, j, 0) 42 | def get_rbdl2bullet(self): 43 | return self.rbdl2bullet 44 | 45 | def get_lthrth_ids(self): 46 | return np.array([self.rbdl_joint_dic[key] for key in ["left_toe","left_heel","right_toe","right_heel",]]) 47 | 48 | def get_target_joint_ids(self): 49 | return self.target_joint_ids 50 | 51 | def get_target_marker_ids(self): 52 | return self.target_marker_ids 53 | 54 | def get_P_tensor(self,P): 55 | P_tensor = torch.zeros(self.batch_size, 3 * len(self.target_joint_ids), 4 * len(self.target_joint_ids)) 56 | for i in range(int(P_tensor.shape[1] / 3)): 57 | P_tensor[:, i * 3:(i + 1) * 3, i * 4:(i + 1) * 4] = P 58 | return P_tensor.type(torch.FloatTensor) 59 | 60 | def get_projection_and_deriv(self): 61 | P = torch.FloatTensor(np.dot(self.K, self.RT)[:3]) 62 | P_tensor = self.get_P_tensor(P) 63 | get_ids_P = [x for x in range(P_tensor.shape[2]) if (x + 1) % 4 != 0] 64 | P_tensor_deriv = P_tensor[:, :, get_ids_P] 65 | return P_tensor,P_tensor_deriv 66 | 67 | def get_bone_length(self,model): 68 | init_q = np.zeros(model.q_size) 69 | init_q[-1] = 1 70 | target_joints = ["base", "head", "neck", "left_hip", "left_knee", "left_ankle", "left_toe", "right_hip", "right_knee", "right_ankle", "right_toe", "left_shoulder", "left_elbow", "left_wrist", "right_shoulder", "right_elbow", "right_wrist"] 71 | joint_pos_dic = dict( [(key, rbdl.CalcBodyToBaseCoordinates(model, init_q, self.rbdl_joint_dic[key], np.zeros(3))) for key in target_joints]) 72 | joint_pairs = [("head", "neck"), ("neck", "base"), ("base", "left_hip"), ("base", "right_hip"), 73 | ("left_hip", "left_knee"), ("left_knee", "left_ankle"), ("left_ankle", "left_toe"), 74 | ("right_hip", "right_knee"), ("right_knee", "right_ankle"), ("right_ankle", "right_toe"), 75 | ("left_shoulder", "right_shoulder"), ("left_shoulder", "left_elbow"), 76 | ("left_elbow", "left_wrist"), 77 | ("right_shoulder", "right_elbow"), ("right_elbow", "right_wrist")] 78 | bone_len_list = [] 79 | for i, pairs in enumerate(joint_pairs): 80 | length = np.linalg.norm(joint_pos_dic[pairs[0]] - joint_pos_dic[pairs[1]]) 81 | bone_len_list.append(length) 82 | return np.array(bone_len_list) 83 | 84 | def model_bone_length_getter(self,model_addresses): 85 | model_bone_len_dic = {} 86 | for key in 
model_addresses.keys(): 87 | model_bone_len_dic[key] = self.get_bone_length(model_addresses[key]) 88 | return model_bone_len_dic 89 | 90 | class InitializerConsistentHumanoid(Initializer): 91 | def __init__(self,batch_size,target_joints): 92 | self.rbdl_joint_dic = { 93 | "base": 1, "head": 40, "neck": 37, "left_hip": 3, "left_knee": 7, "left_ankle": 8, "left_toe": 11, 94 | "left_heel": 12, "right_hip": 13, "right_knee": 17, "right_ankle": 18, "right_toe": 21, "right_heel": 22, "left_clavicle": 23, 95 | "left_shoulder": 25, "left_elbow": 27, "left_wrist": 29, "right_clavicle": 30, "right_shoulder": 32, "right_elbow": 34, "right_wrist": 36 96 | } 97 | self.gt_mpi_dic = { 98 | "base": 4, "head": 7, "neck": 5, "left_hip": 18, "left_knee": 19, "left_ankle": 20, "left_toe": 22, 99 | "right_hip": 23, 100 | "right_knee": 24, "right_ankle": 25, "right_toe": 27, "left_clavicle": 8, "left_shoulder": 9, "left_elbow": 10, 101 | "left_wrist": 11, "right_clavicle": 13, "right_shoulder": 14, "right_elbow": 15, "right_wrist": 16 102 | } 103 | self.gt_human36M_dic={ 104 | "base":0,"left_hip":6,"left_knee":7,"left_ankle":8, "left_toe":9,"right_hip":1,"right_knee":2,"right_ankle":3,"right_toe":4, "neck":13, "head":15, 105 | "left_shoulder":17,"left_elbow":18,"left_wrist":19,"right_shoulder":25, "right_elbow":26,"right_wrist":27 106 | } 107 | self.gt_DeepCap_dic = { 108 | "base": 14, "left_hip": 11, "left_knee": 12, "left_ankle": 13, "left_toe": 16, "right_hip": 8, 109 | "right_knee": 9, "right_ankle": 10, "right_toe": 15, "neck": 1, "head": 0, 110 | "left_shoulder": 5, "left_elbow": 6, "left_wrist": 7, "right_shoulder": 2, "right_elbow": 3, 111 | "right_wrist": 4 112 | } 113 | self.target_joints = target_joints 114 | self.target_joint_ids = [self.rbdl_joint_dic[key] for key in self.target_joints] 115 | #self.target_marker_ids = [self.rbdl_marker_dic[key] for key in self.target_joints] 116 | self.scaler= 1000 117 | self.batch_size=batch_size 118 | 119 | """ caution!!!! 
this might be opposite!""" 120 | self.bullet2rbdl = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 34, 35, 36, 37, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33] 121 | self.rbdl2bullet = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 20, 21, 22, 23] 122 | 123 | def get_max_min_anlges(self): 124 | max_angles=math.pi*torch.ones(self.batch_size,len(self.bullet2rbdl)) 125 | min_angles=-math.pi*torch.ones(self.batch_size,len(self.bullet2rbdl)) 126 | #print(max_angles.shape,min_angles) 127 | max_min_dic ={ 128 | 'left_hip_X': (1, [-2.5, 1]), 129 | 'left_hip_Y': (2,[-math.pi/2,math.pi/2]), 130 | 'left_hip_Z': (3, [-0.8,1.57]), 131 | 'left_knee_Y': (4, [0, math.pi]), 132 | 'left_ankle_X': (5 ,[-0.72, 0.72]), 133 | 'left_ankle_Y': (6, [-math.pi/2,math.pi/2]), 134 | 'left_ankle_Z': (7, [-0.54, 0.54]), 135 | 'left_toe': (8, [0,0]), 136 | 'left_heel': (9, [0,0]), 137 | 138 | 'right_hip_X': (11, [-2.5,1]), 139 | 'right_hip_Y': (12, [-math.pi/2,math.pi/2]), 140 | 'right_hip_Z': (13, [-1.57, 0.8]), 141 | 'right_knee_Y': (14, [0, math.pi]), 142 | 'right_ankle_X': (15, [-0.72, 0.72]), 143 | 'right_ankle_Y': (16, [-math.pi / 2, math.pi / 2]), 144 | 'right_ankle_Z': (17, [-0.54, 0.54]), 145 | 'right_toe': (18, [0, 0]), 146 | 'right_heel': (19, [0, 0]), 147 | 148 | 'neck_X': (34, [-1, 1]), 149 | 'neck_Y': (35, [-math.pi / 2, math.pi / 2]), 150 | 'neck_Z': (36, [-0.6, 0.6]), 151 | } 152 | for key in max_min_dic.keys(): 153 | index = max_min_dic[key][0] 154 | min_value = max_min_dic[key][1][0] 155 | max_value = max_min_dic[key][1][1] 156 | max_angles[:,index]=max_value 157 | min_angles[:,index]=min_value 158 | return max_angles, min_angles 159 | 160 | class InitializerConsistentHumanoid2(Initializer): 161 | def __init__(self,batch_size,target_joints): 162 | self.rbdl_joint_dic = { 163 | "base": 1, "head": 42, "neck": 39, "left_hip": 3, "left_knee": 7, "left_ankle": 8, "left_heel": 11, "left_toe": 12,"right_hip": 13, "right_knee": 17, "right_ankle": 18, "right_heel": 21,"right_toe": 22, 164 | "left_clavicle": 23, "left_shoulder": 25, "left_elbow": 28, "left_wrist": 30, "right_clavicle": 31, "right_shoulder": 33, "right_elbow": 36, "right_wrist": 38 165 | } 166 | 167 | self.gt_mpi_dic = { 168 | "base": 4, "head": 7, "neck": 5, "left_hip": 18, "left_knee": 19, "left_ankle": 20, "left_toe": 22, 169 | "right_hip": 23, 170 | "right_knee": 24, "right_ankle": 25, "right_toe": 27, "left_clavicle": 8, "left_shoulder": 9, "left_elbow": 10, 171 | "left_wrist": 11, "right_clavicle": 13, "right_shoulder": 14, "right_elbow": 15, "right_wrist": 16 172 | } 173 | self.gt_human36M_dic={ 174 | "base":0,"left_hip":6,"left_knee":7,"left_ankle":8, "left_toe":9,"right_hip":1,"right_knee":2,"right_ankle":3,"right_toe":4, "neck":13, "head":15, 175 | "left_shoulder":17,"left_elbow":18,"left_wrist":19,"right_shoulder":25, "right_elbow":26,"right_wrist":27 176 | } 177 | self.gt_DeepCap_dic = { 178 | "base": 14, "left_hip": 11, "left_knee": 12, "left_ankle": 13, "left_toe": 16, "right_hip": 8, 179 | "right_knee": 9, "right_ankle": 10, "right_toe": 15, "neck": 1, "head": 0, 180 | "left_shoulder": 5, "left_elbow": 6, "left_wrist": 7, "right_shoulder": 2, "right_elbow": 3, 181 | "right_wrist": 4 182 | } 183 | self.target_joints = target_joints 184 | self.target_joint_ids = [self.rbdl_joint_dic[key] for key in self.target_joints] 185 | #self.target_marker_ids = [self.rbdl_marker_dic[key] for key in self.target_joints] 186 | 
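# Note: rbdl_joint_dic maps joint names to body IDs of the RBDL/URDF model, while the gt_*_dic tables map
# the same names into ground-truth skeleton layouts (judging by the key names: MPI-INF-3DHP-style, Human3.6M
# and DeepCap); only the rbdl dictionary feeds target_joint_ids above. The scaler of 1000 set just below
# presumably converts millimetres to metres. Illustrative lookup with a hypothetical two-joint subset:
#   ini = InitializerConsistentHumanoid2(1, ["head", "neck"])
#   ini.target_joint_ids   # -> [42, 39], the RBDL body IDs of head and neck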
self.scaler= 1000 187 | self.batch_size=batch_size 188 | 189 | """ caution!!!! this might be opposite!""" 190 | self.bullet2rbdl = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,36,37,38,39,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35] 191 | self.rbdl2bullet = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,20,21,22,23] 192 | 193 | def get_max_min_anlges(self): 194 | max_angles=math.pi*torch.ones(self.batch_size,len(self.bullet2rbdl)) 195 | min_angles=-math.pi*torch.ones(self.batch_size,len(self.bullet2rbdl)) 196 | max_min_dic ={ 197 | 'left_hip_X': (1, [-2.5, 1]), 198 | 'left_hip_Y': (2,[-math.pi/2,math.pi/2]), 199 | 'left_hip_Z': (3, [-0.8,1.57]), 200 | 'left_knee_Y': (4, [0, math.pi]), 201 | 'left_ankle_X': (5 ,[-0.72, 0.72]), 202 | 'left_ankle_Y': (6, [-math.pi/2,math.pi/2]), 203 | 'left_ankle_Z': (7, [-0.54, 0.54]), 204 | 'left_toe': (8, [0,0]), 205 | 'left_heel': (9, [0,0]), 206 | 207 | 'right_hip_X': (11, [-2.5,1]), 208 | 'right_hip_Y': (12, [-math.pi/2,math.pi/2]), 209 | 'right_hip_Z': (13, [-1.57, 0.8]), 210 | 'right_knee_Y': (14, [0, math.pi]), 211 | 'right_ankle_X': (15, [-0.72, 0.72]), 212 | 'right_ankle_Y': (16, [-math.pi / 2, math.pi / 2]), 213 | 'right_ankle_Z': (17, [-0.54, 0.54]), 214 | 'right_toe': (18, [0, 0]), 215 | 'right_heel': (19, [0, 0]), 216 | 217 | 218 | 'left_clavicle_ry': (20, [-0.37,0.37]), 219 | 'left_clavicle_rz': (21, [-2.2,0.6]), 220 | 'left_shoulder_rx': (22, [-1.59,1.59]), 221 | 'left_shoulder_ry': (23, [-1.6,0.66]), 222 | 'left_shoulder_rz': (24, [-2.2,0.4]), 223 | 'left_elbow_rx': (25, [-0.3,0.3]), 224 | 'left_elbow_ry' : (26, [-2.8,0.0]), 225 | 226 | 'right_clavicle_ry': (28, [-0.37,0.37]), 227 | 'right_clavicle_rz': (29, [-0.6,2.2]), 228 | 'right_shoulder_rx': (30, [-1.59,1.59]), 229 | 'right_shoulder_ry': (31, [-0.66,1.6]), 230 | 'right_shoulder_rz': (32, [-0.4,2.2]), 231 | 'right_elbow_rx': (33, [-0.3,0.3]), 232 | 'right_elbow_ry' : (34, [0.0,2.8]), 233 | 234 | 'neck_X': (36, [-0.4, 0.4]), #'neck_X': (34, [-0.4, 0.4]), 235 | 'neck_Y': (37, [-1.1, 1.1]),#'neck_Y': (35, [-1.1, 1.1]), 236 | 'neck_Z': (38, [-0.36, 0.36]),#'neck_Z': (36, [-0.36, 0.36]), 237 | } 238 | for key in max_min_dic.keys(): 239 | index = max_min_dic[key][0] 240 | min_value = max_min_dic[key][1][0] 241 | max_value = max_min_dic[key][1][1] 242 | max_angles[:,index]=max_value 243 | min_angles[:,index]=min_value 244 | return max_angles, min_angles -------------------------------------------------------------------------------- /networks.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | import torch 3 | import math 4 | 5 | class GRFNet(nn.Module): 6 | def __init__(self, input_dim, output_dim): 7 | super(GRFNet, self).__init__() 8 | hidden = 512 9 | self.fc1 = nn.Linear(input_dim, hidden) 10 | self.fc2 = nn.Linear(hidden, hidden) 11 | self.fc3 = nn.Linear(hidden, hidden) 12 | self.fc_conF = nn.Linear(hidden, 4 * 3) 13 | self.LReLU1 = nn.LeakyReLU(0.1) 14 | self.LReLU2 = nn.LeakyReLU(0.1) 15 | self.LReLU3 = nn.LeakyReLU(0.1) 16 | 17 | self.sig = nn.Sigmoid() 18 | 19 | def forward(self, x): 20 | x = self.LReLU1(self.fc1(x)) 21 | x = self.LReLU2(self.fc2(x)) 22 | x = self.LReLU3(self.fc3(x)) 23 | conF = 10 * self.fc_conF(x) 24 | return conF 25 | class DynamicNetwork(nn.Module): 26 | def __init__(self, input_dim,output_dim,offset_coef): 27 | super(DynamicNetwork, self).__init__() 28 | hidden = 1024 29 | self.bn1 = nn.BatchNorm1d(input_dim, affine=True) 30 | self.bn2 = 
nn.BatchNorm1d(hidden, affine=True) 31 | self.bn3 = nn.BatchNorm1d(hidden, affine=True) 32 | 33 | self.tanh = nn.Tanh() 34 | self.fc1 = nn.Linear(input_dim, hidden) 35 | self.fc2 = nn.Linear(hidden, hidden) 36 | self.fc_offset = nn.Linear(hidden, output_dim) 37 | self.fc_gains = nn.Linear(hidden, output_dim) 38 | self.sig=nn.Sigmoid() 39 | self.LReLU = nn.LeakyReLU(0.1) 40 | self.offset_coef=offset_coef 41 | def forward(self, x): 42 | x = self.LReLU( self.fc1(x))#) 43 | x = self.LReLU( self.fc2(x))#) 44 | gains = 2 *self.sig(self.fc_gains(x)) 45 | offset =self.offset_coef * self.tanh(self.fc_offset(x)) 46 | 47 | return gains,offset 48 | 49 | class TransCan3Dkeys2(nn.Module): 50 | def __init__(self, in_channels=74, num_features=256, out_channels=44, time_window=10, num_blocks=2): 51 | super().__init__() 52 | self.in_channels = in_channels 53 | self.num_features = num_features 54 | self.out_channels = out_channels 55 | self.num_blocks = num_blocks 56 | self.time_window = time_window 57 | self.conv1 = nn.Sequential( 58 | nn.ReplicationPad1d(1), 59 | nn.Conv1d(self.in_channels, self.num_features, kernel_size=3, bias=False ), 60 | nn.BatchNorm1d(self.num_features), 61 | nn.ReLU(inplace=True), 62 | nn.Dropout(p=0.25) 63 | ) 64 | self._make_blocks() 65 | self.pad = nn.ReplicationPad1d(4) 66 | self.relu = nn.ReLU(inplace=True) 67 | self.drop = nn.Dropout(p=0.25) 68 | self.reduce = nn.Conv1d(self.num_features, self.num_features, kernel_size= self.time_window ) 69 | self.embedding_3d_1 = nn.Linear(3*16,500) 70 | self.embedding_3d_2 = nn.Linear(500,500) 71 | self.LReLU1=nn.LeakyReLU() 72 | self.LReLU2=nn.LeakyReLU() 73 | self.LReLU3=nn.LeakyReLU() 74 | self.LReLU4=nn.LeakyReLU() 75 | self.out1 = nn.Linear(self.num_features+500, self.num_features) 76 | self.out2 = nn.Linear(self.num_features, self.num_features) 77 | self.out3 = nn.Linear(self.num_features, self.out_channels) 78 | 79 | def _make_blocks(self): 80 | layers_conv = [] 81 | layers_bn = [] 82 | for i in range(self.num_blocks): 83 | layers_conv.append(nn.Conv1d(self.num_features, self.num_features, kernel_size=5, bias=False,dilation=2)) 84 | layers_bn.append(nn.BatchNorm1d(self.num_features)) 85 | self.layers_conv = nn.ModuleList(layers_conv) 86 | self.layers_bn = nn.ModuleList(layers_bn) 87 | 88 | def forward(self, p2ds,p3d): 89 | """ 90 | Args: 91 | x - (B x T x J x C) 92 | """ 93 | 94 | B,T,C = p2ds.shape 95 | x = p2ds.permute((0, 2, 1)) 96 | x = self.conv1(x) 97 | for i in range(self.num_blocks): 98 | pre = x 99 | x = self.pad(x) 100 | x = self.layers_conv[i](x) 101 | x = self.layers_bn[i](x) 102 | x = self.drop(self.relu(x)) 103 | x = pre + x 104 | x_2d = self.relu(self.reduce(x)) 105 | x_2d = x_2d.view(B, -1) 106 | x_3d = self.LReLU1(self.embedding_3d_1(p3d)) 107 | x = torch.cat((x_2d,x_3d),1) 108 | x = self.LReLU3(self.out1(x)) 109 | x = self.LReLU4(self.out2(x)) 110 | x = self.out3(x) 111 | return x 112 | 113 | class TransCan3Dkeys(nn.Module): 114 | def __init__(self, in_channels=74, num_features=256, out_channels=44, time_window=10, num_blocks=2): 115 | super().__init__() 116 | self.in_channels = in_channels 117 | self.num_features = num_features 118 | self.out_channels = out_channels 119 | self.num_blocks = num_blocks 120 | self.time_window = time_window 121 | self.conv1 = nn.Sequential( 122 | nn.ReplicationPad1d(1), 123 | nn.Conv1d(self.in_channels, self.num_features, kernel_size=3, bias=False ), 124 | nn.BatchNorm1d(self.num_features), 125 | nn.ReLU(inplace=True), 126 | nn.Dropout(p=0.25) 127 | ) 128 | self._make_blocks() 129 | 
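# Temporal geometry of this backbone, derived from the layer shapes: in forward, each residual block's input
# is padded by 4 on each side (self.pad, defined just below) before its kernel-5, dilation-2 convolution
# (effective receptive field (5-1)*2+1 = 9), so the sequence length T is preserved through every block;
# self.reduce with kernel_size == time_window then collapses the T axis to length 1.
# Minimal shape check (a sketch; the 32-channel, window-10 configuration matches the instantiation in
# demo.py, the batch size of 2 is arbitrary):
#   net = TransCan3Dkeys(in_channels=32, num_features=1024, out_channels=3, num_blocks=4)
#   out = net(torch.randn(2, 10, 32), torch.randn(2, 3 * 16))   # 2D window + 16 flattened 3D joints
#   out.shape   # -> torch.Size([2, 3])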
self.pad = nn.ReplicationPad1d(4) 130 | self.relu = nn.ReLU(inplace=True) 131 | self.drop = nn.Dropout(p=0.25) 132 | self.reduce = nn.Conv1d(self.num_features, self.num_features, kernel_size= self.time_window ) 133 | self.embedding_3d_1 = nn.Linear(3*16,500) 134 | self.embedding_3d_2 = nn.Linear(500,500) 135 | self.LReLU1=nn.LeakyReLU() 136 | self.LReLU2=nn.LeakyReLU() 137 | self.LReLU3=nn.LeakyReLU() 138 | self.out1 = nn.Linear(self.num_features+500, self.num_features) 139 | self.out2 = nn.Linear(self.num_features, self.out_channels) 140 | 141 | def _make_blocks(self): 142 | layers_conv = [] 143 | layers_bn = [] 144 | for i in range(self.num_blocks): 145 | layers_conv.append(nn.Conv1d(self.num_features, self.num_features, kernel_size=5, bias=False,dilation=2)) 146 | layers_bn.append(nn.BatchNorm1d(self.num_features)) 147 | self.layers_conv = nn.ModuleList(layers_conv) 148 | self.layers_bn = nn.ModuleList(layers_bn) 149 | 150 | def forward(self, p2ds,p3d): 151 | """ 152 | Args: 153 | x - (B x T x J x C) 154 | """ 155 | 156 | B,T,C = p2ds.shape 157 | x = p2ds.permute((0, 2, 1)) 158 | x = self.conv1(x) 159 | for i in range(self.num_blocks): 160 | pre = x 161 | x = self.pad(x) 162 | x = self.layers_conv[i](x) 163 | x = self.layers_bn[i](x) 164 | x = self.drop(self.relu(x)) 165 | x = pre + x 166 | x_2d = self.relu(self.reduce(x)) 167 | x_2d = x_2d.view(B, -1) 168 | x_3d = self.LReLU1(self.embedding_3d_1(p3d)) 169 | x = torch.cat((x_2d,x_3d),1) 170 | x = self.LReLU3(self.out1(x)) 171 | x = self.out2(x) 172 | return x 173 | 174 | class ContactEstimationNetwork(nn.Module): 175 | def __init__(self, in_channels=74, num_features=256, out_channels=44, time_window=10, num_blocks=2): 176 | super().__init__() 177 | self.in_channels = in_channels 178 | self.num_features = num_features 179 | self.out_channels = out_channels 180 | self.num_blocks = num_blocks 181 | self.time_window = time_window 182 | self.conv1 = nn.Sequential( 183 | nn.ReplicationPad1d(1), 184 | nn.Conv1d(self.in_channels, self.num_features, kernel_size=3, bias=False ), 185 | nn.BatchNorm1d(self.num_features), 186 | nn.ReLU(inplace=True), 187 | nn.Dropout(p=0.25) 188 | ) 189 | self._make_blocks() 190 | self.pad = nn.ReplicationPad1d(4) 191 | self.relu = nn.ReLU(inplace=True) 192 | self.drop = nn.Dropout(p=0.25) 193 | self.reduce = nn.Conv1d(self.num_features, self.num_features, kernel_size= self.time_window ) 194 | 195 | self.out = nn.Linear(self.num_features, self.out_channels) 196 | self.sig = nn.Sigmoid() 197 | def _make_blocks(self): 198 | layers_conv = [] 199 | layers_bn = [] 200 | for i in range(self.num_blocks): 201 | layers_conv.append(nn.Conv1d(self.num_features, self.num_features, kernel_size=5, bias=False,dilation=2)) 202 | layers_bn.append(nn.BatchNorm1d(self.num_features)) 203 | self.layers_conv = nn.ModuleList(layers_conv) 204 | self.layers_bn = nn.ModuleList(layers_bn) 205 | 206 | def forward(self,x): 207 | """ 208 | Args: 209 | x - (B x T x J x C) 210 | """ 211 | 212 | B,T,C = x.shape 213 | 214 | x = x.permute((0, 2, 1)) 215 | x = self.conv1(x) 216 | 217 | for i in range(self.num_blocks): 218 | pre = x 219 | x = self.pad(x) 220 | x = self.layers_conv[i](x) 221 | x = self.layers_bn[i](x) 222 | x = self.drop(self.relu(x)) 223 | x = pre + x 224 | x = self.relu(self.reduce(x)) 225 | x = x.view(B, -1) 226 | x = self.out(x) 227 | pred = self.sig(x) 228 | return pred 229 | 230 | class TargetPoseNetOriCon(nn.Module): 231 | def __init__(self, in_channels=74, num_features=256, out_channels=44, time_window=10, num_blocks=2): 232 | 
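# This variant shares the dilated temporal-conv backbone of the other TargetPoseNet classes but ends in two
# heads: outCon -> sigmoid over 4 contact probabilities, and outOri -> 4 raw quaternion components (left
# unnormalized here; the other pose networks in this repo have their quaternions normalized by the caller,
# so presumably the same is expected of consumers of this class). Illustrative shapes (hypothetical
# constructor values; this class is not instantiated in demo.py):
#   con, ori = TargetPoseNetOriCon(in_channels=32, num_features=1024, num_blocks=4)(torch.randn(2, 10, 32))
#   con.shape, ori.shape   # -> torch.Size([2, 4]), torch.Size([2, 4])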
super().__init__() 233 | self.in_channels = in_channels 234 | self.num_features = num_features 235 | self.out_channels = out_channels 236 | self.num_blocks = num_blocks 237 | self.time_window = time_window 238 | self.conv1 = nn.Sequential( 239 | nn.ReplicationPad1d(1), 240 | nn.Conv1d(self.in_channels, self.num_features, kernel_size=3, bias=False ), 241 | nn.BatchNorm1d(self.num_features), 242 | nn.ReLU(inplace=True), 243 | nn.Dropout(p=0.25) 244 | ) 245 | self._make_blocks() 246 | self.pad = nn.ReplicationPad1d(4) 247 | self.relu = nn.ReLU(inplace=True) 248 | self.drop = nn.Dropout(p=0.25) 249 | self.reduce = nn.Conv1d(self.num_features, self.num_features, kernel_size= self.time_window ) 250 | self.tanh=nn.Tanh() 251 | self.sig = nn.Sigmoid() 252 | self.outCon = nn.Linear(self.num_features, 4) 253 | self.outOri = nn.Linear(self.num_features, 4) 254 | def _make_blocks(self): 255 | layers_conv = [] 256 | layers_bn = [] 257 | for i in range(self.num_blocks): 258 | layers_conv.append(nn.Conv1d(self.num_features, self.num_features, kernel_size=5, bias=False,dilation=2)) 259 | layers_bn.append(nn.BatchNorm1d(self.num_features)) 260 | self.layers_conv = nn.ModuleList(layers_conv) 261 | self.layers_bn = nn.ModuleList(layers_bn) 262 | 263 | def forward(self,x): 264 | """ 265 | Args: 266 | x - (B x T x J x C) 267 | """ 268 | B,T,C = x.shape 269 | x = x.permute((0, 2, 1)) 270 | x = self.conv1(x) 271 | for i in range(self.num_blocks): 272 | pre = x 273 | x = self.pad(x) 274 | x = self.layers_conv[i](x) 275 | x = self.layers_bn[i](x) 276 | x = self.drop(self.relu(x)) 277 | x = pre + x 278 | x = self.relu(self.reduce(x)) 279 | x = x.view(B, -1) 280 | x_con = self.sig(self.outCon(x)) 281 | x_ori = self.outOri(x) 282 | return x_con,x_ori 283 | 284 | 285 | 286 | class TargetPoseNetArtOri(nn.Module): 287 | def __init__(self, in_channels=74, num_features=256, out_channels=44, time_window=10, num_blocks=2): 288 | super().__init__() 289 | self.in_channels = in_channels 290 | self.num_features = num_features 291 | self.out_channels = out_channels 292 | self.num_blocks = num_blocks 293 | self.time_window = time_window 294 | self.conv1 = nn.Sequential( 295 | nn.ReplicationPad1d(1), 296 | nn.Conv1d(self.in_channels, self.num_features, kernel_size=3, bias=False ), 297 | nn.BatchNorm1d(self.num_features), 298 | nn.ReLU(inplace=True), 299 | nn.Dropout(p=0.25) 300 | ) 301 | self._make_blocks() 302 | self.pad = nn.ReplicationPad1d(4) 303 | self.relu = nn.ReLU(inplace=True) 304 | self.drop = nn.Dropout(p=0.25) 305 | self.reduce = nn.Conv1d(self.num_features, self.num_features, kernel_size= self.time_window ) 306 | self.tanh=nn.Tanh() 307 | 308 | self.out = nn.Linear(self.num_features, self.out_channels) 309 | self.outOri = nn.Linear(self.num_features, 4) 310 | def _make_blocks(self): 311 | layers_conv = [] 312 | layers_bn = [] 313 | for i in range(self.num_blocks): 314 | layers_conv.append(nn.Conv1d(self.num_features, self.num_features, kernel_size=5, bias=False,dilation=2)) 315 | layers_bn.append(nn.BatchNorm1d(self.num_features)) 316 | self.layers_conv = nn.ModuleList(layers_conv) 317 | self.layers_bn = nn.ModuleList(layers_bn) 318 | 319 | def forward(self,x): 320 | """ 321 | Args: 322 | x - (B x T x J x C) 323 | """ 324 | B,T,C = x.shape 325 | x = x.permute((0, 2, 1)) 326 | x = self.conv1(x) 327 | for i in range(self.num_blocks): 328 | pre = x 329 | x = self.pad(x) 330 | x = self.layers_conv[i](x) 331 | x = self.layers_bn[i](x) 332 | x = self.drop(self.relu(x)) 333 | x = pre + x 334 | x = self.relu(self.reduce(x)) 
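# At this point x has shape (B, num_features, 1): the kernel_size == time_window convolution in self.reduce
# has collapsed the temporal axis, and the view below flattens it to (B, num_features). The articulation
# head is then bounded to [-pi, pi] via pi * tanh(.), while the orientation head emits 4 unnormalized
# quaternion components.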
335 | x = x.view(B, -1) 336 | x_art = math.pi*self.tanh(self.out(x)) 337 | x_ori = self.outOri(x) 338 | return x_art,x_ori 339 | 340 | class TargetPoseNetArt(nn.Module): 341 | def __init__(self, in_channels=74, num_features=256, out_channels=44, time_window=10, num_blocks=2): 342 | super().__init__() 343 | self.in_channels = in_channels 344 | self.num_features = num_features 345 | self.out_channels = out_channels 346 | self.num_blocks = num_blocks 347 | self.time_window = time_window 348 | self.conv1 = nn.Sequential( 349 | nn.ReplicationPad1d(1), 350 | nn.Conv1d(self.in_channels, self.num_features, kernel_size=3, bias=False), 351 | nn.BatchNorm1d(self.num_features), 352 | nn.ReLU(inplace=True), 353 | nn.Dropout(p=0.25) 354 | ) 355 | self._make_blocks() 356 | self.pad = nn.ReplicationPad1d(4) 357 | self.relu = nn.ReLU(inplace=True) 358 | self.drop = nn.Dropout(p=0.25) 359 | self.reduce = nn.Conv1d(self.num_features, self.num_features, kernel_size=self.time_window) 360 | self.tanh = nn.Tanh() 361 | self.out = nn.Linear(self.num_features, self.out_channels) 362 | 363 | def _make_blocks(self): 364 | layers_conv = [] 365 | layers_bn = [] 366 | for i in range(self.num_blocks): 367 | layers_conv.append( 368 | nn.Conv1d(self.num_features, self.num_features, kernel_size=5, bias=False, dilation=2)) 369 | layers_bn.append(nn.BatchNorm1d(self.num_features)) 370 | self.layers_conv = nn.ModuleList(layers_conv) 371 | self.layers_bn = nn.ModuleList(layers_bn) 372 | 373 | def forward(self, x): 374 | """ 375 | Args: 376 | x - (B x T x J x C) 377 | """ 378 | B, T, C = x.shape 379 | x = x.permute((0, 2, 1)) 380 | x = self.conv1(x) 381 | for i in range(self.num_blocks): 382 | pre = x 383 | x = self.pad(x) 384 | x = self.layers_conv[i](x) 385 | x = self.layers_bn[i](x) 386 | x = self.drop(self.relu(x)) 387 | x = pre + x 388 | x = self.relu(self.reduce(x)) 389 | x = x.view(B, -1) 390 | x = math.pi * self.tanh(self.out(x)) 391 | return x 392 | 393 | 394 | class TargetPoseNetOri(nn.Module): 395 | def __init__(self, in_channels=74, num_features=256, out_channels=44, time_window=10, num_blocks=2): 396 | super().__init__() 397 | self.in_channels = in_channels 398 | self.num_features = num_features 399 | self.out_channels = out_channels 400 | self.num_blocks = num_blocks 401 | self.time_window = time_window 402 | self.conv1 = nn.Sequential( 403 | nn.ReplicationPad1d(1), 404 | nn.Conv1d(self.in_channels, self.num_features, kernel_size=3, bias=False ), 405 | nn.BatchNorm1d(self.num_features), 406 | nn.ReLU(inplace=True), 407 | nn.Dropout(p=0.25) 408 | ) 409 | self._make_blocks() 410 | self.pad = nn.ReplicationPad1d(4) 411 | self.relu = nn.ReLU(inplace=True) 412 | self.drop = nn.Dropout(p=0.25) 413 | self.reduce = nn.Conv1d(self.num_features, self.num_features, kernel_size= self.time_window ) 414 | 415 | self.out = nn.Linear(self.num_features, self.out_channels) 416 | 417 | def _make_blocks(self): 418 | layers_conv = [] 419 | layers_bn = [] 420 | for i in range(self.num_blocks): 421 | layers_conv.append(nn.Conv1d(self.num_features, self.num_features, kernel_size=5, bias=False,dilation=2)) 422 | layers_bn.append(nn.BatchNorm1d(self.num_features)) 423 | self.layers_conv = nn.ModuleList(layers_conv) 424 | self.layers_bn = nn.ModuleList(layers_bn) 425 | 426 | def forward(self,x): 427 | """ 428 | Args: 429 | x - (B x T x J x C) 430 | """ 431 | 432 | B,T,C = x.shape 433 | 434 | x = x.permute((0, 2, 1)) 435 | x = self.conv1(x) 436 | for i in range(self.num_blocks): 437 | pre = x 438 | x = self.pad(x) 439 | x = 
self.layers_conv[i](x) 440 | x = self.layers_bn[i](x) 441 | x = self.drop(self.relu(x)) 442 | x = pre + x 443 | x = self.relu(self.reduce(x)) 444 | x = x.view(B, -1) 445 | x = self.out(x) 446 | return x -------------------------------------------------------------------------------- /demo.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import sys 3 | import os 4 | import rbdl 5 | import warnings 6 | import time 7 | import os 8 | dir_path = os.path.dirname(os.path.realpath(__file__)) 9 | import numpy as np 10 | from networks import TargetPoseNetArt,TargetPoseNetOri,ContactEstimationNetwork,TransCan3Dkeys,DynamicNetwork,GRFNet 11 | from Utils.angles import angle_util 12 | import Utils.misc as ut 13 | import Utils.phys as ppf 14 | from Utils.core_utils import CoreUtils 15 | from Utils.initializer import InitializerConsistentHumanoid2 16 | from lossFunctions import LossFunctions 17 | import pybullet as p 18 | import Utils.contacts as cut 19 | from pipeline_util import PyProjection,PyPerspectivieDivision 20 | from scipy.spatial.transform import Rotation as Rot 21 | from torch.autograd import Variable 22 | import argparse 23 | 24 | class InferencePipeline(): 25 | def __init__(self,urdf_path,net_path,data_path,save_base_path,w,h,K,RT,neural_PD=1,grad_descent=0, n_iter=6,con_thresh=0.01,limit=50,speed_limit=35): 26 | 27 | ### configuration ### 28 | self.w=w 29 | self.h=h 30 | self.neural_PD=neural_PD 31 | self.n_iter = n_iter 32 | self.con_thresh = con_thresh 33 | self.limit = limit 34 | self.speed_limit = speed_limit 35 | self.grad_descent = grad_descent 36 | self.save_base_path=save_base_path 37 | self.n_iter_GD=90 38 | 39 | ### joint mapping ### 40 | self.openpose_dic2 = { "base": 7, "left_hip": 11, "left_knee": 12, "left_ankle": 13, "left_toe": 19, "right_hip": 8, "right_knee": 9, "right_ankle": 10, "right_toe": 22, "neck": 0, "head": 14, "left_shoulder": 4, "left_elbow": 5, "left_wrist": 6, "right_shoulder": 1, "right_elbow": 2, "right_wrist": 3 } 41 | self.target_joints = ["head", "neck", "left_hip", "left_knee", "left_ankle", "left_toe", "right_hip", "right_knee", "right_ankle", "right_toe", "left_shoulder", "left_elbow", "left_wrist", "right_shoulder", "right_elbow", "right_wrist"] 42 | self.target_ids = [self.openpose_dic2[key] for key in self.target_joints] 43 | 44 | ### load humanoid model ### 45 | 46 | self.model = rbdl.loadModel(urdf_path.encode(), floating_base=True) 47 | self.id_robot = p.loadURDF(urdf_path, useFixedBase=False) 48 | 49 | ### initialization ### 50 | ini = InitializerConsistentHumanoid2(n_b, self.target_joints) 51 | self.rbdl_dic = ini.get_rbdl_dic() 52 | self.target_joint_ids = ini.get_target_joint_ids() 53 | _, _, jointIds, jointNames = ut.get_jointIds_Names(self.id_robot) 54 | self.model_addresses = {"0": self.model, "1": self.model} 55 | 56 | ### build and load pretrained models ### 57 | self.TempConvNetArt = TargetPoseNetArt(in_channels=32, num_features=1024, out_channels=40, num_blocks=4)#.cuda() 58 | self.TempConvNetOri = TargetPoseNetOri(in_channels=32, num_features=1024, out_channels=4, num_blocks=4)#.cuda() 59 | self.ConNet = ContactEstimationNetwork(in_channels=32, num_features=1024, out_channels=4, num_blocks=4)#.cuda() 60 | self.TempConvNetTrans = TransCan3Dkeys(in_channels=32, num_features=1024, out_channels=3, num_blocks=4)#.cuda() 61 | self.GRFNet = GRFNet(input_dim=577, output_dim=46 + 46 + 3 * 4)#.cuda() 62 | self.DyNet = DynamicNetwork(input_dim=2302, output_dim=46, offset_coef=10)#.cuda() 63
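# The loader below expects six pretrained checkpoints inside net_path — ArtNet.pkl, OriNet.pkl, ConNet.pkl,
# GRFNet.pkl, TransNet.pkl and DyNet.pkl — all mapped onto the CPU; with the default --net_path these would
# be e.g. ../pretrained_neuralPhys/ArtNet.pkl and so on. Note that only ArtNet.pkl is actually tested for
# existence before all six are loaded.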
| 64 | if os.path.exists(net_path + "ArtNet.pkl"): 65 | self.TempConvNetArt.load_state_dict(torch.load(net_path + "ArtNet.pkl", map_location=torch.device('cpu'))) 66 | self.TempConvNetOri.load_state_dict(torch.load(net_path + "OriNet.pkl", map_location=torch.device('cpu'))) 67 | self.ConNet.load_state_dict(torch.load(net_path + "ConNet.pkl", map_location=torch.device('cpu'))) 68 | self.GRFNet.load_state_dict(torch.load(net_path + "GRFNet.pkl" ,map_location=torch.device('cpu'))) 69 | self.TempConvNetTrans.load_state_dict(torch.load(net_path+"TransNet.pkl", map_location=torch.device('cpu'))) 70 | self.DyNet.load_state_dict(torch.load(net_path+ "DyNet.pkl", map_location=torch.device('cpu'))) 71 | else: 72 | print('no trained model found!!!') 73 | sys.exit() 74 | self.TempConvNetArt.eval() 75 | self.TempConvNetOri.eval() 76 | self.TempConvNetTrans.eval() 77 | self.ConNet.eval() 78 | self.DyNet.eval() 79 | self.GRFNet.eval() 80 | 81 | ### setup custom pytorch functions including the Physics model ### 82 | self.PyFK = ppf.PyForwardKinematicsQuaternion().apply 83 | self.PyFK_rr = ppf.PyForwardKinematicsQuaternion().apply 84 | self.PyFD = ppf.PyForwardDynamics.apply 85 | self.PyProj = PyProjection.apply 86 | self.PyPD = PyPerspectivieDivision.apply 87 | 88 | ### load input data ### 89 | self.RT=RT 90 | self.Rs = torch.FloatTensor(self.RT[:3, :3]).view(n_b, 3, 3) 91 | self.P = torch.FloatTensor(K[:3]) 92 | self.P_tensor = self.get_P_tensor(n_b, self.target_joint_ids, self.P) 93 | self.p_2ds = np.load(data_path) 94 | 95 | self.p_2d_base = self.p_2ds[:, self.openpose_dic2["base"]] 96 | self.p_2ds = self.p_2ds[:, self.target_ids] 97 | self.p_2ds = torch.FloatTensor(self.p_2ds) 98 | self.p_2d_base = torch.FloatTensor(self.p_2d_base) 99 | 100 | self.canonical_2Ds = self.canonicalize_2Ds(torch.FloatTensor(K[:3, :3]), self.p_2ds) 101 | self.p_2ds[:, :, 0] /= self.w 102 | self.p_2ds[:, :, 1] /= self.h # h 103 | self.p_2d_base[:, 0] /= self.w 104 | self.p_2d_base[:, 1] /= self.h # h 105 | self.p_2ds_rr = self.p_2ds - self.p_2d_base.view(-1, 1, 2) 106 | 107 | def get_P_tensor(self,N, target_joint_ids, P): 108 | P_tensor = torch.zeros(N, 3 * len(target_joint_ids), 4 * len(target_joint_ids)) 109 | for i in range(int(P_tensor.shape[1] / 3)): 110 | P_tensor[:, i * 3:(i + 1) * 3, i * 4:(i + 1) * 4] = P 111 | return torch.FloatTensor(np.array(P_tensor)) 112 | 113 | def canonicalize_2Ds(self,K, p_2Ds): 114 | cs = torch.FloatTensor([K[0][2], K[1][2]]).view(1, 1, 2) 115 | fs = torch.FloatTensor([K[0][0], K[1][1]]).view(1, 1, 2) 116 | canonical_2Ds = (p_2Ds - cs) / fs 117 | return canonical_2Ds 118 | 119 | def get_grav_coriolis(self,sub_ids, floor_normals, q, qdot): 120 | n_b, _ = q.shape 121 | q = q.cpu().numpy().astype(float) 122 | qdot = qdot.cpu().numpy().astype(float) 123 | gcc = np.zeros((n_b, self.model.qdot_size)) 124 | floor_normals = floor_normals.cpu().numpy() 125 | for batch_id in range(n_b): 126 | sid = sub_ids[batch_id] 127 | model_address = self.model_addresses[str(int(sid))] 128 | model_address.gravity = -9.8 * floor_normals[batch_id] 129 | rbdl.InverseDynamics(model_address, q[batch_id], qdot[batch_id], np.zeros(self.model.qdot_size).astype(float), gcc[batch_id]) 130 | return torch.FloatTensor(gcc) 131 | 132 | def get_mass_mat(self,model, q): 133 | n_b, _ = q.shape 134 | M_np = np.zeros((n_b, model.qdot_size, model.qdot_size)) 135 | [rbdl.CompositeRigidBodyAlgorithm(model, q[i].astype(float), M_np[i]) for i in range(n_b)] 136 | return M_np 137 | 138 | 139 | def contact_label_estimation(self,input_rr):
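# Thresholds the ConNet probabilities into hard 0/1 contact labels and also returns the raw probabilities.
# For example, with con_thresh = 0.01 a prediction such as [0.002, 0.83, 0.31, 0.009] becomes the label
# vector [0., 1., 1., 0.] — four outputs, presumably one per foot contact point, matching the 4 * 3
# contact-force layout used for pre_lr_th_cons below.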
140 | pred_labels = self.ConNet(input_rr) 141 | pred_labels = pred_labels.clone().cpu() 142 | pred_labels_prob = pred_labels.clone() 143 | pred_labels[pred_labels < self.con_thresh] = 0 144 | pred_labels[pred_labels >= self.con_thresh] = 1 145 | return pred_labels,pred_labels_prob 146 | 147 | def gradientDescent(self,trans0,target_2D,rr_3ds): 148 | trans_variable = trans0.clone() 149 | for j in range(self.n_iter_GD): 150 | trans_variable = Variable(trans_variable, requires_grad=True) 151 | p_3D =(rr_3ds.view(n_b,-1,3)+trans_variable.view(n_b,1,3)).view(n_b,-1) 152 | 153 | p_proj = self.PyProj(self.P_tensor, p_3D) 154 | p_2D = self.PyPD(p_proj) 155 | p_2D = p_2D.view(n_b,-1,2) 156 | p_2D[:,:,0]/=self.w 157 | p_2D[:,:,1]/=self.h 158 | loss2D = (p_2D.view(1, -1) - target_2D.view(1, -1)).pow(2).sum() + 10* (trans_variable-trans0).pow(2).sum() 159 | 160 | loss2D.backward() 161 | with torch.no_grad(): 162 | trans_variable -= 0.003 * trans_variable.grad 163 | trans_variable.grad.zero_() 164 | 165 | trans_variable = trans_variable.clone().detach() 166 | p_2D = p_2D.detach() 167 | 168 | p_2D = p_2D.clone().detach()#*1000 169 | p_2D = p_2D.view(1, -1, 2) 170 | #p_2D[:, :, 0] *= self.w 171 | #p_2D[:, :, 1] *=self.h 172 | return trans_variable ,p_2D 173 | 174 | def get_translations_GD(self,target_2D,rr_p_3D_p,trans0): 175 | 176 | 177 | """ set 2D and 3D targets """ 178 | trans, _ = self.gradientDescent( trans0, target_2D, rr_p_3D_p.view(n_b, -1)) 179 | 180 | #target_2D = target_2D.view(n_b, -1, 2) 181 | #target_2D[:, :, 0] *= self.w 182 | #target_2D[:, :, 1] *= self.h 183 | 184 | return trans.clone() 185 | 186 | def get_target_pose(self,input_can,input_rr,target_2d,trans0,first_frame_flag): 187 | art_tar = self.TempConvNetArt(input_rr) 188 | quat_tar = self.TempConvNetOri(input_rr) 189 | rr_q = torch.cat((torch.zeros(n_b, 3) , quat_tar[:, 1:], art_tar, quat_tar[:, 0].view(-1, 1)), 1)#.cuda() 190 | 191 | rr_p_3D_p = self.PyFK_rr([self.model_addresses["0"]], self.target_joint_ids,delta_t, torch.FloatTensor([0]) , rr_q) 192 | q_tar = rr_q.clone() 193 | 194 | if not first_frame_flag and self.grad_descent: 195 | trans_tar = self.get_translations_GD(target_2d.cpu(),rr_p_3D_p.cpu().detach(),trans0.cpu().detach()) 196 | else: 197 | trans_tar = self.TempConvNetTrans(input_can, rr_p_3D_p) 198 | trans_tar = torch.clamp(trans_tar, -50, 50) 199 | 200 | q_tar[:, :3] = trans_tar.clone() 201 | return art_tar,quat_tar,trans_tar,q_tar 202 | 203 | def inference(self): 204 | 205 | ### Initialization ### 206 | all_q , all_p_3ds, all_tau, all_iter_q = [],[],[],[] 207 | 208 | p_2ds_rr = self.p_2ds_rr#.cuda() 209 | canonical_2Ds = self.canonical_2Ds#.cuda() 210 | p_2ds = self.p_2ds#.cuda() 211 | ### set axis vectors ### 212 | basis_vec_w = torch.FloatTensor(np.array([[1, 0, 0, ], [0, 1, 0, ], [0, 0, 1, ]])).view(1, 3, 3) 213 | basis_vec_w = basis_vec_w.expand(n_b, -1, -1) 214 | 215 | for i in range(temporal_window, len(p_2ds_rr)): 216 | print(i) 217 | frame_canonical_2Ds = canonical_2Ds[i - temporal_window:i, ].reshape(n_b, temporal_window, -1) 218 | frame_rr_2Ds = p_2ds_rr[i - temporal_window:i, ].reshape(n_b, temporal_window, -1) 219 | floor_normals = torch.transpose(torch.bmm(self.Rs, torch.transpose(basis_vec_w, 1, 2)), 1, 2)[:, 1].view(n_b, 3) 220 | input_rr = frame_rr_2Ds.reshape(n_b, temporal_window, -1) 221 | input_can = frame_canonical_2Ds.reshape(n_b, temporal_window, -1) 222 | target_2d = p_2ds[i].reshape(n_b,-1) 223 | 224 | if i==temporal_window: 225 | tar_trans0 = None 226 | first_frame_flag=1 227 | else: 228 |
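# From the second processed frame on, a previous translation estimate (tar_trans0) exists, so
# get_target_pose may refine the root translation by gradient descent on the 2D reprojection error
# (see gradientDescent above) instead of taking the TransNet prediction — this path is active only when
# grad_descent is set, i.e. when the camera intrinsics were approximated rather than provided (see __main__).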
first_frame_flag=0 229 | 230 | ### compute Target Pose ### 231 | art_tar, quat_tar, trans_tar, q_tar = self.get_target_pose(input_can,input_rr,target_2d,tar_trans0,first_frame_flag) 232 | tar_trans0=trans_tar.clone() 233 | with torch.no_grad(): 234 | ### compute contact labels ### 235 | pred_labels,pred_labels_prob = self.contact_label_estimation(input_rr) 236 | 237 | if i == temporal_window: 238 | q0 = q_tar.clone().cpu() 239 | pre_lr_th_cons = torch.zeros(n_b, 4 * 3) 240 | qdot0 = torch.zeros(n_b, self.model.qdot_size) 241 | 242 | quat_tar = quat_tar.cpu() 243 | art_tar = art_tar.cpu() 244 | trans_tar = trans_tar.cpu() 245 | q_tar = q_tar.cpu() 246 | 247 | ### Dynamic Cycle ### 248 | for iter in range(self.n_iter): 249 | ### Compute dynamic quantities and pose errors ### 250 | M = ut.get_mass_mat_cpu(self.model, q0.detach().clone().cpu().numpy()) 251 | M_inv = torch.inverse(M).clone() 252 | M_inv = ut.clean_massMat(M_inv) 253 | J = CU.get_contact_jacobis6D_cpu(self.model, q0.numpy(), [self.rbdl_dic['left_ankle'], self.rbdl_dic['right_ankle']]) # ankles 254 | 255 | quat0 = torch.cat((q0[:, -1].view(-1, 1), q0[:, 3:6]), 1).detach().clone() 256 | errors_trans, errors_ori, errors_art = CU.get_PD_errors_cpu(quat_tar, quat0, trans_tar, q0[:, :3], art_tar, q0[:, 6:-1]) 257 | current_errors = torch.cat((errors_trans, errors_ori, errors_art), 1) 258 | 259 | ### Force Vector Computation ### 260 | if self.neural_PD: 261 | dynInput = torch.cat((q_tar, q0, qdot0, torch.flatten(M_inv, 1), current_errors,), 1) 262 | neural_gain, neural_offset = self.DyNet(dynInput )#.cuda() 263 | tau = CU.get_neural_development_cpu(errors_trans, errors_ori, errors_art, qdot0, neural_gain.cpu(), neural_offset.cpu(), self.limit,art_only=1, small_z=1) 264 | else: 265 | tau = CU.get_tau(errors_trans, errors_ori, errors_art, qdot0, self.limit, small_z=1) 266 | 267 | gcc = self.get_grav_coriolis([0], floor_normals, q0.clone(), qdot0.clone()) 268 | tau_gcc = tau + gcc 269 | 270 | ### GRF computation ### 271 | GRFInput = torch.cat((tau_gcc[:, :6], torch.flatten(J, 1), floor_normals, pred_labels, pre_lr_th_cons), 1)#.cuda() 272 | lr_th_cons = self.GRFNet(GRFInput) 273 | gen_conF = cut.get_contact_wrench_cpu(self.model, q0, self.rbdl_dic, lr_th_cons.cpu(), pred_labels) 274 | 275 | ### Forward Dynamics and Pose Update ### 276 | tau_special = tau_gcc - gen_conF 277 | qddot = self.PyFD(tau_special + gen_conF - gcc, M_inv) 278 | quat, q, qdot, _ = CU.pose_update_quat_cpu(qdot0.detach(), q0.detach(), quat0.detach(), delta_t, qddot, self.speed_limit, th_zero=1) 279 | 280 | qdot0 = qdot.detach().clone() 281 | q0 = AU.angle_normalize_batch_cpu(q.detach().clone()) 282 | if iter == 0: all_tau.append(torch.flatten(tau_special).numpy()) 283 | all_iter_q.append(torch.flatten(q0).numpy()) 284 | 285 | ### store the predictions ### 286 | 287 | p_3D_p = self.PyFK( [self.model_addresses["0"]], self.target_joint_ids,delta_t, torch.FloatTensor([0]) , q0) 288 | all_q.append(torch.flatten(q0).detach().numpy()) 289 | all_p_3ds.append(p_3D_p[0].cpu().numpy()) 290 | 291 | print('saving predictions ...') 292 | all_q = np.array(all_q) 293 | all_p_3ds=np.array(all_p_3ds) 294 | all_iter_q=np.array(all_iter_q) 295 | 296 | 297 | ########### save the predictions ############### 298 | if not os.path.exists(self.save_base_path + "/"): 299 | os.makedirs(self.save_base_path + "/") 300 | print(self.save_base_path + "/q_iter_dyn.npy") 301 | np.save(self.save_base_path + "/q_iter_dyn.npy", all_iter_q) 302 | np.save(self.save_base_path + "/q_dyn.npy", all_q) 303 |
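# The three arrays written in this block correspond to the demo outputs under ./results/: q_iter_dyn.npy
# stacks one generalized pose per dynamic-cycle iteration (n_iter rows per frame), q_dyn.npy keeps the
# final pose per frame, and p_3D_dyn.npy the matching 3D joint positions from forward kinematics (all_tau
# is accumulated above but not written out). A quick way to inspect them afterwards (a sketch):
#   q = np.load("./results/q_dyn.npy")          # (n_frames, q_size)
#   p3d = np.load("./results/p_3D_dyn.npy")     # per-frame 3D joint positions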
np.save(self.save_base_path + "/p_3D_dyn.npy", all_p_3ds) 304 | print('Done.') 305 | return 0 306 | 307 | if __name__ == "__main__": 308 | parser = argparse.ArgumentParser(description='arguments for predictions') 309 | parser.add_argument('--input_path', default='sample_data/sample_dance.npy') 310 | parser.add_argument('--net_path', default="../pretrained_neuralPhys/") 311 | parser.add_argument('--n_iter', type=int, default=6) 312 | parser.add_argument('--con_thresh', type=float, default= 0.001 ) 313 | parser.add_argument('--tau_limit', type=float, default= 80 ) 314 | parser.add_argument('--speed_limit', type=float, default=15) 315 | parser.add_argument('--img_width', type=float, default=1280) 316 | parser.add_argument('--img_height', type=float, default=720) 317 | parser.add_argument('--floor_known', type=int, default=1) 318 | parser.add_argument('--floor_position_path', default="./sample_data/sample_floor_position.npy") 319 | parser.add_argument('--cam_params_known', type=int, default=0) 320 | parser.add_argument('--cam_params_path', default="./sample_data/sample_cam_params.npy") 321 | 322 | args = parser.parse_args() 323 | """ 324 | todo: 325 | start from frame 1 (not 10) 326 | """ 327 | AU = angle_util() 328 | LF = LossFunctions() 329 | delta_t = 0.011 330 | CU = CoreUtils(45, delta_t) 331 | warnings.filterwarnings("ignore") 332 | n_b = 1 333 | id_simulator = p.connect(p.DIRECT) 334 | p.configureDebugVisualizer(flag=p.COV_ENABLE_Y_AXIS_UP, enable=1) 335 | save_base_path = "./results/" 336 | urdf_path = "./URDF/manual.urdf" 337 | net_path=args.net_path 338 | w=args.img_width 339 | h=args.img_height 340 | temporal_window = 10 341 | 342 | if args.floor_known: 343 | RT = np.load(args.floor_position_path ) 344 | else: 345 | RT = None 346 | 347 | if args.cam_params_known: 348 | K = np.load(args.cam_params_path) 349 | grad_descent=0 350 | else: 351 | K = np.array([1000, 0, w/2, 0, 0, 1000, h/2, 0, 0, 0, 1, 0, 0, 0, 0, 1]).reshape(4, 4) 352 | grad_descent=1 353 | 354 | IPL = InferencePipeline(urdf_path, 355 | net_path, 356 | args.input_path, 357 | save_base_path, 358 | w,h,K,RT, 359 | neural_PD=1, 360 | grad_descent=grad_descent, 361 | n_iter=args.n_iter, 362 | con_thresh=args.con_thresh, 363 | limit=args.tau_limit, 364 | speed_limit=args.speed_limit) 365 | IPL.inference() -------------------------------------------------------------------------------- /Utils/core_utils.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | import torch 4 | import cvxpy as cp 5 | sys.path.append("../util") 6 | from Utils.angles import angle_util 7 | import rbdl 8 | from cvxpylayers.torch import CvxpyLayer 9 | AU = angle_util() 10 | import numpy as np 11 | import warnings 12 | warnings.filterwarnings("ignore") 13 | 14 | class CoreUtils(): 15 | def __init__(self,q_size,delta_t): 16 | n, m = q_size, q_size 17 | Q_sqrt = cp.Parameter((n, n)) 18 | q = cp.Parameter(n) 19 | G = cp.Parameter((6, n)) 20 | h = cp.Parameter(6) # 6 + 43) 21 | x = cp.Variable(n) 22 | obj = cp.Minimize(1e-10*cp.norm(x, p=1) + 0.5 * cp.sum_squares(Q_sqrt * x) + q.T @ x) 23 | cons = [G @ x >= h] # , G @ x <= h] 24 | prob = cp.Problem(obj, cons) 25 | self.layer = CvxpyLayer(prob, parameters=[Q_sqrt, q, G, h], variables=[x]) #, G, h 26 | self.q_size=q_size 27 | self.selecton = torch.zeros(q_size, q_size)#.cuda() 28 | remain_id = np.arange(6, q_size) 29 | #print(remain_id) 30 | for id in remain_id: 31 | self.selecton[id][id] = 1 32 | self.selecton[0][0] = 1 33 | self.selecton[1][1] = 
1 34 | self.selecton[2][2] = 1 35 | L = torch.mm(self.selecton, delta_t * torch.eye(q_size))#.cuda()) 36 | self.LTL = torch.mm(L.T, L) 37 | self.delta_t_eye = delta_t * torch.eye(q_size)##.cuda() 38 | self.label_mat_base=torch.eye(6)##.cuda().view(2,3,6) 39 | 40 | self.label_mat_base_cpu=torch.eye(6).view(2,3,6) 41 | 42 | def get_contact_jacobis6D(self,model, q, ids): 43 | jacobis = np.zeros((len(q), len(ids), 6, model.qdot_size)) 44 | for batch in range(len(q)): 45 | for i, id in enumerate(ids): 46 | rbdl.CalcPointJacobian6D(model, q[batch].flatten().astype(float), id, np.array([0., 0., 0.]), jacobis[batch][i]) 47 | jacobis = torch.FloatTensor(jacobis).view(len(q), -1, model.qdot_size)#.cuda() 48 | return jacobis 49 | def get_contact_jacobis6D_cpu(self,model, q, ids): 50 | jacobis = np.zeros((len(q), len(ids), 6, model.qdot_size)) 51 | for batch in range(len(q)): 52 | for i, id in enumerate(ids): 53 | rbdl.CalcPointJacobian6D(model, q[batch].flatten().astype(float), id, np.array([0., 0., 0.]), jacobis[batch][i]) 54 | jacobis = torch.FloatTensor(jacobis).view(len(q), -1, model.qdot_size)##.cuda() 55 | return jacobis 56 | 57 | def get_predictions_Kinematics_exf(self,TempConvNetArt,TempConvNetOri,TempConvNetTrans,input1,q0_ref,q0_ref_ref,depth0,external_force): 58 | 59 | n_b,_= q0_ref.shape 60 | ref_art = TempConvNetArt(q0_ref[:, 3:], input1, external_force) 61 | #ref_quat = TempConvNetOri(q0_ref[:, 3:], input1, external_force) 62 | ref_quat = TempConvNetOri(q0_ref [:, 3:], input1, external_force) 63 | #print(depth0.shape,input1.shape,external_force.shape) 64 | ref_trans = TempConvNetTrans(depth0, input1, external_force) 65 | ref_trans = torch.clamp(ref_trans, -5, 5) 66 | l = torch.sqrt(ref_quat.pow(2).sum(1)).view(ref_quat.shape[0], 1) 67 | ref_quat = ref_quat / l 68 | q_ref = torch.cat((ref_trans, ref_quat[:, 1:], ref_art, ref_quat[:, 0].view(-1, 1)), 1) 69 | ref_trans = q_ref[:, :3].view(n_b, 3) # #.cuda() 70 | return q_ref,ref_trans,ref_quat,ref_art 71 | 72 | def get_PD_errors(self,ref_quat,quat0,ref_trans,trans0,ref_art,art0): 73 | errors_ori=self.get_ori_errors(ref_quat,quat0) 74 | errors_trans=self.get_trans_errors(ref_trans,trans0) 75 | errors_art=self.get_art_errors(ref_art,art0) 76 | return errors_trans,errors_ori,errors_art 77 | 78 | def get_PD_errors_cpu(self,ref_quat,quat0,ref_trans,trans0,ref_art,art0): 79 | errors_ori=self.get_ori_errors_cpu(ref_quat,quat0) 80 | errors_trans=self.get_trans_errors(ref_trans,trans0) 81 | errors_art=self.get_art_errors_cpu(ref_art,art0) 82 | return errors_trans,errors_ori,errors_art 83 | def get_ori_errors_cpu(self,ref_quat,quat0): 84 | errors_ori = AU.qmul(ref_quat, AU.q_conj(quat0)) 85 | errors_ori = AU.quat_shortest_path_cpu(errors_ori) 86 | l = torch.sqrt(errors_ori.pow(2).sum(1)).view(errors_ori.shape[0], 1) 87 | errors_ori = errors_ori / l 88 | errors_ori = errors_ori[:, 1:] 89 | return errors_ori 90 | def get_ori_errors(self,ref_quat,quat0): 91 | errors_ori = AU.qmul(ref_quat, AU.q_conj(quat0)) 92 | errors_ori = AU.quat_shortest_path(errors_ori) 93 | l = torch.sqrt(errors_ori.pow(2).sum(1)).view(errors_ori.shape[0], 1) 94 | errors_ori = errors_ori / l 95 | errors_ori = errors_ori[:, 1:] 96 | return errors_ori 97 | 98 | def get_trans_errors(self,ref_trans,trans0): 99 | return ref_trans - trans0 100 | 101 | def get_art_errors(self,ref_art,art0): 102 | return AU.target_correction_batch(ref_art -art0) 103 | 104 | def get_art_errors_cpu(self,ref_art,art0): 105 | return AU.target_correction_batch_cpu(ref_art -art0) 106 | 107 | def 
get_tau(self,errors_trans,errors_ori,errors_art,qdot0,limit, small_z=1): 108 | tau_art = 324 * errors_art - 20 * qdot0[:, 6:] 109 | tau_ori = 6000 * errors_ori - 536 * qdot0[:, 3:6] 110 | tau_trans = 15000 * errors_trans - 678 * qdot0[:, :3] 111 | #tau = torch.cat((tau_trans, tau_ori, tau_art), 1) 112 | if small_z: 113 | tau_trans[:,2] = 3000 * errors_trans[:,2] - 2000 * qdot0[:,2] 114 | tau_art = torch.clamp(tau_art, -limit, limit) 115 | tau_ori = torch.clamp(tau_ori, -200, 200) 116 | tau_trans = torch.clamp(tau_trans, -400, 400) 117 | tau = torch.cat((tau_trans,tau_ori,tau_art),1) 118 | tau[:, 6 + 9] = 0 119 | tau[:, 6 + 8] = 0 120 | tau[:, 6 + 18] = 0 121 | tau[:, 6 + 19] = 0 122 | #tau = torch.clamp(tau, -25, 25) 123 | return tau 124 | 125 | 126 | def get_neural_development(self, errors_trans, errors_ori, errors_art, qdot0,gains, offset,limit_art,small_z = 0): 127 | 128 | tau_art = 324 * gains[:,6:]* errors_art - 20 * qdot0[:, 6:]+ offset[:,6:] 129 | tau_ori = 6000 * gains[:,3:6]*errors_ori - 536 * qdot0[:, 3:6]+ offset[:,3:6] 130 | #tau_ori = 6000 * errors_ori - 536 * qdot0[:, 3:6]#+ offset[:,3:6] 131 | tau_trans = 15000 * gains[:,:3]* errors_trans - 678 * qdot0[:, :3]+ offset[:,:3] 132 | #tau_trans = 15000 * errors_trans - 678 * qdot0[:, :3] 133 | if small_z: 134 | tau_trans[:,2] = 3000 * errors_trans[:,2] - 2000 * qdot0[:,2] 135 | #tau = torch.cat((tau_trans, tau_ori, tau_art), 1) 136 | 137 | tau_art = torch.clamp(tau_art, -limit_art, limit_art) 138 | tau_ori = torch.clamp(tau_ori, -200, 200) 139 | tau_trans = torch.clamp(tau_trans, -400, 400) 140 | 141 | tau = torch.cat((tau_trans,tau_ori,tau_art),1) 142 | tau[:, 6 + 9] = 0 143 | tau[:, 6 + 8] = 0 144 | tau[:, 6 + 18] = 0 145 | tau[:, 6 + 19] = 0 146 | return tau 147 | 148 | def get_neural_development_cpu(self, errors_trans, errors_ori, errors_art, qdot0,gains, offset,limit_art,art_only=0,small_z = 0): 149 | 150 | tau_art = 324 * gains[:,6:]* errors_art - 20 * qdot0[:, 6:]+ offset[:,6:] 151 | if art_only: 152 | tau_ori = 6000 * errors_ori - 536 * qdot0[:, 3:6] 153 | tau_trans = 15000 * errors_trans - 678 * qdot0[:, :3] 154 | else: 155 | tau_ori = 6000 * gains[:, 3:6] * errors_ori - 536 * qdot0[:, 3:6] + offset[:, 3:6] 156 | tau_trans = 15000 * gains[:,:3]* errors_trans - 678 * qdot0[:, :3]+ offset[:,:3] 157 | if small_z: 158 | tau_trans[:,2] = 3000 * errors_trans[:,2] - 2000 * qdot0[:,2] 159 | #tau = torch.cat((tau_trans, tau_ori, tau_art), 1) 160 | 161 | tau_art = torch.clamp(tau_art, -limit_art, limit_art) 162 | tau_ori = torch.clamp(tau_ori, -200, 200) 163 | tau_trans = torch.clamp(tau_trans, -400, 400) 164 | 165 | tau = torch.cat((tau_trans,tau_ori,tau_art),1) 166 | tau[:, 6 + 9] = 0 167 | tau[:, 6 + 8] = 0 168 | tau[:, 6 + 18] = 0 169 | tau[:, 6 + 19] = 0 170 | return tau 171 | 172 | def pose_update_quat(self,qdot0,q0,quat0,delta_t,qddot,speed_limit,th_zero = 0): 173 | n_b,_=q0.shape 174 | qdot = qdot0 + delta_t * qddot 175 | qdot = torch.clamp(qdot, -speed_limit, speed_limit) 176 | #print(qdot) 177 | if th_zero: 178 | qdot[:,6+9]=0 179 | qdot[:,6+8]=0 180 | qdot[:,6+18]=0 181 | qdot[:,6+19]=0 182 | qdot[:,-1]=0 183 | q_trans = q0[:, :3] + delta_t * qdot[:, :3] 184 | q_trans= torch.clamp(q_trans, -50, 50) 185 | q_art = q0[:, 6:-1] + delta_t * qdot[:, 6:] 186 | 187 | if th_zero: 188 | q_art[:,9]=0 189 | q_art[:,8]=0 190 | q_art[:,18]=0 191 | q_art[:,19]=0 192 | 193 | angvel_quat = AU.get_angvel_quat(qdot[:, 3:6]) 194 | quat = quat0.detach() + delta_t * AU.qmul(angvel_quat, quat0.detach()) / 2 195 | loss_unit = 
(quat.pow(2).sum(1) - 1).pow(2).mean() 196 | 197 | l = torch.sqrt(quat.pow(2).sum(1)).view(n_b, 1) 198 | quat = quat / l 199 | q = torch.cat((q_trans, quat[:, 1:], q_art, quat[:, 0].view(-1, 1)), 1) 200 | return quat,q,qdot,loss_unit 201 | 202 | def pose_update_quat_cpu(self,qdot0,q0,quat0,delta_t,qddot,speed_limit,th_zero = 0): 203 | n_b, _ = q0.shape 204 | qdot = qdot0 + delta_t * qddot 205 | qdot = torch.clamp(qdot, -speed_limit, speed_limit) 206 | # print(qdot) 207 | if th_zero: 208 | qdot[:, 6 + 9] = 0 209 | qdot[:, 6 + 8] = 0 210 | qdot[:, 6 + 18] = 0 211 | qdot[:, 6 + 19] = 0 212 | qdot[:, -1] = 0 213 | q_trans = q0[:, :3] + delta_t * qdot[:, :3] 214 | q_trans = torch.clamp(q_trans, -50, 50) 215 | q_art = q0[:, 6:-1] + delta_t * qdot[:, 6:] 216 | 217 | if th_zero: 218 | q_art[:, 9] = 0 219 | q_art[:, 8] = 0 220 | q_art[:, 18] = 0 221 | q_art[:, 19] = 0 222 | 223 | angvel_quat = AU.get_angvel_quat_cpu(qdot[:, 3:6]) 224 | quat = quat0.detach() + delta_t * AU.qmul(angvel_quat, quat0.detach()) / 2 225 | 226 | l = torch.sqrt(quat.pow(2).sum(1)).view(n_b, 1) 227 | quat = quat / l 228 | q = torch.cat((q_trans, quat[:, 1:], q_art, quat[:, 0].view(-1, 1)), 1) 229 | return quat, q, qdot, _ 230 | 231 | def pose_update_quat_debug(self,qdot0,q0,quat0,delta_t,qddot,th_zero = 0): 232 | n_b,_=q0.shape 233 | qdot = qdot0 + delta_t * qddot 234 | 235 | if th_zero: 236 | qdot[:,6+9]=0 237 | qdot[:,6+8]=0 238 | qdot[:,6+18]=0 239 | qdot[:,6+19]=0 240 | 241 | q_trans = q0[:, :3] + delta_t * qdot[:, :3] 242 | q_art = q0[:, 6:-1] + delta_t * qdot[:, 6:] 243 | 244 | if th_zero: 245 | q_art[:,9]=0 246 | q_art[:,8]=0 247 | q_art[:,18]=0 248 | q_art[:,19]=0 249 | 250 | angvel_quat = AU.get_angvel_quat(qdot[:, 3:6]) 251 | quat = quat0.detach() + delta_t * AU.qmul(angvel_quat, quat0.detach()) / 2 252 | loss_unit = (quat.pow(2).sum(1) - 1).pow(2).mean() 253 | 254 | l = torch.sqrt(quat.pow(2).sum(1)).view(n_b, 1) 255 | quat = quat / l 256 | q = torch.cat((q_trans, quat[:, 1:], q_art, quat[:, 0].view(-1, 1)), 1) 257 | return quat,q,qdot,loss_unit 258 | 259 | def foot_constraint(self,J,q0, qdot,label_mat,Rs): 260 | n_b,_=q0.shape 261 | E = torch.eye(43) .view(1,43,43)#.cuda() 262 | E = E.expand(n_b,-1,-1 ) 263 | selection = self.selecton.view(1,43,43).expand(n_b,-1,-1 ) 264 | delta_t_eye = self.delta_t_eye.view(1,43,43).expand(n_b,-1,-1 ) 265 | 266 | 267 | #LTL = LTL.view(1,) 268 | # Q = E#torch.bmm(torch.transpose(J,1,2),J)+ 0.8*self.LTL+E#torch.eye(43).cuda() 269 | #k = torch.bmm( selection,(q0 -q_t )[:,:-1].view(n_b,-1,1)) 270 | # P = - qdot.view(n_b,1,-1)#torch.bmm(k.view(n_b,1,-1),delta_t_eye) 271 | qval =- qdot.view(n_b,-1) #Variable(P.view(n_b,-1), requires_grad=True).float().cuda() 272 | Q_sqrtval = E#Q #Variable(Q, requires_grad=True).cuda() # .view(1, 43, 43) 273 | 274 | #select_J = torch.mm(A_select_mat, J) 275 | #select_J=torch.zeros(3,3).cuda() 276 | #select_J[0][0]=1 277 | #select_J[1][1]=1 278 | #select_J[2][2]=1 279 | #select_J_top=torch.cat((select_J,torch.zeros(3,3).cuda()),1) 280 | #select_J_bottom=torch.cat(( torch.zeros(3,3).cuda(),select_J),1) 281 | #select_J=torch.cat((select_J_top,select_J_bottom),0) 282 | #Aval = torch.bmm(label_mat,J) #torch.mm(select_J,J)# torch.cat((select_J, M1), 0) 283 | #zeros = torch.zeros(6, requires_grad=True).cuda() 284 | #bval = torch.zeros(n_b,6).cuda() 285 | #bval[2] = -0.1#*torch.ones(6).cuda()#torch.cat((zeros.view(-1, 1), Mqdot), 0).view(-1) 286 | #bval[5] = -0.1 287 | Rtranspose = torch.transpose(Rs,1,2) 288 | 289 | Rtranspose_mat_top = 
    def foot_constraint_cpu(self, J, q0, qdot, label_mat, Rs):
        # CPU variant of foot_constraint (note: 46-dim velocity vector here).
        n_b, _ = q0.shape
        E = torch.eye(46).view(1, 46, 46)
        E = E.expand(n_b, -1, -1)
        qval = -qdot.view(n_b, -1)
        Q_sqrtval = E
        Rtranspose = torch.transpose(Rs, 1, 2)
        Rtranspose_mat_top = torch.cat((Rtranspose, torch.zeros(n_b, 3, 3)), 2)
        Rtranspose_mat_bottom = torch.cat((torch.zeros(n_b, 3, 3), Rtranspose), 2)
        Rtranspose_mat = torch.cat((Rtranspose_mat_top, Rtranspose_mat_bottom), 1).view(n_b, 6, 6)
        Gval = torch.bmm(Rtranspose_mat, J)
        Gval[:, 0] = 0
        Gval[:, 2] = 0
        Gval[:, 3] = 0
        Gval[:, 5] = 0
        Gval = torch.bmm(label_mat, Gval)
        hval = torch.zeros(n_b, 6)
        y, = self.layer(Q_sqrtval, qval, Gval, hval)
        return y

    def pose_update_quat_hard_debug(self, qdot0, q0, quat0, delta_t, qddot, th_zero=0):
        n_b, _ = q0.shape
        qdot = qdot0 + delta_t * qddot
        if th_zero:
            qdot[:, 6 + 8] = 0
            qdot[:, 6 + 9] = 0
            qdot[:, 6 + 18] = 0
            qdot[:, 6 + 19] = 0
        q_trans = q0[:, :3] + delta_t * qdot[:, :3]
        q_art = q0[:, 6:-1] + delta_t * qdot[:, 6:]
        if th_zero:
            q_art[:, 8] = 0
            q_art[:, 9] = 0
            q_art[:, 18] = 0
            q_art[:, 19] = 0
        angvel_quat = AU.get_angvel_quat(qdot[:, 3:6])
        quat = quat0.detach() + delta_t * AU.qmul(angvel_quat, quat0.detach()) / 2
        loss_unit = (quat.pow(2).sum(1) - 1).pow(2).mean()
        l = torch.sqrt(quat.pow(2).sum(1)).view(n_b, 1)
        quat = quat / l
        q = torch.cat((q_trans, quat[:, 1:], q_art, quat[:, 0].view(-1, 1)), 1)
        return quat, q, qdot, loss_unit

    def pose_update_quat_hard(self, qdot0, q0, quat0, delta_t, qddot, J, lr_labels, Rs, th_zero=0):
        error_flag = 0
        n_b, _ = q0.shape
        qdot = qdot0 + delta_t * qddot
        qdot = torch.clamp(qdot, -50, 50)
        if th_zero:
            qdot[:, 6 + 8] = 0
            qdot[:, 6 + 9] = 0
            qdot[:, 6 + 18] = 0
            qdot[:, 6 + 19] = 0
        # Build the 6x6 contact mask from the left/right foot labels.
        label_mat = self.label_mat_base.expand(n_b, -1, -1, -1)
        label_mat = lr_labels.view(n_b, 2, 1, 1) * label_mat
        label_mat = label_mat.reshape(n_b, 6, 6)
        try:
            qdot2 = self.foot_constraint(J, q0, qdot, label_mat, Rs)
        except Exception:
            # QP solve failed; fall back to the unconstrained velocity.
            print("foot constraint QP failed; using unconstrained velocity")
            qdot2 = qdot.clone()
            error_flag = 1
        q_trans = q0[:, :3] + delta_t * qdot2[:, :3]
        q_art = q0[:, 6:-1] + delta_t * qdot2[:, 6:]
        if th_zero:
            q_art[:, 8] = 0
            q_art[:, 9] = 0
            q_art[:, 18] = 0
            q_art[:, 19] = 0
        angvel_quat = AU.get_angvel_quat(qdot2[:, 3:6])
        quat = quat0.detach() + delta_t * AU.qmul(angvel_quat, quat0.detach()) / 2
        loss_unit = (quat.pow(2).sum(1) - 1).pow(2).mean()
        l = torch.sqrt(quat.pow(2).sum(1)).view(n_b, 1)
        quat = quat / l
        q = torch.cat((q_trans, quat[:, 1:], q_art, quat[:, 0].view(-1, 1)), 1)
        return quat, q, qdot2, loss_unit, error_flag
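    # Illustrative usage sketch (added for exposition; not part of the
    # original file). pose_update_quat_hard couples integration with the hard
    # foot-contact projection above. With hypothetical inputs (J from the
    # kinematics, lr_labels from process_contact_info below):
    #
    #   quat, q, qdot, loss_unit, failed = sim.pose_update_quat_hard(
    #       qdot0, q0, quat0, 1.0 / 25, qddot, J, lr_labels, Rs)
    #   if failed:
    #       ...  # the unconstrained velocity was used instead
    #
    # All names and values above are assumptions for exposition.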
    def pose_update_quat_hard_cpu(self, qdot0, q0, quat0, delta_t, qddot, J, lr_labels, Rs, th_zero=0):
        error_flag = 0
        n_b, _ = q0.shape
        qdot = qdot0 + delta_t * qddot
        qdot = torch.clamp(qdot, -5, 5)
        if th_zero:
            qdot[:, 6 + 8] = 0
            qdot[:, 6 + 9] = 0
            qdot[:, 6 + 18] = 0
            qdot[:, 6 + 19] = 0
        label_mat = self.label_mat_base_cpu.expand(n_b, -1, -1, -1)
        label_mat = lr_labels.view(n_b, 2, 1, 1) * label_mat
        label_mat = label_mat.reshape(n_b, 6, 6)
        try:
            qdot2 = self.foot_constraint_cpu(J, q0, qdot, label_mat, Rs)
            qdot2 = torch.clamp(qdot2, -5, 5)
        except Exception:
            print("foot constraint QP failed; using unconstrained velocity")
            qdot2 = qdot.clone()
            error_flag = 1
        q_trans = q0[:, :3] + delta_t * qdot2[:, :3]
        q_art = q0[:, 6:-1] + delta_t * qdot2[:, 6:]
        if th_zero:
            q_art[:, 8] = 0
            q_art[:, 9] = 0
            q_art[:, 18] = 0
            q_art[:, 19] = 0
        angvel_quat = AU.get_angvel_quat_cpu(qdot2[:, 3:6])
        quat = quat0.detach() + delta_t * AU.qmul(angvel_quat, quat0.detach()) / 2
        loss_unit = (quat.pow(2).sum(1) - 1).pow(2).mean()
        l = torch.sqrt(quat.pow(2).sum(1)).view(n_b, 1)
        quat = quat / l
        q = torch.cat((q_trans, quat[:, 1:], q_art, quat[:, 0].view(-1, 1)), 1)
        return quat, q, qdot2, loss_unit, error_flag

    def process_contact_info(self, labels):
        """
        labels: Bx4 — two contact labels per foot; collapse to Bx2
        left/right foot flags.
        """
        left_flag = labels[:, 0] + labels[:, 1]
        left_flag[left_flag > 0] = 1
        right_flag = labels[:, 2] + labels[:, 3]
        right_flag[right_flag > 0] = 1
        return torch.cat((left_flag.view(-1, 1), right_flag.view(-1, 1)), 1)

    def check_contact_presence(self, labels):
        """
        labels: Bx4. Returns a length-B flag that is 1 when any of the four
        contact labels is active.
        """
        flags = labels[:, 0] + labels[:, 1] + labels[:, 2] + labels[:, 3]
        flags[flags > 0] = 1
        return flags
--------------------------------------------------------------------------------
/Utils/misc.py:
--------------------------------------------------------------------------------
import numpy as np
import torch
from scipy.spatial.transform import Rotation as R
import pybullet as p
import math
import sys
import rbdl
sys.path.append("./util")
from Utils.angles import angle_util
import copy
AU = angle_util()

def valid_flags_by_vnect_accuracy(th_error, th_count, data, gt):
    # Flag sequences whose 2D predictions deviate from GT by more than
    # th_error on more than th_count frames. Returns 1 for valid sequences.
    N, T, J, _ = gt.shape
    gt = gt.view(N, T, -1)
    data = data.view(N, T, -1)
    residual = gt - data
    norms = torch.norm(residual, dim=2)
    labels = torch.zeros(norms.shape)
    higher_id = torch.nonzero(norms > th_error)
    labels[higher_id[:, 0], higher_id[:, 1]] = 1

    labels_sequence_wise = torch.sum(labels, 1)
    higher_id_sequence_wise = torch.nonzero(labels_sequence_wise > th_count)
    valid_labels = torch.ones(labels_sequence_wise.shape)
    valid_labels[higher_id_sequence_wise[:, 0]] = 0
    return valid_labels

def valid_flags_by_frameout(gt_2Ds):
    # Flag sequences whose normalized keypoints ever leave [0, 1], i.e. the
    # subject goes out of frame. Returns 1 for valid sequences.
    tmp = gt_2Ds.clone()
    tmp[tmp > 1] = 100
    tmp[tmp < 0] = 100
    tmp[tmp != 100] = 0
    tmp[tmp == 100] = 1
    labels = tmp.sum(1).sum(1).sum(1)
    final_labels = torch.zeros(labels.shape)
    final_labels[labels == 0] = 1
    return final_labels
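# Illustrative example (added for exposition; not part of the original file).
# valid_flags_by_frameout marks a sequence invalid as soon as any normalized
# keypoint leaves [0, 1]:
def _example_frameout_flags():
    gt = torch.zeros(2, 3, 4, 2)  # N=2 sequences, T=3 frames, J=4 joints, 2D
    gt[1, 0, 0, 0] = 1.5          # one out-of-frame coordinate in sequence 1
    flags = valid_flags_by_frameout(gt)
    assert flags.tolist() == [1.0, 0.0]  # sequence 0 valid, sequence 1 not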
def valid_flags(th_error, th_count, data, gt):
    """
    th_error: threshold on the 2D projection error
    th_count: max number of frames allowed to exceed th_error
    data: predicted 2D keypoints (normalized to [0, 1])
    gt:   GT 2D keypoints (normalized to [0, 1])
    Returns the indices of sequences that pass both validity checks.
    """
    valid_flags_vnect_acc = valid_flags_by_vnect_accuracy(th_error, th_count, data, gt)
    valid_flags_frameout = valid_flags_by_frameout(gt)
    final_labels = valid_flags_vnect_acc * valid_flags_frameout
    valid_indices = torch.FloatTensor(np.array([x for x in np.arange(len(final_labels)) if final_labels[x] == 1]))
    return valid_indices.numpy().astype(int)

def img_size_getter(sub_ids):
    # Map subject/sequence ids to the pixel size of their source images.
    # Note the ranges overlap; later assignments win (WI overrides MPI).
    labels = sub_ids.clone()
    DC_id = torch.nonzero(labels < 1)
    HM_id = torch.nonzero((labels < 20) * (labels > 0))
    MPI_id = torch.nonzero(labels > 20)
    WI_id = torch.nonzero(labels >= 30)
    labels[DC_id[:, 0]] = 1024
    labels[HM_id[:, 0]] = 1000
    labels[MPI_id[:, 0]] = 2048
    labels[WI_id[:, 0]] = 1280
    return labels

def rigid_transform_3D(A, B):
    # Kabsch algorithm: least-squares rigid transform (R, t) with B = R A + t
    # for paired 3xN point sets.
    assert len(A) == len(B)

    num_rows, num_cols = A.shape
    if num_rows != 3:
        raise Exception("matrix A is not 3xN, it is {}x{}".format(num_rows, num_cols))

    [num_rows, num_cols] = B.shape
    if num_rows != 3:
        raise Exception("matrix B is not 3xN, it is {}x{}".format(num_rows, num_cols))

    # find mean column wise
    centroid_A = np.mean(A, axis=1).reshape(3, 1)
    centroid_B = np.mean(B, axis=1).reshape(3, 1)
    # subtract mean
    Am = A - np.tile(centroid_A, (1, num_cols))
    Bm = B - np.tile(centroid_B, (1, num_cols))

    # covariance matrix (the local name R below shadows scipy's Rotation
    # alias inside this function only)
    H = Am.dot(np.transpose(Bm))

    # sanity check
    if np.linalg.matrix_rank(H) < 3:
        raise ValueError("rank of H = {}, expecting 3".format(np.linalg.matrix_rank(H)))

    # find rotation
    U, S, Vt = np.linalg.svd(H)
    R = Vt.T @ U.T

    # special reflection case
    if np.linalg.det(R) < 0:
        print("det(R) < 0, reflection detected! correcting for it ...\n")
        Vt[2, :] *= -1
        R = Vt.T @ U.T

    t = -R @ centroid_A + centroid_B

    return R, t

def mat2zerodiag(data, K):
    """
    Input: data, K
    data: BxTxC

    return: BxTKxCK block-diagonal tensor with K copies of data on the diagonal
    """
    B, T, C = data.shape
    data_pad = torch.zeros(B, T * K, C * K)
    for i in range(K):
        data_pad[:, i * T:(i + 1) * T, i * C:(i + 1) * C] = data
    return data_pad


def reverse_labels(labels):
    # In-place logical NOT of a 0/1 label tensor.
    labels += 1
    labels[labels == 2] = 0
    labels[labels == 1] = 1
    return labels

def reverse_labels_np(labels):
    # Copying variant of reverse_labels for numpy arrays.
    reversed_labels = copy.copy(labels)
    reversed_labels += 1
    reversed_labels[reversed_labels == 2] = 0
    reversed_labels[reversed_labels == 1] = 1
    return reversed_labels

def get_WILD_labels(sub_ids):
    # id 30 marks in-the-wild sequences.
    wild_labels = copy.copy(sub_ids)
    wild_labels[wild_labels < 30] = 0
    wild_labels[wild_labels == 30] = 1
    non_wild_labels = reverse_labels_np(wild_labels)
    return wild_labels, non_wild_labels

def pysinc(x):
    # Normalized sinc: sin(pi x) / (pi x). NOTE: undefined at x == 0; callers
    # must keep x away from zero.
    return torch.sin(math.pi * x) / (math.pi * x)

def exp2quat(e):
    """
    Convert axis-angle rotations (aka exponential maps) to quaternions.
    Stable formula from "Practical Parameterization of Rotations Using the
    Exponential Map".
    Expects a tensor of shape (*, 3), where * denotes any number of dimensions.
    Returns a tensor of shape (*, 4).
    """
    assert e.shape[-1] == 3
    original_shape = list(e.shape)
    original_shape[-1] = 4
    e = e.reshape(-1, 3)
    theta = torch.norm(e, dim=1).view(-1, 1)
    w = torch.cos(0.5 * theta).view(-1, 1)
    xyz = 0.5 * pysinc(0.5 * theta / math.pi) * e
    return torch.cat((w, xyz), 1).view(original_shape)
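# Illustrative example (added for exposition; not part of the original file).
# Quick self-check for rigid_transform_3D: recover a known rotation and
# translation from paired 3xN point sets.
def _example_rigid_transform_check():
    rng = np.random.default_rng(0)
    A = rng.standard_normal((3, 10))
    R_true = R.from_euler("xyz", [0.3, -0.2, 0.5]).as_matrix()
    t_true = np.array([[0.1], [0.2], [0.3]])
    B = R_true @ A + t_true
    R_est, t_est = rigid_transform_3D(A, B)
    assert np.allclose(R_est, R_true, atol=1e-6)
    assert np.allclose(t_est, t_true, atol=1e-6)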
def quat2exp(quat):
    # Inverse of exp2quat: quaternion (w, x, y, z) -> axis-angle vector.
    # NOTE: singular when w == +/-1 (zero rotation).
    half_theta = torch.acos(quat[:, 0].view(-1, 1))
    exp = (2 / torch.sin(half_theta)) * half_theta * quat[:, 1:].view(-1, 3)
    return exp

def get_q_axis_from_quat_bullet(q):
    # Replace the quaternion block of a PyBullet-ordered q with its
    # axis-angle equivalent.
    quat = q[:, 3:7]
    quat = AU.quat_bullet2general_b(quat)
    exp = quat2exp(quat)
    new_q = torch.cat((q[:, :3], exp.view(-1, 3), q[:, 7:]), 1)
    return new_q

def get_q_axis_from_quat_rbdl(q):
    # q stores quaternion (x, y, z) at [3:6] and w last; rebuild (w, x, y, z)
    # before converting.
    quat = torch.cat((q[:, -1].view(-1, 1), q[:, 3:6]), 1)
    exp = quat2exp(quat)
    new_q = torch.cat((q[:, :3], exp.view(-1, 3), q[:, 6:-1]), 1)
    return new_q

def get_q_quat_from_axis_rbdl(q):
    exp = q[:, 3:6]
    quat = AU.expmap_to_quaternion(exp)
    new_q = torch.cat((q[:, :3], quat[:, 1:].view(-1, 3), q[:, 6:], quat[:, 0].view(-1, 1)), 1)
    return new_q

def get_mass_mat(model, q):
    # Batched joint-space mass matrix via RBDL's composite-rigid-body
    # algorithm.
    n_b, _ = q.shape
    M_np = np.zeros((n_b, model.qdot_size, model.qdot_size))
    [rbdl.CompositeRigidBodyAlgorithm(model, q[i].astype(float), M_np[i]) for i in range(n_b)]
    return torch.FloatTensor(M_np)

def get_mass_mat_cpu(model, q):
    # Identical to get_mass_mat; kept for API compatibility with the CPU path.
    return get_mass_mat(model, q)

def clean_massMat(M_inv):
    # Zero out the rows/columns of DoFs excluded from the dynamics.
    M_inv[:, 6] = 0
    M_inv[:, 6 + 8] = 0
    M_inv[:, 6 + 9] = 0
    M_inv[:, 6 + 10] = 0
    M_inv[:, 6 + 18] = 0
    M_inv[:, 6 + 19] = 0
    M_inv[:, 6 + 27] = 0
    M_inv[:, 6 + 35] = 0
    M_inv[:, -1] = 0

    M_inv[:, :, 6] = 0
    M_inv[:, :, 6 + 8] = 0
    M_inv[:, :, 6 + 9] = 0
    M_inv[:, :, 6 + 10] = 0
    M_inv[:, :, 6 + 18] = 0
    M_inv[:, :, 6 + 19] = 0
    M_inv[:, :, 6 + 27] = 0
    M_inv[:, :, 6 + 35] = 0
    M_inv[:, :, -1] = 0
    return M_inv

def vec2zerodiag(data):
    """
    data: BxTxC

    return: BxTxTC, each length-C row placed on its own block of the diagonal
    """
    B, T, C = data.shape
    data = torch.diag_embed(data.contiguous().view(B, -1), offset=0, dim1=-2, dim2=-1)
    data = data.view(B, -1, C, T * C).sum(2)
    return data


def motion_update(id_robot, jointIds, qs):
    # Hard-reset the joint states (no dynamics) for visualization.
    [p.resetJointState(id_robot, jid, q) for jid, q in zip(jointIds, qs)]
    return 0

def visualization3D(id_robot, id_robot_ref, jointIds, rbdl2bullet, q, target_qs):
    q = np.squeeze(q.detach().cpu().numpy())
    jointIds_reordered = np.array(jointIds)[rbdl2bullet]

    motion_update(id_robot, jointIds_reordered, q[6:-1])
    motion_update(id_robot_ref, jointIds_reordered, target_qs[7:])

    p.resetBasePositionAndOrientation(id_robot, [q[0], q[1], q[2]], [q[3], q[4], q[5], q[-1]])
    p.resetBasePositionAndOrientation(id_robot_ref, [target_qs[0], target_qs[1], target_qs[2]], target_qs[3:7])
    p.stepSimulation()
    return 0
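# Illustrative example (added for exposition; not part of the original file).
# Round-trip sanity check for exp2quat / quat2exp on a small batch (angles
# kept away from the theta == 0 singularity noted above).
def _example_expmap_roundtrip():
    e = torch.tensor([[0.3, -0.2, 0.5], [0.0, 0.4, 0.0]])
    quat = exp2quat(e)   # (w, x, y, z), unit norm by construction
    back = quat2exp(quat)
    assert torch.allclose(back, e, atol=1e-5)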
def visualization3D_single(id_robot, jointIds, rbdl2bullet, q, Rot, T):
    q = np.squeeze(q.cpu().numpy())
    jointIds_reordered = np.array(jointIds)[rbdl2bullet]
    out_pos = np.array([q[0], q[1], q[2]])
    out_quat = np.array([q[3], q[4], q[5], q[-1]])
    r1 = R.from_quat(out_quat)
    mat1 = r1.as_matrix()
    r12 = R.from_matrix(np.dot(Rot.T, mat1))
    out_quat = r12.as_quat()
    out_pos = np.dot(Rot.T, out_pos - T)

    motion_update(id_robot, jointIds_reordered, q[6:-1])
    p.resetBasePositionAndOrientation(id_robot, out_pos, out_quat)
    p.stepSimulation()
    return 0

def visualization3D_double(id_robot, id_robot2, jointIds, rbdl2bullet, q, q2, Rot, T):
    jointIds_reordered = np.array(jointIds)[rbdl2bullet]

    out_pos = np.array([q[0], q[1], q[2]])
    out_quat = np.array([q[3], q[4], q[5], q[-1]])
    r1 = R.from_quat(out_quat)
    mat1 = r1.as_matrix()
    r12 = R.from_matrix(np.dot(Rot.T, mat1))
    out_quat = r12.as_quat()
    out_pos = np.dot(Rot.T, out_pos - T)

    out_pos2 = np.array([q2[0], q2[1], q2[2]])
    out_quat2 = np.array([q2[3], q2[4], q2[5], q2[-1]])
    r2 = R.from_quat(out_quat2)
    mat2 = r2.as_matrix()
    r22 = R.from_matrix(np.dot(Rot.T, mat2))
    out_quat2 = r22.as_quat()
    out_pos2 = np.dot(Rot.T, out_pos2 - T)
    motion_update(id_robot, jointIds_reordered, q[6:-1])
    p.resetBasePositionAndOrientation(id_robot, out_pos, out_quat)
    motion_update(id_robot2, jointIds_reordered, q2[6:-1])
    out_pos2[2] += 1  # vertical offset so the two skeletons do not overlap
    p.resetBasePositionAndOrientation(id_robot2, out_pos2, out_quat2)

    p.stepSimulation()
    return 0

def visualization3D_double_HM(id_robot, id_robot2, jointIds, rbdl2bullet, q, q2, Rot, T):
    # Same as visualization3D_double, but applies the forward transform
    # Rot * pos + T and no offset between the two skeletons.
    jointIds_reordered = np.array(jointIds)[rbdl2bullet]

    out_pos = np.array([q[0], q[1], q[2]])
    out_quat = np.array([q[3], q[4], q[5], q[-1]])
    r1 = R.from_quat(out_quat)
    mat1 = r1.as_matrix()
    r12 = R.from_matrix(np.dot(Rot, mat1))
    out_quat = r12.as_quat()
    out_pos = np.dot(Rot, out_pos) + T

    out_pos2 = np.array([q2[0], q2[1], q2[2]])
    out_quat2 = np.array([q2[3], q2[4], q2[5], q2[-1]])
    r2 = R.from_quat(out_quat2)
    mat2 = r2.as_matrix()
    r22 = R.from_matrix(np.dot(Rot, mat2))
    out_quat2 = r22.as_quat()
    out_pos2 = np.dot(Rot, out_pos2) + T
    motion_update(id_robot, jointIds_reordered, q[6:-1])
    p.resetBasePositionAndOrientation(id_robot, out_pos, out_quat)
    motion_update(id_robot2, jointIds_reordered, q2[6:-1])
    p.resetBasePositionAndOrientation(id_robot2, out_pos2, out_quat2)

    p.stepSimulation()
    return 0

def visualization3D_multiple(robot_ids, jointIds, rbdl2bullet, qs, Rot=[], T=[], overlay=False, visu=False):
    jointIds_reordered = np.array(jointIds)[rbdl2bullet]
    if len(Rot) == 0:
        Rot = np.eye(3)
        T = np.zeros(3)
    for i, (id_robot, q) in enumerate(zip(robot_ids, qs)):
        if visu:
            out_pos, out_quat = get_transformed_root_visualisation(np.array([q[0], q[1], q[2]]), np.array([q[3], q[4], q[5], q[-1]]), Rot, T)
        else:
            out_pos, out_quat = get_transformed_root(np.array([q[0], q[1], q[2]]), np.array([q[3], q[4], q[5], q[-1]]), Rot, T)
        motion_update(id_robot, jointIds_reordered, q[6:-1])
        if not overlay:
            out_pos[2] += (out_pos[2] + i)  # spread the skeletons apart
        p.resetBasePositionAndOrientation(id_robot, out_pos, out_quat)
    p.stepSimulation()
    return 0
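# Note (added for exposition). The visualization helpers share a common root
# transform, implemented just below as get_transformed_root and
# get_transformed_root_inv, apparently moving between camera and world frames:
#
#   pos' = Rot^T (pos - T),  R' = Rot^T R      (get_transformed_root)
#   pos' = Rot pos + T,      R' = Rot R        (get_transformed_root_inv)
#
# with quaternions converted through scipy's Rotation.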
def visualization3D_multiple_invTrans(robot_ids, jointIds, rbdl2bullet, qs, Rot, T, overlay=False):
    jointIds_reordered = np.array(jointIds)[rbdl2bullet]
    for i, (id_robot, q) in enumerate(zip(robot_ids, qs)):
        out_pos, out_quat = get_transformed_root_inv(np.array([q[0], q[1], q[2]]), np.array([q[3], q[4], q[5], q[-1]]), Rot, T)
        motion_update(id_robot, jointIds_reordered, q[6:-1])
        if not overlay:
            out_pos[0] += (out_pos[0] + i)  # spread the skeletons apart
        p.resetBasePositionAndOrientation(id_robot, out_pos, out_quat)
    p.stepSimulation()
    return 0

def qeuler(q, order, epsilon=0):
    """
    Convert quaternion(s) q to Euler angles.
    Expects a tensor of shape (*, 4), where * denotes any number of dimensions.
    Returns a tensor of shape (*, 3).
    """
    assert q.shape[-1] == 4

    original_shape = list(q.shape)
    original_shape[-1] = 3
    q = q.view(-1, 4)

    q0 = q[:, 0]
    q1 = q[:, 1]
    q2 = q[:, 2]
    q3 = q[:, 3]

    if order == 'xyz':
        x = torch.atan2(2 * (q0 * q1 - q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2))
        y = torch.asin(torch.clamp(2 * (q1 * q3 + q0 * q2), -1 + epsilon, 1 - epsilon))
        z = torch.atan2(2 * (q0 * q3 - q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3))
    elif order == 'yzx':
        x = torch.atan2(2 * (q0 * q1 - q2 * q3), 1 - 2 * (q1 * q1 + q3 * q3))
        y = torch.atan2(2 * (q0 * q2 - q1 * q3), 1 - 2 * (q2 * q2 + q3 * q3))
        z = torch.asin(torch.clamp(2 * (q1 * q2 + q0 * q3), -1 + epsilon, 1 - epsilon))
    elif order == 'zxy':
        x = torch.asin(torch.clamp(2 * (q0 * q1 + q2 * q3), -1 + epsilon, 1 - epsilon))
        y = torch.atan2(2 * (q0 * q2 - q1 * q3), 1 - 2 * (q1 * q1 + q2 * q2))
        z = torch.atan2(2 * (q0 * q3 - q1 * q2), 1 - 2 * (q1 * q1 + q3 * q3))
    elif order == 'xzy':
        x = torch.atan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q3 * q3))
        y = torch.atan2(2 * (q0 * q2 + q1 * q3), 1 - 2 * (q2 * q2 + q3 * q3))
        z = torch.asin(torch.clamp(2 * (q0 * q3 - q1 * q2), -1 + epsilon, 1 - epsilon))
    elif order == 'yxz':
        x = torch.asin(torch.clamp(2 * (q0 * q1 - q2 * q3), -1 + epsilon, 1 - epsilon))
        y = torch.atan2(2 * (q1 * q3 + q0 * q2), 1 - 2 * (q1 * q1 + q2 * q2))
        z = torch.atan2(2 * (q1 * q2 + q0 * q3), 1 - 2 * (q1 * q1 + q3 * q3))
    elif order == 'zyx':
        x = torch.atan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2))
        y = torch.asin(torch.clamp(2 * (q0 * q2 - q1 * q3), -1 + epsilon, 1 - epsilon))
        z = torch.atan2(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3))
    else:
        raise ValueError("unknown Euler order: {}".format(order))

    return torch.stack((x, y, z), dim=1).view(original_shape)

def get_transformed_root(pos, ori, Rot, T):
    # Apply the inverse rigid transform to the root: Rot^T (pos - T).
    r1 = R.from_quat(ori)
    mat1 = r1.as_matrix()
    r12 = R.from_matrix(np.dot(Rot.T, mat1))
    ori = r12.as_quat()
    pos = np.dot(Rot.T, pos - T)
    return pos, ori

def get_transformed_root_visualisation(pos, ori, Rot, T):
    # As get_transformed_root, with an extra fixed tilt (-0.2 rad about x)
    # applied for nicer rendering.
    r_adjust = R.from_euler('xyz', [-0.2, 0, 0])
    R2 = r_adjust.as_matrix()

    r1 = R.from_quat(ori)
    mat1 = r1.as_matrix()
    mat1 = np.dot(R2, mat1)
    mat1 = np.dot(Rot.T, mat1)
    r12 = R.from_matrix(mat1)
    ori = r12.as_quat()
    pos = np.dot(Rot.T, pos - T)
    return pos, ori

def get_transformed_root_inv(pos, ori, Rot, T):
    # Forward transform: Rot * pos + T. Note the (3, 1) reshape of the
    # returned position.
    r1 = R.from_quat(ori)
    mat1 = r1.as_matrix()
    r12 = R.from_matrix(np.dot(Rot, mat1))
    ori = r12.as_quat()
    pos = np.dot(Rot, pos).reshape(3, 1) + T.reshape(3, 1)
    return pos, ori
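# Illustrative example (added for exposition; not part of the original file).
# get_transformed_root_inv undoes get_transformed_root, up to the (3, 1)
# reshape of the returned position and the quaternion's sign ambiguity.
def _example_root_transform_roundtrip():
    Rot = R.from_euler("xyz", [0.1, 0.2, 0.3]).as_matrix()
    T = np.array([1.0, 2.0, 3.0])
    pos = np.array([0.5, -0.5, 2.0])
    ori = R.from_euler("zyx", [0.4, 0.0, -0.3]).as_quat()
    p_w, o_w = get_transformed_root(pos, ori, Rot, T)
    p_c, o_c = get_transformed_root_inv(p_w, o_w, Rot, T)
    assert np.allclose(p_c.ravel(), pos)
    assert np.allclose(np.abs(o_c), np.abs(ori))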
def visualization3D_triple(id_robot, id_robot2, id_robot3, jointIds, rbdl2bullet, q, q2, q3, Rot, T):
    jointIds_reordered = np.array(jointIds)[rbdl2bullet]
    out_pos, out_quat = get_transformed_root(np.array([q[0], q[1], q[2]]), np.array([q[3], q[4], q[5], q[-1]]), Rot, T)
    out_pos2, out_quat2 = get_transformed_root(np.array([q2[0], q2[1], q2[2]]), np.array([q2[3], q2[4], q2[5], q2[-1]]), Rot, T)
    out_pos3, out_quat3 = get_transformed_root(np.array([q3[0], q3[1], q3[2]]), np.array([q3[3], q3[4], q3[5], q3[-1]]), Rot, T)

    motion_update(id_robot, jointIds_reordered, q[6:-1])
    p.resetBasePositionAndOrientation(id_robot, out_pos, out_quat)
    motion_update(id_robot2, jointIds_reordered, q2[6:-1])
    p.resetBasePositionAndOrientation(id_robot2, out_pos2, out_quat2)
    motion_update(id_robot3, jointIds_reordered, q3[6:-1])
    p.resetBasePositionAndOrientation(id_robot3, out_pos3, out_quat3)
    p.stepSimulation()
    return 0

def visualization3D_bb(id_robot, id_robot_ref, jointIds, rbdl2bullet, q, target_qs):
    q = np.squeeze(q.detach().cpu().numpy())
    jointIds_reordered = np.array(jointIds)[rbdl2bullet]

    motion_update(id_robot, jointIds_reordered, q[6:-1])
    motion_update(id_robot_ref, jointIds_reordered, target_qs[6:-1])

    p.resetBasePositionAndOrientation(id_robot, [q[0], q[1], q[2]], [q[3], q[4], q[5], q[-1]])
    p.resetBasePositionAndOrientation(id_robot_ref, [target_qs[0], target_qs[1], target_qs[2]], [target_qs[3], target_qs[4], target_qs[5], target_qs[-1]])
    p.stepSimulation()
    return 0

def visualization3DCam2World_bb3(id_robot, id_robot_ref, id_robot_ref2, jointIds, rbdl2bullet, q, target_qs, ref_qs, Rot, T):
    q = np.squeeze(q.detach().cpu().numpy())
    jointIds_reordered = np.array(jointIds)[rbdl2bullet]

    motion_update(id_robot, jointIds_reordered, q[6:-1])
    motion_update(id_robot_ref, jointIds_reordered, target_qs[6:-1])
    motion_update(id_robot_ref2, jointIds_reordered, ref_qs[6:-1])
    out_pos = np.array([q[0], q[1], q[2]])
    out_quat = np.array([q[3], q[4], q[5], q[-1]])
    out_pos = np.dot(Rot.T, out_pos - T)
    gt_pos = np.array([target_qs[0], target_qs[1], target_qs[2]])
    gt_pos = np.dot(Rot.T, gt_pos - T)
    gt_quat = np.array([target_qs[3], target_qs[4], target_qs[5], target_qs[-1]])

    ref_pos = np.array([ref_qs[0], ref_qs[1], ref_qs[2]])
    ref_pos = np.dot(Rot.T, ref_pos - T)
    ref_pos += np.array([0, 0, 1])  # offset the reference skeleton vertically
    ref_quat = np.array([ref_qs[3], ref_qs[4], ref_qs[5], ref_qs[-1]])

    r1 = R.from_quat(out_quat)
    mat1 = r1.as_matrix()
    r12 = R.from_matrix(np.dot(Rot.T, mat1))
    out_quat = r12.as_quat()

    r2 = R.from_quat(gt_quat)
    mat2 = r2.as_matrix()
    r22 = R.from_matrix(np.dot(Rot.T, mat2))
    gt_quat = r22.as_quat()

    r3 = R.from_quat(ref_quat)
    mat3 = r3.as_matrix()
    r32 = R.from_matrix(np.dot(Rot.T, mat3))
    ref_quat = r32.as_quat()

    p.resetBasePositionAndOrientation(id_robot, out_pos, out_quat)
    p.resetBasePositionAndOrientation(id_robot_ref, gt_pos, gt_quat)
    p.resetBasePositionAndOrientation(id_robot_ref2, ref_pos, ref_quat)
    p.stepSimulation()
    return 0
def visualization3DCam2World_bb(id_robot, id_robot_ref, jointIds, rbdl2bullet, q, target_qs, Rot, T):
    q = np.squeeze(q.detach().cpu().numpy())
    jointIds_reordered = np.array(jointIds)[rbdl2bullet]

    motion_update(id_robot, jointIds_reordered, q[6:-1])
    motion_update(id_robot_ref, jointIds_reordered, target_qs[6:-1])
    out_pos = np.array([q[0], q[1], q[2]])
    out_quat = np.array([q[3], q[4], q[5], q[-1]])
    out_pos = np.dot(Rot.T, out_pos - T)
    gt_pos = np.array([target_qs[0], target_qs[1], target_qs[2]])
    gt_pos = np.dot(Rot.T, gt_pos - T)
    gt_quat = np.array([target_qs[3], target_qs[4], target_qs[5], target_qs[-1]])

    r1 = R.from_quat(out_quat)
    mat1 = r1.as_matrix()
    r12 = R.from_matrix(np.dot(Rot.T, mat1))
    out_quat = r12.as_quat()

    r2 = R.from_quat(gt_quat)
    mat2 = r2.as_matrix()
    r22 = R.from_matrix(np.dot(Rot.T, mat2))
    gt_quat = r22.as_quat()

    p.resetBasePositionAndOrientation(id_robot, out_pos, out_quat)
    p.resetBasePositionAndOrientation(id_robot_ref, gt_pos, gt_quat)
    p.stepSimulation()
    return 0

def visualization3DCam2World_three(id_robot, id_robot_ref, id_robot_target, jointIds, rbdl2bullet, q, q_target, target_qs, Rot, T):
    q = np.squeeze(q.detach().cpu().numpy())
    jointIds_reordered = np.array(jointIds)[rbdl2bullet]
    motion_update(id_robot, jointIds_reordered, q[6:-1])
    motion_update(id_robot_target, jointIds_reordered, q_target[6:-1])
    motion_update(id_robot_ref, jointIds_reordered, target_qs[7:])
    out_pos = np.array([q[0], q[1], q[2]])
    out_quat = np.array([q[3], q[4], q[5], q[-1]])
    out_pos = np.dot(Rot.T, out_pos - T)

    target_pos = np.array([q_target[0], q_target[1], q_target[2]])
    target_quat = np.array([q_target[3], q_target[4], q_target[5], q_target[-1]])
    target_pos = np.dot(Rot.T, target_pos - T)
    gt_pos = np.array([target_qs[0], target_qs[1], target_qs[2]])
    gt_pos = np.dot(Rot.T, gt_pos - T)
    gt_quat = target_qs[3:7]

    r1 = R.from_quat(out_quat)
    mat1 = r1.as_matrix()
    r12 = R.from_matrix(np.dot(Rot.T, mat1))
    out_quat = r12.as_quat()

    r2 = R.from_quat(gt_quat)
    mat2 = r2.as_matrix()
    r22 = R.from_matrix(np.dot(Rot.T, mat2))
    gt_quat = r22.as_quat()

    r3 = R.from_quat(target_quat)
    mat3 = r3.as_matrix()
    r32 = R.from_matrix(np.dot(Rot.T, mat3))
    target_quat = r32.as_quat()

    p.resetBasePositionAndOrientation(id_robot, out_pos, out_quat)
    p.resetBasePositionAndOrientation(id_robot_target, target_pos, target_quat)
    p.resetBasePositionAndOrientation(id_robot_ref, gt_pos, gt_quat)
    p.stepSimulation()
    return 0

def visualization3DCam2World(id_robot, id_robot_ref, jointIds, rbdl2bullet, q, target_qs, Rot, T):
    q = np.squeeze(q.detach().cpu().numpy())
    jointIds_reordered = np.array(jointIds)[rbdl2bullet]

    motion_update(id_robot, jointIds_reordered, q[6:-1])
    motion_update(id_robot_ref, jointIds_reordered, target_qs[7:])
    out_pos = np.array([q[0], q[1], q[2]])
    out_quat = np.array([q[3], q[4], q[5], q[-1]])
    out_pos = np.dot(Rot.T, out_pos - T)
    gt_pos = np.array([target_qs[0], target_qs[1], target_qs[2]])
    gt_pos = np.dot(Rot.T, gt_pos - T)
    gt_quat = target_qs[3:7]

    r1 = R.from_quat(out_quat)
    mat1 = r1.as_matrix()
    r12 = R.from_matrix(np.dot(Rot.T, mat1))
    out_quat = r12.as_quat()

    r2 = R.from_quat(gt_quat)
    mat2 = r2.as_matrix()
    r22 = R.from_matrix(np.dot(Rot.T, mat2))
    gt_quat = r22.as_quat()

    p.resetBasePositionAndOrientation(id_robot, out_pos, out_quat)
    p.resetBasePositionAndOrientation(id_robot_ref, gt_pos, gt_quat)
    p.stepSimulation()
    return 0
def get_qs_with_euler(qs):
    # Convert each q's root quaternion block into Euler angles.
    qs_new = []
    for q in qs:
        quat = q[3:7]
        r = R.from_quat(quat)
        euler = r.as_euler("zyx")
        root_config = np.concatenate((q[:3], euler), 0)
        qs_new.append(np.concatenate((root_config, q[7:]), 0))
    return np.array(qs_new)

def target_correction(error):
    # Wrap angle errors into (-pi, pi] so control never pushes the long way
    # around.
    mask = torch.ones(error.shape[0])

    larger_id = torch.nonzero((error > math.pi) * mask).detach()
    smaller_id = torch.nonzero((error < -math.pi) * mask).detach()

    error[larger_id] = error[larger_id] - 2 * math.pi
    error[smaller_id] = error[smaller_id] + 2 * math.pi
    if len(larger_id) != 0 or len(smaller_id) != 0:
        print("target_correction: wrapped out-of-range angle errors")
    return error

def get_jointIds_Names(id_robot):
    # Collect all joint ids/names from PyBullet, plus the actuated
    # (prismatic/revolute) subset.
    jointNamesAll = []
    jointIdsAll = []
    jointNames = []
    jointIds = []
    for j in range(p.getNumJoints(id_robot)):
        info = p.getJointInfo(id_robot, j)
        p.changeDynamics(id_robot, j, linearDamping=0, angularDamping=0)
        jointName = info[1]
        jointType = info[2]
        jointIdsAll.append(j)
        jointNamesAll.append(jointName)
        if (jointType == p.JOINT_PRISMATIC or jointType == p.JOINT_REVOLUTE):
            jointIds.append(j)
            jointNames.append(jointName)
    return jointIdsAll, jointNamesAll, jointIds, jointNames
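# Illustrative example (added for exposition; not part of the original file).
# target_correction wraps per-DoF angle errors into (-pi, pi]:
def _example_target_correction():
    err = torch.tensor([3.5, -3.5, 0.1])
    out = target_correction(err.clone())
    expected = torch.tensor([3.5 - 2 * math.pi, -3.5 + 2 * math.pi, 0.1])
    assert torch.allclose(out, expected)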
--------------------------------------------------------------------------------
/URDF/manual.urdf:
--------------------------------------------------------------------------------
[1,682-line URDF robot description; the XML content was stripped to bare line
markers in this export and is not recoverable here.]
--------------------------------------------------------------------------------