├── LaFan.py
├── README.md
├── __init__.py
├── __pycache__
│   ├── LaFan.cpython-36.pyc
│   ├── LaFan.cpython-38.pyc
│   ├── functions.cpython-36.pyc
│   ├── functions.cpython-38.pyc
│   ├── model.cpython-36.pyc
│   ├── model.cpython-38.pyc
│   ├── quaternion.cpython-36.pyc
│   ├── quaternion.cpython-38.pyc
│   └── skeleton.cpython-36.pyc
├── baseline.gif
├── config
│   ├── test-base.yaml
│   └── train-base.yaml
├── constraint.gif
├── example.bvh
├── fix.gif
├── flip_bvh.py
├── foot_sliding
│   ├── Animation.py
│   ├── AnimationStructure.py
│   ├── BVH.py
│   ├── BVH_mod.py
│   ├── InverseKinematics.py
│   ├── Pivots.py
│   ├── Quaternions.py
│   ├── Quaternions_old.py
│   ├── __init__.py
│   ├── animation_2d_data.py
│   ├── animation_data.py
│   ├── example.bvh
│   ├── example.txt
│   ├── load_skeleton.py
│   └── skeleton_lafan.yml
├── functions.py
├── model.py
├── quaternion.py
├── remove_fs.py
├── skeleton.py
├── test.py
└── train.py

--------------------------------------------------------------------------------
/LaFan.py:
--------------------------------------------------------------------------------

import torch
from torch.utils.data import Dataset, DataLoader

import sys, os
sys.path.insert(0, os.path.dirname(__file__))
sys.path.append("..")

import numpy as np
from lafan1 import extract, utils, benchmarks

class LaFan1(Dataset):
    def __init__(self, bvh_path, train = False, seq_len = 50, offset = 10, debug = False):
        """
        Args:
            bvh_path (string): Path to the bvh files.
            train (bool): Use the training actors (subject1-4) if True, otherwise the test actor (subject5).
            seq_len (int): The max length of each sequence window used for interpolation.
            offset (int): Frame offset between two consecutive windows.
        """
        if train:
            self.actors = ['subject1', 'subject2', 'subject3', 'subject4']
        else:
            self.actors = ['subject5']
        self.train = train
        self.seq_len = seq_len
        self.debug = debug
        if self.debug:
            self.actors = ['subject1']
        self.offset = offset
        self.data = self.load_data(bvh_path)
        self.cur_seq_length = 5


    def load_data(self, bvh_path):
        # Build windows of `seq_len` frames, offset by `offset` frames
        print('Building the data set...')
        X, Q, parents, contacts_l, contacts_r = extract.get_lafan1_set(\
            bvh_path, self.actors, window=self.seq_len, offset=self.offset, debug = self.debug)
        # Global representation:
        q_glbl, x_glbl = utils.quat_fk(Q, X, parents)

        # if self.train:
        # Global positions stats:
        x_mean = np.mean(x_glbl.reshape([x_glbl.shape[0], x_glbl.shape[1], -1]).transpose([0, 2, 1]), axis=(0, 2), keepdims=True)
        x_std = np.std(x_glbl.reshape([x_glbl.shape[0], x_glbl.shape[1], -1]).transpose([0, 2, 1]), axis=(0, 2), keepdims=True)
        self.x_mean = torch.from_numpy(x_mean)
        self.x_std = torch.from_numpy(x_std)

        input_ = {}
        # The following features are inputs:
        # 1. local quaternion vector (J * 4d)
        input_['local_q'] = Q

        # 2. global root velocity vector (3d)
        input_['root_v'] = x_glbl[:,1:,0,:] - x_glbl[:,:-1,0,:]

        # 3. contact information vector (4d)
        input_['contact'] = np.concatenate([contacts_l, contacts_r], -1)

        # 4. global root position offset (3d)
        input_['root_p_offset'] = x_glbl[:,-1,0,:]

        # 5. local quaternion offset (J * 4d)
        input_['local_q_offset'] = Q[:,-1,:,:]

        # 6. target
        input_['target'] = Q[:,-1,:,:]

        # 7. root pos
        input_['root_p'] = x_glbl[:,:,0,:]

        # 8. X
        input_['X'] = x_glbl[:,:,:,:]

        print('Nb of sequences : {}\n'.format(X.shape[0]))

        return input_

    def __len__(self):
        return len(self.data['local_q'])

    def __getitem__(self, idx):
        idx_ = None
        if self.debug:
            idx_ = 0
        else:
            idx_ = idx
        sample = {}
        sample['local_q'] = self.data['local_q'][idx_].astype(np.float32)
        sample['root_v'] = self.data['root_v'][idx_].astype(np.float32)
        sample['contact'] = self.data['contact'][idx_].astype(np.float32)
        sample['root_p_offset'] = self.data['root_p_offset'][idx_].astype(np.float32)
        sample['local_q_offset'] = self.data['local_q_offset'][idx_].astype(np.float32)
        sample['target'] = self.data['target'][idx_].astype(np.float32)
        sample['root_p'] = self.data['root_p'][idx_].astype(np.float32)
        sample['X'] = self.data['X'][idx_].astype(np.float32)

        # sample['local_q_aug'] = self.data['local_q'][idx_].astype(np.float32)
        # sample['root_v_aug'] = self.data['root_v'][idx_].astype(np.float32)
        # sample['contact_aug'] = self.data['contact'][idx_].astype(np.float32)
        # ## data aug ##
        # sample['root_p_offset'] = self.data['root_p_offset'][idx_].astype(np.float32)
        # sample['local_q_offset'] = self.data['local_q_offset'][idx_].astype(np.float32)
        # sample['target'] = self.data['target'][idx_].astype(np.float32)
        # sample['root_p'] = self.data['root_p'][idx_].astype(np.float32)
        # sample['X'] = self.data['X'][idx_].astype(np.float32)
        return sample

if __name__=="__main__":
    lafan_data = LaFan1('D:\\ubisoft-laforge-animation-dataset\\lafan1\\lafan1')
    # self.data is a dict; the class has no data_X / data_Q attributes
    print(lafan_data.data['X'].shape, lafan_data.data['local_q'].shape)

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

# Pytorch Implementation of Robust Motion In-betweening

This is an unofficial implementation of the approach described in the paper:
> Felix G. Harvey, Mike Yurick, Derek Nowrouzezahrai, and Christopher Pal [Robust Motion In-betweening](https://static-wordpress.akamaized.net/montreal.ubisoft.com/wp-content/uploads/2020/07/09155337/RobustMotionInbetweening.pdf). In *ACM Transactions on Graphics (TOG)*, 2020.

We provide the code for reproducing the main results, as well as pre-trained models.

## Dependencies
- Python 3+ distribution
- PyTorch >= 1.4.0
- NumPy
- PIL
- TensorboardX
- Pyyaml

Please follow [this repo](https://github.com/ubisoft/ubisoft-laforge-animation-dataset) to download the data. A pretrained model is available at [this link](https://drive.google.com/file/d/1_eqiIJA9NFrHfDGnOoo5s0BdKLhgBz1U/view?usp=sharing). After downloading this repo, you need to: (1) create new dirs named src, log, model, gif, and results respectively; (2) put all downloaded files into ./src and the pretrained model into ./model.

## For data preparation
```
python flip_bvh.py
```

## For training
```
python train.py
```

## For testing
```
python test.py
```

## The contribution of foot sliding loss
In the original paper, the foot sliding problem is only handled by post-processing. Here I add the [foot sliding loss](https://github.com/xjwxjw/Pytorch-Robust-Motion-In-betweening/blob/386df7490ca2dfe89122952dd75b84506eedf700/train.py#L299), which turned out to be effective in further enhancing the visual quality.
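The loss penalizes foot-joint velocity on frames where the feet are labeled as being in contact with the ground, scaled by `loss_slide_weight` from `config/train-base.yaml`. A minimal sketch of the idea (with hypothetical tensor names; see the linked line of `train.py` for the actual implementation):

```
import torch

def foot_sliding_loss(glbl_pos, contact, foot_index):
    # glbl_pos:   (B, T, J, 3) predicted global joint positions
    # contact:    (B, T, C) contact labels/probabilities for the C foot joints
    # foot_index: list of the C joint indices treated as feet
    foot_pos = glbl_pos[:, :, foot_index]          # (B, T, C, 3)
    foot_vel = foot_pos[:, 1:] - foot_pos[:, :-1]  # per-frame foot displacement
    # only penalize movement while the foot is (softly) in contact
    weight = contact[:, 1:].unsqueeze(-1)          # (B, T-1, C, 1)
    return (foot_vel.norm(dim=-1, keepdim=True) * weight).mean()
```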
Here is an example:
![image](https://drive.google.com/uc?export=view&id=1UsIvPuJtuGKvScHE7QTNuDvFnQ79hMjP)
Images from left to right are the original implementation, + foot sliding loss, + IK post-processing, and ground truth, respectively. With the help of the foot sliding loss, the model is able to infer a plausible foot contact arrangement to reach the target.

## Work status
[This sheet](https://docs.google.com/spreadsheets/d/1UhpiTP2QyN1eut8PT26ld14hbsh1qCkTzvO-jR4Dr7I/edit?usp=sharing)

## Demo results
Synthesized results without the foot sliding constraint can be downloaded from [this link](https://drive.google.com/file/d/1137bH0L-_Ri1cpJOjMi9oTUYC0x8v4fG/view?usp=sharing); the results with the foot sliding constraint can be downloaded from [this link](https://drive.google.com/file/d/1FIoNyx-_SseJNWrzRjmDqeEpblyYrh3y/view?usp=sharing).

--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xjwxjw/Pytorch-Robust-Motion-In-betweening/25b5af120c4f9f3ba2c22d0166869498bf80232c/__init__.py
--------------------------------------------------------------------------------
/__pycache__/LaFan.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xjwxjw/Pytorch-Robust-Motion-In-betweening/25b5af120c4f9f3ba2c22d0166869498bf80232c/__pycache__/LaFan.cpython-36.pyc
--------------------------------------------------------------------------------
/__pycache__/LaFan.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xjwxjw/Pytorch-Robust-Motion-In-betweening/25b5af120c4f9f3ba2c22d0166869498bf80232c/__pycache__/LaFan.cpython-38.pyc
--------------------------------------------------------------------------------
/__pycache__/functions.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xjwxjw/Pytorch-Robust-Motion-In-betweening/25b5af120c4f9f3ba2c22d0166869498bf80232c/__pycache__/functions.cpython-36.pyc
--------------------------------------------------------------------------------
/__pycache__/functions.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xjwxjw/Pytorch-Robust-Motion-In-betweening/25b5af120c4f9f3ba2c22d0166869498bf80232c/__pycache__/functions.cpython-38.pyc
--------------------------------------------------------------------------------
/__pycache__/model.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xjwxjw/Pytorch-Robust-Motion-In-betweening/25b5af120c4f9f3ba2c22d0166869498bf80232c/__pycache__/model.cpython-36.pyc
--------------------------------------------------------------------------------
/__pycache__/model.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xjwxjw/Pytorch-Robust-Motion-In-betweening/25b5af120c4f9f3ba2c22d0166869498bf80232c/__pycache__/model.cpython-38.pyc
-------------------------------------------------------------------------------- /__pycache__/quaternion.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xjwxjw/Pytorch-Robust-Motion-In-betweening/25b5af120c4f9f3ba2c22d0166869498bf80232c/__pycache__/quaternion.cpython-36.pyc -------------------------------------------------------------------------------- /__pycache__/quaternion.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xjwxjw/Pytorch-Robust-Motion-In-betweening/25b5af120c4f9f3ba2c22d0166869498bf80232c/__pycache__/quaternion.cpython-38.pyc -------------------------------------------------------------------------------- /__pycache__/skeleton.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xjwxjw/Pytorch-Robust-Motion-In-betweening/25b5af120c4f9f3ba2c22d0166869498bf80232c/__pycache__/skeleton.cpython-36.pyc -------------------------------------------------------------------------------- /baseline.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xjwxjw/Pytorch-Robust-Motion-In-betweening/25b5af120c4f9f3ba2c22d0166869498bf80232c/baseline.gif -------------------------------------------------------------------------------- /config/test-base.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | seq_length: 50 3 | state_input_dim: 95 4 | offset_input_dim: 91 5 | target_input_dim: 88 6 | lstm_dim: 768 7 | num_joints: 22 8 | data: 9 | data_dir: 'D:\\ubisoft-laforge-animation-dataset\\lafan1\\lafan1' 10 | offsets: [ 11 | [-42.198200,91.614723,-40.067841], 12 | [ 0.103456,1.857829,10.548506], 13 | [43.499992,-0.000038,-0.000002], 14 | [42.372192,0.000015,-0.000007], 15 | [ 17.299999,-0.000002,0.000003], 16 | [0.000000,0.000000,0.000000], 17 | 18 | [0.103457,1.857829,-10.548503], 19 | [43.500042,-0.000027,0.000008], 20 | [42.372257,-0.000008,0.000014], 21 | [17.299992,-0.000005,0.000004], 22 | [0.000000,0.000000,0.000000], 23 | 24 | [6.901968,-2.603733,-0.000001], 25 | [12.588099,0.000002,0.000000], 26 | [12.343206,0.000000,-0.000001], 27 | [25.832886,-0.000004,0.000003], 28 | [11.766620,0.000005,-0.000001], 29 | [0.000000,0.000000,0.000000], 30 | 31 | [19.745899,-1.480370,6.000108], 32 | [11.284125,-0.000009,-0.000018], 33 | [33.000050,0.000004,0.000032], 34 | [25.200008,0.000015,0.000008], 35 | [0.000000,0.000000,0.000000], 36 | 37 | [19.746099,-1.480375,-6.000073], 38 | [11.284138,-0.000015,-0.000012], 39 | [33.000092,0.000017,0.000013], 40 | [25.199780,0.000135,0.000422], 41 | [0.000000,0.000000,0.000000] 42 | ] 43 | parents: [-1, 0, 1, 2, 3, 4, 44 | 0, 6, 7, 8, 9, 45 | 0, 11, 12, 13, 14, 15, 46 | 13, 17, 18, 19, 20, 47 | 13, 22, 23, 24, 25] 48 | joints_to_remove: [5,10,16,21,26] 49 | offset: 10 50 | num_workers: 4 51 | test: 52 | batch_size: 32 53 | num_epoch: 1 54 | use_ztta: True 55 | use_adv: True 56 | save_img: True 57 | save_gif: True 58 | save_pose: False 59 | save_bvh: False 60 | debug: False 61 | version: '' 62 | model_dir: 'D:\\ubisoft-laforge-animation-dataset\\model\\2020-12-10-00_55_12-' 63 | -------------------------------------------------------------------------------- /config/train-base.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | seq_length: 50 3 | 
state_input_dim: 95 4 | offset_input_dim: 91 5 | target_input_dim: 88 6 | lstm_dim: 768 7 | num_joints: 22 8 | data: 9 | data_dir: 'D:\\ubisoft-laforge-animation-dataset\\lafan1\\lafan1' 10 | offsets: [ 11 | [-42.198200,91.614723,-40.067841], 12 | [ 0.103456,1.857829,10.548506], 13 | [43.499992,-0.000038,-0.000002], 14 | [42.372192,0.000015,-0.000007], 15 | [ 17.299999,-0.000002,0.000003], 16 | [0.000000,0.000000,0.000000], 17 | 18 | [0.103457,1.857829,-10.548503], 19 | [43.500042,-0.000027,0.000008], 20 | [42.372257,-0.000008,0.000014], 21 | [17.299992,-0.000005,0.000004], 22 | [0.000000,0.000000,0.000000], 23 | 24 | [6.901968,-2.603733,-0.000001], 25 | [12.588099,0.000002,0.000000], 26 | [12.343206,0.000000,-0.000001], 27 | [25.832886,-0.000004,0.000003], 28 | [11.766620,0.000005,-0.000001], 29 | [0.000000,0.000000,0.000000], 30 | 31 | [19.745899,-1.480370,6.000108], 32 | [11.284125,-0.000009,-0.000018], 33 | [33.000050,0.000004,0.000032], 34 | [25.200008,0.000015,0.000008], 35 | [0.000000,0.000000,0.000000], 36 | 37 | [19.746099,-1.480375,-6.000073], 38 | [11.284138,-0.000015,-0.000012], 39 | [33.000092,0.000017,0.000013], 40 | [25.199780,0.000135,0.000422], 41 | [0.000000,0.000000,0.000000] 42 | ] 43 | parents: [-1, 0, 1, 2, 3, 4, 44 | 0, 6, 7, 8, 9, 45 | 0, 11, 12, 13, 14, 15, 46 | 13, 17, 18, 19, 20, 47 | 13, 22, 23, 24, 25] 48 | joints_to_remove: [5,10,16,21,26] 49 | foot_index: [9, 10, 11, 12, 13, 14, 21, 22, 23, 24, 25, 26] 50 | num_workers: 4 51 | offset: 10 52 | train: 53 | batch_size: 128 54 | lr: 0.0001 55 | beta1: 0.5 56 | beta2: 0.9 57 | loss_pos_weight: 1.0 58 | loss_quat_weight: 1.0 59 | loss_root_weight: 0.5 60 | loss_contact_weight: 0.1 61 | loss_slide_weight: 0.1 62 | loss_adv_weight: 0.01 63 | num_epoch: 200 64 | weight_decay: 0.00001 65 | use_ztta: True 66 | use_adv: True 67 | debug: False 68 | method: '' 69 | pretrained: 'D:\\ubisoft-laforge-animation-dataset\\model\\2020-12-08-16_22_58-' 70 | -------------------------------------------------------------------------------- /constraint.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xjwxjw/Pytorch-Robust-Motion-In-betweening/25b5af120c4f9f3ba2c22d0166869498bf80232c/constraint.gif -------------------------------------------------------------------------------- /fix.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xjwxjw/Pytorch-Robust-Motion-In-betweening/25b5af120c4f9f3ba2c22d0166869498bf80232c/fix.gif -------------------------------------------------------------------------------- /flip_bvh.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | from quaternion import euler_to_quaternion, qeuler_np 4 | 5 | root = 'D:\\ubisoft-laforge-animation-dataset\\lafan1\\lafan1' 6 | def flip_bvh(filename): 7 | fout = open(os.path.join(root, filename.replace('.bvh', '_flip.bvh')), 'w') 8 | cnt = 0 9 | for line in open(os.path.join(root, filename), 'r'): 10 | cnt += 1 11 | if cnt <= 134: 12 | fout.write(line) 13 | else: 14 | line = line.split('\n')[0].split(' ')[:69] 15 | line = np.reshape(np.array([float(x) for x in line]), [23, 3]) 16 | line[0,2] *= -1.0 17 | 18 | quat = euler_to_quaternion(line[1:] / 180.0 * np.pi, 'zyx') 19 | quat[:,0] *= -1.0 20 | quat[:,1] *= -1.0 21 | line[1:] = qeuler_np(quat, 'zyx') / np.pi * 180.0 22 | 23 | left_idx = [2,3,4,5,15,16,17,18] 24 | right_idx = [6,7,8,9,19,20,21,22] 25 | 
line[left_idx+right_idx] = line[right_idx+left_idx].copy() 26 | 27 | line = np.reshape(line, (69,)) 28 | new_line = '' 29 | for s in line[:-1]: 30 | new_line += (str(s) + ' ') 31 | new_line += (str(line[-1]) + '\n') 32 | fout.write(new_line) 33 | for filename in os.listdir(root): 34 | flip_bvh(filename) 35 | # assert 0 36 | print(filename) -------------------------------------------------------------------------------- /foot_sliding/AnimationStructure.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import scipy.sparse as sparse 3 | import Animation as Animation 4 | 5 | 6 | """ Maya Functions """ 7 | 8 | def load_from_maya(root): 9 | """ 10 | Load joint parents and names from maya 11 | 12 | Parameters 13 | ---------- 14 | 15 | root : PyNode 16 | Root Maya Node 17 | 18 | Returns 19 | ------- 20 | 21 | (names, parents) : ([str], (J) ndarray) 22 | List of joint names and array 23 | of indices representing the parent 24 | joint for each joint J. 25 | 26 | Joint index -1 is used to represent 27 | that there is no parent joint 28 | """ 29 | 30 | import pymel.core as pm 31 | 32 | names = [] 33 | parents = [] 34 | 35 | def unload_joint(j, parents, par): 36 | 37 | id = len(names) 38 | names.append(j) 39 | parents.append(par) 40 | 41 | children = [c for c in j.getChildren() if 42 | isinstance(c, pm.nt.Transform) and 43 | not isinstance(c, pm.nt.Constraint) and 44 | not any(pm.listRelatives(c, s=True)) and 45 | (any(pm.listRelatives(c, ad=True, ap=False, type='joint')) or isinstance(c, pm.nt.Joint))] 46 | 47 | map(lambda c: unload_joint(c, parents, id), children) 48 | 49 | unload_joint(root, parents, -1) 50 | 51 | return (names, parents) 52 | 53 | 54 | """ Family Functions """ 55 | 56 | def joints(parents): 57 | """ 58 | Parameters 59 | ---------- 60 | 61 | parents : (J) ndarray 62 | parents array 63 | 64 | Returns 65 | ------- 66 | 67 | joints : (J) ndarray 68 | Array of joint indices 69 | """ 70 | return np.arange(len(parents), dtype=int) 71 | 72 | def joints_list(parents): 73 | """ 74 | Parameters 75 | ---------- 76 | 77 | parents : (J) ndarray 78 | parents array 79 | 80 | Returns 81 | ------- 82 | 83 | joints : [ndarray] 84 | List of arrays of joint idices for 85 | each joint 86 | """ 87 | return list(joints(parents)[:,np.newaxis]) 88 | 89 | def parents_list(parents): 90 | """ 91 | Parameters 92 | ---------- 93 | 94 | parents : (J) ndarray 95 | parents array 96 | 97 | Returns 98 | ------- 99 | 100 | parents : [ndarray] 101 | List of arrays of joint idices for 102 | the parents of each joint 103 | """ 104 | return list(parents[:,np.newaxis]) 105 | 106 | 107 | def children_list(parents): 108 | """ 109 | Parameters 110 | ---------- 111 | 112 | parents : (J) ndarray 113 | parents array 114 | 115 | Returns 116 | ------- 117 | 118 | children : [ndarray] 119 | List of arrays of joint indices for 120 | the children of each joint 121 | """ 122 | 123 | def joint_children(i): 124 | return [j for j, p in enumerate(parents) if p == i] 125 | 126 | return list(map(lambda j: np.array(joint_children(j)), joints(parents))) 127 | 128 | 129 | def descendants_list(parents): 130 | """ 131 | Parameters 132 | ---------- 133 | 134 | parents : (J) ndarray 135 | parents array 136 | 137 | Returns 138 | ------- 139 | 140 | descendants : [ndarray] 141 | List of arrays of joint idices for 142 | the descendants of each joint 143 | """ 144 | 145 | children = children_list(parents) 146 | 147 | def joint_descendants(i): 148 | return sum([joint_descendants(j) for j in 
children[i]], list(children[i]))

    return list(map(lambda j: np.array(joint_descendants(j)), joints(parents)))


def ancestors_list(parents):
    """
    Parameters
    ----------

    parents : (J) ndarray
        parents array

    Returns
    -------

    ancestors : [ndarray]
        List of arrays of joint indices for
        the ancestors of each joint
    """

    descendants = descendants_list(parents)

    def joint_ancestors(i):
        return [j for j in joints(parents) if i in descendants[j]]

    return list(map(lambda j: np.array(joint_ancestors(j)), joints(parents)))


""" Mask Functions """

def mask(parents, filter):
    """
    Constructs a Mask for a given filter

    A mask is a (J, J) ndarray truth table for a given
    condition over J joints. For example there
    may be a mask specifying if a joint N is a
    child of another joint M.

    This could be constructed into a mask using
    `m = mask(parents, children_list)` and the condition
    of childhood tested using `m[N, M]`.

    Parameters
    ----------

    parents : (J) ndarray
        parents array

    filter : (J) ndarray -> [ndarray]
        function that outputs a list of arrays
        of joint indices for some condition

    Returns
    -------

    mask : (N, N) ndarray
        boolean truth table of given condition
    """
    m = np.zeros((len(parents), len(parents))).astype(bool)
    jnts = joints(parents)
    fltr = filter(parents)
    for i,f in enumerate(fltr): m[i,:] = np.any(jnts[:,np.newaxis] == f[np.newaxis,:], axis=1)
    return m

def joints_mask(parents): return np.eye(len(parents)).astype(bool)
def children_mask(parents): return mask(parents, children_list)
def parents_mask(parents): return mask(parents, parents_list)
def descendants_mask(parents): return mask(parents, descendants_list)
def ancestors_mask(parents): return mask(parents, ancestors_list)

""" Search Functions """

def joint_chain_ascend(parents, start, end):
    chain = []
    while start != end:
        chain.append(start)
        start = parents[start]
    chain.append(end)
    return np.array(chain, dtype=int)
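
# A quick illustrative example for the helpers above (hypothetical toy
# skeleton, not part of the original module). For parents = [-1, 0, 1, 1]
# (a chain 0 -> 1 -> 2 with an extra branch 1 -> 3):
#   joint_chain_ascend(parents, 3, 0)  # walks 3 -> 1 -> 0, returns array([3, 1, 0])
#   children_mask(parents)[1, 2]       # True, since joint 2 is a child of joint 1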

""" Constraints """

def constraints(anim, **kwargs):
    """
    Constraint list for Animation

    This constraint list can be used in the
    VerletParticle solver to constrain
    an animation's global joint positions.

    Parameters
    ----------

    anim : Animation
        Input animation

    masses : (F, J) ndarray
        Optional list of masses
        for joints J across frames F
        defaults to weighting by
        vertical height

    Returns
    -------

    constraints : [(int, int, (F, J) ndarray, (F, J) ndarray, (F, J) ndarray)]
        A list of constraints in the format:
        (Joint1, Joint2, Masses1, Masses2, Lengths)

    """

    masses = kwargs.pop('masses', None)

    children = children_list(anim.parents)
    constraints = []

    points_offsets = Animation.offsets_global(anim)
    points = Animation.positions_global(anim)

    if masses is None:
        masses = 1.0 / (0.1 + np.absolute(points_offsets[:,1]))
        masses = masses[np.newaxis].repeat(len(anim), axis=0)

    for j in range(anim.shape[1]):  # was `xrange`, which does not exist in Python 3

        """ Add constraints between all joints and their children """
        for c0 in children[j]:

            dists = np.sum((points[:, c0] - points[:, j])**2.0, axis=1)**0.5
            constraints.append((c0, j, masses[:,c0], masses[:,j], dists))

            """ Add constraints between all children of joint """
            for c1 in children[j]:
                if c0 == c1: continue

                dists = np.sum((points[:, c0] - points[:, c1])**2.0, axis=1)**0.5
                constraints.append((c0, c1, masses[:,c0], masses[:,c1], dists))

    return constraints

""" Graph Functions """

def graph(anim):
    """
    Generates a weighted adjacency matrix
    using local joint distances along
    the skeletal structure.

    Joints which are not connected
    are assigned the weight `0`.

    Joints which actually have zero distance
    between them, but are still connected, are
    perturbed by some minimal amount.

    The output of this routine can be used
    with the `scipy.sparse.csgraph`
    routines for graph analysis.

    Parameters
    ----------

    anim : Animation
        input animation

    Returns
    -------

    graph : (N, N) ndarray
        weighted adjacency matrix using
        local distances along the
        skeletal structure from joint
        N to joint M. Joints which are not
        directly connected are assigned
        the weight `0`.
    """

    graph = np.zeros((anim.shape[1], anim.shape[1]))  # np.zeros takes the shape as a tuple
    lengths = np.sum(anim.offsets**2.0, axis=1)**0.5 + 0.001

    for i,p in enumerate(anim.parents):
        if p == -1: continue
        graph[i,p] = lengths[p]
        graph[p,i] = lengths[p]

    return graph


def distances(anim):
    """
    Generates a distance matrix for
    pairwise joint distances along
    the skeletal structure

    Parameters
    ----------

    anim : Animation
        input animation

    Returns
    -------

    distances : (N, N) ndarray
        array of pairwise distances
        along the skeletal structure
        from some joint N to some
        joint M
    """

    distances = np.zeros((anim.shape[1], anim.shape[1]))
    generated = distances.copy().astype(bool)

    joint_lengths = np.sum(anim.offsets**2.0, axis=1)**0.5
    joint_children = children_list(anim.parents)  # these helpers expect the parents array
    joint_parents  = parents_list(anim.parents)

    def find_distance(distances, generated, prev, i, j):

        """ If root, identity, or already generated, return """
        if j == -1: return (0.0, True)
        if j ==  i: return (0.0, True)
        if generated[i,j]: return (distances[i,j], True)

        """ Find best distances along parents and children """
        par_dists = [(joint_lengths[j], find_distance(distances, generated, j, i, p)) for p in joint_parents[j] if p != prev]
        out_dists = [(joint_lengths[c], find_distance(distances, generated, j, i, c)) for c in joint_children[j] if c != prev]

        """ Check valid distance and not dead end """
        par_dists = [a + d for (a, (d, f)) in par_dists if f]
        out_dists = [a + d for (a, (d, f)) in out_dists if f]

        """ All dead ends """
        if (out_dists + par_dists) == []: return (0.0, False)

        """ Get minimum path """
        dist = min(out_dists + par_dists)
        distances[i,j] = dist; distances[j,i] = dist
        generated[i,j] = True; generated[j,i] = True
        return (dist, True)  # callers unpack a (distance, found) tuple

    for i in range(anim.shape[1]):  # was `xrange`
        for j in range(anim.shape[1]):
            find_distance(distances, generated, -1, i, j)

    return distances
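
# Hypothetical illustration of the graph-style helpers below (toy skeleton
# parents = [-1, 0, 1, 1], not part of the original module):
#   edges(parents)            # array([[0, 1], [1, 2], [1, 3]]): (parent, child) pairs
#   inc = incidence(parents)  # shape (4, 3): joints x edges
#   positions.T @ inc         # for (4, 3) joint positions, one parent-minus-child
#                             # vector per edge, as described in incidence()'s docstring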
def edges(parents):
    """
    Animation structure edges

    Parameters
    ----------

    parents : (J) ndarray
        parents array

    Returns
    -------

    edges : (M, 2) ndarray
        array of pairs where each
        pair contains the indices of two joints
        which correspond to an edge in the
        joint structure going from parent to child.
    """

    return np.array(list(zip(parents, joints(parents)))[1:])


def incidence(parents):
    """
    Incidence Matrix

    Parameters
    ----------

    parents : (J) ndarray
        parents array

    Returns
    -------

    incidence : (N, M) ndarray

        Matrix of N joint positions by
        M edges in which each entry is either
        1 or -1; multiplication by the
        joint positions returns an
        array of vectors along each edge
        of the structure
    """

    es = edges(parents)

    inc = np.zeros((len(parents)-1, len(parents))).astype(int)  # np.int was removed from recent NumPy releases
    for i, e in enumerate(es):
        inc[i,e[0]] = 1
        inc[i,e[1]] = -1

    return inc.T

--------------------------------------------------------------------------------
/foot_sliding/BVH.py:
--------------------------------------------------------------------------------

import re
import numpy as np
import sys
sys.path.append("motion_utils")

from Animation import Animation
from Quaternions_old import Quaternions

channelmap = {
    'Xrotation' : 'x',
    'Yrotation' : 'y',
    'Zrotation' : 'z'
}

channelmap_inv = {
    'x': 'Xrotation',
    'y': 'Yrotation',
    'z': 'Zrotation',
}

ordermap = {
    'x' : 0,
    'y' : 1,
    'z' : 2,
}

def load(filename, start=None, end=None, order=None, world=False):
    """
    Reads a BVH file and constructs an animation

    Parameters
    ----------
    filename: str
        File to be opened

    start : int
        Optional Starting Frame

    end : int
        Optional Ending Frame

    order : str
        Optional Specifier for joint order.
44 | Given as string E.G 'xyz', 'zxy' 45 | 46 | world : bool 47 | If set to true euler angles are applied 48 | together in world space rather than local 49 | space 50 | 51 | Returns 52 | ------- 53 | 54 | (animation, joint_names, frametime) 55 | Tuple of loaded animation and joint names 56 | """ 57 | 58 | f = open(filename, "r") 59 | 60 | i = 0 61 | active = -1 62 | end_site = False 63 | 64 | names = [] 65 | orients = Quaternions.id(0) 66 | offsets = np.array([]).reshape((0,3)) 67 | parents = np.array([], dtype=int) 68 | 69 | for line in f: 70 | 71 | if "HIERARCHY" in line: continue 72 | if "MOTION" in line: continue 73 | 74 | rmatch = re.match(r"ROOT (\w+)", line) 75 | if rmatch: 76 | names.append(rmatch.group(1)) 77 | offsets = np.append(offsets, np.array([[0,0,0]]), axis=0) 78 | orients.qs = np.append(orients.qs, np.array([[1,0,0,0]]), axis=0) 79 | parents = np.append(parents, active) 80 | active = (len(parents)-1) 81 | continue 82 | 83 | if "{" in line: continue 84 | 85 | if "}" in line: 86 | if end_site: end_site = False 87 | else: active = parents[active] 88 | continue 89 | 90 | offmatch = re.match(r"\s*OFFSET\s+([\-\d\.e]+)\s+([\-\d\.e]+)\s+([\-\d\.e]+)", line) 91 | if offmatch: 92 | if not end_site: 93 | offsets[active] = np.array([list(map(float, offmatch.groups()))]) 94 | continue 95 | 96 | chanmatch = re.match(r"\s*CHANNELS\s+(\d+)", line) 97 | if chanmatch: 98 | channels = int(chanmatch.group(1)) 99 | if order is None: 100 | channelis = 0 if channels == 3 else 3 101 | channelie = 3 if channels == 3 else 6 102 | parts = line.split()[2+channelis:2+channelie] 103 | if any([p not in channelmap for p in parts]): 104 | continue 105 | order = "".join([channelmap[p] for p in parts]) 106 | continue 107 | 108 | jmatch = re.match("\s*JOINT\s+(\w+)", line) 109 | if jmatch: 110 | names.append(jmatch.group(1)) 111 | offsets = np.append(offsets, np.array([[0,0,0]]), axis=0) 112 | orients.qs = np.append(orients.qs, np.array([[1,0,0,0]]), axis=0) 113 | parents = np.append(parents, active) 114 | active = (len(parents)-1) 115 | continue 116 | 117 | if "End Site" in line: 118 | end_site = True 119 | continue 120 | 121 | fmatch = re.match("\s*Frames:\s+(\d+)", line) 122 | if fmatch: 123 | if start and end: 124 | fnum = (end - start)-1 125 | else: 126 | fnum = int(fmatch.group(1)) 127 | jnum = len(parents) 128 | # result: [fnum, J, 3] 129 | positions = offsets[np.newaxis].repeat(fnum, axis=0) 130 | # result: [fnum, len(orients), 3] 131 | rotations = np.zeros((fnum, len(orients), 3)) 132 | continue 133 | 134 | fmatch = re.match("\s*Frame Time:\s+([\d\.]+)", line) 135 | if fmatch: 136 | frametime = float(fmatch.group(1)) 137 | continue 138 | 139 | if (start and end) and (i < start or i >= end-1): 140 | i += 1 141 | continue 142 | 143 | dmatch = line.strip().split() 144 | if dmatch: 145 | data_block = np.array(list(map(float, dmatch))) 146 | N = len(parents) 147 | fi = i - start if start else i 148 | if channels == 3: 149 | # This should be root positions[0:1] & all rotations 150 | positions[fi,0:1] = data_block[0:3] 151 | rotations[fi, : ] = data_block[3: ].reshape(N,3) 152 | elif channels == 6: 153 | data_block = data_block.reshape(N,6) 154 | # fill in all positions 155 | positions[fi,:] = data_block[:,0:3] 156 | rotations[fi,:] = data_block[:,3:6] 157 | elif channels == 9: 158 | positions[fi,0] = data_block[0:3] 159 | data_block = data_block[3:].reshape(N-1,9) 160 | rotations[fi,1:] = data_block[:,3:6] 161 | positions[fi,1:] += data_block[:,0:3] * data_block[:,6:9] 162 | else: 163 | raise 
Exception("Too many channels! %i" % channels) 164 | 165 | i += 1 166 | 167 | f.close() 168 | 169 | rotations = Quaternions.from_euler(np.radians(rotations), order=order, world=world) 170 | 171 | return (Animation(rotations, positions, orients, offsets, parents), names, frametime) 172 | 173 | def load_bfa(filename, start=None, end=None, order=None, world=False): 174 | """ 175 | Reads a BVH file and constructs an animation 176 | 177 | !!! Read from bfa, will replace the end sites of arms by two joints (w/ unit rotation) 178 | 179 | Parameters 180 | ---------- 181 | filename: str 182 | File to be opened 183 | 184 | start : int 185 | Optional Starting Frame 186 | 187 | end : int 188 | Optional Ending Frame 189 | 190 | order : str 191 | Optional Specifier for joint order. 192 | Given as string E.G 'xyz', 'zxy' 193 | 194 | world : bool 195 | If set to true euler angles are applied 196 | together in world space rather than local 197 | space 198 | 199 | Returns 200 | ------- 201 | 202 | (animation, joint_names, frametime) 203 | Tuple of loaded animation and joint names 204 | """ 205 | 206 | f = open(filename, "r") 207 | 208 | i = 0 209 | active = -1 210 | end_site = False 211 | 212 | hand_idx = [9, 14] 213 | 214 | names = [] 215 | orients = Quaternions.id(0) 216 | offsets = np.array([]).reshape((0,3)) 217 | parents = np.array([], dtype=int) 218 | 219 | for line in f: 220 | 221 | if "HIERARCHY" in line: continue 222 | if "MOTION" in line: continue 223 | 224 | rmatch = re.match(r"ROOT (\w+)", line) 225 | if rmatch: 226 | names.append(rmatch.group(1)) 227 | offsets = np.append(offsets, np.array([[0,0,0]]), axis=0) 228 | orients.qs = np.append(orients.qs, np.array([[1,0,0,0]]), axis=0) 229 | parents = np.append(parents, active) 230 | active = (len(parents)-1) 231 | continue 232 | 233 | if "{" in line: continue 234 | 235 | if "}" in line: 236 | if end_site: end_site = False 237 | else: active = parents[active] 238 | continue 239 | 240 | offmatch = re.match(r"\s*OFFSET\s+([\-\d\.e]+)\s+([\-\d\.e]+)\s+([\-\d\.e]+)", line) 241 | if offmatch: 242 | if not end_site: 243 | offsets[active] = np.array([list(map(float, offmatch.groups()))]) 244 | """ 245 | else: 246 | print("active = ", active) 247 | if active in hand_idx: 248 | offsets[active] = np.array([list(map(float, offmatch.groups()))]) 249 | """ 250 | continue 251 | 252 | chanmatch = re.match(r"\s*CHANNELS\s+(\d+)", line) 253 | if chanmatch: 254 | channels = int(chanmatch.group(1)) 255 | if order is None: 256 | channelis = 0 if channels == 3 else 3 257 | channelie = 3 if channels == 3 else 6 258 | parts = line.split()[2+channelis:2+channelie] 259 | if any([p not in channelmap for p in parts]): 260 | continue 261 | order = "".join([channelmap[p] for p in parts]) 262 | continue 263 | 264 | jmatch = re.match("\s*JOINT\s+(\w+)", line) 265 | if jmatch: 266 | names.append(jmatch.group(1)) 267 | offsets = np.append(offsets, np.array([[0,0,0]]), axis=0) 268 | orients.qs = np.append(orients.qs, np.array([[1,0,0,0]]), axis=0) 269 | parents = np.append(parents, active) 270 | active = (len(parents)-1) 271 | continue 272 | 273 | if "End Site" in line: 274 | if active + 1 in hand_idx: 275 | print("parent:", names[-1]) 276 | name = "LeftHandIndex" if active + 1 == hand_idx[0] else "RightHandIndex" 277 | names.append(name) 278 | offsets = np.append(offsets, np.array([[0,0,0]]), axis=0) 279 | orients.qs = np.append(orients.qs, np.array([[1,0,0,0]]), axis=0) 280 | parents = np.append(parents, active) 281 | active = (len(parents)-1) 282 | else: 283 | end_site = True 284 | 
continue 285 | 286 | fmatch = re.match("\s*Frames:\s+(\d+)", line) 287 | if fmatch: 288 | if start and end: 289 | fnum = (end - start)-1 290 | else: 291 | fnum = int(fmatch.group(1)) 292 | jnum = len(parents) 293 | # result: [fnum, J, 3] 294 | positions = offsets[np.newaxis].repeat(fnum, axis=0) 295 | # result: [fnum, len(orients), 3] 296 | rotations = np.zeros((fnum, len(orients), 3)) 297 | continue 298 | 299 | fmatch = re.match("\s*Frame Time:\s+([\d\.]+)", line) 300 | if fmatch: 301 | frametime = float(fmatch.group(1)) 302 | continue 303 | 304 | if (start and end) and (i < start or i >= end-1): 305 | i += 1 306 | continue 307 | 308 | dmatch = line.strip().split() 309 | if dmatch: 310 | data_block = np.array(list(map(float, dmatch))) 311 | N = len(parents) 312 | fi = i - start if start else i 313 | if channels == 3: 314 | # This should be root positions[0:1] & all rotations 315 | positions[fi,0:1] = data_block[0:3] 316 | tmp = data_block[3: ].reshape(N - 2, 3) 317 | tmp = np.concatenate([tmp[:hand_idx[0]], 318 | np.array([[0, 0, 0]]), 319 | tmp[hand_idx[0]: hand_idx[1] - 1], 320 | np.array([[0, 0, 0]]), 321 | tmp[hand_idx[1] - 1:]], axis=0) 322 | rotations[fi, : ] = tmp.reshape(N,3) 323 | elif channels == 6: 324 | data_block = data_block.reshape(N,6) 325 | # fill in all positions 326 | positions[fi,:] = data_block[:,0:3] 327 | rotations[fi,:] = data_block[:,3:6] 328 | elif channels == 9: 329 | positions[fi,0] = data_block[0:3] 330 | data_block = data_block[3:].reshape(N-1,9) 331 | rotations[fi,1:] = data_block[:,3:6] 332 | positions[fi,1:] += data_block[:,0:3] * data_block[:,6:9] 333 | else: 334 | raise Exception("Too many channels! %i" % channels) 335 | 336 | i += 1 337 | 338 | f.close() 339 | 340 | rotations = Quaternions.from_euler(np.radians(rotations), order=order, world=world) 341 | 342 | return (Animation(rotations, positions, orients, offsets, parents), names, frametime) 343 | 344 | 345 | def save(filename, anim, names=None, frametime=1.0/24.0, order='zyx', positions=False, orients=True): 346 | """ 347 | Saves an Animation to file as BVH 348 | 349 | Parameters 350 | ---------- 351 | filename: str 352 | File to be saved to 353 | 354 | anim : Animation 355 | Animation to save 356 | 357 | names : [str] 358 | List of joint names 359 | 360 | order : str 361 | Optional Specifier for joint order. 362 | Given as string E.G 'xyz', 'zxy' 363 | 364 | frametime : float 365 | Optional Animation Frame time 366 | 367 | positions : bool 368 | Optional specfier to save bone 369 | positions for each frame 370 | 371 | orients : bool 372 | Multiply joint orients to the rotations 373 | before saving. 
374 | 375 | """ 376 | 377 | if names is None: 378 | names = ["joint_" + str(i) for i in range(len(anim.parents))] 379 | 380 | with open(filename, 'w') as f: 381 | 382 | t = "" 383 | f.write("%sHIERARCHY\n" % t) 384 | f.write("%sROOT %s\n" % (t, names[0])) 385 | f.write("%s{\n" % t) 386 | t += '\t' 387 | 388 | f.write("%sOFFSET %f %f %f\n" % (t, anim.offsets[0,0], anim.offsets[0,1], anim.offsets[0,2]) ) 389 | f.write("%sCHANNELS 6 Xposition Yposition Zposition %s %s %s \n" % 390 | (t, channelmap_inv[order[0]], channelmap_inv[order[1]], channelmap_inv[order[2]])) 391 | 392 | for i in range(anim.shape[1]): 393 | if anim.parents[i] == 0: 394 | t = save_joint(f, anim, names, t, i, order=order, positions=positions) 395 | 396 | t = t[:-1] 397 | f.write("%s}\n" % t) 398 | 399 | f.write("MOTION\n") 400 | f.write("Frames: %i\n" % anim.shape[0]); 401 | f.write("Frame Time: %f\n" % frametime); 402 | 403 | #if orients: 404 | # rots = np.degrees((-anim.orients[np.newaxis] * anim.rotations).euler(order=order[::-1])) 405 | #else: 406 | # rots = np.degrees(anim.rotations.euler(order=order[::-1])) 407 | rots = np.degrees(anim.rotations.euler(order=order[::-1])) 408 | poss = anim.positions 409 | 410 | for i in range(anim.shape[0]): 411 | for j in range(anim.shape[1]): 412 | 413 | if positions or j == 0: 414 | 415 | f.write("%f %f %f %f %f %f " % ( 416 | poss[i,j,0], poss[i,j,1], poss[i,j,2], 417 | rots[i,j,ordermap[order[0]]], rots[i,j,ordermap[order[1]]], rots[i,j,ordermap[order[2]]])) 418 | 419 | else: 420 | 421 | f.write("%f %f %f " % ( 422 | rots[i,j,ordermap[order[0]]], rots[i,j,ordermap[order[1]]], rots[i,j,ordermap[order[2]]])) 423 | 424 | f.write("\n") 425 | 426 | 427 | def save_joint(f, anim, names, t, i, order='zyx', positions=False): 428 | 429 | f.write("%sJOINT %s\n" % (t, names[i])) 430 | f.write("%s{\n" % t) 431 | t += '\t' 432 | 433 | f.write("%sOFFSET %f %f %f\n" % (t, anim.offsets[i,0], anim.offsets[i,1], anim.offsets[i,2])) 434 | 435 | if positions: 436 | f.write("%sCHANNELS 6 Xposition Yposition Zposition %s %s %s \n" % (t, 437 | channelmap_inv[order[0]], channelmap_inv[order[1]], channelmap_inv[order[2]])) 438 | else: 439 | f.write("%sCHANNELS 3 %s %s %s\n" % (t, 440 | channelmap_inv[order[0]], channelmap_inv[order[1]], channelmap_inv[order[2]])) 441 | 442 | end_site = True 443 | 444 | for j in range(anim.shape[1]): 445 | if anim.parents[j] == i: 446 | t = save_joint(f, anim, names, t, j, order=order, positions=positions) 447 | end_site = False 448 | 449 | if end_site: 450 | f.write("%sEnd Site\n" % t) 451 | f.write("%s{\n" % t) 452 | t += '\t' 453 | f.write("%sOFFSET %f %f %f\n" % (t, 0.0, 0.0, 0.0)) 454 | t = t[:-1] 455 | f.write("%s}\n" % t) 456 | 457 | t = t[:-1] 458 | f.write("%s}\n" % t) 459 | 460 | return t -------------------------------------------------------------------------------- /foot_sliding/BVH_mod.py: -------------------------------------------------------------------------------- 1 | import re 2 | import numpy as np 3 | 4 | from Animation import Animation 5 | from Quaternions import Quaternions 6 | 7 | channelmap = { 8 | 'Xrotation' : 'x', 9 | 'Yrotation' : 'y', 10 | 'Zrotation' : 'z' 11 | } 12 | 13 | channelmap_inv = { 14 | 'x': 'Xrotation', 15 | 'y': 'Yrotation', 16 | 'z': 'Zrotation', 17 | } 18 | 19 | ordermap = { 20 | 'x' : 0, 21 | 'y' : 1, 22 | 'z' : 2, 23 | } 24 | 25 | def load(filename, start=None, end=None, order=None, world=False, need_quater=False): 26 | """ 27 | Reads a BVH file and constructs an animation 28 | 29 | Parameters 30 | ---------- 31 | filename: 
str 32 | File to be opened 33 | 34 | start : int 35 | Optional Starting Frame 36 | 37 | end : int 38 | Optional Ending Frame 39 | 40 | order : str 41 | Optional Specifier for joint order. 42 | Given as string E.G 'xyz', 'zxy' 43 | 44 | world : bool 45 | If set to true euler angles are applied 46 | together in world space rather than local 47 | space 48 | 49 | Returns 50 | ------- 51 | 52 | (animation, joint_names, frametime) 53 | Tuple of loaded animation and joint names 54 | """ 55 | 56 | f = open(filename, "r") 57 | 58 | i = 0 59 | active = -1 60 | end_site = False 61 | 62 | names = [] 63 | orients = Quaternions.id(0) 64 | offsets = np.array([]).reshape((0,3)) 65 | parents = np.array([], dtype=int) 66 | 67 | for line in f: 68 | 69 | if "HIERARCHY" in line: continue 70 | if "MOTION" in line: continue 71 | 72 | """ Modified line read to handle mixamo data """ 73 | # rmatch = re.match(r"ROOT (\w+)", line) 74 | rmatch = re.match(r"ROOT (\w+:?\w+)", line) 75 | if rmatch: 76 | names.append(rmatch.group(1)) 77 | offsets = np.append(offsets, np.array([[0,0,0]]), axis=0) 78 | orients.qs = np.append(orients.qs, np.array([[1,0,0,0]]), axis=0) 79 | parents = np.append(parents, active) 80 | active = (len(parents)-1) 81 | continue 82 | 83 | if "{" in line: continue 84 | 85 | if "}" in line: 86 | if end_site: end_site = False 87 | else: active = parents[active] 88 | continue 89 | 90 | offmatch = re.match(r"\s*OFFSET\s+([\-\d\.e]+)\s+([\-\d\.e]+)\s+([\-\d\.e]+)", line) 91 | if offmatch: 92 | if not end_site: 93 | offsets[active] = np.array([list(map(float, offmatch.groups()))]) 94 | continue 95 | 96 | chanmatch = re.match(r"\s*CHANNELS\s+(\d+)", line) 97 | if chanmatch: 98 | channels = int(chanmatch.group(1)) 99 | if order is None: 100 | channelis = 0 if channels == 3 else 3 101 | channelie = 3 if channels == 3 else 6 102 | parts = line.split()[2+channelis:2+channelie] 103 | if any([p not in channelmap for p in parts]): 104 | continue 105 | order = "".join([channelmap[p] for p in parts]) 106 | continue 107 | 108 | """ Modified line read to handle mixamo data """ 109 | # jmatch = re.match("\s*JOINT\s+(\w+)", line) 110 | jmatch = re.match("\s*JOINT\s+(\w+:?\w+)", line) 111 | if jmatch: 112 | names.append(jmatch.group(1)) 113 | offsets = np.append(offsets, np.array([[0,0,0]]), axis=0) 114 | orients.qs = np.append(orients.qs, np.array([[1,0,0,0]]), axis=0) 115 | parents = np.append(parents, active) 116 | active = (len(parents)-1) 117 | continue 118 | 119 | if "End Site" in line: 120 | end_site = True 121 | continue 122 | 123 | fmatch = re.match("\s*Frames:\s+(\d+)", line) 124 | if fmatch: 125 | if start and end: 126 | fnum = (end - start)-1 127 | else: 128 | fnum = int(fmatch.group(1)) 129 | jnum = len(parents) 130 | positions = offsets[np.newaxis].repeat(fnum, axis=0) 131 | rotations = np.zeros((fnum, len(orients), 3)) 132 | continue 133 | 134 | fmatch = re.match("\s*Frame Time:\s+([\d\.]+)", line) 135 | if fmatch: 136 | frametime = float(fmatch.group(1)) 137 | continue 138 | 139 | if (start and end) and (i < start or i >= end-1): 140 | i += 1 141 | continue 142 | 143 | # dmatch = line.strip().split(' ') 144 | dmatch = line.strip().split() 145 | if dmatch: 146 | data_block = np.array(list(map(float, dmatch))) 147 | N = len(parents) 148 | fi = i - start if start else i 149 | if channels == 3: 150 | positions[fi,0:1] = data_block[0:3] 151 | rotations[fi, : ] = data_block[3: ].reshape(N,3) 152 | elif channels == 6: 153 | data_block = data_block.reshape(N,6) 154 | positions[fi,:] = data_block[:,0:3] 155 | 
rotations[fi,:] = data_block[:,3:6] 156 | elif channels == 9: 157 | positions[fi,0] = data_block[0:3] 158 | data_block = data_block[3:].reshape(N-1,9) 159 | rotations[fi,1:] = data_block[:,3:6] 160 | positions[fi,1:] += data_block[:,0:3] * data_block[:,6:9] 161 | else: 162 | raise Exception("Too many channels! %i" % channels) 163 | 164 | i += 1 165 | 166 | f.close() 167 | 168 | if need_quater: 169 | rotations = Quaternions.from_euler(np.radians(rotations), order=order, world=world) 170 | elif order != 'xyz': 171 | rotations = Quaternions.from_euler(np.radians(rotations), order=order, world=world) 172 | rotations = np.degrees(rotations.euler()) 173 | 174 | return (Animation(rotations, positions, orients, offsets, parents), names, frametime) 175 | 176 | 177 | 178 | def save(filename, anim, names=None, frametime=1.0/24.0, order='zyx', positions=False, orients=True, mask=None, quater=False): 179 | """ 180 | Saves an Animation to file as BVH 181 | 182 | Parameters 183 | ---------- 184 | filename: str 185 | File to be saved to 186 | 187 | anim : Animation 188 | Animation to save 189 | 190 | names : [str] 191 | List of joint names 192 | 193 | order : str 194 | Optional Specifier for joint order. 195 | Given as string E.G 'xyz', 'zxy' 196 | 197 | frametime : float 198 | Optional Animation Frame time 199 | 200 | positions : bool 201 | Optional specfier to save bone 202 | positions for each frame 203 | 204 | orients : bool 205 | Multiply joint orients to the rotations 206 | before saving. 207 | 208 | """ 209 | 210 | if names is None: 211 | names = ["joint_" + str(i) for i in range(len(anim.parents))] 212 | 213 | with open(filename, 'w') as f: 214 | 215 | t = "" 216 | f.write("%sHIERARCHY\n" % t) 217 | f.write("%sROOT %s\n" % (t, names[0])) 218 | f.write("%s{\n" % t) 219 | t += '\t' 220 | 221 | f.write("%sOFFSET %f %f %f\n" % (t, anim.offsets[0,0], anim.offsets[0,1], anim.offsets[0,2]) ) 222 | f.write("%sCHANNELS 6 Xposition Yposition Zposition %s %s %s \n" % 223 | (t, channelmap_inv[order[0]], channelmap_inv[order[1]], channelmap_inv[order[2]])) 224 | 225 | for i in range(anim.shape[1]): 226 | if anim.parents[i] == 0: 227 | t = save_joint(f, anim, names, t, i, order=order, positions=positions) 228 | 229 | t = t[:-1] 230 | f.write("%s}\n" % t) 231 | 232 | f.write("MOTION\n") 233 | f.write("Frames: %i\n" % anim.shape[0]); 234 | f.write("Frame Time: %f\n" % frametime); 235 | 236 | #if orients: 237 | # rots = np.degrees((-anim.orients[np.newaxis] * anim.rotations).euler(order=order[::-1])) 238 | #else: 239 | # rots = np.degrees(anim.rotations.euler(order=order[::-1])) 240 | # rots = np.degrees(anim.rotations.euler(order=order[::-1])) 241 | if quater: 242 | rots = np.degrees(anim.rotations.euler(order=order[::-1])) 243 | else: 244 | rots = anim.rotations 245 | poss = anim.positions 246 | 247 | for i in range(anim.shape[0]): 248 | for j in range(anim.shape[1]): 249 | 250 | if positions or j == 0: 251 | 252 | f.write("%f %f %f %f %f %f " % ( 253 | poss[i,j,0], poss[i,j,1], poss[i,j,2], 254 | rots[i,j,ordermap[order[0]]], rots[i,j,ordermap[order[1]]], rots[i,j,ordermap[order[2]]])) 255 | 256 | else: 257 | if mask == None or mask[j] == 1: 258 | f.write("%f %f %f " % ( 259 | rots[i,j,ordermap[order[0]]], rots[i,j,ordermap[order[1]]], rots[i,j,ordermap[order[2]]])) 260 | else: 261 | f.write("%f %f %f " % (0, 0, 0)) 262 | 263 | f.write("\n") 264 | 265 | 266 | def save_joint(f, anim, names, t, i, order='zyx', positions=False): 267 | 268 | f.write("%sJOINT %s\n" % (t, names[i])) 269 | f.write("%s{\n" % t) 270 | t 
+= '\t' 271 | 272 | f.write("%sOFFSET %f %f %f\n" % (t, anim.offsets[i,0], anim.offsets[i,1], anim.offsets[i,2])) 273 | 274 | if positions: 275 | f.write("%sCHANNELS 6 Xposition Yposition Zposition %s %s %s \n" % (t, 276 | channelmap_inv[order[0]], channelmap_inv[order[1]], channelmap_inv[order[2]])) 277 | else: 278 | f.write("%sCHANNELS 3 %s %s %s\n" % (t, 279 | channelmap_inv[order[0]], channelmap_inv[order[1]], channelmap_inv[order[2]])) 280 | 281 | end_site = True 282 | 283 | for j in range(anim.shape[1]): 284 | if anim.parents[j] == i: 285 | t = save_joint(f, anim, names, t, j, order=order, positions=positions) 286 | end_site = False 287 | 288 | if end_site: 289 | f.write("%sEnd Site\n" % t) 290 | f.write("%s{\n" % t) 291 | t += '\t' 292 | f.write("%sOFFSET %f %f %f\n" % (t, 0.0, 0.0, 0.0)) 293 | t = t[:-1] 294 | f.write("%s}\n" % t) 295 | 296 | t = t[:-1] 297 | f.write("%s}\n" % t) 298 | 299 | return t 300 | -------------------------------------------------------------------------------- /foot_sliding/InverseKinematics.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import scipy.linalg as linalg 3 | 4 | import Animation 5 | import AnimationStructure 6 | 7 | from Quaternions_old import Quaternions 8 | 9 | class BasicInverseKinematics: 10 | """ 11 | Basic Inverse Kinematics Solver 12 | 13 | This is an extremely simple full body IK 14 | solver. 15 | 16 | It works given the following conditions: 17 | 18 | * All joint targets must be specified 19 | * All joint targets must be in reach 20 | * All joint targets must not differ 21 | extremely from the starting pose 22 | * No bone length constraints can be violated 23 | * The root translation and rotation are 24 | set to good initial values 25 | 26 | It works under the observation that if the 27 | _directions_ the joints are pointing toward 28 | match the _directions_ of the vectors between 29 | the target joints then the pose should match 30 | that of the target pose. 31 | 32 | Therefore it iterates over joints rotating 33 | each joint such that the vectors between it 34 | and it's children match that of the target 35 | positions. 36 | 37 | Parameters 38 | ---------- 39 | 40 | animation : Animation 41 | animation input 42 | 43 | positions : (F, J, 3) ndarray 44 | target positions for each frame F 45 | and each joint J 46 | 47 | iterations : int 48 | Optional number of iterations. 
49 | If the above conditions are met 50 | 1 iteration should be enough, 51 | therefore the default is 1 52 | 53 | silent : bool 54 | Optional if to suppress output 55 | defaults to False 56 | """ 57 | 58 | def __init__(self, animation, positions, iterations=1, silent=True): 59 | 60 | self.animation = animation 61 | self.positions = positions 62 | self.iterations = iterations 63 | self.silent = silent 64 | 65 | def __call__(self): 66 | 67 | children = AnimationStructure.children_list(self.animation.parents) 68 | 69 | for i in range(self.iterations): 70 | 71 | for j in AnimationStructure.joints(self.animation.parents): 72 | 73 | c = np.array(children[j]) 74 | if len(c) == 0: continue 75 | 76 | anim_transforms = Animation.transforms_global(self.animation) 77 | anim_positions = anim_transforms[:,:,:3,3] 78 | anim_rotations = Quaternions.from_transforms(anim_transforms) 79 | 80 | jdirs = anim_positions[:,c] - anim_positions[:,np.newaxis,j] 81 | ddirs = self.positions[:,c] - anim_positions[:,np.newaxis,j] 82 | 83 | jsums = np.sqrt(np.sum(jdirs**2.0, axis=-1)) + 1e-10 84 | dsums = np.sqrt(np.sum(ddirs**2.0, axis=-1)) + 1e-10 85 | 86 | jdirs = jdirs / jsums[:,:,np.newaxis] 87 | ddirs = ddirs / dsums[:,:,np.newaxis] 88 | 89 | angles = np.arccos(np.sum(jdirs * ddirs, axis=2).clip(-1, 1)) 90 | axises = np.cross(jdirs, ddirs) 91 | axises = -anim_rotations[:,j,np.newaxis] * axises 92 | 93 | rotations = Quaternions.from_angle_axis(angles, axises) 94 | 95 | if rotations.shape[1] == 1: 96 | averages = rotations[:,0] 97 | else: 98 | averages = Quaternions.exp(rotations.log().mean(axis=-2)) 99 | 100 | self.animation.rotations[:,j] = self.animation.rotations[:,j] * averages 101 | 102 | if not self.silent: 103 | anim_positions = Animation.positions_global(self.animation) 104 | error = np.mean(np.sum((anim_positions - self.positions)**2.0, axis=-1)**0.5, axis=-1) 105 | print('[BasicInverseKinematics] Iteration %i Error: %f' % (i+1, error)) 106 | 107 | return self.animation 108 | 109 | 110 | class JacobianInverseKinematics: 111 | """ 112 | Jacobian Based Full Body IK Solver 113 | 114 | This is a full body IK solver which 115 | uses the dampened least squares inverse 116 | jacobian method. 117 | 118 | It should remain fairly stable and effective 119 | even for joint positions which are out of 120 | reach and it can also take any number of targets 121 | to treat as end effectors. 122 | 123 | Parameters 124 | ---------- 125 | 126 | animation : Animation 127 | animation to solve inverse problem on 128 | 129 | targets : {int : (F, 3) ndarray} 130 | Dictionary of target positions for each 131 | frame F, mapping joint index to 132 | a target position 133 | 134 | references : (F, 3) 135 | Optional list of J joint position 136 | references for which the result 137 | should bias toward 138 | 139 | iterations : int 140 | Optional number of iterations to 141 | compute. More iterations results in 142 | better accuracy but takes longer to 143 | compute. Default is 10. 144 | 145 | recalculate : bool 146 | Optional if to recalcuate jacobian 147 | each iteration. Gives better accuracy 148 | but slower to compute. Defaults to True 149 | 150 | damping : float 151 | Optional damping constant. Higher 152 | damping increases stability but 153 | requires more iterations to converge. 154 | Defaults to 5.0 155 | 156 | secondary : float 157 | Force, or bias toward secondary target. 
158 | Defaults to 0.25 159 | 160 | silent : bool 161 | Optional if to suppress output 162 | defaults to False 163 | """ 164 | 165 | def __init__(self, animation, targets, 166 | references=None, iterations=10, 167 | recalculate=True, damping=2.0, 168 | secondary=0.25, translate=False, 169 | silent=False, weights=None, 170 | weights_translate=None): 171 | 172 | self.animation = animation 173 | self.targets = targets 174 | self.references = references 175 | 176 | self.iterations = iterations 177 | self.recalculate = recalculate 178 | self.damping = damping 179 | self.secondary = secondary 180 | self.translate = translate 181 | self.silent = silent 182 | self.weights = weights 183 | self.weights_translate = weights_translate 184 | 185 | def cross(self, a, b): 186 | o = np.empty(b.shape) 187 | o[...,0] = a[...,1]*b[...,2] - a[...,2]*b[...,1] 188 | o[...,1] = a[...,2]*b[...,0] - a[...,0]*b[...,2] 189 | o[...,2] = a[...,0]*b[...,1] - a[...,1]*b[...,0] 190 | return o 191 | 192 | def jacobian(self, x, fp, fr, ts, dsc, tdsc): 193 | 194 | """ Find parent rotations """ 195 | prs = fr[:,self.animation.parents] 196 | prs[:,0] = Quaternions.id((1)) 197 | 198 | """ Find global positions of target joints """ 199 | tps = fp[:,np.array(list(ts.keys()))] 200 | 201 | """ Get partial rotations """ 202 | qys = Quaternions.from_angle_axis(x[:,1:prs.shape[1]*3:3], np.array([[[0,1,0]]])) 203 | qzs = Quaternions.from_angle_axis(x[:,2:prs.shape[1]*3:3], np.array([[[0,0,1]]])) 204 | 205 | """ Find axis of rotations """ 206 | es = np.empty((len(x),fr.shape[1]*3, 3)) 207 | es[:,0::3] = ((prs * qzs) * qys) * np.array([[[1,0,0]]]) 208 | es[:,1::3] = ((prs * qzs) * np.array([[[0,1,0]]])) 209 | es[:,2::3] = ((prs * np.array([[[0,0,1]]]))) 210 | 211 | """ Construct Jacobian """ 212 | j = fp.repeat(3, axis=1) 213 | j = dsc[np.newaxis,:,:,np.newaxis] * (tps[:,np.newaxis,:] - j[:,:,np.newaxis]) 214 | j = self.cross(es[:,:,np.newaxis,:], j) 215 | j = np.swapaxes(j.reshape((len(x), fr.shape[1]*3, len(ts)*3)), 1, 2) 216 | 217 | if self.translate: 218 | 219 | es = np.empty((len(x),fr.shape[1]*3, 3)) 220 | es[:,0::3] = prs * np.array([[[1,0,0]]]) 221 | es[:,1::3] = prs * np.array([[[0,1,0]]]) 222 | es[:,2::3] = prs * np.array([[[0,0,1]]]) 223 | 224 | jt = tdsc[np.newaxis,:,:,np.newaxis] * es[:,:,np.newaxis,:].repeat(tps.shape[1], axis=2) 225 | jt = np.swapaxes(jt.reshape((len(x), fr.shape[1]*3, len(ts)*3)), 1, 2) 226 | 227 | j = np.concatenate([j, jt], axis=-1) 228 | 229 | return j 230 | 231 | #@profile(immediate=True) 232 | def __call__(self, descendants=None, gamma=1.0): 233 | 234 | self.descendants = descendants 235 | 236 | """ Calculate Masses """ 237 | if self.weights is None: 238 | self.weights = np.ones(self.animation.shape[1]) 239 | 240 | if self.weights_translate is None: 241 | self.weights_translate = np.ones(self.animation.shape[1]) 242 | 243 | """ Calculate Descendants """ 244 | if self.descendants is None: 245 | self.descendants = AnimationStructure.descendants_mask(self.animation.parents) 246 | 247 | self.tdescendants = np.eye(self.animation.shape[1]) + self.descendants 248 | 249 | self.first_descendants = self.descendants[:,np.array(list(self.targets.keys()))].repeat(3, axis=0).astype(int) 250 | self.first_tdescendants = self.tdescendants[:,np.array(list(self.targets.keys()))].repeat(3, axis=0).astype(int) 251 | 252 | """ Calculate End Effectors """ 253 | self.endeff = np.array(list(self.targets.values())) 254 | self.endeff = np.swapaxes(self.endeff, 0, 1) 255 | 256 | if not self.references is None: 257 | 
            self.second_descendants = self.descendants.repeat(3, axis=0).astype(int)
258 |             self.second_tdescendants = self.tdescendants.repeat(3, axis=0).astype(int)
259 |             self.second_targets = dict([(i, self.references[:,i]) for i in range(self.references.shape[1])])
260 |         
261 |         nf = len(self.animation)
262 |         nj = self.animation.shape[1]
263 |         
264 |         if not self.silent:
265 |             gp = Animation.positions_global(self.animation)
266 |             gp = gp[:,np.array(list(self.targets.keys()))]
267 |             error = np.mean(np.sqrt(np.sum((self.endeff - gp)**2.0, axis=2)))
268 |             print('[JacobianInverseKinematics] Start | Error: %f' % error)
269 |         
270 |         for i in range(self.iterations):
271 |             
272 |             """ Get Global Rotations & Positions """
273 |             gt = Animation.transforms_global(self.animation)
274 |             gp = gt[:,:,:,3]
275 |             gp = gp[:,:,:3] / gp[:,:,3,np.newaxis]
276 |             gr = Quaternions.from_transforms(gt)
277 |             
278 |             x = self.animation.rotations.euler().reshape(nf, -1)
279 |             w = self.weights.repeat(3)
280 |             
281 |             if self.translate:
282 |                 x = np.hstack([x, self.animation.positions.reshape(nf, -1)])
283 |                 w = np.hstack([w, self.weights_translate.repeat(3)])
284 |                 
285 |             """ Generate Jacobian """
286 |             if self.recalculate or i == 0:
287 |                 j = self.jacobian(x, gp, gr, self.targets, self.first_descendants, self.first_tdescendants)
288 |             
289 |             """ Update Variables """
290 |             l = self.damping * (1.0 / (w + 0.001))
291 |             d = (l*l) * np.eye(x.shape[1])
292 |             e = gamma * (self.endeff.reshape(nf,-1) - gp[:,np.array(list(self.targets.keys()))].reshape(nf, -1))
293 |             
294 |             x += np.array(list(map(lambda jf, ef:
295 |                 linalg.lu_solve(linalg.lu_factor(jf.T.dot(jf) + d), jf.T.dot(ef)), j, e)))  # damped least-squares step: solve (J^T J + D) dx = J^T e per frame
296 |             
297 |             """ Generate Secondary Jacobian """
298 |             if self.references is not None:
299 |                 
300 |                 ns = np.array(list(map(lambda jf:
301 |                     np.eye(x.shape[1]) - linalg.solve(jf.T.dot(jf) + d, jf.T.dot(jf)), j)))  # damped null-space projector of the primary task
302 |                     
303 |                 if self.recalculate or i == 0:
304 |                     j2 = self.jacobian(x, gp, gr, self.second_targets, self.second_descendants, self.second_tdescendants)
305 |                 
306 |                 e2 = self.secondary * (self.references.reshape(nf, -1) - gp.reshape(nf, -1))
307 |                 
308 |                 x += np.array(list(map(lambda nsf, j2f, e2f:
309 |                     nsf.dot(linalg.lu_solve(linalg.lu_factor(j2f.T.dot(j2f) + d), j2f.T.dot(e2f))), ns, j2, e2)))
310 |             
311 |             """ Set Back Rotations / Translations """
312 |             self.animation.rotations = Quaternions.from_euler(
313 |                 x[:,:nj*3].reshape((nf, nj, 3)), order='xyz', world=True)
314 |             
315 |             if self.translate:
316 |                 self.animation.positions = x[:,nj*3:].reshape((nf,nj, 3))
317 |             
318 |             """ Generate Error """
319 |             
320 |             if not self.silent:
321 |                 gp = Animation.positions_global(self.animation)
322 |                 gp = gp[:,np.array(list(self.targets.keys()))]
323 |                 error = np.mean(np.sum((self.endeff - gp)**2.0, axis=2)**0.5)
324 |                 print('[JacobianInverseKinematics] Iteration %i | Error: %f' % (i+1, error))
325 |                 
326 | 
327 | class BasicJacobianIK:
328 |     """
329 |     Same interface as BasicInverseKinematics
330 |     but uses the Jacobian IK Solver instead
331 |     """
332 |     
333 |     def __init__(self, animation, positions, iterations=10, silent=True, **kw):
334 |         
335 |         targets = dict([(i, positions[:,i]) for i in range(positions.shape[1])])
336 |         self.ik = JacobianInverseKinematics(animation, targets, iterations=iterations, silent=silent, **kw)
337 |         
338 |     def __call__(self, **kw):
339 |         return self.ik(**kw)
340 | 
341 | 
342 | class ICP:
343 |     
344 |     
345 |     def __init__(self,
346 |                  anim, rest, weights, mesh, goal,
347 |                  find_closest=True, damping=10,
348 |                  iterations=10, silent=True,
349 |
translate=True, recalculate=True, 350 | weights_translate=None): 351 | 352 | self.animation = anim 353 | self.rest = rest 354 | self.vweights = weights 355 | self.mesh = mesh 356 | self.goal = goal 357 | self.find_closest = find_closest 358 | self.iterations = iterations 359 | self.silent = silent 360 | self.translate = translate 361 | self.damping = damping 362 | self.weights = None 363 | self.weights_translate = weights_translate 364 | self.recalculate = recalculate 365 | 366 | def cross(self, a, b): 367 | o = np.empty(b.shape) 368 | o[...,0] = a[...,1]*b[...,2] - a[...,2]*b[...,1] 369 | o[...,1] = a[...,2]*b[...,0] - a[...,0]*b[...,2] 370 | o[...,2] = a[...,0]*b[...,1] - a[...,1]*b[...,0] 371 | return o 372 | 373 | def jacobian(self, x, fp, fr, goal, weights, des_r, des_t): 374 | 375 | """ Find parent rotations """ 376 | prs = fr[:,self.animation.parents] 377 | prs[:,0] = Quaternions.id((1)) 378 | 379 | """ Get partial rotations """ 380 | qys = Quaternions.from_angle_axis(x[:,1:prs.shape[1]*3:3], np.array([[[0,1,0]]])) 381 | qzs = Quaternions.from_angle_axis(x[:,2:prs.shape[1]*3:3], np.array([[[0,0,1]]])) 382 | 383 | """ Find axis of rotations """ 384 | es = np.empty((len(x),fr.shape[1]*3, 3)) 385 | es[:,0::3] = ((prs * qzs) * qys) * np.array([[[1,0,0]]]) 386 | es[:,1::3] = ((prs * qzs) * np.array([[[0,1,0]]])) 387 | es[:,2::3] = ((prs * np.array([[[0,0,1]]]))) 388 | 389 | """ Construct Jacobian """ 390 | j = fp.repeat(3, axis=1) 391 | j = des_r[np.newaxis,:,:,:,np.newaxis] * (goal[:,np.newaxis,:,np.newaxis] - j[:,:,np.newaxis,np.newaxis]) 392 | j = np.sum(j * weights[np.newaxis,np.newaxis,:,:,np.newaxis], 3) 393 | j = self.cross(es[:,:,np.newaxis,:], j) 394 | j = np.swapaxes(j.reshape((len(x), fr.shape[1]*3, goal.shape[1]*3)), 1, 2) 395 | 396 | if self.translate: 397 | 398 | es = np.empty((len(x),fr.shape[1]*3, 3)) 399 | es[:,0::3] = prs * np.array([[[1,0,0]]]) 400 | es[:,1::3] = prs * np.array([[[0,1,0]]]) 401 | es[:,2::3] = prs * np.array([[[0,0,1]]]) 402 | 403 | jt = des_t[np.newaxis,:,:,:,np.newaxis] * es[:,:,np.newaxis,np.newaxis,:].repeat(goal.shape[1], axis=2) 404 | jt = np.sum(jt * weights[np.newaxis,np.newaxis,:,:,np.newaxis], 3) 405 | jt = np.swapaxes(jt.reshape((len(x), fr.shape[1]*3, goal.shape[1]*3)), 1, 2) 406 | 407 | j = np.concatenate([j, jt], axis=-1) 408 | 409 | return j 410 | 411 | #@profile(immediate=True) 412 | def __call__(self, descendants=None, maxjoints=4, gamma=1.0, transpose=False): 413 | 414 | """ Calculate Masses """ 415 | if self.weights is None: 416 | self.weights = np.ones(self.animation.shape[1]) 417 | 418 | if self.weights_translate is None: 419 | self.weights_translate = np.ones(self.animation.shape[1]) 420 | 421 | nf = len(self.animation) 422 | nj = self.animation.shape[1] 423 | nv = self.goal.shape[1] 424 | 425 | weightids = np.argsort(-self.vweights, axis=1)[:,:maxjoints] 426 | weightvls = np.array(list(map(lambda w, i: w[i], self.vweights, weightids))) 427 | weightvls = weightvls / weightvls.sum(axis=1)[...,np.newaxis] 428 | 429 | if descendants is None: 430 | self.descendants = AnimationStructure.descendants_mask(self.animation.parents) 431 | else: 432 | self.descendants = descendants 433 | 434 | des_r = np.eye(nj) + self.descendants 435 | des_r = des_r[:,weightids].repeat(3, axis=0) 436 | 437 | des_t = np.eye(nj) + self.descendants 438 | des_t = des_t[:,weightids].repeat(3, axis=0) 439 | 440 | if not self.silent: 441 | curr = Animation.skin(self.animation, self.rest, self.vweights, self.mesh, maxjoints=maxjoints) 442 | error = 
np.mean(np.sqrt(np.sum((curr - self.goal)**2.0, axis=-1))) 443 | print('[ICP] Start | Error: %f' % error) 444 | 445 | for i in range(self.iterations): 446 | 447 | """ Get Global Rotations & Positions """ 448 | gt = Animation.transforms_global(self.animation) 449 | gp = gt[:,:,:,3] 450 | gp = gp[:,:,:3] / gp[:,:,3,np.newaxis] 451 | gr = Quaternions.from_transforms(gt) 452 | 453 | x = self.animation.rotations.euler().reshape(nf, -1) 454 | w = self.weights.repeat(3) 455 | 456 | if self.translate: 457 | x = np.hstack([x, self.animation.positions.reshape(nf, -1)]) 458 | w = np.hstack([w, self.weights_translate.repeat(3)]) 459 | 460 | """ Get Current State """ 461 | curr = Animation.skin(self.animation, self.rest, self.vweights, self.mesh, maxjoints=maxjoints) 462 | 463 | """ Find Cloest Points """ 464 | if self.find_closest: 465 | mapping = np.argmin( 466 | (curr[:,:,np.newaxis] - 467 | self.goal[:,np.newaxis,:])**2.0, axis=2) 468 | e = gamma * (np.array(list(map(lambda g, m: g[m], self.goal, mapping))) - curr).reshape(nf, -1) 469 | else: 470 | e = gamma * (self.goal - curr).reshape(nf, -1) 471 | 472 | """ Generate Jacobian """ 473 | if self.recalculate or i == 0: 474 | j = self.jacobian(x, gp, gr, self.goal, weightvls, des_r, des_t) 475 | 476 | """ Update Variables """ 477 | l = self.damping * (1.0 / (w + 1e-10)) 478 | d = (l*l) * np.eye(x.shape[1]) 479 | 480 | if transpose: 481 | x += np.array(list(map(lambda jf, ef: jf.T.dot(ef), j, e))) 482 | else: 483 | x += np.array(list(map(lambda jf, ef: 484 | linalg.lu_solve(linalg.lu_factor(jf.T.dot(jf) + d), jf.T.dot(ef)), j, e))) 485 | 486 | """ Set Back Rotations / Translations """ 487 | self.animation.rotations = Quaternions.from_euler( 488 | x[:,:nj*3].reshape((nf, nj, 3)), order='xyz', world=True) 489 | 490 | if self.translate: 491 | self.animation.positions = x[:,nj*3:].reshape((nf, nj, 3)) 492 | 493 | if not self.silent: 494 | curr = Animation.skin(self.animation, self.rest, self.vweights, self.mesh) 495 | error = np.mean(np.sqrt(np.sum((curr - self.goal)**2.0, axis=-1))) 496 | print('[ICP] Iteration %i | Error: %f' % (i+1, error)) 497 | 498 | -------------------------------------------------------------------------------- /foot_sliding/Pivots.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from Quaternions_old import Quaternions 4 | 5 | class Pivots: 6 | """ 7 | Pivots is an ndarray of angular rotations 8 | 9 | This wrapper provides some functions for 10 | working with pivots. 
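    A minimal illustrative example (not from the original source):
    adding two Pivots wraps the result back into (-pi, pi]:
    
        >>> (Pivots(np.array([3.0])) + Pivots(np.array([1.0]))).ps
        array([-2.28318531])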
11 | 
12 |     These are particularly useful as a number
13 |     of atomic operations (such as adding or
14 |     subtracting) cannot be achieved using
15 |     the standard arithmetic and need to be
16 |     defined differently to work correctly
17 |     """
18 |     
19 |     def __init__(self, ps): self.ps = np.array(ps)
20 |     def __str__(self): return "Pivots("+ str(self.ps) + ")"
21 |     def __repr__(self): return "Pivots("+ repr(self.ps) + ")"
22 |     
23 |     def __add__(self, other): return Pivots(np.arctan2(np.sin(self.ps + other.ps), np.cos(self.ps + other.ps)))
24 |     def __sub__(self, other): return Pivots(np.arctan2(np.sin(self.ps - other.ps), np.cos(self.ps - other.ps)))
25 |     def __mul__(self, other): return Pivots(self.ps * other.ps)
26 |     def __div__(self, other): return Pivots(self.ps / other.ps)
27 |     def __mod__(self, other): return Pivots(self.ps % other.ps)
28 |     def __pow__(self, other): return Pivots(self.ps ** other.ps)
29 |     
30 |     def __lt__(self, other): return self.ps < other.ps
31 |     def __le__(self, other): return self.ps <= other.ps
32 |     def __eq__(self, other): return self.ps == other.ps
33 |     def __ne__(self, other): return self.ps != other.ps
34 |     def __ge__(self, other): return self.ps >= other.ps
35 |     def __gt__(self, other): return self.ps > other.ps
36 |     
37 |     def __abs__(self): return Pivots(abs(self.ps))
38 |     def __neg__(self): return Pivots(-self.ps)
39 |     
40 |     def __iter__(self): return iter(self.ps)
41 |     def __len__(self): return len(self.ps)
42 |     
43 |     def __getitem__(self, k): return Pivots(self.ps[k])
44 |     def __setitem__(self, k, v): self.ps[k] = v.ps
45 |     
46 |     def _ellipsis(self): return tuple(map(lambda x: slice(None), self.shape))
47 |     
48 |     def quaternions(self, plane='xz'):
49 |         fa = self._ellipsis()
50 |         axises = np.ones(self.ps.shape + (3,))
51 |         axises[fa + ("xyz".index(plane[0]),)] = 0.0
52 |         axises[fa + ("xyz".index(plane[1]),)] = 0.0
53 |         return Quaternions.from_angle_axis(self.ps, axises)
54 |     
55 |     def directions(self, plane='xz'):
56 |         dirs = np.zeros((len(self.ps), 3))
57 |         dirs[..., "xyz".index(plane[0])] = np.sin(self.ps)
58 |         dirs[..., "xyz".index(plane[1])] = np.cos(self.ps)
59 |         return dirs
60 |     
61 |     def normalized(self):
62 |         xs = np.copy(self.ps)
63 |         while np.any(xs > np.pi): xs[xs > np.pi] = xs[xs > np.pi] - 2 * np.pi
64 |         while np.any(xs < -np.pi): xs[xs < -np.pi] = xs[xs < -np.pi] + 2 * np.pi
65 |         return Pivots(xs)
66 |     
67 |     def interpolate(self, ws):
68 |         dir = np.average(self.directions(), weights=ws, axis=0)
69 |         return np.arctan2(dir[2], dir[0])
70 |     
71 |     def copy(self):
72 |         return Pivots(np.copy(self.ps))
73 |     
74 |     @property
75 |     def shape(self):
76 |         return self.ps.shape
77 |     
78 |     @classmethod
79 |     def from_quaternions(cls, qs, forward='z', plane='xz'):
80 |         ds = np.zeros(qs.shape + (3,))
81 |         ds[...,'xyz'.index(forward)] = 1.0
82 |         return Pivots.from_directions(qs * ds, plane=plane)
83 |     
84 |     @classmethod
85 |     def from_directions(cls, ds, plane='xz'):
86 |         ys = ds[...,'xyz'.index(plane[0])]
87 |         xs = ds[...,'xyz'.index(plane[1])]
88 |         return Pivots(np.arctan2(ys, xs))
89 |     
90 | 
-------------------------------------------------------------------------------- /foot_sliding/Quaternions.py: --------------------------------------------------------------------------------
1 | import numpy as np
2 | 
3 | class Quaternions:
4 |     """
5 |     Quaternions is a wrapper around a numpy ndarray
6 |     that allows it to act as if it were an ndarray of
7 |     a quaternion data type.
8 | 
9 |     Therefore addition, subtraction, multiplication,
10 |     division, negation, absolute, are all defined
11 |     in terms of quaternion operations such as quaternion
12 |     multiplication.
13 |     
14 |     This allows for much neater code and many routines
15 |     which conceptually do the same thing to be written
16 |     in the same way for point data and for rotation data.
17 |     
18 |     The Quaternions class has been designed such that it
19 |     should support broadcasting and slicing in all of the
20 |     usual ways.
21 |     """
22 |     
23 |     def __init__(self, qs):
24 |         if isinstance(qs, np.ndarray):
25 |             if len(qs.shape) == 1: qs = np.array([qs])
26 |             self.qs = qs
27 |             return
28 |         
29 |         if isinstance(qs, Quaternions):
30 |             self.qs = qs.qs
31 |             return
32 |         
33 |         raise TypeError('Quaternions must be constructed from iterable, numpy array, or Quaternions, not %s' % type(qs))
34 |     
35 |     def __str__(self): return "Quaternions("+ str(self.qs) + ")"
36 |     def __repr__(self): return "Quaternions("+ repr(self.qs) + ")"
37 |     
38 |     """ Helper Methods for Broadcasting and Data extraction """
39 |     
40 |     @classmethod
41 |     def _broadcast(cls, sqs, oqs, scalar=False):
42 |         if isinstance(oqs, float): return sqs, oqs * np.ones(sqs.shape[:-1])
43 |         
44 |         ss = np.array(sqs.shape) if not scalar else np.array(sqs.shape[:-1])
45 |         os = np.array(oqs.shape)
46 |         
47 |         if len(ss) != len(os):
48 |             raise TypeError('Quaternions cannot broadcast together shapes %s and %s' % (sqs.shape, oqs.shape))
49 |         
50 |         if np.all(ss == os): return sqs, oqs
51 |         
52 |         if not np.all((ss == os) | (os == np.ones(len(os))) | (ss == np.ones(len(ss)))):
53 |             raise TypeError('Quaternions cannot broadcast together shapes %s and %s' % (sqs.shape, oqs.shape))
54 |         
55 |         sqsn, oqsn = sqs.copy(), oqs.copy()
56 |         
57 |         for a in np.where(ss == 1)[0]: sqsn = sqsn.repeat(os[a], axis=a)
58 |         for a in np.where(os == 1)[0]: oqsn = oqsn.repeat(ss[a], axis=a)
59 |         
60 |         return sqsn, oqsn
61 |     
62 |     """ Adding Quaternions is just Defined as Multiplication """
63 |     
64 |     def __add__(self, other): return self * other
65 |     def __sub__(self, other): return self / other
66 |     
67 |     """ Quaternion Multiplication """
68 |     
69 |     def __mul__(self, other):
70 |         """
71 |         Quaternion multiplication has three main methods.
72 |         
73 |         When multiplying a Quaternions array by Quaternions
74 |         normal quaternion multiplication is performed.
75 |         
76 |         When multiplying a Quaternions array by a vector
77 |         array of the same shape, where the last axis is 3,
78 |         it is assumed to be a Quaternion by 3D-Vector
79 |         multiplication and the 3D-Vectors are rotated
80 |         in space by the Quaternions.
81 |         
82 |         When multiplying a Quaternions array by a scalar
83 |         or vector of different shape it is assumed to be
84 |         a Quaternions by Scalars multiplication and the
85 |         Quaternions are scaled using Slerp and the identity
86 |         quaternions.
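        An illustrative sketch of the three modes (not part of the
        original docstring):
        
            >>> q = Quaternions.from_angle_axis(np.array([np.pi/2]), np.array([[0,1,0]]))
            >>> q * q                             # quaternion * quaternion: compose rotations
            >>> q * np.array([[0.0, 0.0, 1.0]])   # quaternion * vector: rotate the vector
            >>> q * 0.5                           # quaternion * scalar: slerp toward identity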
87 | """ 88 | 89 | """ If Quaternions type do Quaternions * Quaternions """ 90 | if isinstance(other, Quaternions): 91 | sqs, oqs = Quaternions._broadcast(self.qs, other.qs) 92 | 93 | q0 = sqs[...,0]; q1 = sqs[...,1]; 94 | q2 = sqs[...,2]; q3 = sqs[...,3]; 95 | r0 = oqs[...,0]; r1 = oqs[...,1]; 96 | r2 = oqs[...,2]; r3 = oqs[...,3]; 97 | 98 | qs = np.empty(sqs.shape) 99 | qs[...,0] = r0 * q0 - r1 * q1 - r2 * q2 - r3 * q3 100 | qs[...,1] = r0 * q1 + r1 * q0 - r2 * q3 + r3 * q2 101 | qs[...,2] = r0 * q2 + r1 * q3 + r2 * q0 - r3 * q1 102 | qs[...,3] = r0 * q3 - r1 * q2 + r2 * q1 + r3 * q0 103 | 104 | return Quaternions(qs) 105 | 106 | """ If array type do Quaternions * Vectors """ 107 | if isinstance(other, np.ndarray) and other.shape[-1] == 3: 108 | vs = Quaternions(np.concatenate([np.zeros(other.shape[:-1] + (1,)), other], axis=-1)) 109 | 110 | return (self * (vs * -self)).imaginaries 111 | 112 | """ If float do Quaternions * Scalars """ 113 | if isinstance(other, np.ndarray) or isinstance(other, float): 114 | return Quaternions.slerp(Quaternions.id_like(self), self, other) 115 | 116 | raise TypeError('Cannot multiply/add Quaternions with type %s' % str(type(other))) 117 | 118 | def __div__(self, other): 119 | """ 120 | When a Quaternion type is supplied, division is defined 121 | as multiplication by the inverse of that Quaternion. 122 | 123 | When a scalar or vector is supplied it is defined 124 | as multiplicaion of one over the supplied value. 125 | Essentially a scaling. 126 | """ 127 | 128 | if isinstance(other, Quaternions): return self * (-other) 129 | if isinstance(other, np.ndarray): return self * (1.0 / other) 130 | if isinstance(other, float): return self * (1.0 / other) 131 | raise TypeError('Cannot divide/subtract Quaternions with type %s' + str(type(other))) 132 | 133 | def __eq__(self, other): return self.qs == other.qs 134 | def __ne__(self, other): return self.qs != other.qs 135 | 136 | def __neg__(self): 137 | """ Invert Quaternions """ 138 | return Quaternions(self.qs * np.array([[1, -1, -1, -1]])) 139 | 140 | def __abs__(self): 141 | """ Unify Quaternions To Single Pole """ 142 | qabs = self.normalized().copy() 143 | top = np.sum(( qabs.qs) * np.array([1,0,0,0]), axis=-1) 144 | bot = np.sum((-qabs.qs) * np.array([1,0,0,0]), axis=-1) 145 | qabs.qs[top < bot] = -qabs.qs[top < bot] 146 | return qabs 147 | 148 | def __iter__(self): return iter(self.qs) 149 | def __len__(self): return len(self.qs) 150 | 151 | def __getitem__(self, k): return Quaternions(self.qs[k]) 152 | def __setitem__(self, k, v): self.qs[k] = v.qs 153 | 154 | @property 155 | def lengths(self): 156 | return np.sum(self.qs**2.0, axis=-1)**0.5 157 | 158 | @property 159 | def reals(self): 160 | return self.qs[...,0] 161 | 162 | @property 163 | def imaginaries(self): 164 | return self.qs[...,1:4] 165 | 166 | @property 167 | def shape(self): return self.qs.shape[:-1] 168 | 169 | def repeat(self, n, **kwargs): 170 | return Quaternions(self.qs.repeat(n, **kwargs)) 171 | 172 | def normalized(self): 173 | return Quaternions(self.qs / self.lengths[...,np.newaxis]) 174 | 175 | def log(self): 176 | norm = abs(self.normalized()) 177 | imgs = norm.imaginaries 178 | lens = np.sqrt(np.sum(imgs**2, axis=-1)) 179 | lens = np.arctan2(lens, norm.reals) / (lens + 1e-10) 180 | return imgs * lens[...,np.newaxis] 181 | 182 | def constrained(self, axis): 183 | 184 | rl = self.reals 185 | im = np.sum(axis * self.imaginaries, axis=-1) 186 | 187 | t1 = -2 * np.arctan2(rl, im) + np.pi 188 | t2 = -2 * np.arctan2(rl, im) - np.pi 189 | 
190 |         top = Quaternions.exp(axis[np.newaxis] * (t1[:,np.newaxis] / 2.0))
191 |         bot = Quaternions.exp(axis[np.newaxis] * (t2[:,np.newaxis] / 2.0))
192 |         img = self.dot(top) > self.dot(bot)
193 |         
194 |         ret = top.copy()
195 |         ret[ img] = top[ img]
196 |         ret[~img] = bot[~img]
197 |         return ret
198 |     
199 |     def constrained_x(self): return self.constrained(np.array([1,0,0]))
200 |     def constrained_y(self): return self.constrained(np.array([0,1,0]))
201 |     def constrained_z(self): return self.constrained(np.array([0,0,1]))
202 |     
203 |     def dot(self, q): return np.sum(self.qs * q.qs, axis=-1)
204 |     
205 |     def copy(self): return Quaternions(np.copy(self.qs))
206 |     
207 |     def reshape(self, s):
208 |         self.qs = self.qs.reshape(s)
209 |         return self
210 |     
211 |     def interpolate(self, ws):
212 |         return Quaternions.exp(np.average(abs(self).log(), axis=0, weights=ws))
213 |     
214 |     def euler(self, order='xyz'):
215 |         
216 |         q = self.normalized().qs
217 |         q0 = q[...,0]
218 |         q1 = q[...,1]
219 |         q2 = q[...,2]
220 |         q3 = q[...,3]
221 |         es = np.zeros(self.shape + (3,))
222 |         
223 |         # The commented-out version below converts incorrectly
224 |         '''
225 |         if order == 'xyz':
226 |             es[...,0] = np.arctan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2))
227 |             es[...,1] = np.arcsin((2 * (q0 * q2 - q3 * q1)).clip(-1,1))
228 |             es[...,2] = np.arctan2(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3))
229 |         elif order == 'yzx':
230 |             es[...,0] = np.arctan2(2 * (q1 * q0 - q2 * q3), -q1 * q1 + q2 * q2 - q3 * q3 + q0 * q0)
231 |             es[...,1] = np.arctan2(2 * (q2 * q0 - q1 * q3), q1 * q1 - q2 * q2 - q3 * q3 + q0 * q0)
232 |             es[...,2] = np.arcsin((2 * (q1 * q2 + q3 * q0)).clip(-1,1))
233 |         else:
234 |             raise NotImplementedError('Cannot convert from ordering %s' % order)
235 |         
236 |         '''
237 |         
238 |         if order == 'xyz':
239 |             es[..., 2] = np.arctan2(2 * (q0 * q3 - q1 * q2), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
240 |             es[..., 1] = np.arcsin((2 * (q1 * q3 + q0 * q2)).clip(-1,1))
241 |             es[..., 0] = np.arctan2(2 * (q0 * q1 - q2 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
242 |         else:
243 |             raise NotImplementedError('Cannot convert from ordering %s' % order)
244 |         
245 |         # These conversions don't appear to work correctly for Maya.
246 | # http://bediyap.com/programming/convert-quaternion-to-euler-rotations/ 247 | ''' 248 | if order == 'xyz': 249 | es[..., 0] = np.arctan2(2 * (q0 * q3 - q1 * q2), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3) 250 | es[..., 1] = np.arcsin((2 * (q1 * q3 + q0 * q2)).clip(-1,1)) 251 | es[..., 2] = np.arctan2(2 * (q0 * q1 - q2 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3) 252 | elif order == 'yzx': 253 | es[fa + (0,)] = np.arctan2(2 * (q0 * q1 - q2 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3) 254 | es[fa + (1,)] = np.arcsin((2 * (q1 * q2 + q0 * q3)).clip(-1,1)) 255 | es[fa + (2,)] = np.arctan2(2 * (q0 * q2 - q1 * q3), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3) 256 | elif order == 'zxy': 257 | es[fa + (0,)] = np.arctan2(2 * (q0 * q2 - q1 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3) 258 | es[fa + (1,)] = np.arcsin((2 * (q0 * q1 + q2 * q3)).clip(-1,1)) 259 | es[fa + (2,)] = np.arctan2(2 * (q0 * q3 - q1 * q2), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3) 260 | elif order == 'xzy': 261 | es[fa + (0,)] = np.arctan2(2 * (q0 * q2 + q1 * q3), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3) 262 | es[fa + (1,)] = np.arcsin((2 * (q0 * q3 - q1 * q2)).clip(-1,1)) 263 | es[fa + (2,)] = np.arctan2(2 * (q0 * q1 + q2 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3) 264 | elif order == 'yxz': 265 | es[fa + (0,)] = np.arctan2(2 * (q1 * q2 + q0 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3) 266 | es[fa + (1,)] = np.arcsin((2 * (q0 * q1 - q2 * q3)).clip(-1,1)) 267 | es[fa + (2,)] = np.arctan2(2 * (q1 * q3 + q0 * q2), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3) 268 | elif order == 'zyx': 269 | es[fa + (0,)] = np.arctan2(2 * (q0 * q1 + q2 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3) 270 | es[fa + (1,)] = np.arcsin((2 * (q0 * q2 - q1 * q3)).clip(-1,1)) 271 | es[fa + (2,)] = np.arctan2(2 * (q0 * q3 + q1 * q2), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3) 272 | 273 | else: 274 | raise KeyError('Unknown ordering %s' % order) 275 | ''' 276 | 277 | 278 | # https://github.com/ehsan/ogre/blob/master/OgreMain/src/OgreMatrix3.cpp 279 | # Use this class and convert from matrix 280 | 281 | return es 282 | 283 | 284 | def average(self): 285 | 286 | if len(self.shape) == 1: 287 | 288 | import numpy.core.umath_tests as ut 289 | system = ut.matrix_multiply(self.qs[:,:,np.newaxis], self.qs[:,np.newaxis,:]).sum(axis=0) 290 | w, v = np.linalg.eigh(system) 291 | qiT_dot_qref = (self.qs[:,:,np.newaxis] * v[np.newaxis,:,:]).sum(axis=1) 292 | return Quaternions(v[:,np.argmin((1.-qiT_dot_qref**2).sum(axis=0))]) 293 | 294 | else: 295 | 296 | raise NotImplementedError('Cannot average multi-dimensionsal Quaternions') 297 | 298 | def angle_axis(self): 299 | 300 | norm = self.normalized() 301 | s = np.sqrt(1 - (norm.reals**2.0)) 302 | s[s == 0] = 0.001 303 | 304 | angles = 2.0 * np.arccos(norm.reals) 305 | axis = norm.imaginaries / s[...,np.newaxis] 306 | 307 | return angles, axis 308 | 309 | 310 | def transforms(self): 311 | 312 | qw = self.qs[...,0] 313 | qx = self.qs[...,1] 314 | qy = self.qs[...,2] 315 | qz = self.qs[...,3] 316 | 317 | x2 = qx + qx; y2 = qy + qy; z2 = qz + qz; 318 | xx = qx * x2; yy = qy * y2; wx = qw * x2; 319 | xy = qx * y2; yz = qy * z2; wy = qw * y2; 320 | xz = qx * z2; zz = qz * z2; wz = qw * z2; 321 | 322 | m = np.empty(self.shape + (3,3)) 323 | m[...,0,0] = 1.0 - (yy + zz) 324 | m[...,0,1] = xy - wz 325 | m[...,0,2] = xz + wy 326 | m[...,1,0] = xy + wz 327 | m[...,1,1] = 1.0 - (xx + zz) 328 | m[...,1,2] = yz - wx 329 | m[...,2,0] = xz - wy 330 | m[...,2,1] = yz + wx 331 | m[...,2,2] = 1.0 - (xx + yy) 332 | 333 | return m 334 | 335 | def ravel(self): 
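        # flatten the underlying (..., 4) quaternion array to 1-D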
336 | return self.qs.ravel() 337 | 338 | @classmethod 339 | def id(cls, n): 340 | 341 | if isinstance(n, tuple): 342 | qs = np.zeros(n + (4,)) 343 | qs[...,0] = 1.0 344 | return Quaternions(qs) 345 | 346 | if isinstance(n, int) or isinstance(n, long): 347 | qs = np.zeros((n,4)) 348 | qs[:,0] = 1.0 349 | return Quaternions(qs) 350 | 351 | raise TypeError('Cannot Construct Quaternion from %s type' % str(type(n))) 352 | 353 | @classmethod 354 | def id_like(cls, a): 355 | qs = np.zeros(a.shape + (4,)) 356 | qs[...,0] = 1.0 357 | return Quaternions(qs) 358 | 359 | @classmethod 360 | def exp(cls, ws): 361 | 362 | ts = np.sum(ws**2.0, axis=-1)**0.5 363 | ts[ts == 0] = 0.001 364 | ls = np.sin(ts) / ts 365 | 366 | qs = np.empty(ws.shape[:-1] + (4,)) 367 | qs[...,0] = np.cos(ts) 368 | qs[...,1] = ws[...,0] * ls 369 | qs[...,2] = ws[...,1] * ls 370 | qs[...,3] = ws[...,2] * ls 371 | 372 | return Quaternions(qs).normalized() 373 | 374 | @classmethod 375 | def slerp(cls, q0s, q1s, a): 376 | 377 | fst, snd = cls._broadcast(q0s.qs, q1s.qs) 378 | fst, a = cls._broadcast(fst, a, scalar=True) 379 | snd, a = cls._broadcast(snd, a, scalar=True) 380 | 381 | len = np.sum(fst * snd, axis=-1) 382 | 383 | neg = len < 0.0 384 | len[neg] = -len[neg] 385 | snd[neg] = -snd[neg] 386 | 387 | amount0 = np.zeros(a.shape) 388 | amount1 = np.zeros(a.shape) 389 | 390 | linear = (1.0 - len) < 0.01 391 | omegas = np.arccos(len[~linear]) 392 | sinoms = np.sin(omegas) 393 | 394 | amount0[ linear] = 1.0 - a[linear] 395 | amount1[ linear] = a[linear] 396 | amount0[~linear] = np.sin((1.0 - a[~linear]) * omegas) / sinoms 397 | amount1[~linear] = np.sin( a[~linear] * omegas) / sinoms 398 | 399 | return Quaternions( 400 | amount0[...,np.newaxis] * fst + 401 | amount1[...,np.newaxis] * snd) 402 | 403 | @classmethod 404 | def between(cls, v0s, v1s): 405 | a = np.cross(v0s, v1s) 406 | w = np.sqrt((v0s**2).sum(axis=-1) * (v1s**2).sum(axis=-1)) + (v0s * v1s).sum(axis=-1) 407 | return Quaternions(np.concatenate([w[...,np.newaxis], a], axis=-1)).normalized() 408 | 409 | @classmethod 410 | def from_angle_axis(cls, angles, axis): 411 | axis = axis / (np.sqrt(np.sum(axis**2, axis=-1)) + 1e-10)[...,np.newaxis] 412 | sines = np.sin(angles / 2.0)[...,np.newaxis] 413 | cosines = np.cos(angles / 2.0)[...,np.newaxis] 414 | return Quaternions(np.concatenate([cosines, axis * sines], axis=-1)) 415 | 416 | @classmethod 417 | def from_euler(cls, es, order='xyz', world=False): 418 | 419 | axis = { 420 | 'x' : np.array([1,0,0]), 421 | 'y' : np.array([0,1,0]), 422 | 'z' : np.array([0,0,1]), 423 | } 424 | 425 | q0s = Quaternions.from_angle_axis(es[...,0], axis[order[0]]) 426 | q1s = Quaternions.from_angle_axis(es[...,1], axis[order[1]]) 427 | q2s = Quaternions.from_angle_axis(es[...,2], axis[order[2]]) 428 | 429 | return (q2s * (q1s * q0s)) if world else (q0s * (q1s * q2s)) 430 | 431 | @classmethod 432 | def from_transforms(cls, ts): 433 | 434 | d0, d1, d2 = ts[...,0,0], ts[...,1,1], ts[...,2,2] 435 | 436 | q0 = ( d0 + d1 + d2 + 1.0) / 4.0 437 | q1 = ( d0 - d1 - d2 + 1.0) / 4.0 438 | q2 = (-d0 + d1 - d2 + 1.0) / 4.0 439 | q3 = (-d0 - d1 + d2 + 1.0) / 4.0 440 | 441 | q0 = np.sqrt(q0.clip(0,None)) 442 | q1 = np.sqrt(q1.clip(0,None)) 443 | q2 = np.sqrt(q2.clip(0,None)) 444 | q3 = np.sqrt(q3.clip(0,None)) 445 | 446 | c0 = (q0 >= q1) & (q0 >= q2) & (q0 >= q3) 447 | c1 = (q1 >= q0) & (q1 >= q2) & (q1 >= q3) 448 | c2 = (q2 >= q0) & (q2 >= q1) & (q2 >= q3) 449 | c3 = (q3 >= q0) & (q3 >= q1) & (q3 >= q2) 450 | 451 | q1[c0] *= np.sign(ts[c0,2,1] - ts[c0,1,2]) 452 | 
q2[c0] *= np.sign(ts[c0,0,2] - ts[c0,2,0]) 453 | q3[c0] *= np.sign(ts[c0,1,0] - ts[c0,0,1]) 454 | 455 | q0[c1] *= np.sign(ts[c1,2,1] - ts[c1,1,2]) 456 | q2[c1] *= np.sign(ts[c1,1,0] + ts[c1,0,1]) 457 | q3[c1] *= np.sign(ts[c1,0,2] + ts[c1,2,0]) 458 | 459 | q0[c2] *= np.sign(ts[c2,0,2] - ts[c2,2,0]) 460 | q1[c2] *= np.sign(ts[c2,1,0] + ts[c2,0,1]) 461 | q3[c2] *= np.sign(ts[c2,2,1] + ts[c2,1,2]) 462 | 463 | q0[c3] *= np.sign(ts[c3,1,0] - ts[c3,0,1]) 464 | q1[c3] *= np.sign(ts[c3,2,0] + ts[c3,0,2]) 465 | q2[c3] *= np.sign(ts[c3,2,1] + ts[c3,1,2]) 466 | 467 | qs = np.empty(ts.shape[:-2] + (4,)) 468 | qs[...,0] = q0 469 | qs[...,1] = q1 470 | qs[...,2] = q2 471 | qs[...,3] = q3 472 | 473 | return cls(qs) 474 | -------------------------------------------------------------------------------- /foot_sliding/Quaternions_old.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | class Quaternions: 4 | """ 5 | Quaternions is a wrapper around a numpy ndarray 6 | that allows it to act as if it were an narray of 7 | a quaternion data type. 8 | 9 | Therefore addition, subtraction, multiplication, 10 | division, negation, absolute, are all defined 11 | in terms of quaternion operations such as quaternion 12 | multiplication. 13 | 14 | This allows for much neater code and many routines 15 | which conceptually do the same thing to be written 16 | in the same way for point data and for rotation data. 17 | 18 | The Quaternions class has been desgined such that it 19 | should support broadcasting and slicing in all of the 20 | usual ways. 21 | """ 22 | 23 | def __init__(self, qs): 24 | if isinstance(qs, np.ndarray): 25 | 26 | if len(qs.shape) == 1: qs = np.array([qs]) 27 | self.qs = qs 28 | return 29 | 30 | if isinstance(qs, Quaternions): 31 | self.qs = qs.qs 32 | return 33 | 34 | raise TypeError('Quaternions must be constructed from iterable, numpy array, or Quaternions, not %s' % type(qs)) 35 | 36 | def __str__(self): return "Quaternions("+ str(self.qs) + ")" 37 | def __repr__(self): return "Quaternions("+ repr(self.qs) + ")" 38 | 39 | """ Helper Methods for Broadcasting and Data extraction """ 40 | 41 | @classmethod 42 | def _broadcast(cls, sqs, oqs, scalar=False): 43 | if isinstance(oqs, float): return sqs, oqs * np.ones(sqs.shape[:-1]) 44 | 45 | ss = np.array(sqs.shape) if not scalar else np.array(sqs.shape[:-1]) 46 | os = np.array(oqs.shape) 47 | 48 | if len(ss) != len(os): 49 | raise TypeError('Quaternions cannot broadcast together shapes %s and %s' % (sqs.shape, oqs.shape)) 50 | 51 | if np.all(ss == os): return sqs, oqs 52 | 53 | if not np.all((ss == os) | (os == np.ones(len(os))) | (ss == np.ones(len(ss)))): 54 | raise TypeError('Quaternions cannot broadcast together shapes %s and %s' % (sqs.shape, oqs.shape)) 55 | 56 | sqsn, oqsn = sqs.copy(), oqs.copy() 57 | 58 | for a in np.where(ss == 1)[0]: sqsn = sqsn.repeat(os[a], axis=a) 59 | for a in np.where(os == 1)[0]: oqsn = oqsn.repeat(ss[a], axis=a) 60 | 61 | return sqsn, oqsn 62 | 63 | """ Adding Quaterions is just Defined as Multiplication """ 64 | 65 | def __add__(self, other): return self * other 66 | def __sub__(self, other): return self / other 67 | 68 | """ Quaterion Multiplication """ 69 | 70 | def __mul__(self, other): 71 | """ 72 | Quaternion multiplication has three main methods. 73 | 74 | When multiplying a Quaternions array by Quaternions 75 | normal quaternion multiplication is performed. 
76 | 77 | When multiplying a Quaternions array by a vector 78 | array of the same shape, where the last axis is 3, 79 | it is assumed to be a Quaternion by 3D-Vector 80 | multiplication and the 3D-Vectors are rotated 81 | in space by the Quaternions. 82 | 83 | When multipplying a Quaternions array by a scalar 84 | or vector of different shape it is assumed to be 85 | a Quaternions by Scalars multiplication and the 86 | Quaternions are scaled using Slerp and the identity 87 | quaternions. 88 | """ 89 | 90 | """ If Quaternions type do Quaternions * Quaternions """ 91 | if isinstance(other, Quaternions): 92 | sqs, oqs = Quaternions._broadcast(self.qs, other.qs) 93 | 94 | q0 = sqs[...,0]; q1 = sqs[...,1]; 95 | q2 = sqs[...,2]; q3 = sqs[...,3]; 96 | r0 = oqs[...,0]; r1 = oqs[...,1]; 97 | r2 = oqs[...,2]; r3 = oqs[...,3]; 98 | 99 | qs = np.empty(sqs.shape) 100 | qs[...,0] = r0 * q0 - r1 * q1 - r2 * q2 - r3 * q3 101 | qs[...,1] = r0 * q1 + r1 * q0 - r2 * q3 + r3 * q2 102 | qs[...,2] = r0 * q2 + r1 * q3 + r2 * q0 - r3 * q1 103 | qs[...,3] = r0 * q3 - r1 * q2 + r2 * q1 + r3 * q0 104 | 105 | return Quaternions(qs) 106 | 107 | """ If array type do Quaternions * Vectors """ 108 | if isinstance(other, np.ndarray) and other.shape[-1] == 3: 109 | vs = Quaternions(np.concatenate([np.zeros(other.shape[:-1] + (1,)), other], axis=-1)) 110 | 111 | return (self * (vs * -self)).imaginaries 112 | 113 | """ If float do Quaternions * Scalars """ 114 | if isinstance(other, np.ndarray) or isinstance(other, float): 115 | return Quaternions.slerp(Quaternions.id_like(self), self, other) 116 | 117 | raise TypeError('Cannot multiply/add Quaternions with type %s' % str(type(other))) 118 | 119 | def __div__(self, other): 120 | """ 121 | When a Quaternion type is supplied, division is defined 122 | as multiplication by the inverse of that Quaternion. 123 | 124 | When a scalar or vector is supplied it is defined 125 | as multiplicaion of one over the supplied value. 126 | Essentially a scaling. 
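        For example (illustrative, not from the original source), q / q
        yields the identity rotation for a unit quaternion q. Note that on
        Python 3 this method only takes effect if it is also bound as
        __truediv__.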
127 | """ 128 | 129 | if isinstance(other, Quaternions): return self * (-other) 130 | if isinstance(other, np.ndarray): return self * (1.0 / other) 131 | if isinstance(other, float): return self * (1.0 / other) 132 | raise TypeError('Cannot divide/subtract Quaternions with type %s' + str(type(other))) 133 | 134 | def __eq__(self, other): return self.qs == other.qs 135 | def __ne__(self, other): return self.qs != other.qs 136 | 137 | def __neg__(self): 138 | """ Invert Quaternions """ 139 | return Quaternions(self.qs * np.array([[1, -1, -1, -1]])) 140 | 141 | def __abs__(self): 142 | """ Unify Quaternions To Single Pole """ 143 | qabs = self.normalized().copy() 144 | top = np.sum(( qabs.qs) * np.array([1,0,0,0]), axis=-1) 145 | bot = np.sum((-qabs.qs) * np.array([1,0,0,0]), axis=-1) 146 | qabs.qs[top < bot] = -qabs.qs[top < bot] 147 | return qabs 148 | 149 | def __iter__(self): return iter(self.qs) 150 | def __len__(self): return len(self.qs) 151 | 152 | def __getitem__(self, k): return Quaternions(self.qs[k]) 153 | def __setitem__(self, k, v): self.qs[k] = v.qs 154 | 155 | @property 156 | def lengths(self): 157 | return np.sum(self.qs**2.0, axis=-1)**0.5 158 | 159 | @property 160 | def reals(self): 161 | return self.qs[...,0] 162 | 163 | @property 164 | def imaginaries(self): 165 | return self.qs[...,1:4] 166 | 167 | @property 168 | def shape(self): return self.qs.shape[:-1] 169 | 170 | def repeat(self, n, **kwargs): 171 | return Quaternions(self.qs.repeat(n, **kwargs)) 172 | 173 | def normalized(self): 174 | return Quaternions(self.qs / self.lengths[...,np.newaxis]) 175 | 176 | def log(self): 177 | norm = abs(self.normalized()) 178 | imgs = norm.imaginaries 179 | lens = np.sqrt(np.sum(imgs**2, axis=-1)) 180 | lens = np.arctan2(lens, norm.reals) / (lens + 1e-10) 181 | return imgs * lens[...,np.newaxis] 182 | 183 | def constrained(self, axis): 184 | 185 | rl = self.reals 186 | im = np.sum(axis * self.imaginaries, axis=-1) 187 | 188 | t1 = -2 * np.arctan2(rl, im) + np.pi 189 | t2 = -2 * np.arctan2(rl, im) - np.pi 190 | 191 | top = Quaternions.exp(axis[np.newaxis] * (t1[:,np.newaxis] / 2.0)) 192 | bot = Quaternions.exp(axis[np.newaxis] * (t2[:,np.newaxis] / 2.0)) 193 | img = self.dot(top) > self.dot(bot) 194 | 195 | ret = top.copy() 196 | ret[ img] = top[ img] 197 | ret[~img] = bot[~img] 198 | return ret 199 | 200 | def constrained_x(self): return self.constrained(np.array([1,0,0])) 201 | def constrained_y(self): return self.constrained(np.array([0,1,0])) 202 | def constrained_z(self): return self.constrained(np.array([0,0,1])) 203 | 204 | def dot(self, q): return np.sum(self.qs * q.qs, axis=-1) 205 | 206 | def copy(self): return Quaternions(np.copy(self.qs)) 207 | 208 | def reshape(self, s): 209 | self.qs.reshape(s) 210 | return self 211 | 212 | def interpolate(self, ws): 213 | return Quaternions.exp(np.average(abs(self).log, axis=0, weights=ws)) 214 | 215 | def euler(self, order='xyz'): 216 | 217 | q = self.normalized().qs 218 | q0 = q[...,0] 219 | q1 = q[...,1] 220 | q2 = q[...,2] 221 | q3 = q[...,3] 222 | es = np.zeros(self.shape + (3,)) 223 | 224 | if order == 'xyz': 225 | es[...,0] = np.arctan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2)) 226 | es[...,1] = np.arcsin((2 * (q0 * q2 - q3 * q1)).clip(-1,1)) 227 | es[...,2] = np.arctan2(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3)) 228 | elif order == 'yzx': 229 | es[...,0] = np.arctan2(2 * (q1 * q0 - q2 * q3), -q1 * q1 + q2 * q2 - q3 * q3 + q0 * q0) 230 | es[...,1] = np.arctan2(2 * (q2 * q0 - q1 * q3), q1 * q1 - q2 * q2 - 
q3 * q3 + q0 * q0) 231 | es[...,2] = np.arcsin((2 * (q1 * q2 + q3 * q0)).clip(-1,1)) 232 | else: 233 | raise NotImplementedError('Cannot convert from ordering %s' % order) 234 | 235 | """ 236 | 237 | # These conversion don't appear to work correctly for Maya. 238 | # http://bediyap.com/programming/convert-quaternion-to-euler-rotations/ 239 | 240 | if order == 'xyz': 241 | es[fa + (0,)] = np.arctan2(2 * (q0 * q3 - q1 * q2), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3) 242 | es[fa + (1,)] = np.arcsin((2 * (q1 * q3 + q0 * q2)).clip(-1,1)) 243 | es[fa + (2,)] = np.arctan2(2 * (q0 * q1 - q2 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3) 244 | elif order == 'yzx': 245 | es[fa + (0,)] = np.arctan2(2 * (q0 * q1 - q2 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3) 246 | es[fa + (1,)] = np.arcsin((2 * (q1 * q2 + q0 * q3)).clip(-1,1)) 247 | es[fa + (2,)] = np.arctan2(2 * (q0 * q2 - q1 * q3), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3) 248 | elif order == 'zxy': 249 | es[fa + (0,)] = np.arctan2(2 * (q0 * q2 - q1 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3) 250 | es[fa + (1,)] = np.arcsin((2 * (q0 * q1 + q2 * q3)).clip(-1,1)) 251 | es[fa + (2,)] = np.arctan2(2 * (q0 * q3 - q1 * q2), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3) 252 | elif order == 'xzy': 253 | es[fa + (0,)] = np.arctan2(2 * (q0 * q2 + q1 * q3), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3) 254 | es[fa + (1,)] = np.arcsin((2 * (q0 * q3 - q1 * q2)).clip(-1,1)) 255 | es[fa + (2,)] = np.arctan2(2 * (q0 * q1 + q2 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3) 256 | elif order == 'yxz': 257 | es[fa + (0,)] = np.arctan2(2 * (q1 * q2 + q0 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3) 258 | es[fa + (1,)] = np.arcsin((2 * (q0 * q1 - q2 * q3)).clip(-1,1)) 259 | es[fa + (2,)] = np.arctan2(2 * (q1 * q3 + q0 * q2), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3) 260 | elif order == 'zyx': 261 | es[fa + (0,)] = np.arctan2(2 * (q0 * q1 + q2 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3) 262 | es[fa + (1,)] = np.arcsin((2 * (q0 * q2 - q1 * q3)).clip(-1,1)) 263 | es[fa + (2,)] = np.arctan2(2 * (q0 * q3 + q1 * q2), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3) 264 | else: 265 | raise KeyError('Unknown ordering %s' % order) 266 | 267 | """ 268 | 269 | # https://github.com/ehsan/ogre/blob/master/OgreMain/src/OgreMatrix3.cpp 270 | # Use this class and convert from matrix 271 | 272 | return es 273 | 274 | 275 | def average(self): 276 | 277 | if len(self.shape) == 1: 278 | 279 | import numpy.core.umath_tests as ut 280 | system = ut.matrix_multiply(self.qs[:,:,np.newaxis], self.qs[:,np.newaxis,:]).sum(axis=0) 281 | w, v = np.linalg.eigh(system) 282 | qiT_dot_qref = (self.qs[:,:,np.newaxis] * v[np.newaxis,:,:]).sum(axis=1) 283 | return Quaternions(v[:,np.argmin((1.-qiT_dot_qref**2).sum(axis=0))]) 284 | 285 | else: 286 | 287 | raise NotImplementedError('Cannot average multi-dimensionsal Quaternions') 288 | 289 | def angle_axis(self): 290 | 291 | norm = self.normalized() 292 | s = np.sqrt(1 - (norm.reals**2.0)) 293 | s[s == 0] = 0.001 294 | 295 | angles = 2.0 * np.arccos(norm.reals) 296 | axis = norm.imaginaries / s[...,np.newaxis] 297 | 298 | return angles, axis 299 | 300 | 301 | def transforms(self): 302 | 303 | qw = self.qs[...,0] 304 | qx = self.qs[...,1] 305 | qy = self.qs[...,2] 306 | qz = self.qs[...,3] 307 | 308 | x2 = qx + qx; y2 = qy + qy; z2 = qz + qz; 309 | xx = qx * x2; yy = qy * y2; wx = qw * x2; 310 | xy = qx * y2; yz = qy * z2; wy = qw * y2; 311 | xz = qx * z2; zz = qz * z2; wz = qw * z2; 312 | 313 | m = np.empty(self.shape + (3,3)) 314 | m[...,0,0] = 1.0 - (yy + zz) 315 | m[...,0,1] = xy - 
wz 316 | m[...,0,2] = xz + wy 317 | m[...,1,0] = xy + wz 318 | m[...,1,1] = 1.0 - (xx + zz) 319 | m[...,1,2] = yz - wx 320 | m[...,2,0] = xz - wy 321 | m[...,2,1] = yz + wx 322 | m[...,2,2] = 1.0 - (xx + yy) 323 | 324 | return m 325 | 326 | def ravel(self): 327 | return self.qs.ravel() 328 | 329 | @classmethod 330 | def id(cls, n): 331 | 332 | if isinstance(n, tuple): 333 | qs = np.zeros(n + (4,)) 334 | qs[...,0] = 1.0 335 | return Quaternions(qs) 336 | 337 | if isinstance(n, int) or isinstance(n, long): 338 | qs = np.zeros((n,4)) 339 | qs[:,0] = 1.0 340 | return Quaternions(qs) 341 | 342 | raise TypeError('Cannot Construct Quaternion from %s type' % str(type(n))) 343 | 344 | @classmethod 345 | def id_like(cls, a): 346 | qs = np.zeros(a.shape + (4,)) 347 | qs[...,0] = 1.0 348 | return Quaternions(qs) 349 | 350 | @classmethod 351 | def exp(cls, ws): 352 | 353 | ts = np.sum(ws**2.0, axis=-1)**0.5 354 | ts[ts == 0] = 0.001 355 | ls = np.sin(ts) / ts 356 | 357 | qs = np.empty(ws.shape[:-1] + (4,)) 358 | qs[...,0] = np.cos(ts) 359 | qs[...,1] = ws[...,0] * ls 360 | qs[...,2] = ws[...,1] * ls 361 | qs[...,3] = ws[...,2] * ls 362 | 363 | return Quaternions(qs).normalized() 364 | 365 | @classmethod 366 | def slerp(cls, q0s, q1s, a): 367 | 368 | fst, snd = cls._broadcast(q0s.qs, q1s.qs) 369 | fst, a = cls._broadcast(fst, a, scalar=True) 370 | snd, a = cls._broadcast(snd, a, scalar=True) 371 | 372 | len = np.sum(fst * snd, axis=-1) 373 | 374 | neg = len < 0.0 375 | len[neg] = -len[neg] 376 | snd[neg] = -snd[neg] 377 | 378 | amount0 = np.zeros(a.shape) 379 | amount1 = np.zeros(a.shape) 380 | 381 | linear = (1.0 - len) < 0.01 382 | omegas = np.arccos(len[~linear]) 383 | sinoms = np.sin(omegas) 384 | 385 | amount0[ linear] = 1.0 - a[linear] 386 | amount1[ linear] = a[linear] 387 | amount0[~linear] = np.sin((1.0 - a[~linear]) * omegas) / sinoms 388 | amount1[~linear] = np.sin( a[~linear] * omegas) / sinoms 389 | 390 | return Quaternions( 391 | amount0[...,np.newaxis] * fst + 392 | amount1[...,np.newaxis] * snd) 393 | 394 | @classmethod 395 | def between(cls, v0s, v1s): 396 | a = np.cross(v0s, v1s) 397 | w = np.sqrt((v0s**2).sum(axis=-1) * (v1s**2).sum(axis=-1)) + (v0s * v1s).sum(axis=-1) 398 | return Quaternions(np.concatenate([w[...,np.newaxis], a], axis=-1)).normalized() 399 | 400 | @classmethod 401 | def from_angle_axis(cls, angles, axis): 402 | axis = axis / (np.sqrt(np.sum(axis**2, axis=-1)) + 1e-10)[...,np.newaxis] 403 | sines = np.sin(angles / 2.0)[...,np.newaxis] 404 | cosines = np.cos(angles / 2.0)[...,np.newaxis] 405 | return Quaternions(np.concatenate([cosines, axis * sines], axis=-1)) 406 | 407 | @classmethod 408 | def from_euler(cls, es, order='xyz', world=False): 409 | 410 | axis = { 411 | 'x' : np.array([1,0,0]), 412 | 'y' : np.array([0,1,0]), 413 | 'z' : np.array([0,0,1]), 414 | } 415 | 416 | q0s = Quaternions.from_angle_axis(es[...,0], axis[order[0]]) 417 | q1s = Quaternions.from_angle_axis(es[...,1], axis[order[1]]) 418 | q2s = Quaternions.from_angle_axis(es[...,2], axis[order[2]]) 419 | 420 | return (q2s * (q1s * q0s)) if world else (q0s * (q1s * q2s)) 421 | 422 | @classmethod 423 | def from_transforms(cls, ts): 424 | 425 | d0, d1, d2 = ts[...,0,0], ts[...,1,1], ts[...,2,2] 426 | 427 | q0 = ( d0 + d1 + d2 + 1.0) / 4.0 428 | q1 = ( d0 - d1 - d2 + 1.0) / 4.0 429 | q2 = (-d0 + d1 - d2 + 1.0) / 4.0 430 | q3 = (-d0 - d1 + d2 + 1.0) / 4.0 431 | 432 | q0 = np.sqrt(q0.clip(0,None)) 433 | q1 = np.sqrt(q1.clip(0,None)) 434 | q2 = np.sqrt(q2.clip(0,None)) 435 | q3 = np.sqrt(q3.clip(0,None)) 436 
| 437 | c0 = (q0 >= q1) & (q0 >= q2) & (q0 >= q3) 438 | c1 = (q1 >= q0) & (q1 >= q2) & (q1 >= q3) 439 | c2 = (q2 >= q0) & (q2 >= q1) & (q2 >= q3) 440 | c3 = (q3 >= q0) & (q3 >= q1) & (q3 >= q2) 441 | 442 | q1[c0] *= np.sign(ts[c0,2,1] - ts[c0,1,2]) 443 | q2[c0] *= np.sign(ts[c0,0,2] - ts[c0,2,0]) 444 | q3[c0] *= np.sign(ts[c0,1,0] - ts[c0,0,1]) 445 | 446 | q0[c1] *= np.sign(ts[c1,2,1] - ts[c1,1,2]) 447 | q2[c1] *= np.sign(ts[c1,1,0] + ts[c1,0,1]) 448 | q3[c1] *= np.sign(ts[c1,0,2] + ts[c1,2,0]) 449 | 450 | q0[c2] *= np.sign(ts[c2,0,2] - ts[c2,2,0]) 451 | q1[c2] *= np.sign(ts[c2,1,0] + ts[c2,0,1]) 452 | q3[c2] *= np.sign(ts[c2,2,1] + ts[c2,1,2]) 453 | 454 | q0[c3] *= np.sign(ts[c3,1,0] - ts[c3,0,1]) 455 | q1[c3] *= np.sign(ts[c3,2,0] + ts[c3,0,2]) 456 | q2[c3] *= np.sign(ts[c3,2,1] + ts[c3,1,2]) 457 | 458 | qs = np.empty(ts.shape[:-2] + (4,)) 459 | qs[...,0] = q0 460 | qs[...,1] = q1 461 | qs[...,2] = q2 462 | qs[...,3] = q3 463 | 464 | return cls(qs) 465 | 466 | 467 | 468 | -------------------------------------------------------------------------------- /foot_sliding/__init__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | BASEPATH = os.path.dirname(__file__) 4 | sys.path.insert(0, BASEPATH) 5 | 6 | 7 | -------------------------------------------------------------------------------- /foot_sliding/animation_2d_data.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | from os.path import join as pjoin 4 | BASEPATH = os.path.dirname(__file__) 5 | sys.path.insert(0, BASEPATH) 6 | sys.path.insert(0, pjoin(BASEPATH, '..')) 7 | 8 | import numpy as np 9 | import json 10 | from scipy.ndimage import gaussian_filter1d 11 | from style_transfer.probe.anim_view import visualize 12 | 13 | 14 | class AnimationData2D: 15 | def __init__(self, projection): 16 | self.projection = projection # [T, J, 2] 17 | self.style2d = None 18 | 19 | def get_style2d(self): 20 | if self.style2d is None: 21 | root = self.projection[..., :1, :].copy() 22 | relative = self.projection[..., 1:, :].copy() - root 23 | style2d = np.concatenate([relative, root], axis=-2) 24 | style2d = style2d.reshape(style2d.shape[:-2] + (-1,)).swapaxes(-1, -2) 25 | self.style2d = style2d 26 | 27 | return self.style2d 28 | 29 | def get_projection(self): 30 | return self.projection 31 | 32 | @classmethod 33 | def from_style2d(cls, style2d): 34 | style2d = style2d.swapaxes(-1, -2) # [J * 2, T] -> [T, J * 2] 35 | style2d = style2d.reshape(style2d.shape[:-1] + (-1, 2)) # [T, J, 2] 36 | root, relative = style2d[..., -1:, :], style2d[..., :-1, :] 37 | relative = relative + root 38 | projection = np.concatenate([root, relative], axis=-2) 39 | return cls(projection) 40 | 41 | @classmethod 42 | def from_openpose_json(cls, json_dir, scale=0.07, smooth=True): 43 | json_files = sorted(os.listdir(json_dir)) 44 | length = len(json_files) // 4 * 4 45 | json_files = json_files[:length] 46 | json_files = [os.path.join(json_dir, x) for x in json_files] 47 | 48 | motion = [] 49 | joint_map = { 50 | 0: 8, 51 | 1: 12, 2: 13, 3: 14, 4: 19, 52 | 5: 9, 6: 10, 7: 11, 8: 22, 53 | # 9 is somewhere between 0 & 10 54 | 10: 1, 55 | # 11 is somewhere between 10 and 12 56 | 12: 0, 57 | 13: 5, 14: 6, 15: 7, # 16 is a little bit further 58 | 17: 2, 18: 3, 19: 4, # 20 is a little bit further 59 | } 60 | 61 | num_joints = 21 62 | start = False 63 | 64 | for path in json_files: 65 | with open(path) as f: 66 | joint_dict = json.load(f) 67 | if 
len(joint_dict['people']) == 0: 68 | if start: 69 | raw_joint = motion[-1] 70 | motion.append(raw_joint) 71 | else: 72 | continue 73 | start = True 74 | body_joint = np.array(joint_dict['people'][0]['pose_keypoints_2d']).reshape((-1, 3))[:, :2] 75 | lhand_joint = np.array(joint_dict['people'][0]['hand_left_keypoints_2d']).reshape((-1, 3))[:, :2] 76 | rhand_joint = np.array(joint_dict['people'][0]['hand_right_keypoints_2d']).reshape((-1, 3))[:, :2] 77 | raw_joint = np.concatenate([body_joint, lhand_joint, rhand_joint], axis=-2) 78 | if len(motion) > 0: 79 | raw_joint[np.where(raw_joint == 0)] = motion[-1][np.where(raw_joint == 0)] 80 | motion.append(raw_joint) 81 | 82 | for i in range(len(motion) - 1, 0, -1): 83 | motion[i - 1][np.where(motion[i - 1] == 0)] = motion[i][np.where(motion[i - 1] == 0)] 84 | 85 | motion = np.stack(motion, axis=0) 86 | # motion: [T, J, 2] 87 | 88 | trans_motion = np.zeros((motion.shape[0], num_joints, 2)) 89 | for i in range(num_joints): 90 | if i in [9, 11, 12, 16, 20]: 91 | continue 92 | else: 93 | trans_motion[:, i, :] = motion[:, joint_map[i], :] 94 | 95 | trans_motion[:, 12, :] = (motion[:, 15, :] + motion[:, 16, :]) / 2.0 96 | trans_motion[:, 16, :] = motion[:, 35, :] # 25 + 10 97 | trans_motion[:, 20, :] = motion[:, 56, :] # 25 + 21 + 10 98 | 99 | trans_motion[:, 9, :] = (trans_motion[:, 0, :] + trans_motion[:, 10, :]) / 2 100 | trans_motion[:, 11, :] = (trans_motion[:, 10, :] + trans_motion[:, 12, :]) / 2 101 | 102 | motion = trans_motion 103 | motion[:, :, 1] = -motion[:, :, 1] # upside-down 104 | motion[:, :, :] -= motion[0:1, 0:1, :] # start from zero 105 | 106 | if smooth: 107 | motion = gaussian_filter1d(motion, sigma=2, axis=0) 108 | 109 | motion = motion * scale 110 | return cls(motion) 111 | 112 | 113 | def test(): 114 | style2d = np.random.rand(42, 60) 115 | anim = AnimationData2D.from_style2d(style2d) 116 | bla = anim.get_style2d() 117 | 118 | print(np.sum(style2d - bla)) 119 | 120 | bla = {} 121 | for num in [27, 32, 95]: 122 | anim2d = AnimationData2D.from_openpose_json(f'../../data/treadmill/json_inputs/{num}') 123 | bla[str(num)] = {"motion": anim2d.get_projection(), "foot_contact": None} 124 | 125 | visualize(bla) 126 | 127 | 128 | if __name__ == '__main__': 129 | test() 130 | -------------------------------------------------------------------------------- /foot_sliding/animation_data.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | from os.path import join as pjoin 4 | 5 | BASEPATH = os.path.dirname(__file__) 6 | sys.path.insert(0, BASEPATH) 7 | sys.path.insert(0, pjoin(BASEPATH, '..')) 8 | 9 | import argparse 10 | import numpy as np 11 | import scipy.ndimage.filters as filters 12 | from load_skeleton import Skel 13 | from Quaternions_old import Quaternions 14 | from Pivots import Pivots 15 | import BVH 16 | from probe.anim_view import visualize 17 | 18 | 19 | def forward_rotations(skel, rotations, rtpos=None, trim=True): 20 | """ 21 | input: rotations [T, J, 4], rtpos [T, 3] 22 | output: positions [T, J, 3] 23 | """ 24 | transforms = Quaternions(rotations).transforms() # [..., J, 3, 3] 25 | glb = np.zeros(rotations.shape[:-1] + (3,)) # [T, J, 3] 26 | if rtpos is not None: 27 | glb[..., 0, :] = rtpos 28 | for i, pi in enumerate(skel.topology): 29 | if pi == -1: 30 | continue 31 | glb[..., i, :] = np.matmul(transforms[..., pi, :, :], 32 | skel.offset[i]) 33 | glb[..., i, :] += glb[..., pi, :] 34 | transforms[..., i, :, :] = np.matmul(transforms[..., pi, :, :], 35 | 
transforms[..., i, :, :]) 36 | if trim: 37 | glb = glb[..., skel.chosen_joints, :] 38 | return glb 39 | 40 | 41 | def rotate_coordinates(local3d, angles): 42 | """ 43 | Rotate xyz coordinates from given view_angles. 44 | local3d: numpy array. Unit LOCAL xyz vectors 45 | angles: tuple of length 3. Rotation angles around each GLOBAL axis. 46 | """ 47 | cx, cy, cz = np.cos(angles) 48 | sx, sy, sz = np.sin(angles) 49 | 50 | mat33_x = np.array([ 51 | [1, 0, 0], 52 | [0, cx, sx], 53 | [0, -sx, cx] 54 | ], dtype='float') 55 | 56 | mat33_y = np.array([ 57 | [cy, 0, sy], 58 | [0, 1, 0], 59 | [-sy, 0, cy] 60 | ], dtype='float') 61 | 62 | mat33_z = np.array([ 63 | [cz, sz, 0], 64 | [-sz, cz, 0], 65 | [0, 0, 1] 66 | ], dtype='float') 67 | 68 | local3d = local3d @ mat33_x @ mat33_y @ mat33_z 69 | return local3d 70 | 71 | 72 | def get_local3d(local_x, view_angle=None): 73 | """ 74 | Get the unit vectors for local rectangular coordinates for given 3D motion 75 | :param local_x: local x axis, (B *) [*, 0, *] 76 | :return: numpy array. unit vectors for local rectangular coordinates's , shape (3, 3). 77 | """ 78 | local_y = np.zeros_like(local_x) # [(B,) 3] 79 | local_y[..., :] = np.array([0, 1, 0]) 80 | local_z = np.cross(local_x, local_y) 81 | local_z = local_z / np.linalg.norm(local_z, axis=-1, keepdims=True) 82 | 83 | local = np.stack([local_x, local_y, local_z], axis=-2) 84 | 85 | if view_angle is not None: 86 | local = rotate_coordinates(local, view_angle) 87 | 88 | return local 89 | 90 | 91 | def motion_projection(motion, local_x, view_angle=None): 92 | """ 93 | motion: motion in relative joint positions & global root positions 94 | [(B,) T, (J - 1) + 1, 3] 95 | local_x: [(B,) 3], local x-axis 96 | view_angle: [3], the angles to rotate 97 | output: motion_proj [(B,) J * 2, T] 98 | """ 99 | 100 | local = get_local3d(local_x, view_angle) # [(B,) 3, 3] 101 | 102 | T = motion.shape[-1] 103 | # proj on xy-plane 104 | # motion_proj = (local[[0, 1], :] @ motion) this used to be [2, 3] @ [J, 3, T] 105 | # but T doesn't matter here ... 
what we care is the "3", using [T, J, 3, 1] would also be OK 106 | motion = motion[..., np.newaxis] # [(B,) T, J, 3, 1] 107 | motion_proj = local[..., np.newaxis, np.newaxis, [0, 1], :] @ motion # [(B,), 1, 1, 2, 3] @ [(B,), T, J, 3, 1] => [(B,), T, J, 2, 1] 108 | motion_proj = motion_proj.reshape(motion_proj.shape[:-3] + (-1, )) # [(B,) T, -1] 109 | motion_proj = motion_proj.swapaxes(-1, -2) # [(B,) J * 2, T] 110 | 111 | return motion_proj 112 | 113 | 114 | def foot_contact_from_positions(positions, fid_l=(3, 4), fid_r=(7, 8)): 115 | """ 116 | positions: [T, J, 3], trimmed (only "chosen_joints") 117 | fid_l, fid_r: indices of feet joints (in "chosen_joints") 118 | """ 119 | fid_l, fid_r = np.array(fid_l), np.array(fid_r) 120 | velfactor = np.array([0.05, 0.05]) 121 | feet_contact = [] 122 | for fid_index in [fid_l, fid_r]: 123 | foot_vel = (positions[1:, fid_index] - positions[:-1, fid_index]) ** 2 # [T - 1, 2, 3] 124 | foot_vel = np.sum(foot_vel, axis=-1) # [T - 1, 2] 125 | foot_contact = (foot_vel < velfactor).astype(np.float) 126 | feet_contact.append(foot_contact) 127 | feet_contact = np.concatenate(feet_contact, axis=-1) # [T - 1, 4] 128 | feet_contact = np.concatenate((feet_contact[0:1].copy(), feet_contact), axis=0) 129 | 130 | return feet_contact # [T, 4] 131 | 132 | 133 | def phase_from_ft(foot_contact, is_debug=False): 134 | """ 135 | foot_contact: [T, 4] -> take joints 0, 2 as standards 136 | phase = left foot in contact (0) --> right foot in contact (pi) --> left foot in contact (2pi), 137 | in range [0, 2pi) 138 | """ 139 | num_circles = 0 140 | circle_length = 0 141 | total_length = len(foot_contact) 142 | ft = foot_contact[:, [0, 2]].astype(np.int) 143 | ft_start = np.zeros((total_length, 2)) 144 | phases = np.zeros((total_length, 1)) 145 | 146 | """ 147 | calculate the average "half-phase length" 148 | find the first and last "01" pattern 149 | """ 150 | for j in range(2): 151 | for i in range(1, total_length): 152 | ft_start[i, j] = (ft[i - 1, j] == 0 and ft[i, j] == 1) 153 | if is_debug: 154 | print('ft_start,', ft_start) 155 | 156 | last, beg_i = -1, -1 157 | starts = [] 158 | for i in range(total_length): 159 | if ft_start[i, 0] or ft_start[i, 1]: 160 | if last != -1: 161 | num_circles += 1 162 | circle_length += i - last 163 | else: 164 | beg_i = i 165 | last = i 166 | starts.append(i) 167 | 168 | avg_circle = 0 if num_circles == 0 else circle_length * 1.0 / num_circles 169 | if is_debug: 170 | print("%d circles, total length = %d, avg length = %.3lf" % (num_circles, circle_length, avg_circle)) 171 | 172 | if len(starts) == 0: # phase never changed 173 | return phases 174 | 175 | """[0, beg_i - 1]: first incomplete circle""" 176 | prev_pos = min(0, beg_i - avg_circle) 177 | prev_val = 0 if ft_start[beg_i, 1] == 1 else 1 # 0 if next step is on the right 178 | cir_i = 0 179 | next_pos = starts[cir_i] 180 | 181 | for i in range(total_length): 182 | if i == next_pos: 183 | prev_pos = next_pos 184 | prev_val = 1 - prev_val 185 | cir_i += 1 186 | if cir_i >= len(starts): 187 | next_pos = max(total_length + 1, next_pos + avg_circle) 188 | else: 189 | next_pos = starts[cir_i] 190 | phases[i] = prev_val + (i - prev_pos) * 1.0 / (next_pos - prev_pos) 191 | 192 | phases *= np.pi 193 | if is_debug: 194 | print('phases:', phases) 195 | return phases 196 | 197 | 198 | def across_from_glb(positions, hips=(2, 6), sdrs=(14, 18)): 199 | """ 200 | positions: positions [T, J, 3], trimmed (only "chosen_joints") 201 | hips, sdrs: left/right hip joints, left/right shoulder joints 202 | 
output: local x-axis for each frame [T, 3] 203 | """ 204 | across = positions[..., hips[0], :] - positions[..., hips[1], :] + \ 205 | positions[..., sdrs[0], :] - positions[..., sdrs[1], :] # [T, 3] 206 | across = across / np.sqrt((across ** 2).sum(axis=-1))[..., np.newaxis] 207 | 208 | return across 209 | 210 | 211 | def y_rotation_from_positions(positions, hips=(2, 6), sdrs=(14, 18)): 212 | """ 213 | input: positions [T, J, 3] 214 | output: quaters: [T, 1, 4], quaternions that rotate the character around the y-axis to face [0, 0, 1] 215 | pivots: [T, 1] in [0, 2pi], the angle from [0, 0, 1] to the current facing direction 216 | """ 217 | across = across_from_glb(positions, hips=hips, sdrs=sdrs) 218 | direction_filterwidth = 20 219 | forward = np.cross(across, np.array([[0, 1, 0]])) 220 | forward = filters.gaussian_filter1d(forward, direction_filterwidth, axis=0, mode='nearest') 221 | forward = forward / np.sqrt((forward ** 2).sum(axis=-1))[..., np.newaxis] 222 | 223 | target = np.tile(np.array([0, 0, 1]), forward.shape[:-1] + (1, )) 224 | quaters = Quaternions.between(forward, target)[..., np.newaxis, :] # [T, 4] -> [T, 1, 4] 225 | pivots = Pivots.from_quaternions(-quaters).ps # from "target"[0, 0, 1] to current facing direction "forward" 226 | return quaters, pivots 227 | 228 | 229 | class AnimationData: 230 | """ 231 | Canonical Representation: 232 | Skeleton 233 | [T, Jo * 4 + 4 global params + 4 foot_contact] 234 | """ 235 | def __init__(self, full, skel=None, frametime=1/30): 236 | if skel is None: 237 | skel = Skel() 238 | self.skel = skel 239 | self.frametime = frametime 240 | self.len = len(full) 241 | self.rotations = full[:, :-8].reshape(self.len, -1, 4) # [T, Jo, 4] 242 | assert self.rotations.shape[1] == len(self.skel.topology), "Rotations do not match the skeleton." 
243 | self.rotations /= np.sqrt(np.sum(self.rotations ** 2, axis=-1))[..., np.newaxis] 244 | self.rt_pos = full[:, -8:-5] # [T, 3] 245 | self.rt_rot = full[:, -5:-4] # [T, 1] 246 | self.foot_contact = full[:, -4:] # [T, 4] 247 | self.full = np.concatenate([self.rotations.reshape(self.len, -1), self.rt_pos, self.rt_rot, self.foot_contact], axis=-1) 248 | self.phases = None # [T, 1] 249 | self.local_x = None # [3] 250 | self.positions_for_proj = None # [T, (J - 1) + 1, 3], trimmed and not forward facing 251 | self.global_positions = None 252 | 253 | 254 | def get_full(self): 255 | return self.full 256 | 257 | def get_root_positions(self): 258 | return self.rt_pos 259 | 260 | def get_original_rotations(self, rt_rot=None): 261 | if rt_rot is None: 262 | rt_rot = self.rt_rot 263 | yaxis_rotations = Quaternions(np.array(Pivots(rt_rot).quaternions())) 264 | rt_rotations = Quaternions(self.rotations[:, :1]) # [T, 1, 4] 265 | rt_rotations = np.array(yaxis_rotations * rt_rotations) 266 | rt_rotations /= np.sqrt((rt_rotations ** 2).sum(axis=-1))[..., np.newaxis] 267 | return np.concatenate((rt_rotations, self.rotations[:, 1:]), axis=1) # [T, J, 4] 268 | 269 | def get_foot_contact(self, transpose=False): 270 | if transpose: 271 | return self.foot_contact.transpose(1, 0) # [4, T] 272 | else: 273 | return self.foot_contact 274 | 275 | def get_phases(self): 276 | if self.phases is None: 277 | self.phases = phase_from_ft(self.foot_contact) 278 | return self.phases 279 | 280 | def get_local_x(self): 281 | if self.local_x is None: 282 | forward_pivot = np.mean(self.rt_rot, axis=0) # [T, 1] -> [1] 283 | forward_dir = Pivots(forward_pivot).directions() 284 | self.local_x = np.cross(np.array((0, 1, 0)), forward_dir).reshape(-1) 285 | return self.local_x 286 | 287 | def get_content_input(self): 288 | rotations = self.rotations.reshape(self.len, -1) # [T, Jo * 4] 289 | return np.concatenate((rotations, self.rt_pos, self.rt_rot), axis=-1).transpose(1, 0) # [Jo * 4 + 3 + 1, T] 290 | 291 | def get_style3d_input(self): 292 | pos3d = forward_rotations(self.skel, self.rotations, trim=True)[:, 1:] # [T, J - 1, 3] 293 | pos3d = pos3d.reshape((len(pos3d), -1)) # [T, (J - 1) * 3] 294 | return np.concatenate((pos3d, self.rt_pos, self.rt_rot), axis=-1).transpose(1, 0) # [(J - 1) * 3 + 3 + 1, T] 295 | 296 | def get_projections(self, view_angles, scales=None): 297 | if self.positions_for_proj is None: 298 | rotations = self.get_original_rotations() 299 | positions = forward_rotations(self.skel, rotations, trim=True)[:, 1:] # [T, J - 1, 3] 300 | positions = np.concatenate((positions, self.rt_pos[:, np.newaxis, :]), axis=1) # [T, J, 3] 301 | self.positions_for_proj = positions.copy() 302 | else: 303 | positions = self.positions_for_proj.copy() 304 | projections = [] 305 | if scales is None: 306 | scales = np.ones((len(view_angles))) 307 | for angle, scale in zip(view_angles, scales): 308 | projections.append(motion_projection(positions, self.get_local_x(), angle) * scale) 309 | projections = np.stack(projections, axis=-3) # [V, J * 2, T] 310 | return projections 311 | 312 | def get_global_positions(self, trim=True): # for visualization 313 | if not trim: 314 | return forward_rotations(self.skel, self.get_original_rotations(), rtpos=self.rt_pos, trim=False) 315 | if self.global_positions is None: 316 | rotations = self.get_original_rotations() 317 | positions = forward_rotations(self.skel, rotations, rtpos=self.rt_pos, trim=True) 318 | self.global_positions = positions 319 | return self.global_positions 320 | 321 | def 
get_velocity_factor(self): 322 | positions = forward_rotations(self.skel, self.get_original_rotations(), trim=True)[:, 1:] # [T, J - 1, 3] 323 | velocity = positions[1:] - positions[:-1] # [T - 1, J - 1, 3] 324 | velocity = np.sqrt(np.sum(velocity ** 2, axis=-1)) # [T - 1, J - 1] 325 | max_velocity = np.max(velocity, axis=-1) # [T - 1] 326 | velocity_factor = np.mean(max_velocity) 327 | return velocity_factor 328 | 329 | def get_BVH(self, forward=True): 330 | rt_pos = self.rt_pos # [T, 3] 331 | rt_rot = self.rt_rot # [T, 1] 332 | if forward: # choose a direction in [z+, x+, z-, x-], which is closest to "forward", as the new z+ 333 | 334 | directions = np.array(range(4)) * np.pi * 0.5 # [0, 1, 2, 3] * 0.5pi 335 | diff = rt_rot[np.newaxis, :] - directions[:, np.newaxis, np.newaxis] # [1, T, 1] - [4, 1, 1] 336 | diff = np.minimum(np.abs(diff), 2.0 * np.pi - np.abs(diff)) 337 | diff = np.sum(diff, axis=(-1, -2)) # [4, T, 1] -> [4] 338 | 339 | new_forward = np.argmin(diff) 340 | rt_rot -= new_forward * np.pi * 0.5 341 | 342 | for d in range(new_forward): 343 | tmp = rt_pos[..., 0].copy() 344 | rt_pos[..., 0] = -rt_pos[..., 2].copy() 345 | rt_pos[..., 2] = tmp 346 | 347 | rotations = self.get_original_rotations(rt_rot=rt_rot) 348 | 349 | rest, names, _ = self.skel.rest_bvh 350 | anim = rest.copy() 351 | anim.positions = anim.positions.repeat(self.len, axis=0) 352 | anim.positions[:, 0, :] = rt_pos 353 | anim.rotations.qs = rotations 354 | 355 | return (anim, names, self.frametime) 356 | 357 | @classmethod 358 | def from_network_output(cls, input): 359 | input = input.transpose(1, 0) 360 | input = np.concatenate((input, np.zeros((len(input), 4))), axis=-1) 361 | return cls(input) 362 | 363 | @classmethod 364 | def from_rotations_and_root_positions(cls, rotations, root_positions, skel=None, frametime=1/30): 365 | """ 366 | rotations: [T, J, 4] 367 | root_positions: [T, 3] 368 | """ 369 | if skel is None: 370 | skel = Skel() 371 | 372 | rotations /= np.sqrt(np.sum(rotations ** 2, axis=-1))[..., np.newaxis] 373 | global_positions = forward_rotations(skel, rotations, root_positions, trim=True) 374 | foot_contact = foot_contact_from_positions(global_positions, fid_l=skel.fid_l, fid_r=skel.fid_r) 375 | quaters, pivots = y_rotation_from_positions(global_positions, hips=skel.hips, sdrs=skel.sdrs) 376 | 377 | root_rotations = Quaternions(rotations[:, 0:1, :].copy()) # [T, 1, 4] 378 | root_rotations = quaters * root_rotations # facing [0, 0, 1] 379 | root_rotations = np.array(root_rotations).reshape((-1, 1, 4)) # [T, 1, 4] 380 | rotations[:, 0:1, :] = root_rotations 381 | 382 | full = np.concatenate([rotations.reshape((len(rotations), -1)), root_positions, pivots, foot_contact], axis=-1) 383 | return cls(full, skel, frametime) 384 | 385 | @classmethod 386 | def from_BVH(cls, filename, downsample=4, skel=None, trim_scale=None): 387 | anim, names, frametime = BVH.load(filename) 388 | anim = anim[::downsample] 389 | if trim_scale is not None: 390 | length = (len(anim) // trim_scale) * trim_scale 391 | anim = anim[:length] 392 | rotations = np.array(anim.rotations) # [T, J, 4] 393 | root_positions = anim.positions[:, 0, :] 394 | return cls.from_rotations_and_root_positions(rotations, root_positions, skel=skel, frametime=frametime * downsample) 395 | 396 | 397 | def parse_args(): 398 | parser = argparse.ArgumentParser("test") 399 | parser.add_argument('--bvh_in', type=str, default=None) 400 | parser.add_argument('--dataset', type=str, default=None) 401 | 402 | return parser.parse_args() 403 | 404 | 405 | def 
test_all(args): 406 | 407 | def mse(a, b): 408 | return np.sum((a - b) ** 2) 409 | 410 | def test_phase_from_ft(): 411 | pace = np.zeros((100, 1), dtype=np.int) 412 | pace[::8] = 1 413 | left = pace[:-4] 414 | right = pace[4:] 415 | phase_from_ft(np.concatenate([left, left, right, right], axis=-1), is_debug=True) 416 | 417 | def BVH_and_back(filename): 418 | anim, names, frametime = BVH.load(filename) 419 | anim = anim[::4] 420 | rotations = np.array(anim.rotations) # [T, J, 4] 421 | root_positions = anim.positions[:, 0, :] 422 | 423 | anim_a = AnimationData.from_BVH(filename) 424 | rotations = rotations / np.sqrt(np.sum(rotations ** 2, axis=-1))[..., np.newaxis] 425 | print(f'rotations: {mse(anim_a.get_original_rotations(), rotations)}') 426 | print(f'root_positions: {mse(anim_a.get_root_positions(), root_positions)}') 427 | 428 | content_input = anim_a.get_content_input() 429 | style3d_input = anim_a.get_style3d_input() 430 | view_points = () 431 | for i in range(7): 432 | view_points += ((0, -np.pi / 2 + i * np.pi / 6, 0), ) 433 | view_points = () 434 | scales = () 435 | for i in range(4): 436 | view_points += ((0, -np.pi / 2 + float(np.random.rand(1)) * np.pi, 0), ) 437 | scales += (float(np.random.rand(1)) * 0.4 + 0.8, ) 438 | style2d_input = anim_a.get_projections(view_points, scales) 439 | 440 | print(f'content {content_input.shape}, style3d {style3d_input.shape}, style2d {style2d_input.shape}') 441 | 442 | foot_contact = anim_a.get_foot_contact() 443 | T = content_input.shape[-1] 444 | inplace_no_rot = style3d_input.transpose(1, 0)[:, :-4].reshape(T, -1, 3) 445 | inplace_no_rot = np.concatenate((np.zeros((T, 1, 3)), inplace_no_rot), axis=1) 446 | inplace = anim_a.positions_for_proj[:, :-1, :] 447 | inplace = np.concatenate((np.zeros((T, 1, 3)), inplace), axis=1) 448 | original = anim_a.get_global_positions() 449 | print(f'inplace no rot {inplace_no_rot.shape}, inplace {inplace.shape}, original {original.shape}') 450 | 451 | """ 452 | visualize({ 453 | "inplace_no_rot": {"motion": inplace_no_rot, "foot_contact": foot_contact}, 454 | "inplace": {"motion": inplace, "foot_contact": foot_contact}, 455 | "original": {"motion": original, "foot_contact": foot_contact}, 456 | }) 457 | """ 458 | 459 | motion_proj = {} 460 | for (view_point, scale, proj) in zip(view_points, scales, style2d_input): # [V, J * 2, T] 461 | proj = proj.copy().transpose(1, 0).reshape(T, -1, 2) # [T, J, 2] 462 | proj = np.concatenate([proj[:, -1:], proj[:, :-1]], axis=1) 463 | ori_proj = np.concatenate([proj[:, :1], proj[:, 1:] + proj[:, :1].copy()], axis=1) 464 | proj[:, :1] = 0 465 | motion_proj[f'angle: {(view_point[1] / np.pi * 180):3f} scale: {scale:3f}'] = {"motion": ori_proj, "foot_contact": foot_contact} 466 | """ 467 | visualize({ 468 | "inplace_proj": {"motion": proj, "foot_contact": foot_contact}, 469 | "original_proj": {"motion": ori_proj, "foot_contact": foot_contact} 470 | }) 471 | """ 472 | visualize(motion_proj) 473 | 474 | BVH.save("bla.bvh", *anim_a.get_BVH()) 475 | 476 | def check_velocity(dataset): 477 | skel = Skel() 478 | motions, labels, metas = dataset["motion"], dataset["style"], dataset["meta"] 479 | style_names = list(set(metas["style"])) 480 | content_names = list(set(metas["content"])) 481 | info = {content: {style: [] for style in style_names} for content in content_names} 482 | for i, motion in enumerate(motions): 483 | anim = AnimationData(motion, skel=skel) 484 | vel = anim.get_velocity_factor() 485 | info[metas["content"][i]][metas["style"][i]].append(vel) 486 | 487 | for content in 
info: 488 | all = [] 489 | for style in info[content]: 490 | all += info[content][style] 491 | info[content][style] = np.mean(info[content][style]) 492 | info[content]["all"] = np.mean(all) 493 | 494 | with open("probe_velocity.csv", "w") as f: 495 | columns = ['all'] + style_names 496 | f.write(',' + ','.join(columns) + '\n') 497 | for content in info: 498 | values = [f'{info[content][key]}' for key in columns] 499 | f.write(','.join([content] + values) + '\n') 500 | 501 | dataset = np.load(args.dataset, allow_pickle=True)["trainfull"].item() 502 | check_velocity(dataset) 503 | 504 | # BVH_and_back(args.bvh_in) 505 | 506 | 507 | if __name__ == '__main__': 508 | args = parse_args() 509 | test_all(args) 510 | 511 | -------------------------------------------------------------------------------- /foot_sliding/example.bvh: -------------------------------------------------------------------------------- 1 | HIERARCHY 2 | ROOT Hips 3 | { 4 | OFFSET -42.198200 91.614723 -40.067841 5 | CHANNELS 6 Xposition Yposition Zposition Zrotation Yrotation Xrotation 6 | JOINT LeftUpLeg 7 | { 8 | OFFSET 0.103456 1.857829 10.548506 9 | CHANNELS 3 Zrotation Yrotation Xrotation 10 | JOINT LeftLeg 11 | { 12 | OFFSET 43.499992 -0.000038 -0.000002 13 | CHANNELS 3 Zrotation Yrotation Xrotation 14 | JOINT LeftFoot 15 | { 16 | OFFSET 42.372192 0.000015 -0.000007 17 | CHANNELS 3 Zrotation Yrotation Xrotation 18 | JOINT LeftToe 19 | { 20 | OFFSET 17.299999 -0.000002 0.000003 21 | CHANNELS 3 Zrotation Yrotation Xrotation 22 | End Site 23 | { 24 | OFFSET 0.000000 0.000000 0.000000 25 | } 26 | } 27 | } 28 | } 29 | } 30 | JOINT RightUpLeg 31 | { 32 | OFFSET 0.103457 1.857829 -10.548503 33 | CHANNELS 3 Zrotation Yrotation Xrotation 34 | JOINT RightLeg 35 | { 36 | OFFSET 43.500042 -0.000027 0.000008 37 | CHANNELS 3 Zrotation Yrotation Xrotation 38 | JOINT RightFoot 39 | { 40 | OFFSET 42.372257 -0.000008 0.000014 41 | CHANNELS 3 Zrotation Yrotation Xrotation 42 | JOINT RightToe 43 | { 44 | OFFSET 17.299992 -0.000005 0.000004 45 | CHANNELS 3 Zrotation Yrotation Xrotation 46 | End Site 47 | { 48 | OFFSET 0.000000 0.000000 0.000000 49 | } 50 | } 51 | } 52 | } 53 | } 54 | JOINT Spine 55 | { 56 | OFFSET 6.901968 -2.603733 -0.000001 57 | CHANNELS 3 Zrotation Yrotation Xrotation 58 | JOINT Spine1 59 | { 60 | OFFSET 12.588099 0.000002 0.000000 61 | CHANNELS 3 Zrotation Yrotation Xrotation 62 | JOINT Spine2 63 | { 64 | OFFSET 12.343206 0.000000 -0.000001 65 | CHANNELS 3 Zrotation Yrotation Xrotation 66 | JOINT Neck 67 | { 68 | OFFSET 25.832886 -0.000004 0.000003 69 | CHANNELS 3 Zrotation Yrotation Xrotation 70 | JOINT Head 71 | { 72 | OFFSET 11.766620 0.000005 -0.000001 73 | CHANNELS 3 Zrotation Yrotation Xrotation 74 | End Site 75 | { 76 | OFFSET 0.000000 0.000000 0.000000 77 | } 78 | } 79 | } 80 | JOINT LeftShoulder 81 | { 82 | OFFSET 19.745899 -1.480370 6.000108 83 | CHANNELS 3 Zrotation Yrotation Xrotation 84 | JOINT LeftArm 85 | { 86 | OFFSET 11.284125 -0.000009 -0.000018 87 | CHANNELS 3 Zrotation Yrotation Xrotation 88 | JOINT LeftForeArm 89 | { 90 | OFFSET 33.000050 0.000004 0.000032 91 | CHANNELS 3 Zrotation Yrotation Xrotation 92 | JOINT LeftHand 93 | { 94 | OFFSET 25.200008 0.000015 0.000008 95 | CHANNELS 3 Zrotation Yrotation Xrotation 96 | End Site 97 | { 98 | OFFSET 0.000000 0.000000 0.000000 99 | } 100 | } 101 | } 102 | } 103 | } 104 | JOINT RightShoulder 105 | { 106 | OFFSET 19.746099 -1.480375 -6.000073 107 | CHANNELS 3 Zrotation Yrotation Xrotation 108 | JOINT RightArm 109 | { 110 | OFFSET 11.284138 -0.000015 
-0.000012 111 | CHANNELS 3 Zrotation Yrotation Xrotation 112 | JOINT RightForeArm 113 | { 114 | OFFSET 33.000092 0.000017 0.000013 115 | CHANNELS 3 Zrotation Yrotation Xrotation 116 | JOINT RightHand 117 | { 118 | OFFSET 25.199780 0.000135 0.000422 119 | CHANNELS 3 Zrotation Yrotation Xrotation 120 | End Site 121 | { 122 | OFFSET 0.000000 0.000000 0.000000 123 | } 124 | } 125 | } 126 | } 127 | } 128 | } 129 | } 130 | } 131 | } 132 | MOTION 133 | Frames: 1 134 | Frame Time: 0.033333 135 | -42.198200 91.614723 -40.067841 90.170347 -2.086049 85.392022 170.856526 -0.363297 168.197903 -22.836420 2.021979 -6.742188 84.710356 4.013207 -2.880859 21.454550 0.003082 -0.000002 171.034972 5.992200 -169.886585 -21.319295 -4.166972 3.013343 84.298919 -0.023303 9.810145 21.454558 -0.003123 -0.000020 5.766118 -0.153155 0.061496 1.507459 -0.304251 0.122335 1.189346 -0.303351 0.124202 10.441163 0.222989 -1.509924 -24.817228 -4.718812 0.514271 -75.852727 -83.355745 -106.532148 7.686574 24.469569 -21.835148 -58.556953 -27.048987 2.893453 4.518584 -14.680798 10.156247 -112.849137 88.407257 68.962132 7.963207 -23.947900 30.814760 -57.416913 14.705768 -2.028131 -4.092285 14.401766 -17.998197 136 | -------------------------------------------------------------------------------- /foot_sliding/example.txt: -------------------------------------------------------------------------------- 1 | HIERARCHY 2 | ROOT Hips #0 3 | { 4 | OFFSET -42.198200 91.614723 -40.067841 5 | CHANNELS 6 Xposition Yposition Zposition Zrotation Yrotation Xrotation 6 | JOINT LeftUpLeg #1 7 | { 8 | OFFSET 0.103456 1.857829 10.548506 9 | CHANNELS 3 Zrotation Yrotation Xrotation 10 | JOINT LeftLeg #2 11 | { 12 | OFFSET 43.499992 -0.000038 -0.000002 13 | CHANNELS 3 Zrotation Yrotation Xrotation 14 | JOINT LeftFoot #3 15 | { 16 | OFFSET 42.372192 0.000015 -0.000007 17 | CHANNELS 3 Zrotation Yrotation Xrotation 18 | JOINT LeftToe #4 19 | { 20 | OFFSET 17.299999 -0.000002 0.000003 21 | CHANNELS 3 Zrotation Yrotation Xrotation 22 | End Site 23 | { 24 | OFFSET 0.000000 0.000000 0.000000 25 | } 26 | } 27 | } 28 | } 29 | } 30 | JOINT RightUpLeg #5 31 | { 32 | OFFSET 0.103457 1.857829 -10.548503 33 | CHANNELS 3 Zrotation Yrotation Xrotation 34 | JOINT RightLeg #6 35 | { 36 | OFFSET 43.500042 -0.000027 0.000008 37 | CHANNELS 3 Zrotation Yrotation Xrotation 38 | JOINT RightFoot #7 39 | { 40 | OFFSET 42.372257 -0.000008 0.000014 41 | CHANNELS 3 Zrotation Yrotation Xrotation 42 | JOINT RightToe #8 43 | { 44 | OFFSET 17.299992 -0.000005 0.000004 45 | CHANNELS 3 Zrotation Yrotation Xrotation 46 | End Site 47 | { 48 | OFFSET 0.000000 0.000000 0.000000 49 | } 50 | } 51 | } 52 | } 53 | } 54 | JOINT Spine #9 55 | { 56 | OFFSET 6.901968 -2.603733 -0.000001 57 | CHANNELS 3 Zrotation Yrotation Xrotation 58 | JOINT Spine1 #10 59 | { 60 | OFFSET 12.588099 0.000002 0.000000 61 | CHANNELS 3 Zrotation Yrotation Xrotation 62 | JOINT Spine2 #11 63 | { 64 | OFFSET 12.343206 0.000000 -0.000001 65 | CHANNELS 3 Zrotation Yrotation Xrotation 66 | JOINT Neck #12 67 | { 68 | OFFSET 25.832886 -0.000004 0.000003 69 | CHANNELS 3 Zrotation Yrotation Xrotation 70 | JOINT Head #13 71 | { 72 | OFFSET 11.766620 0.000005 -0.000001 73 | CHANNELS 3 Zrotation Yrotation Xrotation 74 | End Site 75 | { 76 | OFFSET 0.000000 0.000000 0.000000 77 | } 78 | } 79 | } 80 | JOINT LeftShoulder #14 81 | { 82 | OFFSET 19.745899 -1.480370 6.000108 83 | CHANNELS 3 Zrotation Yrotation Xrotation 84 | JOINT LeftArm #15 85 | { 86 | OFFSET 11.284125 -0.000009 -0.000018 87 | CHANNELS 3 Zrotation Yrotation 
Xrotation 88 | JOINT LeftForeArm #16 89 | { 90 | OFFSET 33.000050 0.000004 0.000032 91 | CHANNELS 3 Zrotation Yrotation Xrotation 92 | JOINT LeftHand #17 93 | { 94 | OFFSET 25.200008 0.000015 0.000008 95 | CHANNELS 3 Zrotation Yrotation Xrotation 96 | End Site 97 | { 98 | OFFSET 0.000000 0.000000 0.000000 99 | } 100 | } 101 | } 102 | } 103 | } 104 | JOINT RightShoulder #18 105 | { 106 | OFFSET 19.746099 -1.480375 -6.000073 107 | CHANNELS 3 Zrotation Yrotation Xrotation 108 | JOINT RightArm #19 109 | { 110 | OFFSET 11.284138 -0.000015 -0.000012 111 | CHANNELS 3 Zrotation Yrotation Xrotation 112 | JOINT RightForeArm #20 113 | { 114 | OFFSET 33.000092 0.000017 0.000013 115 | CHANNELS 3 Zrotation Yrotation Xrotation 116 | JOINT RightHand #21 117 | { 118 | OFFSET 25.199780 0.000135 0.000422 119 | CHANNELS 3 Zrotation Yrotation Xrotation 120 | End Site 121 | { 122 | OFFSET 0.000000 0.000000 0.000000 123 | } 124 | } 125 | } 126 | } 127 | } 128 | } 129 | } 130 | } 131 | } 132 | MOTION 133 | Frames: 1 134 | Frame Time: 0.033333 135 | -42.198200 91.614723 -40.067841 90.170347 -2.086049 85.392022 170.856526 -0.363297 168.197903 -22.836420 2.021979 -6.742188 84.710356 4.013207 -2.880859 21.454550 0.003082 -0.000002 171.034972 5.992200 -169.886585 -21.319295 -4.166972 3.013343 84.298919 -0.023303 9.810145 21.454558 -0.003123 -0.000020 5.766118 -0.153155 0.061496 1.507459 -0.304251 0.122335 1.189346 -0.303351 0.124202 10.441163 0.222989 -1.509924 -24.817228 -4.718812 0.514271 -75.852727 -83.355745 -106.532148 7.686574 24.469569 -21.835148 -58.556953 -27.048987 2.893453 4.518584 -14.680798 10.156247 -112.849137 88.407257 68.962132 7.963207 -23.947900 30.814760 -57.416913 14.705768 -2.028131 -4.092285 14.401766 -17.998197 136 | -------------------------------------------------------------------------------- /foot_sliding/load_skeleton.py: -------------------------------------------------------------------------------- 1 | import yaml 2 | import numpy as np 3 | import os 4 | import sys 5 | from os.path import join as pjoin 6 | BASEPATH = os.path.dirname(os.path.abspath(__file__)) 7 | sys.path.insert(0, BASEPATH) 8 | import BVH as BVH 9 | 10 | 11 | class Skel: 12 | def __init__(self, filename=os.path.join(BASEPATH, "skeleton_lafan.yml")): 13 | f = open(filename, "r") 14 | skel = yaml.load(f, Loader=yaml.Loader) 15 | self.bvh_name = os.path.join(os.path.dirname(filename), skel['BVH']) 16 | self.rest_bvh = BVH.load(self.bvh_name) 17 | self.offset = np.array(skel['offsets']) 18 | self.topology = np.array(skel['parents']) 19 | self.chosen_joints = np.array(skel['chosen_joints']) 20 | self.chosen_parents = np.array(skel['chosen_parents']) 21 | self.fid_l, self.fid_r = skel['left_foot'], skel['right_foot'] 22 | self.hips, self.sdrs = skel['hips'], skel['shoulders'] 23 | self.head = skel['head'] 24 | self.visualization = skel['visualization'] 25 | 26 | 27 | if __name__ == '__main__': 28 | skel = Skel() 29 | print(skel.topology) 30 | print(skel.offset) 31 | print(skel.rest_bvh[0].offsets) 32 | print(skel.chosen_joints) 33 | print(skel.chosen_parents) 34 | 35 | -------------------------------------------------------------------------------- /foot_sliding/skeleton_lafan.yml: -------------------------------------------------------------------------------- 1 | BVH: example.bvh 2 | offsets: 3 | [ 4 | [-42.198200,91.614723,-40.067841], 5 | [ 0.103456,1.857829,10.548506], 6 | [43.499992,-0.000038,-0.000002], 7 | [42.372192,0.000015,-0.000007], 8 | [ 17.299999,-0.000002,0.000003], 9 | 10 | 11 | 
[0.103457,1.857829,-10.548503], 12 | [43.500042,-0.000027,0.000008], 13 | [42.372257,-0.000008,0.000014], 14 | [17.299992,-0.000005,0.000004], 15 | 16 | 17 | [6.901968,-2.603733,-0.000001], 18 | [12.588099,0.000002,0.000000], 19 | [12.343206,0.000000,-0.000001], 20 | [25.832886,-0.000004,0.000003], 21 | [11.766620,0.000005,-0.000001], 22 | 23 | 24 | [19.745899,-1.480370,6.000108], 25 | [11.284125,-0.000009,-0.000018], 26 | [33.000050,0.000004,0.000032], 27 | [25.200008,0.000015,0.000008], 28 | 29 | 30 | [19.746099,-1.480375,-6.000073], 31 | [11.284138,-0.000015,-0.000012], 32 | [33.000092,0.000017,0.000013], 33 | [25.199780,0.000135,0.000422]] 34 | 35 | parents: 36 | [-1, 0, 1, 2, 3, 0, 5, 6, 7, 0, 9, 10, 11, 12, 11, 14, 15, 16, 11, 18, 19, 20] 37 | 38 | chosen_joints: 39 | [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] 40 | 41 | chosen_parents: 42 | [-1, 0, 1, 2, 3, 0, 5, 6, 7, 0, 9, 10, 11, 12, 11, 14, 15, 16, 11, 18, 19, 20] 43 | 44 | left_foot: [3, 4] 45 | right_foot: [7, 8] 46 | hips: [1, 5] 47 | shoulders: [14, 18] 48 | head: 13 49 | 50 | visualization: 51 | joint_sizes: [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 7, 3, 3, 3, 3, 3, 3, 3, 3] 52 | -------------------------------------------------------------------------------- /functions.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | import torch.nn as nn 4 | from quaternion import qeuler_np 5 | from remove_fs import remove_fs 6 | 7 | def PLU(x, alpha = 0.1, c = 1.0): 8 | relu = nn.ReLU() 9 | o1 = alpha * (x + c) - c 10 | o2 = alpha * (x - c) + c 11 | o3 = x - relu(x - o2) 12 | o4 = relu(o1 - o3) + o3 13 | return o4 14 | 15 | def gen_ztta(dim = 256, length = 50): 16 | ### currently without T_max ### 17 | ztta = np.zeros((1, length, dim)) 18 | for t in range(length): 19 | for d in range(dim): 20 | if d % 2 == 0: 21 | ztta[:, t, d] = np.sin(1.0 * (length - t) / 10000 ** (d / dim)) 22 | else: 23 | ztta[:, t, d] = np.cos(1.0 * (length - t) / 10000 ** (d / dim)) 24 | return torch.from_numpy(ztta.astype(np.float)) 25 | 26 | def gen_ztar(sigma = 1.0, length = 50): 27 | ### currently noise term in not inroduced ### 28 | lambda_tar = [] 29 | for t in range(length): 30 | if t < 5: 31 | lambda_tar.append(0) 32 | elif t < 30 and t >= 5: 33 | lambda_tar.append((t - 5.0) / 25.0) 34 | else: 35 | lambda_tar.append(1) 36 | lambda_tar = np.array(lambda_tar) 37 | return torch.from_numpy(lambda_tar) 38 | 39 | def write_to_bvhfile(data, filename, joints_to_remove): 40 | fout = open(filename, 'w') 41 | line_cnt = 0 42 | for line in open('./example.bvh', 'r'): 43 | fout.write(line) 44 | line_cnt += 1 45 | if line_cnt >= 132: 46 | break 47 | fout.write(('Frames: %d\n' % data.shape[0])) 48 | fout.write('Frame Time: 0.033333\n') 49 | pose_data = qeuler_np(data[:,3:].reshape(data.shape[0], -1, 4), order='zyx', use_gpu=False) 50 | # pose_data = np.concatenate([pose_data[:,:5], np.zeros_like(pose_data[:,0:1]),\ 51 | # pose_data[:,5:9], np.zeros_like(pose_data[:,0:1]),\ 52 | # pose_data[:,9:14], np.zeros_like(pose_data[:,0:1]),\ 53 | # pose_data[:,14:18], np.zeros_like(pose_data[:,0:1]),\ 54 | # pose_data[:,18:22], np.zeros_like(pose_data[:,0:1])], 1) 55 | pose_data = pose_data / np.pi * 180.0 56 | for t in range(data.shape[0]): 57 | line = '%f %f %f ' % (data[t, 0], data[t, 1], data[t, 2]) 58 | for d in range(pose_data.shape[1] - 1): 59 | line += '%f %f %f ' % (pose_data[t, d, 2], pose_data[t, d, 1], pose_data[t, d, 0]) 60 | line += '%f %f %f\n' % 
(pose_data[t, -1, 2], pose_data[t, -1, 1], pose_data[t, -1, 0]) 61 | fout.write(line) 62 | fout.close() 63 | 64 | 65 | -------------------------------------------------------------------------------- /model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | import torch.nn as nn 4 | from functions import PLU 5 | 6 | 7 | class StateEncoder(nn.Module): 8 | def __init__(self, in_dim = 128, hidden_dim = 512, out_dim = 256): 9 | super(StateEncoder, self).__init__() 10 | self.in_dim = in_dim 11 | self.hidden_dim = hidden_dim 12 | self.out_dim = out_dim 13 | 14 | self.fc0 = nn.Linear(in_dim, hidden_dim, bias=True) 15 | self.fc1 = nn.Linear(hidden_dim, out_dim, bias=True) 16 | 17 | def forward(self, x): 18 | x = self.fc0(x) 19 | x = PLU(x) 20 | x = self.fc1(x) 21 | x = PLU(x) 22 | return x 23 | 24 | class OffsetEncoder(nn.Module): 25 | def __init__(self, in_dim = 128, hidden_dim = 512, out_dim = 256): 26 | super(OffsetEncoder, self).__init__() 27 | self.in_dim = in_dim 28 | self.hidden_dim = hidden_dim 29 | self.out_dim = out_dim 30 | 31 | self.fc0 = nn.Linear(in_dim, hidden_dim, bias=True) 32 | self.fc1 = nn.Linear(hidden_dim, out_dim, bias=True) 33 | 34 | def forward(self, x): 35 | x = self.fc0(x) 36 | x = PLU(x) 37 | x = self.fc1(x) 38 | x = PLU(x) 39 | return x 40 | 41 | class TargetEncoder(nn.Module): 42 | def __init__(self, in_dim = 128, hidden_dim = 512, out_dim = 256): 43 | super(TargetEncoder, self).__init__() 44 | self.in_dim = in_dim 45 | self.hidden_dim = hidden_dim 46 | self.out_dim = out_dim 47 | 48 | self.fc0 = nn.Linear(in_dim, hidden_dim, bias=True) 49 | self.fc1 = nn.Linear(hidden_dim, out_dim, bias=True) 50 | 51 | def forward(self, x): 52 | x = self.fc0(x) 53 | x = PLU(x) 54 | x = self.fc1(x) 55 | x = PLU(x) 56 | return x 57 | 58 | class LSTM(nn.Module): 59 | def __init__(self, in_dim = 128, hidden_dim = 768, num_layer = 1): 60 | super(LSTM, self).__init__() 61 | self.in_dim = in_dim 62 | self.hidden_dim = hidden_dim 63 | self.num_layer = num_layer 64 | self.rnn = nn.LSTM(self.in_dim, self.hidden_dim, self.num_layer) 65 | 66 | def init_hidden(self, batch_size): 67 | self.h = torch.zeros((self.num_layer, batch_size, self.hidden_dim)).cuda() 68 | self.c = torch.zeros((self.num_layer, batch_size, self.hidden_dim)).cuda() 69 | 70 | def forward(self, x): 71 | x, (self.h, self.c) = self.rnn(x, (self.h, self.c)) 72 | return x 73 | 74 | 75 | class Decoder(nn.Module): 76 | def __init__(self, in_dim = 128, hidden_dim = 512, out_dim = 256): 77 | super(Decoder, self).__init__() 78 | self.in_dim = in_dim 79 | self.hidden_dim = hidden_dim 80 | self.out_dim = out_dim 81 | 82 | self.fc0 = nn.Linear(in_dim, hidden_dim, bias=True) 83 | self.fc1 = nn.Linear(hidden_dim, hidden_dim // 2, bias=True) 84 | self.fc2 = nn.Linear(hidden_dim // 2, out_dim - 4, bias=True) 85 | self.fc_conct = nn.Linear(hidden_dim // 2, 4, bias=True) 86 | self.ac_sig = nn.Sigmoid() 87 | 88 | def forward(self, x): 89 | x = self.fc0(x) 90 | x = PLU(x) 91 | x = self.fc1(x) 92 | x = PLU(x) 93 | o1 = self.fc2(x) 94 | o2 = self.ac_sig(self.fc_conct(x)) 95 | return o1, o2 96 | 97 | class ShortMotionDiscriminator(nn.Module): 98 | def __init__(self, length = 3, in_dim = 128, hidden_dim = 512, out_dim = 1): 99 | super(ShortMotionDiscriminator, self).__init__() 100 | self.in_dim = in_dim 101 | self.hidden_dim = hidden_dim 102 | self.out_dim = out_dim 103 | self.length = length 104 | 105 | self.fc0 = nn.Conv1d(in_dim, hidden_dim, kernel_size = self.length, 
bias=True) 106 | self.fc1 = nn.Conv1d(hidden_dim, hidden_dim // 2, kernel_size = 1, bias=True) 107 | self.fc2 = nn.Conv1d(hidden_dim // 2, out_dim, kernel_size = 1, bias=True) 108 | 109 | def forward(self, x): 110 | x = self.fc0(x) 111 | x = PLU(x) 112 | x = self.fc1(x) 113 | x = PLU(x) 114 | x = self.fc2(x) 115 | return x 116 | 117 | class LongMotionDiscriminator(nn.Module): 118 | def __init__(self, length = 10, in_dim = 128, hidden_dim = 512, out_dim = 1): 119 | super(LongMotionDiscriminator, self).__init__() 120 | self.in_dim = in_dim 121 | self.hidden_dim = hidden_dim 122 | self.out_dim = out_dim 123 | self.length = length 124 | 125 | self.fc0 = nn.Conv1d(in_dim, hidden_dim, kernel_size = self.length, bias=True) 126 | self.fc1 = nn.Conv1d(hidden_dim, hidden_dim // 2, kernel_size = 1, bias=True) 127 | self.fc2 = nn.Conv1d(hidden_dim // 2, out_dim, kernel_size = 1, bias=True) 128 | 129 | def forward(self, x): 130 | x = self.fc0(x) 131 | x = PLU(x) 132 | x = self.fc1(x) 133 | x = PLU(x) 134 | x = self.fc2(x) 135 | return x 136 | 137 | if __name__=="__main__": 138 | state_encoder = StateEncoder() 139 | x = torch.zeros((32, 128)) 140 | print(state_encoder(x).size()) 141 | 142 | offset_encoder = OffsetEncoder() 143 | x = torch.zeros((32, 128)) 144 | print(offset_encoder(x).size()) 145 | 146 | target_encoder = TargetEncoder() 147 | x = torch.zeros((32, 128)) 148 | print(target_encoder(x).size()) 149 | 150 | lstm = LSTM(32) 151 | x = torch.zeros((10, 32, 128)) 152 | print(lstm(x).size()) 153 | 154 | decoder = Decoder() 155 | x = torch.zeros((32, 128)) 156 | print(decoder(x)[0].size()) 157 | 158 | short_dis = ShortMotionDiscriminator() 159 | x = torch.zeros((32, 128, 50)) 160 | print(short_dis(x).size()) 161 | 162 | long_dis = LongMotionDiscriminator() 163 | x = torch.zeros((32, 128, 50)) 164 | print(long_dis(x).size()) 165 | 166 | -------------------------------------------------------------------------------- /quaternion.py: -------------------------------------------------------------------------------- 1 | 2 | import torch 3 | import numpy as np 4 | 5 | # PyTorch-backed implementations 6 | 7 | def qmul(q, r): 8 | """ 9 | Multiply quaternion(s) q with quaternion(s) r. 10 | Expects two equally-sized tensors of shape (*, 4), where * denotes any number of dimensions. 11 | Returns q*r as a tensor of shape (*, 4). 12 | """ 13 | assert q.shape[-1] == 4 14 | assert r.shape[-1] == 4 15 | 16 | original_shape = q.shape 17 | 18 | # Compute outer product 19 | terms = torch.bmm(r.view(-1, 4, 1), q.view(-1, 1, 4)) 20 | 21 | w = terms[:, 0, 0] - terms[:, 1, 1] - terms[:, 2, 2] - terms[:, 3, 3] 22 | x = terms[:, 0, 1] + terms[:, 1, 0] - terms[:, 2, 3] + terms[:, 3, 2] 23 | y = terms[:, 0, 2] + terms[:, 1, 3] + terms[:, 2, 0] - terms[:, 3, 1] 24 | z = terms[:, 0, 3] - terms[:, 1, 2] + terms[:, 2, 1] + terms[:, 3, 0] 25 | return torch.stack((w, x, y, z), dim=1).view(original_shape) 26 | 27 | def qrot(q, v): 28 | """ 29 | Rotate vector(s) v about the rotation described by quaternion(s) q. 30 | Expects a tensor of shape (*, 4) for q and a tensor of shape (*, 3) for v, 31 | where * denotes any number of dimensions. 32 | Returns a tensor of shape (*, 3). 
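Implementation note: uses the identity v' = v + 2*w*(u x v) + 2*(u x (u x v)),
where u is the vector part and w the scalar part of q. Illustrative example:
q = [0.7071, 0, 0.7071, 0] (90 degrees about +y) maps v = [1, 0, 0] to
approximately [0, 0, -1].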
33 | """ 34 | assert q.shape[-1] == 4 35 | assert v.shape[-1] == 3 36 | assert q.shape[:-1] == v.shape[:-1] 37 | 38 | original_shape = list(v.shape) 39 | q = q.view(-1, 4) 40 | v = v.view(-1, 3) 41 | 42 | qvec = q[:, 1:] 43 | uv = torch.cross(qvec, v, dim=1) 44 | uuv = torch.cross(qvec, uv, dim=1) 45 | return (v + 2 * (q[:, :1] * uv + uuv)).view(original_shape) 46 | 47 | def qeuler(q, order, epsilon=0): 48 | """ 49 | Convert quaternion(s) q to Euler angles. 50 | Expects a tensor of shape (*, 4), where * denotes any number of dimensions. 51 | Returns a tensor of shape (*, 3). 52 | """ 53 | assert q.shape[-1] == 4 54 | 55 | original_shape = list(q.shape) 56 | original_shape[-1] = 3 57 | q = q.view(-1, 4) 58 | 59 | q0 = q[:, 0] 60 | q1 = q[:, 1] 61 | q2 = q[:, 2] 62 | q3 = q[:, 3] 63 | 64 | if order == 'xyz': 65 | x = torch.atan2(2 * (q0 * q1 - q2 * q3), 1 - 2*(q1 * q1 + q2 * q2)) 66 | y = torch.asin(torch.clamp(2 * (q1 * q3 + q0 * q2), -1+epsilon, 1-epsilon)) 67 | z = torch.atan2(2 * (q0 * q3 - q1 * q2), 1 - 2*(q2 * q2 + q3 * q3)) 68 | elif order == 'yzx': 69 | x = torch.atan2(2 * (q0 * q1 - q2 * q3), 1 - 2*(q1 * q1 + q3 * q3)) 70 | y = torch.atan2(2 * (q0 * q2 - q1 * q3), 1 - 2*(q2 * q2 + q3 * q3)) 71 | z = torch.asin(torch.clamp(2 * (q1 * q2 + q0 * q3), -1+epsilon, 1-epsilon)) 72 | elif order == 'zxy': 73 | x = torch.asin(torch.clamp(2 * (q0 * q1 + q2 * q3), -1+epsilon, 1-epsilon)) 74 | y = torch.atan2(2 * (q0 * q2 - q1 * q3), 1 - 2*(q1 * q1 + q2 * q2)) 75 | z = torch.atan2(2 * (q0 * q3 - q1 * q2), 1 - 2*(q1 * q1 + q3 * q3)) 76 | elif order == 'xzy': 77 | x = torch.atan2(2 * (q0 * q1 + q2 * q3), 1 - 2*(q1 * q1 + q3 * q3)) 78 | y = torch.atan2(2 * (q0 * q2 + q1 * q3), 1 - 2*(q2 * q2 + q3 * q3)) 79 | z = torch.asin(torch.clamp(2 * (q0 * q3 - q1 * q2), -1+epsilon, 1-epsilon)) 80 | elif order == 'yxz': 81 | x = torch.asin(torch.clamp(2 * (q0 * q1 - q2 * q3), -1+epsilon, 1-epsilon)) 82 | y = torch.atan2(2 * (q1 * q3 + q0 * q2), 1 - 2*(q1 * q1 + q2 * q2)) 83 | z = torch.atan2(2 * (q1 * q2 + q0 * q3), 1 - 2*(q1 * q1 + q3 * q3)) 84 | elif order == 'zyx': 85 | x = torch.atan2(2 * (q0 * q1 + q2 * q3), 1 - 2*(q1 * q1 + q2 * q2)) 86 | y = torch.asin(torch.clamp(2 * (q0 * q2 - q1 * q3), -1+epsilon, 1-epsilon)) 87 | z = torch.atan2(2 * (q0 * q3 + q1 * q2), 1 - 2*(q2 * q2 + q3 * q3)) 88 | else: 89 | raise 90 | 91 | return torch.stack((x, y, z), dim=1).view(original_shape) 92 | 93 | # Numpy-backed implementations 94 | 95 | def qmul_np(q, r): 96 | q = torch.from_numpy(q).contiguous() 97 | r = torch.from_numpy(r).contiguous() 98 | return qmul(q, r).numpy() 99 | 100 | def qrot_np(q, v): 101 | q = torch.from_numpy(q).contiguous() 102 | v = torch.from_numpy(v).contiguous() 103 | return qrot(q, v).numpy() 104 | 105 | def qeuler_np(q, order, epsilon=0, use_gpu=False): 106 | if use_gpu: 107 | q = torch.from_numpy(q).cuda() 108 | return qeuler(q, order, epsilon).cpu().numpy() 109 | else: 110 | q = torch.from_numpy(q).contiguous() 111 | return qeuler(q, order, epsilon).numpy() 112 | 113 | def qfix(q): 114 | """ 115 | Enforce quaternion continuity across the time dimension by selecting 116 | the representation (q or -q) with minimal distance (or, equivalently, maximal dot product) 117 | between two consecutive frames. 118 | 119 | Expects a tensor of shape (L, J, 4), where L is the sequence length and J is the number of joints. 120 | Returns a tensor of the same shape. 
121 | """ 122 | assert len(q.shape) == 3 123 | assert q.shape[-1] == 4 124 | 125 | result = q.copy() 126 | dot_products = np.sum(q[1:]*q[:-1], axis=2) 127 | mask = dot_products < 0 128 | mask = (np.cumsum(mask, axis=0)%2).astype(bool) 129 | result[1:][mask] *= -1 130 | return result 131 | 132 | def expmap_to_quaternion(e): 133 | """ 134 | Convert axis-angle rotations (aka exponential maps) to quaternions. 135 | Stable formula from "Practical Parameterization of Rotations Using the Exponential Map". 136 | Expects a tensor of shape (*, 3), where * denotes any number of dimensions. 137 | Returns a tensor of shape (*, 4). 138 | """ 139 | assert e.shape[-1] == 3 140 | 141 | original_shape = list(e.shape) 142 | original_shape[-1] = 4 143 | e = e.reshape(-1, 3) 144 | 145 | theta = np.linalg.norm(e, axis=1).reshape(-1, 1) 146 | w = np.cos(0.5*theta).reshape(-1, 1) 147 | xyz = 0.5*np.sinc(0.5*theta/np.pi)*e 148 | return np.concatenate((w, xyz), axis=1).reshape(original_shape) 149 | 150 | def euler_to_quaternion(e, order): 151 | """ 152 | Convert Euler angles to quaternions. 153 | """ 154 | assert e.shape[-1] == 3 155 | 156 | original_shape = list(e.shape) 157 | original_shape[-1] = 4 158 | 159 | e = e.reshape(-1, 3) 160 | 161 | x = e[:, 0] 162 | y = e[:, 1] 163 | z = e[:, 2] 164 | 165 | rx = np.stack((np.cos(x/2), np.sin(x/2), np.zeros_like(x), np.zeros_like(x)), axis=1) 166 | ry = np.stack((np.cos(y/2), np.zeros_like(y), np.sin(y/2), np.zeros_like(y)), axis=1) 167 | rz = np.stack((np.cos(z/2), np.zeros_like(z), np.zeros_like(z), np.sin(z/2)), axis=1) 168 | 169 | result = None 170 | for coord in order: 171 | if coord == 'x': 172 | r = rx 173 | elif coord == 'y': 174 | r = ry 175 | elif coord == 'z': 176 | r = rz 177 | else: 178 | raise 179 | if result is None: 180 | result = r 181 | else: 182 | result = qmul_np(result, r) 183 | 184 | # Reverse antipodal representation to have a non-negative "w" 185 | if order in ['xyz', 'yzx', 'zxy']: 186 | result *= -1 187 | 188 | return result.reshape(original_shape) 189 | -------------------------------------------------------------------------------- /remove_fs.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import numpy as np 4 | import torch 5 | import argparse 6 | from tqdm import tqdm 7 | BASEPATH = os.path.dirname(__file__) 8 | from os.path import join as pjoin 9 | sys.path.insert(0, BASEPATH) 10 | sys.path.insert(0, pjoin(BASEPATH, '..')) 11 | 12 | import foot_sliding.BVH as BVH 13 | from foot_sliding.InverseKinematics import JacobianInverseKinematics 14 | from foot_sliding.animation_data import AnimationData 15 | 16 | 17 | def softmax(x, **kw): 18 | softness = kw.pop("softness", 1.0) 19 | maxi, mini = np.max(x, **kw), np.min(x, **kw) 20 | return maxi + np.log(softness + np.exp(mini - maxi)) 21 | 22 | 23 | def softmin(x, **kw): 24 | return -softmax(-x, **kw) 25 | 26 | 27 | def alpha(t): 28 | return 2.0 * t * t * t - 3.0 * t * t + 1 29 | 30 | 31 | def lerp(a, l, r): 32 | return (1 - a) * l + a * r 33 | 34 | 35 | def parse_args(): 36 | parser = argparse.ArgumentParser() 37 | parser.add_argument("--data", type=str, default="bla_3d") 38 | return parser.parse_args() 39 | 40 | 41 | def nrot2anim(filename): 42 | anim = AnimationData.from_BVH(filename, downsample=1) 43 | # anim = AnimationData.from_network_output(nrot) 44 | bvh, names, ftime = anim.get_BVH() 45 | anim = AnimationData.from_rotations_and_root_positions(np.array(bvh.rotations), bvh.positions[:, 0, :]) 46 | glb = 
anim.get_global_positions(trim=False) 47 | # print('bvh.rotations:', np.array(bvh.rotations)[0,0]) 48 | # assert 0 49 | return (bvh, names, ftime), glb 50 | 51 | 52 | def save_bvh_from_network_output(nrot, output_path): 53 | anim = AnimationData.from_network_output(nrot) 54 | bvh, names, ftime = anim.get_BVH() 55 | if not os.path.exists(os.path.dirname(output_path)): 56 | os.makedirs(os.path.dirname(output_path)) 57 | BVH.save(output_path, bvh, names, ftime) 58 | 59 | 60 | def remove_fs(filename, foot, output_path, fid_l=(4, 5), fid_r=(9, 10), interp_length=5, force_on_floor=False): 61 | (anim, names, ftime), glb = nrot2anim(filename) 62 | T = len(glb) 63 | 64 | fid = list(fid_l) + list(fid_r) 65 | fid_l, fid_r = np.array(fid_l), np.array(fid_r) 66 | foot_heights = np.minimum(glb[:, fid_l, 1], 67 | glb[:, fid_r, 1]).min(axis=1) # [T, 2] -> [T] 68 | # print(np.min(foot_heights)) 69 | floor_height = softmin(foot_heights, softness=0.5, axis=0) 70 | # print(floor_height) 71 | glb[:, :, 1] -= floor_height 72 | anim.positions[:, 0, 1] -= floor_height 73 | glb_cp = glb.copy() 74 | 75 | for i, fidx in enumerate(fid): 76 | fixed = foot[i] # [T] 77 | 78 | """ 79 | for t in range(T): 80 | glb[t, fidx][1] = max(glb[t, fidx][1], 0.25) 81 | """ 82 | 83 | s = 0 84 | while s < T: 85 | while s < T and fixed[s] == 0: 86 | s += 1 87 | if s >= T: 88 | break 89 | t = s 90 | avg = glb[t, fidx].copy() 91 | while t + 1 < T and fixed[t + 1] == 1: 92 | t += 1 93 | avg += glb[t, fidx].copy() 94 | avg /= (t - s + 1) 95 | 96 | if force_on_floor: 97 | avg[1] = 0.0 98 | 99 | for j in range(s, t + 1): 100 | glb[j, fidx] = avg.copy() 101 | 102 | # print(fixed[s - 1:t + 2]) 103 | 104 | s = t + 1 105 | 106 | for s in range(T): 107 | if fixed[s] == 1: 108 | continue 109 | l, r = None, None 110 | consl, consr = False, False 111 | for k in range(interp_length): 112 | if s - k - 1 < 0: 113 | break 114 | if fixed[s - k - 1]: 115 | l = s - k - 1 116 | consl = True 117 | break 118 | for k in range(interp_length): 119 | if s + k + 1 >= T: 120 | break 121 | if fixed[s + k + 1]: 122 | r = s + k + 1 123 | consr = True 124 | break 125 | 126 | if not consl and not consr: 127 | continue 128 | if consl and consr: 129 | litp = lerp(alpha(1.0 * (s - l + 1) / (interp_length + 1)), 130 | glb[s, fidx], glb[l, fidx]) 131 | ritp = lerp(alpha(1.0 * (r - s + 1) / (interp_length + 1)), 132 | glb[s, fidx], glb[r, fidx]) 133 | itp = lerp(alpha(1.0 * (s - l + 1) / (r - l + 1)), 134 | ritp, litp) 135 | glb[s, fidx] = itp.copy() 136 | continue 137 | if consl: 138 | litp = lerp(alpha(1.0 * (s - l + 1) / (interp_length + 1)), 139 | glb[s, fidx], glb[l, fidx]) 140 | glb[s, fidx] = litp.copy() 141 | continue 142 | if consr: 143 | ritp = lerp(alpha(1.0 * (r - s + 1) / (interp_length + 1)), 144 | glb[s, fidx], glb[r, fidx]) 145 | glb[s, fidx] = ritp.copy() 146 | 147 | targetmap = {} 148 | for j in range(glb.shape[1]): 149 | targetmap[j] = glb[:, j] 150 | 151 | ik = JacobianInverseKinematics(anim, targetmap, iterations=10, damping=4.0, 152 | silent=False) 153 | ik() 154 | 155 | if not os.path.exists(os.path.dirname(output_path)): 156 | os.makedirs(os.path.dirname(output_path)) 157 | BVH.save(output_path, anim, names, ftime) 158 | return glb 159 | 160 | 161 | def process_data(filename, style_and_content=True, output_dir=None, selected=None): 162 | 163 | # data = torch.load(filename, map_location="cpu") 164 | # feet = data["foot_contact"] 165 | # motions = data["trans"] 166 | 167 | # if selected is None: 168 | # selected = range(len(motions)) 169 | 170 | # for 
num in tqdm(selected): 171 | for num in range(1): 172 | # feet = feet[num].detach().numpy() 173 | # if style_and_content: 174 | # style = styles[num].detach().numpy() 175 | # content = contents[num].detach().numpy() 176 | # save_bvh_from_network_output(style.copy(), output_path=pjoin(output_dir, "style_%02d.bvh" % num)) 177 | # save_bvh_from_network_output(content.copy(), output_path=pjoin(output_dir, "content_%02d.bvh" % num)) 178 | motion = np.ones((92, 100)) 179 | foot = np.zeros((4, 100)) 180 | # motion = motions[num].detach().numpy() 181 | save_bvh_from_network_output(motion, output_path=pjoin(output_dir, "raw_%02d.bvh" % num)) 182 | remove_fs(motion, foot, output_path=pjoin(output_dir, "after_%02d.bvh" % num)) 183 | 184 | 185 | def main(args): 186 | output_dir = args.data + "_bvh" 187 | try: 188 | os.mkdir(output_dir) 189 | except FileExistsError: 190 | pass 191 | 192 | process_data(args.data, output_dir=output_dir) 193 | 194 | 195 | if __name__ == '__main__': 196 | args = parse_args() 197 | main(args) 198 | 199 | 200 | 201 | 202 | 203 | 204 | -------------------------------------------------------------------------------- /skeleton.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import os 3 | import numpy as np 4 | import sys 5 | sys.path.insert(0, os.path.dirname(__file__)) 6 | from quaternion import qmul_np, qmul, qrot 7 | from torch.utils.data import Dataset, DataLoader 8 | from LaFan import LaFan1 9 | 10 | class Skeleton: 11 | def __init__(self, offsets, parents, joints_left=None, joints_right=None): 12 | assert len(offsets) == len(parents) 13 | 14 | self._offsets = torch.FloatTensor(offsets) 15 | self._parents = np.array(parents) 16 | self._joints_left = joints_left 17 | self._joints_right = joints_right 18 | self._compute_metadata() 19 | 20 | def cuda(self): 21 | self._offsets = self._offsets.cuda() 22 | return self 23 | 24 | def num_joints(self): 25 | return self._offsets.shape[0] 26 | 27 | def offsets(self): 28 | return self._offsets 29 | 30 | def parents(self): 31 | return self._parents 32 | 33 | def has_children(self): 34 | return self._has_children 35 | 36 | def children(self): 37 | return self._children 38 | 39 | def remove_joints(self, joints_to_remove): 40 | """ 41 | Remove the joints specified in 'joints_to_remove', both from the 42 | skeleton definition and from the dataset (which is modified in place). 43 | The rotations of removed joints are propagated along the kinematic chain. 44 | """ 45 | valid_joints = [] 46 | for joint in range(len(self._parents)): 47 | if joint not in joints_to_remove: 48 | valid_joints.append(joint) 49 | 50 | index_offsets = np.zeros(len(self._parents), dtype=int) 51 | new_parents = [] 52 | for i, parent in enumerate(self._parents): 53 | if i not in joints_to_remove: 54 | new_parents.append(parent - index_offsets[parent]) 55 | else: 56 | index_offsets[i:] += 1 57 | self._parents = np.array(new_parents) 58 | 59 | self._offsets = self._offsets[valid_joints] 60 | self._compute_metadata() 61 | 62 | def forward_kinematics(self, rotations, root_positions): 63 | """ 64 | Perform forward kinematics using the given trajectory and local rotations. 65 | Arguments (where N = batch size, L = sequence length, J = number of joints): 66 | -- rotations: (N, L, J, 4) tensor of unit quaternions describing the local rotations of each joint. 67 | -- root_positions: (N, L, 3) tensor describing the root joint positions. 
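Returns a (N, L, J, 3) tensor of global joint positions. World rotations
of terminal joints are skipped (stored as None) since no child offset
depends on them.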
68 | """ 69 | assert len(rotations.shape) == 4 70 | assert rotations.shape[-1] == 4 71 | 72 | positions_world = [] 73 | rotations_world = [] 74 | 75 | expanded_offsets = self._offsets.expand(rotations.shape[0], rotations.shape[1], 76 | self._offsets.shape[0], self._offsets.shape[1]) 77 | 78 | # Parallelize along the batch and time dimensions 79 | for i in range(self._offsets.shape[0]): 80 | if self._parents[i] == -1: 81 | positions_world.append(root_positions) 82 | rotations_world.append(rotations[:, :, 0]) 83 | else: 84 | positions_world.append(qrot(rotations_world[self._parents[i]], expanded_offsets[:, :, i]) \ 85 | + positions_world[self._parents[i]]) 86 | if self._has_children[i]: 87 | rotations_world.append(qmul(rotations_world[self._parents[i]], rotations[:, :, i])) 88 | else: 89 | # This joint is a terminal node -> it would be useless to compute the transformation 90 | rotations_world.append(None) 91 | 92 | return torch.stack(positions_world, dim=3).permute(0, 1, 3, 2) 93 | 94 | def joints_left(self): 95 | return self._joints_left 96 | 97 | def joints_right(self): 98 | return self._joints_right 99 | 100 | def _compute_metadata(self): 101 | self._has_children = np.zeros(len(self._parents)).astype(bool) 102 | for i, parent in enumerate(self._parents): 103 | if parent != -1: 104 | self._has_children[parent] = True 105 | 106 | self._children = [] 107 | for i, parent in enumerate(self._parents): 108 | self._children.append([]) 109 | for i, parent in enumerate(self._parents): 110 | if parent != -1: 111 | self._children[parent].append(i) 112 | 113 | if __name__=="__main__": 114 | skeleton_mocap = Skeleton(offsets=[ 115 | [-42.198200,91.614723,-40.067841], 116 | [ 0.103456,1.857829,10.548506], 117 | [43.499992,-0.000038,-0.000002], 118 | [42.372192,0.000015,-0.000007], 119 | [ 17.299999,-0.000002,0.000003], 120 | [0.000000,0.000000,0.000000], 121 | 122 | [0.103457,1.857829,-10.548503], 123 | [43.500042,-0.000027,0.000008], 124 | [42.372257,-0.000008,0.000014], 125 | [17.299992,-0.000005,0.000004], 126 | [0.000000,0.000000,0.000000], 127 | 128 | [6.901968,-2.603733,-0.000001], 129 | [12.588099,0.000002,0.000000], 130 | [12.343206,0.000000,-0.000001], 131 | [25.832886,-0.000004,0.000003], 132 | [11.766620,0.000005,-0.000001], 133 | [0.000000,0.000000,0.000000], 134 | 135 | [19.745899,-1.480370,6.000108], 136 | [11.284125,-0.000009,-0.000018], 137 | [33.000050,0.000004,0.000032], 138 | [25.200008,0.000015,0.000008], 139 | [0.000000,0.000000,0.000000], 140 | 141 | [19.746099,-1.480375,-6.000073], 142 | [11.284138,-0.000015,-0.000012], 143 | [33.000092,0.000017,0.000013], 144 | [25.199780,0.000135,0.000422], 145 | [0.000000,0.000000,0.000000] 146 | ], 147 | parents=[-1, 0, 1, 2, 3, 4,\ 148 | 0, 6, 7, 8, 9,\ 149 | 0, 11, 12, 13, 14, 15,\ 150 | 13, 17, 18, 19, 20, 151 | 13, 22, 23, 24, 25]) 152 | 153 | skeleton_mocap.remove_joints([5,10,16,21,26]) 154 | os.system('conda deactivate') 155 | os.system('conda activate mobet') 156 | # from npybvh.bvh import Bvh 157 | # anim = Bvh() 158 | # anim.parse_file('D:\\ubisoft-laforge-animation-dataset\\lafan1\\lafan1\\aiming1_subject1.bvh') 159 | 160 | # for t in range(65): 161 | # positions, rotations = anim.frame_pose(t) 162 | # want_idx = [0,1,2,3,4,\ 163 | # 6,7,8,9,\ 164 | # 11,12,13,14,15,\ 165 | # 17,18,19,20,\ 166 | # 22,23,24,25] 167 | # positions = positions[want_idx] 168 | # print(positions[0]) 169 | 170 | lafan_data = LaFan1('D:\\ubisoft-laforge-animation-dataset\\lafan1\\lafan1', train = False, debug=False) 171 | lafan_loader = 
DataLoader(lafan_data, batch_size=32, shuffle=False, num_workers=4) 172 | for i_batch, sample_batched in enumerate(lafan_loader): 173 | pos_batch = skeleton_mocap.forward_kinematics(sample_batched['local_q'], sample_batched['root_p']) 174 | # print(pos_batch[0,:,0].cpu().numpy()) 175 | # break 176 | -------------------------------------------------------------------------------- /test.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import sys, os 3 | sys.path.insert(0, os.path.dirname(__file__)) 4 | from LaFan import LaFan1 5 | from torch.utils.data import Dataset, DataLoader 6 | from model import StateEncoder, \ 7 | OffsetEncoder, \ 8 | TargetEncoder, \ 9 | LSTM, \ 10 | Decoder, \ 11 | ShortMotionDiscriminator, \ 12 | LongMotionDiscriminator 13 | from skeleton import Skeleton 14 | import torch.optim as optim 15 | from tensorboardX import SummaryWriter 16 | import numpy as np 17 | from tqdm import tqdm 18 | from functions import gen_ztta, write_to_bvhfile 19 | import yaml 20 | import time 21 | import shutil 22 | import imageio 23 | import matplotlib.pyplot as plt 24 | from mpl_toolkits.mplot3d import axes3d, Axes3D 25 | from remove_fs import remove_fs, save_bvh_from_network_output 26 | from foot_sliding.animation_data import y_rotation_from_positions 27 | from PIL import Image 28 | 29 | def plot_pose(pose, cur_frame, prefix): 30 | 31 | fig = plt.figure() 32 | ax = fig.add_subplot(111, projection='3d') 33 | 34 | parents = [-1, 0, 1, 2, 3, 0, 5, 6, 7, 0, 9, 10, 11, 12, 11, 14, 15, 16, 11, 18, 19, 20] 35 | ax.cla() 36 | num_joint = pose.shape[0] // 3 37 | for i, p in enumerate(parents): 38 | if i > 0: 39 | ax.plot([pose[i, 0], pose[p, 0]],\ 40 | [pose[i, 2], pose[p, 2]],\ 41 | [pose[i, 1], pose[p, 1]], c='r') 42 | ax.plot([pose[i+num_joint, 0], pose[p+num_joint, 0]],\ 43 | [pose[i+num_joint, 2], pose[p+num_joint, 2]],\ 44 | [pose[i+num_joint, 1], pose[p+num_joint, 1]], c='b') 45 | ax.plot([pose[i+num_joint*2, 0], pose[p+num_joint*2, 0]],\ 46 | [pose[i+num_joint*2, 2], pose[p+num_joint*2, 2]],\ 47 | [pose[i+num_joint*2, 1], pose[p+num_joint*2, 1]], c='g') 48 | # ax.scatter(pose[:num_joint, 0], pose[:num_joint, 2], pose[:num_joint, 1],c='b') 49 | # ax.scatter(pose[num_joint:num_joint*2, 0], pose[num_joint:num_joint*2, 2], pose[num_joint:num_joint*2, 1],c='b') 50 | # ax.scatter(pose[num_joint*2:num_joint*3, 0], pose[num_joint*2:num_joint*3, 2], pose[num_joint*2:num_joint*3, 1],c='g') 51 | xmin = np.min(pose[:, 0]) 52 | ymin = np.min(pose[:, 2]) 53 | zmin = np.min(pose[:, 1]) 54 | xmax = np.max(pose[:, 0]) 55 | ymax = np.max(pose[:, 2]) 56 | zmax = np.max(pose[:, 1]) 57 | scale = np.max([xmax - xmin, ymax - ymin, zmax - zmin]) 58 | xmid = (xmax + xmin) // 2 59 | ymid = (ymax + ymin) // 2 60 | zmid = (zmax + zmin) // 2 61 | ax.set_xlim(xmid - scale // 2, xmid + scale // 2) 62 | ax.set_ylim(ymid - scale // 2, ymid + scale // 2) 63 | ax.set_zlim(zmid - scale // 2, zmid + scale // 2) 64 | 65 | plt.draw() 66 | plt.savefig(prefix + '_' + str(cur_frame)+'.png', dpi=200, bbox_inches='tight') 67 | plt.close() 68 | 69 | if __name__ == '__main__': 70 | opt = yaml.load(open('./config/test-base.yaml', 'r').read()) 71 | model_dir =opt['test']['model_dir'] 72 | 73 | 74 | ## initilize the skeleton ## 75 | skeleton_mocap = Skeleton(offsets=opt['data']['offsets'], parents=opt['data']['parents']) 76 | skeleton_mocap.cuda() 77 | skeleton_mocap.remove_joints(opt['data']['joints_to_remove']) 78 | 79 | ## load train data ## 80 | lafan_data_test = 
LaFan1(opt['data']['data_dir'], \ 81 | seq_len = opt['model']['seq_length'], \ 82 | offset = 40,\ 83 | train = False, debug=opt['test']['debug']) 84 | lafan_data_test.cur_seq_length = opt['model']['seq_length'] 85 | x_mean = lafan_data_test.x_mean.cuda() 86 | x_std = lafan_data_test.x_std.cuda().view(1, 1, opt['model']['num_joints'], 3) 87 | lafan_loader_test = DataLoader(lafan_data_test, \ 88 | batch_size=opt['test']['batch_size'], \ 89 | shuffle=False, num_workers=opt['data']['num_workers']) 90 | 91 | ## initialize model and load parameters ## 92 | state_encoder = StateEncoder(in_dim=opt['model']['state_input_dim']) 93 | state_encoder = state_encoder.cuda() 94 | state_encoder.load_state_dict(torch.load(os.path.join(opt['test']['model_dir'], 'state_encoder.pkl'))) 95 | offset_encoder = OffsetEncoder(in_dim=opt['model']['offset_input_dim']) 96 | offset_encoder = offset_encoder.cuda() 97 | offset_encoder.load_state_dict(torch.load(os.path.join(opt['test']['model_dir'], 'offset_encoder.pkl'))) 98 | target_encoder = TargetEncoder(in_dim=opt['model']['target_input_dim']) 99 | target_encoder = target_encoder.cuda() 100 | target_encoder.load_state_dict(torch.load(os.path.join(opt['test']['model_dir'], 'target_encoder.pkl'))) 101 | lstm = LSTM(in_dim=opt['model']['lstm_dim'], hidden_dim = opt['model']['lstm_dim'] * 2) 102 | lstm = lstm.cuda() 103 | lstm.load_state_dict(torch.load(os.path.join(opt['test']['model_dir'], 'lstm.pkl'))) 104 | decoder = Decoder(in_dim=opt['model']['lstm_dim'] * 2, out_dim=opt['model']['state_input_dim']) 105 | decoder = decoder.cuda() 106 | decoder.load_state_dict(torch.load(os.path.join(opt['test']['model_dir'], 'decoder.pkl'))) 107 | print('model loaded') 108 | 109 | ## get positional code ## 110 | if opt['test']['use_ztta']: 111 | ztta = gen_ztta().cuda() 112 | # print('ztta:', ztta.size()) 113 | # assert 0 114 | version = opt['test']['version'] 115 | 116 | # writer = SummaryWriter(log_dir) 117 | loss_total_min = 10000000.0 118 | for epoch in range(opt['test']['num_epoch']): 119 | state_encoder.eval() 120 | offset_encoder.eval() 121 | target_encoder.eval() 122 | lstm.eval() 123 | decoder.eval() 124 | loss_total_list = [] 125 | 126 | for i_batch, sampled_batch in enumerate(lafan_loader_test): 127 | # if i_batch != 33: 128 | # continue 129 | pred_img_list = [] 130 | gt_img_list = [] 131 | img_list = [] 132 | 133 | # print(i_batch, sample_batched['local_q'].size()) 134 | 135 | loss_pos = 0 136 | loss_quat = 0 137 | loss_contact = 0 138 | loss_root = 0 139 | with torch.no_grad(): 140 | # if True: 141 | # state input 142 | local_q = sampled_batch['local_q'].cuda() 143 | root_v = sampled_batch['root_v'].cuda() 144 | contact = sampled_batch['contact'].cuda() 145 | # offset input 146 | root_p_offset = sampled_batch['root_p_offset'].cuda() 147 | local_q_offset = sampled_batch['local_q_offset'].cuda() 148 | local_q_offset = local_q_offset.view(local_q_offset.size(0), -1) 149 | # target input 150 | target = sampled_batch['target'].cuda() 151 | target = target.view(target.size(0), -1) 152 | # root pos 153 | root_p = sampled_batch['root_p'].cuda() 154 | # X 155 | X = sampled_batch['X'].cuda() 156 | bs = 6#np.random.choice(X.size(0), 1)[0] 157 | if False: 158 | print('local_q:', local_q.size(), \ 159 | 'root_v:', root_v.size(), \ 160 | 'contact:', contact.size(), \ 161 | 'root_p_offset:', root_p_offset.size(), \ 162 | 'local_q_offset:', local_q_offset.size(), \ 163 | 'target:', target.size()) 164 | assert 0 165 | 166 | lstm.init_hidden(local_q.size(0)) 167 | h_list = [] 168 | 
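# Autoregressive rollout: each buffer below is seeded with the ground-truth
# first frame; from t >= 1 the model's own predictions (root_pred,
# local_q_pred, contact_pred) are fed back as the next-step state input.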
                quat_list = []
                quat_list.append(local_q[:,0,].view(local_q.size(0), -1, 4))
                pred_list = []
                pred_list.append(X[:,0])
                bvh_list = []
                bvh_list.append(torch.cat([X[:,0,0], local_q[:,0,].view(local_q.size(0), -1)], -1))
                contact_list = []
                contact_list.append(contact[:,0])
                root_list = []
                root_list.append(X[:,0,0])
                # print(X.size())
                for t in range(opt['model']['seq_length'] - 1):
                    # root pos
                    if t == 0:
                        root_p_t = root_p[:,t]
                        local_q_t = local_q[:,t]
                        local_q_t = local_q_t.view(local_q_t.size(0), -1)
                        contact_t = contact[:,t]
                        root_v_t = root_v[:,t]
                    else:
                        root_p_t = root_pred[0]
                        local_q_t = local_q_pred[0]
                        contact_t = contact_pred[0]
                        root_v_t = root_v_pred[0]

                    # state input
                    state_input = torch.cat([local_q_t, root_v_t, contact_t], -1)
                    # offset input
                    root_p_offset_t = root_p_offset - root_p_t
                    local_q_offset_t = local_q_offset - local_q_t
                    # print('root_p_offset_t:', root_p_offset_t.size(), 'local_q_offset_t:', local_q_offset_t.size())
                    offset_input = torch.cat([root_p_offset_t, local_q_offset_t], -1)
                    # target input
                    target_input = target

                    # print('state_input:', state_input.size())
                    h_state = state_encoder(state_input)
                    h_offset = offset_encoder(offset_input)
                    h_target = target_encoder(target_input)

                    if opt['test']['use_ztta']:
                        h_state += ztta[:, t]
                        h_offset += ztta[:, t]
                        h_target += ztta[:, t]

                    if opt['test']['use_adv']:
                        tta = opt['model']['seq_length'] - 2 - t
                        if tta < 5:
                            lambda_target = 0.0
                        elif tta >= 5 and tta < 30:
                            lambda_target = (tta - 5) / 25.0
                        else:
                            lambda_target = 1.0
                        h_offset += 0.5 * lambda_target * torch.cuda.FloatTensor(h_offset.size()).normal_()
                        h_target += 0.5 * lambda_target * torch.cuda.FloatTensor(h_target.size()).normal_()

                    h_in = torch.cat([h_state, h_offset, h_target], -1).unsqueeze(0)
                    h_out = lstm(h_in)
                    # print('h_out:', h_out.size())

                    h_pred, contact_pred = decoder(h_out)
                    local_q_v_pred = h_pred[:,:,:opt['model']['target_input_dim']]
                    local_q_pred = local_q_v_pred + local_q_t
                    # print('local_q_pred:', local_q_pred.size())
                    local_q_pred_ = local_q_pred.view(local_q_pred.size(0), local_q_pred.size(1), -1, 4)
                    # re-normalize so every predicted joint rotation stays a unit quaternion
                    local_q_pred_ = local_q_pred_ / torch.norm(local_q_pred_, dim = -1, keepdim = True)
                    # print("local_q_pred_:", local_q_pred_.size())
                    quat_list.append(local_q_pred_[0])
                    root_v_pred = h_pred[:,:,opt['model']['target_input_dim']:]
                    root_pred = root_v_pred + root_p_t
                    root_list.append(root_pred[0])
                    # print('contact_pred:', contact_pred.size())
                    # print('root_pred:', root_pred.size())
                    bvh_list.append(torch.cat([root_pred[0], local_q_pred_[0].view(local_q_pred_.size(1), -1)], -1))
                    pos_pred = skeleton_mocap.forward_kinematics(local_q_pred_, root_pred)

                    pos_next = X[:,t+1]
                    local_q_next = local_q[:,t+1]
                    local_q_next = local_q_next.view(local_q_next.size(0), -1)
                    root_p_next = root_p[:,t+1]
                    contact_next = contact[:,t+1]
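                    # Loss bookkeeping, as a reading aid (not repo code): each
                    # term below is an L1 error, position errors are divided by
                    # the per-coordinate std of the training set so all joints
                    # contribute on a comparable scale, and every term is
                    # averaged over the sequence length, e.g.
                    #
                    #   loss_pos ~ mean(|pos_pred - pos_gt| / x_std) / T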
                    # print(pos_pred.size(), x_std.size())
                    loss_pos += torch.mean(torch.abs(pos_pred[0] - pos_next) / x_std) / opt['model']['seq_length']
                    loss_quat += torch.mean(torch.abs(local_q_pred[0] - local_q_next)) / opt['model']['seq_length']
                    loss_root += torch.mean(torch.abs(root_pred[0] - root_p_next) / x_std[:,:,0]) / opt['model']['seq_length']
                    loss_contact += torch.mean(torch.abs(contact_pred[0] - contact_next)) / opt['model']['seq_length']
                    pred_list.append(pos_pred[0])
                    contact_list.append(contact_pred[0])

                    # if i_batch < 49:
                    # print("pos_pred:", pos_pred.size())
                    if opt['test']['save_img']:
                        plot_pose(np.concatenate([X[bs,0].view(22, 3).detach().cpu().numpy(),\
                                                  pos_pred[0, bs].view(22, 3).detach().cpu().numpy(),\
                                                  X[bs,-1].view(22, 3).detach().cpu().numpy()], 0),\
                                  t, '../results'+version+'/pred')
                        plot_pose(np.concatenate([X[bs,0].view(22, 3).detach().cpu().numpy(),\
                                                  X[bs,t+1].view(22, 3).detach().cpu().numpy(),\
                                                  X[bs,-1].view(22, 3).detach().cpu().numpy()], 0),\
                                  t, '../results'+version+'/gt')
                        pred_img = Image.open('../results'+version+'/pred_'+str(t)+'.png', 'r')
                        gt_img = Image.open('../results'+version+'/gt_'+str(t)+'.png', 'r')
                        pred_img_list.append(pred_img)
                        gt_img_list.append(gt_img)
                        img_list.append(np.concatenate([pred_img, gt_img.resize(pred_img.size)], 1))

                # print('pivots:', pivots.shape)
                # print('rot_data.size:', rot_data.shape)
                if opt['test']['save_bvh']:
                    # print("bs:", bs)
                    bvh_data = torch.cat([x[bs].unsqueeze(0) for x in bvh_list], 0).detach().cpu().numpy()
                    # print('bvh_data:', bvh_data.shape)
                    # print('bvh_data:', bvh_data[0,3:7])
                    # assert 0
                    write_to_bvhfile(bvh_data, ('../bvh_seq/test_%03d.bvh' % i_batch), opt['data']['joints_to_remove'])
                    # assert 0
                    contact_data = torch.cat([x[bs].unsqueeze(0) for x in contact_list], 0).detach().cpu().numpy()
                    # rot_data = torch.cat([x[bs].unsqueeze(0) for x in quat_list], 0).detach().cpu().numpy()
                    # root_data = torch.cat([x[bs].unsqueeze(0) for x in root_list], 0).detach().cpu().numpy()
                    # pred_pose = torch.cat([x[bs].unsqueeze(0) for x in pred_list], 0).detach().cpu().numpy()
                    # quaters, pivots = y_rotation_from_positions(pred_pose, hips = (1,5), sdrs = (14,18))
                    # motion = np.concatenate([rot_data.reshape(rot_data.shape[0], -1),\
                    #                          root_data,\
                    #                          pivots], -1)
                    # motion = motion.transpose(1,0)
                    foot = contact_data.transpose(1,0)
                    foot[foot > 0.5] = 1.0
                    foot[foot <= 0.5] = 0.0
                    # print('foot[0]:', foot[0])
                    glb = remove_fs(('../bvh_seq/test_%03d.bvh' % i_batch), \
                                    foot, \
                                    fid_l=(3, 4), \
                                    fid_r=(7, 8),\
                                    output_path=("../bvh_seq_after"+version+"/test_%03d.bvh" % i_batch))
                    fix_img_list = []
                    for t in range(opt['model']['seq_length']):
                        plot_pose(np.concatenate([X[bs,0].view(22, 3).detach().cpu().numpy(),\
                                                  glb[t],\
                                                  X[bs,-1].view(22, 3).detach().cpu().numpy()], 0),\
                                  t, '../results'+version+'/fixed')
                        plot_pose(np.concatenate([X[bs,0].view(22, 3).detach().cpu().numpy(),\
                                                  X[bs,t].view(22, 3).detach().cpu().numpy(),\
                                                  X[bs,-1].view(22, 3).detach().cpu().numpy()], 0),\
                                  t, '../results'+version+'/gt')
                        fix_img = Image.open('../results'+version+'/fixed_'+str(t)+'.png', 'r')
                        gt_img = Image.open('../results'+version+'/gt_'+str(t)+'.png', 'r')
                        fix_img_list.append(np.concatenate([fix_img, gt_img.resize(fix_img.size)], 1))
                    imageio.mimsave(('../gif'+version+'/img_fix_%03d.gif' % i_batch), fix_img_list, duration=0.1)
                    # save_bvh_from_network_output(motion, output_path=("../bvh_seq_after/test_%03d.bvh" % i_batch))
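
                # Foot-contact post-processing in a nutshell: the predicted
                # per-frame contact probabilities are thresholded at 0.5 into
                # hard labels, and remove_fs then IK-pins the foot joints
                # (indexed by fid_l / fid_r) on every frame labeled as planted,
                # which is what removes the visible sliding. Equivalent
                # thresholding in one line (illustrative, not repo code):
                #
                #   foot = (contact_prob > 0.5).astype(np.float32)  # (4, T) hard labels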

                # if i_batch < 49:
                if opt['test']['save_img'] and opt['test']['save_gif']:
                    imageio.mimsave(('../gif'+version+'/img_%03d.gif' % i_batch), img_list, duration=0.1)
                if opt['test']['save_pose']:
                    gt_pose = X[bs,:].view(opt['model']['seq_length'], 22, 3).detach().cpu().numpy()
                    pred_pose = torch.cat([x[bs].unsqueeze(0) for x in pred_list], 0).detach().cpu().numpy()
                    plt.clf()
                    joint_idx = 13
                    plt.plot(range(opt['model']['seq_length']), gt_pose[:,joint_idx,0])
                    plt.plot(range(opt['model']['seq_length']), pred_pose[:,joint_idx,0])
                    plt.legend(['gt', 'pred'])
                    plt.savefig('../results'+version+'/pose_%03d.png' % i_batch)
                    plt.close()

            # if opt['test']['save_img'] and i_batch > 49:
            #     break

            if opt['test']['save_pose'] and i_batch > 49:
                break

    # print("train epoch: %03d, cur total loss:%.3f, cur best loss:%.3f" % (epoch, loss_total_cur, loss_total_min))
--------------------------------------------------------------------------------
/train.py:
--------------------------------------------------------------------------------
import torch
import sys, os
sys.path.insert(0, os.path.dirname(__file__))
from LaFan import LaFan1
from torch.utils.data import Dataset, DataLoader
from model import StateEncoder, \
                  OffsetEncoder, \
                  TargetEncoder, \
                  LSTM, \
                  Decoder, \
                  ShortMotionDiscriminator, \
                  LongMotionDiscriminator
from skeleton import Skeleton
import torch.optim as optim
from tensorboardX import SummaryWriter
import numpy as np
from tqdm import tqdm
from functions import gen_ztta
import yaml
import time
import shutil

if __name__ == '__main__':
    opt = yaml.safe_load(open('.\\config\\train-base.yaml', 'r').read())

    stamp = time.strftime("%Y-%m-%d-%H_%M_%S", time.localtime(time.time()))
    stamp = stamp + '-' + opt['train']['method']
    # print(stamp)
    # assert 0
    if opt['train']['debug']:
        stamp = 'debug'
    log_dir = os.path.join('..\\log', stamp)
    model_dir = os.path.join('..\\model', stamp)
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)
    if not os.path.exists(model_dir):
        os.mkdir(model_dir)

    # snapshot the source tree into the log directory for reproducibility
    def copydirs(from_file, to_file):
        if not os.path.exists(to_file):
            os.makedirs(to_file)
        files = os.listdir(from_file)
        for f in files:
            if os.path.isdir(from_file + '\\' + f):
                copydirs(from_file + '\\' + f, to_file + '\\' + f)
            else:
                if '.git' not in from_file:
                    shutil.copy(from_file + '\\' + f, to_file + '\\' + f)
    copydirs('.\\', log_dir + '\\src')
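
    # copydirs is a hand-rolled recursive copy with Windows-style separators;
    # on Python 3 the standard library can do the same job. A minimal sketch,
    # assuming the '.git' exclusion is the only filter needed:
    #
    #   shutil.copytree('.', os.path.join(log_dir, 'src'),
    #                   ignore=shutil.ignore_patterns('.git'))
    #
    # (Note that copytree requires the destination to not exist yet.)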

    ## initialize the skeleton ##
    skeleton_mocap = Skeleton(offsets=opt['data']['offsets'], parents=opt['data']['parents'])
    skeleton_mocap.cuda()
    skeleton_mocap.remove_joints(opt['data']['joints_to_remove'])

    ## load train data ##
    lafan_data_train = LaFan1(opt['data']['data_dir'], \
                              seq_len = opt['model']['seq_length'], \
                              offset = opt['data']['offset'],\
                              train = True, debug=opt['train']['debug'])
    x_mean = lafan_data_train.x_mean.cuda()
    x_std = lafan_data_train.x_std.cuda().view(1, 1, opt['model']['num_joints'], 3)
    if opt['train']['debug']:
        opt['data']['num_workers'] = 1
    lafan_loader_train = DataLoader(lafan_data_train, \
                                    batch_size=opt['train']['batch_size'], \
                                    shuffle=True, num_workers=opt['data']['num_workers'])

    ## load test data ##
    # lafan_data_test = LaFan1(opt['data']['data_dir'], \
    #                          seq_len = opt['model']['seq_length'], \
    #                          train = False, debug=False)
    # lafan_loader_test = DataLoader(lafan_data_test, \
    #                                batch_size=opt['train']['batch_size'], \
    #                                shuffle=True, num_workers=opt['data']['num_workers'])

    ## initialize model ##
    state_encoder = StateEncoder(in_dim=opt['model']['state_input_dim'])
    state_encoder = state_encoder.cuda()
    offset_encoder = OffsetEncoder(in_dim=opt['model']['offset_input_dim'])
    offset_encoder = offset_encoder.cuda()
    target_encoder = TargetEncoder(in_dim=opt['model']['target_input_dim'])
    target_encoder = target_encoder.cuda()
    lstm = LSTM(in_dim=opt['model']['lstm_dim'], hidden_dim = opt['model']['lstm_dim'] * 2)
    lstm = lstm.cuda()
    decoder = Decoder(in_dim=opt['model']['lstm_dim'] * 2, out_dim=opt['model']['state_input_dim'])
    decoder = decoder.cuda()
    if len(opt['train']['pretrained']) > 0:
        state_encoder.load_state_dict(torch.load(os.path.join(opt['train']['pretrained'], 'state_encoder.pkl')))
        offset_encoder.load_state_dict(torch.load(os.path.join(opt['train']['pretrained'], 'offset_encoder.pkl')))
        target_encoder.load_state_dict(torch.load(os.path.join(opt['train']['pretrained'], 'target_encoder.pkl')))
        lstm.load_state_dict(torch.load(os.path.join(opt['train']['pretrained'], 'lstm.pkl')))
        decoder.load_state_dict(torch.load(os.path.join(opt['train']['pretrained'], 'decoder.pkl')))
        print('generator model loaded')

    if opt['train']['use_adv']:
        short_discriminator = ShortMotionDiscriminator(in_dim = (opt['model']['num_joints'] * 3 * 2))
        short_discriminator = short_discriminator.cuda()
        long_discriminator = LongMotionDiscriminator(in_dim = (opt['model']['num_joints'] * 3 * 2))
        long_discriminator = long_discriminator.cuda()
        if len(opt['train']['pretrained']) > 0:
            short_discriminator.load_state_dict(torch.load(os.path.join(opt['train']['pretrained'], 'short_discriminator.pkl')))
            long_discriminator.load_state_dict(torch.load(os.path.join(opt['train']['pretrained'], 'long_discriminator.pkl')))
            print('discriminator model loaded')

    # print('ztta:', ztta.size())
    # assert 0

    ## initialize optimizer_g ##
    optimizer_g = optim.Adam(lr = opt['train']['lr'], params = list(state_encoder.parameters()) +\
                             list(offset_encoder.parameters()) +\
                             list(target_encoder.parameters()) +\
                             list(lstm.parameters()) +\
                             list(decoder.parameters()), \
                             betas = (opt['train']['beta1'], opt['train']['beta2']), \
                             weight_decay = opt['train']['weight_decay'])
    if len(opt['train']['pretrained']) > 0:
        optimizer_g.load_state_dict(torch.load(os.path.join(opt['train']['pretrained'], 'optimizer_g.pkl')))
        print('optimizer_g state loaded')
    ## initialize optimizer_d ##
    if opt['train']['use_adv']:
        optimizer_d = optim.Adam(lr = opt['train']['lr'] * 0.1, params = list(short_discriminator.parameters()) +\
                                 list(long_discriminator.parameters()), \
                                 betas = (opt['train']['beta1'], opt['train']['beta2']), \
                                 weight_decay = opt['train']['weight_decay'])
        if len(opt['train']['pretrained']) > 0:
            optimizer_d.load_state_dict(torch.load(os.path.join(opt['train']['pretrained'], 'optimizer_d.pkl')))
            print('optimizer_d state loaded')
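
    # Generator and discriminators are optimized separately, with the
    # discriminators stepping at one tenth of the generator learning rate
    # (a common trick to keep the critics from overpowering the generator).
    # A minimal sketch of the alternating update implemented in the loop
    # below, with hypothetical d_loss()/g_loss() helpers standing in for
    # the least-squares losses computed there:
    #
    #   optimizer_d.zero_grad(); d_loss(batch).backward(); optimizer_d.step()
    #   optimizer_g.zero_grad(); g_loss(batch).backward(); optimizer_g.step()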

    writer = SummaryWriter(log_dir)
    loss_total_min = 10000000.0
    for epoch in range(opt['train']['num_epoch']):
        state_encoder.train()
        offset_encoder.train()
        target_encoder.train()
        lstm.train()
        decoder.train()
        loss_total_list = []

        if opt['train']['progressive_training']:
            ## get positional code ##
            # NOTE: ztta is generated with the value of cur_seq_length from the
            # previous epoch; the curriculum update happens just below.
            if opt['train']['use_ztta']:
                ztta = gen_ztta(length = lafan_data_train.cur_seq_length).cuda()
            # curriculum: start from 10 frames and grow the transition length
            # by one frame every two epochs, up to the full sequence length
            if (10 + (epoch // 2)) < opt['model']['seq_length']:
                lafan_data_train.cur_seq_length = 10 + (epoch // 2)
            else:
                lafan_data_train.cur_seq_length = opt['model']['seq_length']
        else:
            ## get positional code ##
            if opt['train']['use_ztta']:
                lafan_data_train.cur_seq_length = opt['model']['seq_length']
                ztta = gen_ztta(length = opt['model']['seq_length']).cuda()

        for i_batch, sampled_batch in tqdm(enumerate(lafan_loader_train)):
            # print(i_batch, sampled_batch['local_q'].size())
            loss_pos = 0
            loss_quat = 0
            loss_contact = 0
            loss_root = 0
            # with torch.no_grad():
            if True:
                # state input
                local_q = sampled_batch['local_q'].cuda()
                root_v = sampled_batch['root_v'].cuda()
                contact = sampled_batch['contact'].cuda()
                # offset input
                root_p_offset = sampled_batch['root_p_offset'].cuda()
                local_q_offset = sampled_batch['local_q_offset'].cuda()
                local_q_offset = local_q_offset.view(local_q_offset.size(0), -1)
                # target input
                target = sampled_batch['target'].cuda()
                target = target.view(target.size(0), -1)
                # root pos
                root_p = sampled_batch['root_p'].cuda()
                # X
                X = sampled_batch['X'].cuda()

                if False:
                    print('local_q:', local_q.size(), \
                          'root_v:', root_v.size(), \
                          'contact:', contact.size(), \
                          'root_p_offset:', root_p_offset.size(), \
                          'local_q_offset:', local_q_offset.size(), \
                          'target:', target.size())

                lstm.init_hidden(local_q.size(0))
                h_list = []
                pred_list = []
                pred_list.append(X[:,0])
                # for t in range(opt['model']['seq_length'] - 1):
                for t in range(lafan_data_train.cur_seq_length - 1):
                    # root pos
                    if t == 0:
                        root_p_t = root_p[:,t]
                        local_q_t = local_q[:,t]
                        local_q_t = local_q_t.view(local_q_t.size(0), -1)
                        contact_t = contact[:,t]
                        root_v_t = root_v[:,t]
                    else:
                        root_p_t = root_pred[0]
                        local_q_t = local_q_pred[0]
                        contact_t = contact_pred[0]
                        root_v_t = root_v_pred[0]

                    # state input
                    state_input = torch.cat([local_q_t, root_v_t, contact_t], -1)
                    # offset input
                    # print('root_p_offset:', root_p_offset.size(), 'root_p_t:', root_p_t.size())
                    # print('local_q_offset:', local_q_offset.size(), 'local_q_t:', local_q_t.size())
                    root_p_offset_t = root_p_offset - root_p_t
                    local_q_offset_t = local_q_offset - local_q_t
                    # print('root_p_offset_t:', root_p_offset_t.size(), 'local_q_offset_t:', local_q_offset_t.size())
                    offset_input = torch.cat([root_p_offset_t, local_q_offset_t], -1)
                    # target input
                    target_input = target

                    # print('state_input:', state_input.size())
                    h_state = state_encoder(state_input)
                    h_offset = offset_encoder(offset_input)
                    h_target = target_encoder(target_input)

                    if opt['train']['use_ztta']:
                        h_state += ztta[:, t]
                        h_offset += ztta[:, t]
                        h_target += ztta[:, t]
                    # print('h_state:', h_state.size(),\
                    #       'h_offset:', h_offset.size(),\
                    #       'h_target:', h_target.size())
                    if opt['train']['use_adv']:
                        tta = lafan_data_train.cur_seq_length - 2 - t
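                        # Scheduled target noise, following the paper's idea,
                        # as a worked example (tta = frames remaining until
                        # the target keyframe):
                        #   tta >= 30  ->  lambda_target = 1.0 (full noise)
                        #   tta = 17   ->  lambda_target = (17 - 5) / 25 = 0.48
                        #   tta <  5   ->  lambda_target = 0.0 (no noise)
                        # so the perturbation fades out as the transition
                        # approaches the keyframe it must hit.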
                        if tta < 5:
                            lambda_target = 0.0
                        elif tta >= 5 and tta < 30:
                            lambda_target = (tta - 5) / 25.0
                        else:
                            lambda_target = 1.0
                        h_offset += 0.5 * lambda_target * torch.cuda.FloatTensor(h_offset.size()).normal_()
                        h_target += 0.5 * lambda_target * torch.cuda.FloatTensor(h_target.size()).normal_()

                    h_in = torch.cat([h_state, h_offset, h_target], -1).unsqueeze(0)
                    h_out = lstm(h_in)
                    # print('h_out:', h_out.size())

                    h_pred, contact_pred = decoder(h_out)
                    local_q_v_pred = h_pred[:,:,:opt['model']['target_input_dim']]
                    local_q_pred = local_q_v_pred + local_q_t
                    # print('local_q_pred:', local_q_pred.size())
                    local_q_pred_ = local_q_pred.view(local_q_pred.size(0), local_q_pred.size(1), -1, 4)
                    local_q_pred_ = local_q_pred_ / torch.norm(local_q_pred_, dim = -1, keepdim = True)

                    root_v_pred = h_pred[:,:,opt['model']['target_input_dim']:]
                    root_pred = root_v_pred + root_p_t
                    # print('contact_pred:', contact_pred.size())
                    # print('root_pred:', root_pred.size())
                    pos_pred = skeleton_mocap.forward_kinematics(local_q_pred_, root_pred)

                    pos_next = X[:,t+1]
                    local_q_next = local_q[:,t+1]
                    local_q_next = local_q_next.view(local_q_next.size(0), -1)
                    root_p_next = root_p[:,t+1]
                    contact_next = contact[:,t+1]
                    # print(pos_pred.size(), x_std.size())
                    loss_pos += torch.mean(torch.abs(pos_pred[0] - pos_next) / x_std) / lafan_data_train.cur_seq_length  # opt['model']['seq_length']
                    loss_quat += torch.mean(torch.abs(local_q_pred[0] - local_q_next)) / lafan_data_train.cur_seq_length  # opt['model']['seq_length']
                    loss_root += torch.mean(torch.abs(root_pred[0] - root_p_next) / x_std[:,:,0]) / lafan_data_train.cur_seq_length  # opt['model']['seq_length']
                    loss_contact += torch.mean(torch.abs(contact_pred[0] - contact_next)) / lafan_data_train.cur_seq_length  # opt['model']['seq_length']
                    pred_list.append(pos_pred[0])

                if opt['train']['use_adv']:
                    fake_input = torch.cat([x.reshape(x.size(0), -1).unsqueeze(-1) for x in pred_list], -1)
                    fake_v_input = torch.cat([fake_input[:,:,1:] - fake_input[:,:,:-1], torch.zeros_like(fake_input[:,:,0:1]).cuda()], -1)
                    fake_input = torch.cat([fake_input, fake_v_input], 1)

                    real_input = torch.cat([X[:, i].view(X.size(0), -1).unsqueeze(-1) for i in range(lafan_data_train.cur_seq_length)], -1)
                    real_v_input = torch.cat([real_input[:,:,1:] - real_input[:,:,:-1], torch.zeros_like(real_input[:,:,0:1]).cuda()], -1)
                    real_input = torch.cat([real_input, real_v_input], 1)

                    ## discriminator update (least-squares GAN targets: real -> 1, fake -> 0) ##
                    optimizer_d.zero_grad()
                    short_fake_logits = torch.mean(short_discriminator(fake_input.detach())[:,0], 1)
                    short_real_logits = torch.mean(short_discriminator(real_input)[:,0], 1)
                    short_d_fake_loss = torch.mean((short_fake_logits) ** 2)
                    short_d_real_loss = torch.mean((short_real_logits - 1) ** 2)
                    short_d_loss = (short_d_fake_loss + short_d_real_loss) / 2.0

                    long_fake_logits = torch.mean(long_discriminator(fake_input.detach())[:,0], 1)
                    long_real_logits = torch.mean(long_discriminator(real_input)[:,0], 1)
                    long_d_fake_loss = torch.mean((long_fake_logits) ** 2)
                    long_d_real_loss = torch.mean((long_real_logits - 1) ** 2)
                    long_d_loss = (long_d_fake_loss + long_d_real_loss) / 2.0
                    total_d_loss = opt['train']['loss_adv_weight'] * long_d_loss + \
                                   opt['train']['loss_adv_weight'] * short_d_loss
                    total_d_loss.backward()
                    optimizer_d.step()

                optimizer_g.zero_grad()
                ## foot sliding loss: penalize foot velocity on frames labeled as contact ##
                pred_pos = torch.cat([x.reshape(x.size(0), -1).unsqueeze(-1) for x in pred_list], -1)
                pred_vel = (pred_pos[:,opt['data']['foot_index'],1:] - pred_pos[:,opt['data']['foot_index'],:-1])
                pred_vel = pred_vel.view(pred_vel.size(0), 4, 3, pred_vel.size(-1))
                loss_slide = torch.mean(torch.abs(pred_vel * contact[:,:-1].permute(0, 2, 1).unsqueeze(2)))
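                # Shape walkthrough for the sliding term above (a reading aid,
                # not extra repo code): pred_pos is (B, J*3, T); indexing with
                # foot_index keeps the 4 foot joints' coordinate rows, giving
                # (B, 12, T-1) frame-to-frame velocities reshaped to
                # (B, 4, 3, T-1). contact is (B, T, 4), so
                # contact[:,:-1].permute(0, 2, 1).unsqueeze(2) is (B, 4, 1, T-1)
                # and broadcasts across x/y/z: only frames labeled "in contact"
                # contribute |velocity| to the penalty.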
                loss_total = opt['train']['loss_pos_weight'] * loss_pos + \
                             opt['train']['loss_quat_weight'] * loss_quat + \
                             opt['train']['loss_root_weight'] * loss_root + \
                             opt['train']['loss_slide_weight'] * loss_slide + \
                             opt['train']['loss_contact_weight'] * loss_contact

                if opt['train']['use_adv']:
                    ## generator update (least-squares GAN target for fake: 1) ##
                    short_fake_logits = torch.mean(short_discriminator(fake_input)[:,0], 1)
                    short_g_loss = torch.mean((short_fake_logits - 1) ** 2)
                    long_fake_logits = torch.mean(long_discriminator(fake_input)[:,0], 1)
                    long_g_loss = torch.mean((long_fake_logits - 1) ** 2)
                    total_g_loss = opt['train']['loss_adv_weight'] * long_g_loss + \
                                   opt['train']['loss_adv_weight'] * short_g_loss
                    loss_total += total_g_loss

                loss_total.backward()
                torch.nn.utils.clip_grad_norm_(state_encoder.parameters(), 0.5)
                torch.nn.utils.clip_grad_norm_(offset_encoder.parameters(), 0.5)
                torch.nn.utils.clip_grad_norm_(target_encoder.parameters(), 0.5)
                torch.nn.utils.clip_grad_norm_(lstm.parameters(), 0.5)
                torch.nn.utils.clip_grad_norm_(decoder.parameters(), 0.5)
                optimizer_g.step()
                # print("epoch: %03d, batch: %03d, pos: %.3f, quat: %.3f, root: %.3f, cont: %.3f"%\
                #       (epoch, \
                #        i_batch, \
                #        loss_pos.item(), \
                #        loss_quat.item(), \
                #        loss_root.item(), \
                #        loss_contact.item()))
                # NOTE: 317 is a hard-coded batches-per-epoch count;
                # len(lafan_loader_train) would generalize.
                writer.add_scalar('loss_pos', loss_pos.item(), global_step = epoch * 317 + i_batch)
                writer.add_scalar('loss_quat', loss_quat.item(), global_step = epoch * 317 + i_batch)
                writer.add_scalar('loss_root', loss_root.item(), global_step = epoch * 317 + i_batch)
                writer.add_scalar('loss_slide', loss_slide.item(), global_step = epoch * 317 + i_batch)
                writer.add_scalar('loss_contact', loss_contact.item(), global_step = epoch * 317 + i_batch)
                writer.add_scalar('loss_total', loss_total.item(), global_step = epoch * 317 + i_batch)

                if opt['train']['use_adv']:
                    writer.add_scalar('loss_short_g', short_g_loss.item(), global_step = epoch * 317 + i_batch)
                    writer.add_scalar('loss_long_g', long_g_loss.item(), global_step = epoch * 317 + i_batch)
                    writer.add_scalar('loss_short_d_real', short_d_real_loss.item(), global_step = epoch * 317 + i_batch)
                    writer.add_scalar('loss_short_d_fake', short_d_fake_loss.item(), global_step = epoch * 317 + i_batch)
                    writer.add_scalar('loss_long_d_real', long_d_real_loss.item(), global_step = epoch * 317 + i_batch)
                    writer.add_scalar('loss_long_d_fake', long_d_fake_loss.item(), global_step = epoch * 317 + i_batch)
                loss_total_list.append(loss_total.item())
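
        # The block below keeps only the checkpoint with the lowest mean
        # training loss so far. A validation split would be the more
        # principled trigger; a minimal sketch, assuming a hypothetical
        # evaluate() helper over the (commented-out) test loader:
        #
        #   val_loss = evaluate(lafan_loader_test)
        #   if val_loss < loss_total_min:
        #       loss_total_min = val_loss
        #       ...save checkpoints as below...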
        loss_total_cur = np.mean(loss_total_list)
        if loss_total_cur < loss_total_min:
            loss_total_min = loss_total_cur
            torch.save(state_encoder.state_dict(), model_dir + '/state_encoder.pkl')
            torch.save(target_encoder.state_dict(), model_dir + '/target_encoder.pkl')
            torch.save(offset_encoder.state_dict(), model_dir + '/offset_encoder.pkl')
            torch.save(lstm.state_dict(), model_dir + '/lstm.pkl')
            torch.save(decoder.state_dict(), model_dir + '/decoder.pkl')
            torch.save(optimizer_g.state_dict(), model_dir + '/optimizer_g.pkl')
            if opt['train']['use_adv']:
                torch.save(short_discriminator.state_dict(), model_dir + '/short_discriminator.pkl')
                torch.save(long_discriminator.state_dict(), model_dir + '/long_discriminator.pkl')
                torch.save(optimizer_d.state_dict(), model_dir + '/optimizer_d.pkl')
        print("train epoch: %03d, cur total loss: %.3f, cur best loss: %.3f" % (epoch, loss_total_cur, loss_total_min))
--------------------------------------------------------------------------------