├── lafan1
│   ├── __init__.py
│   ├── lafan1.zip
│   ├── extract.py
│   ├── benchmarks.py
│   └── utils.py
├── .gitattributes
├── .gitignore
├── images
│   ├── L2P.png
│   ├── L2Q.png
│   ├── benchmark_results.png
│   └── lafan1_1635_cropped.jpg
├── evaluate.py
├── evaluate_test.py
├── README.md
└── license.txt
/lafan1/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | *.zip filter=lfs diff=lfs merge=lfs -text
2 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | .vscode/
3 |
4 | output/
5 |
6 | *.pyc
7 |
--------------------------------------------------------------------------------
/images/L2P.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XefPatterson/Ubisoft-LaForge-Animation-Dataset/HEAD/images/L2P.png
--------------------------------------------------------------------------------
/images/L2Q.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XefPatterson/Ubisoft-LaForge-Animation-Dataset/HEAD/images/L2Q.png
--------------------------------------------------------------------------------
/images/benchmark_results.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XefPatterson/Ubisoft-LaForge-Animation-Dataset/HEAD/images/benchmark_results.png
--------------------------------------------------------------------------------
/images/lafan1_1635_cropped.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XefPatterson/Ubisoft-LaForge-Animation-Dataset/HEAD/images/lafan1_1635_cropped.jpg
--------------------------------------------------------------------------------
/lafan1/lafan1.zip:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:ea918082b500a5d158e9d3aa39039df04cd42e25f5c02fe8f7e88e8e9365a977
3 | size 144051503
4 |
--------------------------------------------------------------------------------
/evaluate.py:
--------------------------------------------------------------------------------
1 | import sys, os
2 | sys.path.insert(0, os.path.dirname(__file__))
3 |
4 | import pickle as pkl
5 | import zipfile
6 | from lafan1 import extract, utils, benchmarks
7 |
8 |
9 | """
10 | Unzips the data, computes the LaFAN1 training statistics,
11 | extracts the LaFAN1 test set, and evaluates baselines on it.
12 | """
13 |
14 | # output location for unzipped bvhs, stats, results, etc...
15 | out_path = os.path.join(os.path.dirname(__file__), 'output')
16 |
17 | # the train/test set actors as in the paper
18 | train_actors = ['subject1', 'subject2', 'subject3', 'subject4']
19 | test_actors = ['subject5']
20 |
21 |
22 | print('Unzipping the data...\n')
23 | lafan_data = os.path.join(os.path.dirname(__file__), 'lafan1', 'lafan1.zip')
24 | bvh_folder = os.path.join(out_path, 'BVH')
25 | with zipfile.ZipFile(lafan_data, "r") as zip_ref:
26 | if not os.path.exists(bvh_folder):
27 | os.makedirs(bvh_folder, exist_ok=True)
28 | zip_ref.extractall(bvh_folder)
29 |
30 |
31 | print('Retrieving statistics...')
32 | stats_file = os.path.join(out_path, 'train_stats.pkl')
33 | if not os.path.exists(stats_file):
34 | x_mean, x_std, offsets = extract.get_train_stats(bvh_folder, train_actors)
35 | with open(stats_file, 'wb') as f:
36 | pkl.dump({
37 | 'x_mean': x_mean,
38 | 'x_std': x_std,
39 | 'offsets': offsets,
40 | }, f, protocol=pkl.HIGHEST_PROTOCOL)
41 | else:
42 | print(' Reusing stats file: ' + stats_file)
43 | with open(stats_file, 'rb') as f:
44 | stats = pkl.load(f)
45 | x_mean = stats['x_mean']
46 | x_std = stats['x_std']
47 | offsets = stats['offsets']
48 |
49 |
50 | # Get test-set for windows of 65 frames, offset by 40 frames
51 | print('\nBuilding the test set...')
52 | X, Q, parents, contacts_l, contacts_r = extract.get_lafan1_set(bvh_folder, test_actors, window=65, offset=40)
53 | print(' Nb of sequences : {}\n'.format(X.shape[0]))
54 |
55 | results = benchmarks.benchmark_interpolation(X, Q, x_mean, x_std, offsets, parents, out_path=out_path)
56 |
57 | # save the results for validation if desired
58 | with open(os.path.join(out_path, 'results.pkl'), 'wb') as f:
59 | pkl.dump(results, f, protocol=pkl.HIGHEST_PROTOCOL)
60 |
61 |
62 | print('Done.')
63 |
64 |
--------------------------------------------------------------------------------
/evaluate_test.py:
--------------------------------------------------------------------------------
1 | import sys, os
2 | sys.path.insert(0, os.path.dirname(__file__))
3 |
4 | import lafan1 as lf
5 | import unittest
6 | import pickle as pkl
7 |
8 | class test_eval(unittest.TestCase):
9 |
10 | @classmethod
11 | def setUpClass(cls):
12 | with open(os.path.join(os.path.dirname(__file__), 'output', 'results.pkl'), 'rb') as f:
13 | cls.results = pkl.load(f)
14 |
15 | @classmethod
16 | def tearDownClass(cls):
17 | pass
18 |
19 | def test_01_zerov_quat_loss(self):
20 | results = self.results
21 | self.assertTrue(results[('zerov_quat_loss', 5)] >= 0.550 and results[('zerov_quat_loss', 5)] < 0.565)
22 | self.assertTrue(results[('zerov_quat_loss', 15)] >= 1.095 and results[('zerov_quat_loss', 15)] < 1.105)
23 | self.assertTrue(results[('zerov_quat_loss', 30)] >= 1.505 and results[('zerov_quat_loss', 30)] < 1.515)
24 | self.assertTrue(results[('zerov_quat_loss', 45)] >= 1.805 and results[('zerov_quat_loss', 45)] < 1.815)
25 |
26 | def test_02_interp_quat_loss(self):
27 | results = self.results
28 | self.assertTrue(results[('interp_quat_loss', 5)] >= 0.215 and results[('interp_quat_loss', 5)] < 0.225)
29 | self.assertTrue(results[('interp_quat_loss', 15)] >= 0.615 and results[('interp_quat_loss', 15)] < 0.625)
30 | self.assertTrue(results[('interp_quat_loss', 30)] >= 0.975 and results[('interp_quat_loss', 30)] < 0.985)
31 | self.assertTrue(results[('interp_quat_loss', 45)] >= 1.245 and results[('interp_quat_loss', 45)] < 1.255)
32 |
33 | def test_03_zerov_pos_loss(self):
34 | results = self.results
35 | self.assertTrue(results[('zerov_pos_loss', 5)] >= 1.52295 and results[('zerov_pos_loss', 5)] < 1.52305)
36 | self.assertTrue(results[('zerov_pos_loss', 15)] >= 3.69435 and results[('zerov_pos_loss', 15)] < 3.69445)
37 | self.assertTrue(results[('zerov_pos_loss', 30)] >= 6.60015 and results[('zerov_pos_loss', 30)] < 6.60025)
38 | self.assertTrue(results[('zerov_pos_loss', 45)] >= 9.32885 and results[('zerov_pos_loss', 45)] < 9.32895)
39 |
40 | def test_04_interp_pos_loss(self):
41 | results = self.results
42 | self.assertTrue(results[('interp_pos_loss', 5)] >= 0.37285 and results[('interp_pos_loss', 5)] < 0.37295)
43 | self.assertTrue(results[('interp_pos_loss', 15)] >= 1.24875 and results[('interp_pos_loss', 15)] < 1.24885)
44 | self.assertTrue(results[('interp_pos_loss', 30)] >= 2.31575 and results[('interp_pos_loss', 30)] < 2.31585)
45 | self.assertTrue(results[('interp_pos_loss', 45)] >= 3.44685 and results[('interp_pos_loss', 45)] < 3.44695)
46 |
47 | def test_05_zerov_npss_loss(self):
48 | results = self.results
49 | self.assertTrue(results[('zerov_npss_loss', 5)] >= 0.00525 and results[('zerov_npss_loss', 5)] < 0.00535)
50 | self.assertTrue(results[('zerov_npss_loss', 15)] >= 0.05215 and results[('zerov_npss_loss', 15)] < 0.05225)
51 | self.assertTrue(results[('zerov_npss_loss', 30)] >= 0.23175 and results[('zerov_npss_loss', 30)] < 0.23185)
52 | self.assertTrue(results[('zerov_npss_loss', 45)] >= 0.49175 and results[('zerov_npss_loss', 45)] < 0.49185)
53 |
54 | def test_06_interp_npss_loss(self):
55 | results = self.results
56 | self.assertTrue(results[('interp_npss_loss', 5)] >= 0.00225 and results[('interp_npss_loss', 5)] < 0.00235)
57 | self.assertTrue(results[('interp_npss_loss', 15)] >= 0.03905 and results[('interp_npss_loss', 15)] < 0.03915)
58 | self.assertTrue(results[('interp_npss_loss', 30)] >= 0.20125 and results[('interp_npss_loss', 30)] < 0.20135)
59 | self.assertTrue(results[('interp_npss_loss', 45)] >= 0.44925 and results[('interp_npss_loss', 45)] < 0.44935)
60 |
61 | if __name__ == '__main__':
62 | unittest.main(verbosity=2)
63 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Ubisoft La Forge Animation Dataset ("LAFAN1")
2 | 
3 |
4 | Ubisoft La Forge Animation dataset and accompanying code for the Robust Motion In-Betweening SIGGRAPH 2020 article.
5 | The motion capture was shot in May 2017.
6 |
7 | This dataset can be used under the Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International Public License (see license.txt).
8 |
9 | ## Data
10 | The animation data is contained in the lafan1.zip file.
11 | All the animation sequences are in the BVH file format.
12 | There are 5 subjects in the dataset, 77 sequences, and 496,672 motion frames at 30fps (~4.6 hours).
13 | Every BVH file is named with the following convention: \[*theme*\]\[*take number*\]_\[*subject ID*\].bvh.
14 | Any sequences sharing the same *theme* and *take number* were recorded at the same time in the studio.
15 | Themes are high level indicators of the actions in the sequences.
16 |
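For example, here is a minimal sketch of unzipping the archive and splitting a file name into its theme/take and subject parts, mirroring what `evaluate.py` and `lafan1/extract.py` do (the file name `aiming1_subject1.bvh` and the `output/BVH` folder are only illustrative):

```python
import ntpath
import zipfile

# Unzip the BVH files (evaluate.py extracts them to output/BVH)
with zipfile.ZipFile('lafan1/lafan1.zip', 'r') as zip_ref:
    zip_ref.extractall('output/BVH')

def parse_lafan1_filename(filename):
    # '[theme][take number]_[subject ID].bvh' -> ('[theme][take number]', '[subject ID]')
    seq_name, subject = ntpath.basename(filename)[:-len('.bvh')].split('_')
    return seq_name, subject

print(parse_lafan1_filename('aiming1_subject1.bvh'))  # ('aiming1', 'subject1')
```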
17 | The following themes are present in the LaFAN1 dataset:
18 |
19 | | Theme | Description |Number of sequences|
20 | |:----------------|:------------------------------------------ |:-----------------:|
21 | | Obstacles | Locomotion on uneven terrain |17 |
22 | | Walk | Walking locomotion, with different styles |12 |
23 | | Dance | Free dancing |8 |
24 | | Fall and get up | Falling on the ground and getting back up |6 |
25 | | Aiming | Locomotion while handling or aiming a gun |5 |
26 | | Ground | Locomotion while crawling and crouching |5 |
27 | | Multiple actions| Miscellaneous/multiple movements per sequence|4 |
28 | | Run | Jogging/Running locomotion |4 |
29 | | Fight | Various fight movements |3 |
30 | | Jumps | Locomotion with one and two-leg jumps |3 |
31 | | Fight and sports| Fight and sports movements |2 |
32 | | Push and stumble| Pushing, stumbling and recovery |3 |
33 | | Push and fall | Pushing, falling, and getting up |2 |
34 | | Sprint | Sprinting locomotion |2 |
35 | | Push | Pushing adversary |1 |
36 |
37 | ## Code
38 |
39 |
40 | ### Requirements
41 | You should only need `numpy` installed in a Python 3.X environment, but the code was developed with `python 3.7` and `numpy 1.17.4`.
42 |
43 | You can easily create a test environment with `conda`:
44 |
45 | `conda create -n lafan_env python=3.7 numpy=1.17.4`
46 |
47 |
48 | ### Evaluating baselines
49 | From the command line:
50 |
51 | `python evaluate.py`
52 |
53 | To validate that the results produced by the baseline evaluations are within the expected ranges, you can subsequently run:
54 |
55 | `python evaluate_test.py`
56 |
57 | ---
58 |
59 | The first run may take several minutes, as it will compute the training statistics.
60 | Subsequent runs should be faster, since the statistics are saved and reused.
61 |
62 |
63 | The training statistics for normalization are computed on windows of 50 frames offset by 20 frames.
64 | We sample our test windows from Subject 5 every 40 frames, and retrieve 2232 sequences for evaluation.
65 |
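Concretely, this mirrors the calls made in `evaluate.py` (run from the repository root, after the archive has been extracted to `output/BVH`):

```python
from lafan1 import extract

bvh_folder = 'output/BVH'

# Training statistics: subjects 1-4, windows of 50 frames offset by 20 (get_lafan1_set defaults)
x_mean, x_std, offsets = extract.get_train_stats(
    bvh_folder, ['subject1', 'subject2', 'subject3', 'subject4'])

# Test windows: Subject 5 only, windows of 65 frames offset by 40
X, Q, parents, contacts_l, contacts_r = extract.get_lafan1_set(
    bvh_folder, ['subject5'], window=65, offset=40)
```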
66 | In the **Zero-Velocity** baseline, the last seed frame is repeated during the whole transition.
67 | In the **Interpolation** baseline, the global root position is linearly interpolated (LERP), and quaternions are spherically linearly interpolated (SLERP).
68 |
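As a minimal sketch (on random stand-in data, not the real dataset), the two baselines can be produced with the helpers in `lafan1/utils.py` as follows:

```python
import numpy as np
from lafan1 import utils

# Toy batch: 10 past frames, a 30-frame transition to fill, 10 future frames
n_past, n_future, n_trans = 10, 10, 30
B, T, J = 4, n_past + n_trans + n_future, 22
Q = utils.quat_normalize(np.random.randn(B, T, J, 4))  # local quaternions (B, T, J, 4)
R = np.random.randn(B, T, 1, 3)                        # root positions   (B, T, 1, 3)

# Zero-Velocity baseline: repeat the last seed frame over the whole transition
zerov_Q = np.repeat(Q[:, n_past - 1:n_past], n_trans, axis=1)
zerov_R = np.repeat(R[:, n_past - 1:n_past], n_trans, axis=1)

# Interpolation baseline: LERP on the root, SLERP on the quaternions.
# interpolate_local also returns the two keyframes, so they are dropped here.
interp_R, interp_Q = utils.interpolate_local(R, Q, n_past, n_future)
interp_R, interp_Q = interp_R[:, 1:-1], interp_Q[:, 1:-1]
```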
69 | ---
70 |
71 | You should obtain the following results:
72 |
73 | 
74 |
75 |
76 | ### Evaluation metrics
77 | When running the baseline evaluations, we report three different metrics on global quaternions and global positions to assess the quality of the generated transitions.
78 |
79 | #### Global quaternion loss
80 | The global quaternion loss (L2Q) is the L2 distance computed between estimated and ground-truth global quaternion vectors ***g***, averaged over each time step *t* and over all sequences *s* in the test set.
81 |
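Written out with the notation above (a transcription of this description, not necessarily the article's exact typesetting), with *D* the set of test transitions and *T* the transition length:

```latex
L2Q = \frac{1}{|D|} \frac{1}{T} \sum_{s \in D} \sum_{t=1}^{T}
      \left\lVert \hat{g}_{t}^{\,s} - g_{t}^{\,s} \right\rVert_{2}
```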
82 |
83 |
84 | #### Global position loss
85 | The global position loss (L2P) is the L2 distance computed between estimated and ground-truth normalized global position vectors ***p***, averaged over each time step *t* and over all sequences *s* in the test set.
86 |
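Under the same notation and caveat as for L2Q, with ***p*** the normalized global positions:

```latex
L2P = \frac{1}{|D|} \frac{1}{T} \sum_{s \in D} \sum_{t=1}^{T}
      \left\lVert \hat{p}_{t}^{\,s} - p_{t}^{\,s} \right\rVert_{2}
```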
87 |
88 |
89 | #### Normalized Power Spectrum Similarity (NPSS) on global quaternions
90 | This is a distance metric based on the power spectrum of joint angles, as proposed by Gopalakrishnan et al. (2019).
91 | It is reported to correlate better with human judgement about motion.
92 | Its motivation and implementation details can be found in [their paper][1].
93 |
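As a rough summary of the computation in `fast_npss` (see `lafan1/benchmarks.py`): for each sequence *s* and feature dimension *d*, the squared Fourier coefficients over time are normalized into a power distribution over frequencies, the cumulative distributions of ground truth and prediction are compared with an L1 (earth mover's) distance, and the per-feature distances are averaged using the ground-truth total power as weights:

```latex
\mathrm{NPSS}
  = \frac{\sum_{s,d} w_{s,d}\, \mathrm{EMD}_{s,d}}{\sum_{s,d} w_{s,d}},
\quad
\mathrm{EMD}_{s,d}
  = \big\lVert \mathrm{CDF}(\hat{P}_{s,d}) - \mathrm{CDF}(P_{s,d}) \big\rVert_{1},
\quad
w_{s,d} = \textstyle\sum_{k} P_{s,d}[k]
```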
94 | [1]: "A Neural Temporal Model for Human Motion Prediction"
95 |
--------------------------------------------------------------------------------
/lafan1/extract.py:
--------------------------------------------------------------------------------
1 | import re, os, ntpath
2 | import numpy as np
3 | from . import utils
4 |
5 | channelmap = {
6 | 'Xrotation': 'x',
7 | 'Yrotation': 'y',
8 | 'Zrotation': 'z'
9 | }
10 |
11 | channelmap_inv = {
12 | 'x': 'Xrotation',
13 | 'y': 'Yrotation',
14 | 'z': 'Zrotation',
15 | }
16 |
17 | ordermap = {
18 | 'x': 0,
19 | 'y': 1,
20 | 'z': 2,
21 | }
22 |
23 |
24 | class Anim(object):
25 | """
26 | A very basic animation object
27 | """
28 | def __init__(self, quats, pos, offsets, parents, bones):
29 | """
30 | :param quats: local quaternions tensor
31 | :param pos: local positions tensor
32 | :param offsets: local joint offsets
33 | :param parents: bone hierarchy
34 | :param bones: bone names
35 | """
36 | self.quats = quats
37 | self.pos = pos
38 | self.offsets = offsets
39 | self.parents = parents
40 | self.bones = bones
41 |
42 |
43 | def read_bvh(filename, start=None, end=None, order=None):
44 | """
45 | Reads a BVH file and extracts animation information.
46 |
47 |     :param filename: BVH filename
48 | :param start: start frame
49 | :param end: end frame
50 | :param order: order of euler rotations
51 |     :return: A simple Anim object containing the extracted information.
52 | """
53 |
54 | f = open(filename, "r")
55 |
56 | i = 0
57 | active = -1
58 | end_site = False
59 |
60 | names = []
61 | orients = np.array([]).reshape((0, 4))
62 | offsets = np.array([]).reshape((0, 3))
63 | parents = np.array([], dtype=int)
64 |
65 | # Parse the file, line by line
66 | for line in f:
67 |
68 | if "HIERARCHY" in line: continue
69 | if "MOTION" in line: continue
70 |
71 | rmatch = re.match(r"ROOT (\w+)", line)
72 | if rmatch:
73 | names.append(rmatch.group(1))
74 | offsets = np.append(offsets, np.array([[0, 0, 0]]), axis=0)
75 | orients = np.append(orients, np.array([[1, 0, 0, 0]]), axis=0)
76 | parents = np.append(parents, active)
77 | active = (len(parents) - 1)
78 | continue
79 |
80 | if "{" in line: continue
81 |
82 | if "}" in line:
83 | if end_site:
84 | end_site = False
85 | else:
86 | active = parents[active]
87 | continue
88 |
89 | offmatch = re.match(r"\s*OFFSET\s+([\-\d\.e]+)\s+([\-\d\.e]+)\s+([\-\d\.e]+)", line)
90 | if offmatch:
91 | if not end_site:
92 | offsets[active] = np.array([list(map(float, offmatch.groups()))])
93 | continue
94 |
95 | chanmatch = re.match(r"\s*CHANNELS\s+(\d+)", line)
96 | if chanmatch:
97 | channels = int(chanmatch.group(1))
98 | if order is None:
99 | channelis = 0 if channels == 3 else 3
100 | channelie = 3 if channels == 3 else 6
101 | parts = line.split()[2 + channelis:2 + channelie]
102 | if any([p not in channelmap for p in parts]):
103 | continue
104 | order = "".join([channelmap[p] for p in parts])
105 | continue
106 |
107 |         jmatch = re.match(r"\s*JOINT\s+(\w+)", line)
108 | if jmatch:
109 | names.append(jmatch.group(1))
110 | offsets = np.append(offsets, np.array([[0, 0, 0]]), axis=0)
111 | orients = np.append(orients, np.array([[1, 0, 0, 0]]), axis=0)
112 | parents = np.append(parents, active)
113 | active = (len(parents) - 1)
114 | continue
115 |
116 | if "End Site" in line:
117 | end_site = True
118 | continue
119 |
120 |         fmatch = re.match(r"\s*Frames:\s+(\d+)", line)
121 | if fmatch:
122 | if start and end:
123 | fnum = (end - start) - 1
124 | else:
125 | fnum = int(fmatch.group(1))
126 | positions = offsets[np.newaxis].repeat(fnum, axis=0)
127 | rotations = np.zeros((fnum, len(orients), 3))
128 | continue
129 |
130 |         fmatch = re.match(r"\s*Frame Time:\s+([\d\.]+)", line)
131 | if fmatch:
132 | frametime = float(fmatch.group(1))
133 | continue
134 |
135 | if (start and end) and (i < start or i >= end - 1):
136 | i += 1
137 | continue
138 |
139 | dmatch = line.strip().split(' ')
140 | if dmatch:
141 | data_block = np.array(list(map(float, dmatch)))
142 | N = len(parents)
143 | fi = i - start if start else i
144 | if channels == 3:
145 | positions[fi, 0:1] = data_block[0:3]
146 | rotations[fi, :] = data_block[3:].reshape(N, 3)
147 | elif channels == 6:
148 | data_block = data_block.reshape(N, 6)
149 | positions[fi, :] = data_block[:, 0:3]
150 | rotations[fi, :] = data_block[:, 3:6]
151 | elif channels == 9:
152 | positions[fi, 0] = data_block[0:3]
153 | data_block = data_block[3:].reshape(N - 1, 9)
154 | rotations[fi, 1:] = data_block[:, 3:6]
155 | positions[fi, 1:] += data_block[:, 0:3] * data_block[:, 6:9]
156 | else:
157 | raise Exception("Too many channels! %i" % channels)
158 |
159 | i += 1
160 |
161 | f.close()
162 |
163 | rotations = utils.euler_to_quat(np.radians(rotations), order=order)
164 | rotations = utils.remove_quat_discontinuities(rotations)
165 |
166 | return Anim(rotations, positions, offsets, parents, names)
167 |
168 |
169 | def get_lafan1_set(bvh_path, actors, window=50, offset=20):
170 | """
171 | Extract the same test set as in the article, given the location of the BVH files.
172 |
173 | :param bvh_path: Path to the dataset BVH files
174 |     :param actors: list of actor prefixes to use in the set
175 | :param window: width of the sliding windows (in timesteps)
176 | :param offset: offset between windows (in timesteps)
177 | :return: tuple:
178 | X: local positions
179 | Q: local quaternions
180 | parents: list of parent indices defining the bone hierarchy
181 | contacts_l: binary tensor of left-foot contacts of shape (Batchsize, Timesteps, 2)
182 | contacts_r: binary tensor of right-foot contacts of shape (Batchsize, Timesteps, 2)
183 | """
184 | npast = 10
185 | subjects = []
186 | seq_names = []
187 | X = []
188 | Q = []
189 | contacts_l = []
190 | contacts_r = []
191 |
192 | # Extract
193 | bvh_files = os.listdir(bvh_path)
194 |
195 | for file in bvh_files:
196 | if file.endswith('.bvh'):
197 | seq_name, subject = ntpath.basename(file[:-4]).split('_')
198 |
199 | if subject in actors:
200 | print('Processing file {}'.format(file))
201 | seq_path = os.path.join(bvh_path, file)
202 | anim = read_bvh(seq_path)
203 |
204 | # Sliding windows
205 | i = 0
206 | while i+window < anim.pos.shape[0]:
207 | q, x = utils.quat_fk(anim.quats[i: i+window], anim.pos[i: i+window], anim.parents)
208 | # Extract contacts
209 | c_l, c_r = utils.extract_feet_contacts(x, [3, 4], [7, 8], velfactor=0.02)
210 | X.append(anim.pos[i: i+window])
211 | Q.append(anim.quats[i: i+window])
212 | seq_names.append(seq_name)
213 |                     subjects.append(subject)
214 | contacts_l.append(c_l)
215 | contacts_r.append(c_r)
216 |
217 | i += offset
218 |
219 | X = np.asarray(X)
220 | Q = np.asarray(Q)
221 | contacts_l = np.asarray(contacts_l)
222 | contacts_r = np.asarray(contacts_r)
223 |
224 | # Sequences around XZ = 0
225 | xzs = np.mean(X[:, :, 0, ::2], axis=1, keepdims=True)
226 | X[:, :, 0, 0] = X[:, :, 0, 0] - xzs[..., 0]
227 | X[:, :, 0, 2] = X[:, :, 0, 2] - xzs[..., 1]
228 |
229 | # Unify facing on last seed frame
230 | X, Q = utils.rotate_at_frame(X, Q, anim.parents, n_past=npast)
231 |
232 | return X, Q, anim.parents, contacts_l, contacts_r
233 |
234 |
235 | def get_train_stats(bvh_folder, train_set):
236 | """
237 | Extract the same training set as in the paper in order to compute the normalizing statistics
238 | :return: Tuple of (local position mean vector, local position standard deviation vector, local joint offsets tensor)
239 | """
240 | print('Building the train set...')
241 | xtrain, qtrain, parents, _, _ = get_lafan1_set(bvh_folder, train_set, window=50, offset=20)
242 |
243 | print('Computing stats...\n')
244 | # Joint offsets : are constant, so just take the first frame:
245 | offsets = xtrain[0:1, 0:1, 1:, :] # Shape : (1, 1, J, 3)
246 |
247 | # Global representation:
248 | q_glbl, x_glbl = utils.quat_fk(qtrain, xtrain, parents)
249 |
250 | # Global positions stats:
251 | x_mean = np.mean(x_glbl.reshape([x_glbl.shape[0], x_glbl.shape[1], -1]).transpose([0, 2, 1]), axis=(0, 2), keepdims=True)
252 | x_std = np.std(x_glbl.reshape([x_glbl.shape[0], x_glbl.shape[1], -1]).transpose([0, 2, 1]), axis=(0, 2), keepdims=True)
253 |
254 | return x_mean, x_std, offsets
255 |
--------------------------------------------------------------------------------
/lafan1/benchmarks.py:
--------------------------------------------------------------------------------
1 | import pickle as pkl
2 | import numpy as np
3 | import zipfile
4 | import os
5 | from . import extract
6 | from . import utils
7 | np.set_printoptions(precision=3)
8 |
9 | def fast_npss(gt_seq, pred_seq):
10 | """
11 | Computes Normalized Power Spectrum Similarity (NPSS).
12 |
13 |     This is the metric proposed by Gopalakrishnan et al. (2019).
14 | This implementation uses numpy parallelism for improved performance.
15 |
16 | :param gt_seq: ground-truth array of shape : (Batchsize, Timesteps, Dimension)
17 | :param pred_seq: shape : (Batchsize, Timesteps, Dimension)
18 | :return: The average npss metric for the batch
19 | """
20 | # Fourier coefficients along the time dimension
21 | gt_fourier_coeffs = np.real(np.fft.fft(gt_seq, axis=1))
22 | pred_fourier_coeffs = np.real(np.fft.fft(pred_seq, axis=1))
23 |
24 | # Square of the Fourier coefficients
25 | gt_power = np.square(gt_fourier_coeffs)
26 | pred_power = np.square(pred_fourier_coeffs)
27 |
28 | # Sum of powers over time dimension
29 | gt_total_power = np.sum(gt_power, axis=1)
30 | pred_total_power = np.sum(pred_power, axis=1)
31 |
32 | # Normalize powers with totals
33 | gt_norm_power = gt_power / gt_total_power[:, np.newaxis, :]
34 | pred_norm_power = pred_power / pred_total_power[:, np.newaxis, :]
35 |
36 | # Cumulative sum over time
37 | cdf_gt_power = np.cumsum(gt_norm_power, axis=1)
38 | cdf_pred_power = np.cumsum(pred_norm_power, axis=1)
39 |
40 | # Earth mover distance
41 | emd = np.linalg.norm((cdf_pred_power - cdf_gt_power), ord=1, axis=1)
42 |
43 | # Weighted EMD
44 | power_weighted_emd = np.average(emd, weights=gt_total_power)
45 |
46 | return power_weighted_emd
47 |
48 |
49 | def flatjoints(x):
50 | """
51 | Shorthand for a common reshape pattern. Collapses all but the two first dimensions of a tensor.
52 | :param x: Data tensor of at least 3 dimensions.
53 | :return: The flattened tensor.
54 | """
55 | return x.reshape((x.shape[0], x.shape[1], -1))
56 |
57 |
58 | def benchmark_interpolation(X, Q, x_mean, x_std, offsets, parents, out_path=None, n_past=10, n_future=10):
59 | """
60 | Evaluate naive baselines (zero-velocity and interpolation) for transition generation on given data.
61 | :param X: Local positions array of shape (Batchsize, Timesteps, Joints, 3)
62 | :param Q: Local quaternions array of shape (B, T, J, 4)
63 |     :param x_mean: Mean vector of local positions of shape (1, J*3, 1)
64 |     :param x_std: Standard deviation vector of local positions of shape (1, J*3, 1)
65 | :param offsets: Local bone offsets tensor of shape (1, 1, J, 3)
66 | :param parents: List of bone parents indices defining the hierarchy
67 | :param out_path: optional path for saving the results
68 | :param n_past: Number of frames used as past context
69 | :param n_future: Number of frames used as future context (only the first frame is used as the target)
70 | :return: Results dictionary
71 | """
72 |
73 | trans_lengths = [5, 15, 30, 45]
74 | n_joints = 22
75 | res = {}
76 |
77 | for n_trans in trans_lengths:
78 | print('Computing errors for transition length = {}...'.format(n_trans))
79 |
80 |         # Format the data for the current transition length. The number of samples and the offset stay unchanged.
81 | curr_window = n_trans + n_past + n_future
82 | curr_x = X[:, :curr_window, ...]
83 | curr_q = Q[:, :curr_window, ...]
84 | batchsize = curr_x.shape[0]
85 |
86 | # Ground-truth positions/quats/eulers
87 | gt_local_quats = curr_q
88 | gt_roots = curr_x[:, :, 0:1, :]
89 | gt_offsets = np.tile(offsets, [batchsize, curr_window, 1, 1])
90 | gt_local_poses = np.concatenate([gt_roots, gt_offsets], axis=2)
91 | trans_gt_local_poses = gt_local_poses[:, n_past: -n_future, ...]
92 | trans_gt_local_quats = gt_local_quats[:, n_past: -n_future, ...]
93 | # Local to global with Forward Kinematics (FK)
94 | trans_gt_global_quats, trans_gt_global_poses = utils.quat_fk(trans_gt_local_quats, trans_gt_local_poses, parents)
95 | trans_gt_global_poses = trans_gt_global_poses.reshape((trans_gt_global_poses.shape[0], -1, n_joints * 3)).transpose([0, 2, 1])
96 | # Normalize
97 | trans_gt_global_poses = (trans_gt_global_poses - x_mean) / x_std
98 |
99 | # Zero-velocity pos/quats
100 | zerov_trans_local_quats, zerov_trans_local_poses = np.zeros_like(trans_gt_local_quats), np.zeros_like(trans_gt_local_poses)
101 | zerov_trans_local_quats[:, :, :, :] = gt_local_quats[:, n_past - 1:n_past, :, :]
102 | zerov_trans_local_poses[:, :, :, :] = gt_local_poses[:, n_past - 1:n_past, :, :]
103 | # To global
104 | trans_zerov_global_quats, trans_zerov_global_poses = utils.quat_fk(zerov_trans_local_quats, zerov_trans_local_poses, parents)
105 | trans_zerov_global_poses = trans_zerov_global_poses.reshape((trans_zerov_global_poses.shape[0], -1, n_joints * 3)).transpose([0, 2, 1])
106 | # Normalize
107 | trans_zerov_global_poses = (trans_zerov_global_poses - x_mean) / x_std
108 |
109 | # Interpolation pos/quats
110 | r, q = curr_x[:, :, 0:1], curr_q
111 | inter_root, inter_local_quats = utils.interpolate_local(r, q, n_past, n_future)
112 | trans_inter_root = inter_root[:, 1:-1, :, :]
113 | trans_inter_offsets = np.tile(offsets, [batchsize, n_trans, 1, 1])
114 | trans_inter_local_poses = np.concatenate([trans_inter_root, trans_inter_offsets], axis=2)
115 | inter_local_quats = inter_local_quats[:, 1:-1, :, :]
116 | # To global
117 | trans_interp_global_quats, trans_interp_global_poses = utils.quat_fk(inter_local_quats, trans_inter_local_poses, parents)
118 | trans_interp_global_poses = trans_interp_global_poses.reshape((trans_interp_global_poses.shape[0], -1, n_joints * 3)).transpose([0, 2, 1])
119 | # Normalize
120 | trans_interp_global_poses = (trans_interp_global_poses - x_mean) / x_std
121 |
122 |         # Global quaternion loss
123 | res[('zerov_quat_loss', n_trans)] = np.mean(np.sqrt(np.sum((trans_zerov_global_quats - trans_gt_global_quats) ** 2.0, axis=(2, 3))))
124 | res[('interp_quat_loss', n_trans)] = np.mean(np.sqrt(np.sum((trans_interp_global_quats - trans_gt_global_quats) ** 2.0, axis=(2, 3))))
125 |
126 | # Global positions loss
127 | res[('zerov_pos_loss', n_trans)] = np.mean(np.sqrt(np.sum((trans_zerov_global_poses - trans_gt_global_poses)**2.0, axis=1)))
128 | res[('interp_pos_loss', n_trans)] = np.mean(np.sqrt(np.sum((trans_interp_global_poses - trans_gt_global_poses)**2.0, axis=1)))
129 |
130 | # NPSS loss on global quaternions
131 | res[('zerov_npss_loss', n_trans)] = fast_npss(flatjoints(trans_gt_global_quats), flatjoints(trans_zerov_global_quats))
132 | res[('interp_npss_loss', n_trans)] = fast_npss(flatjoints(trans_gt_global_quats), flatjoints(trans_interp_global_quats))
133 |
134 | print()
135 | avg_zerov_quat_losses = [res[('zerov_quat_loss', n)] for n in trans_lengths]
136 | avg_interp_quat_losses = [res[('interp_quat_loss', n)] for n in trans_lengths]
137 | print("=== Global quat losses ===")
138 | print("{0: <16} | {1:6d} | {2:6d} | {3:6d} | {4:6d}".format("Lengths", 5, 15, 30, 45))
139 | print("{0: <16} | {1:6.2f} | {2:6.2f} | {3:6.2f} | {4:6.2f}".format("Zero-V", *avg_zerov_quat_losses))
140 | print("{0: <16} | {1:6.2f} | {2:6.2f} | {3:6.2f} | {4:6.2f}".format("Interp.", *avg_interp_quat_losses))
141 | print()
142 |
143 | avg_zerov_pos_losses = [res[('zerov_pos_loss', n)] for n in trans_lengths]
144 | avg_interp_pos_losses = [res[('interp_pos_loss', n)] for n in trans_lengths]
145 | print("=== Global pos losses ===")
146 | print("{0: <16} | {1:6d} | {2:6d} | {3:6d} | {4:6d}".format("Lengths", 5, 15, 30, 45))
147 | print("{0: <16} | {1:6.3f} | {2:6.3f} | {3:6.3f} | {4:6.3f}".format("Zero-V", *avg_zerov_pos_losses))
148 | print("{0: <16} | {1:6.3f} | {2:6.3f} | {3:6.3f} | {4:6.3f}".format("Interp.", *avg_interp_pos_losses))
149 | print()
150 |
151 | avg_zerov_npss_losses = [res[('zerov_npss_loss', n)] for n in trans_lengths]
152 | avg_interp_npss_losses = [res[('interp_npss_loss', n)] for n in trans_lengths]
153 | print("=== NPSS on global quats ===")
154 | print("{0: <16} | {1:5d} | {2:5d} | {3:5d} | {4:5d}".format("Lengths", 5, 15, 30, 45))
155 | print("{0: <16} | {1:5.4f} | {2:5.4f} | {3:5.4f} | {4:5.4f}".format("Zero-V", *avg_zerov_npss_losses))
156 | print("{0: <16} | {1:5.4f} | {2:5.4f} | {3:5.4f} | {4:5.4f}".format("Interp.", *avg_interp_npss_losses))
157 | print()
158 |
159 |     # Write to file if desired
160 | if out_path is not None:
161 | res_txt_file = open(os.path.join(out_path, 'h36m_transitions_benchmark.txt'), "a")
162 | res_txt_file.write("\n=== Global quat losses ===\n")
163 | res_txt_file.write("{0: <16} | {1:6d} | {2:6d} | {3:6d} | {4:6d}\n".format("Lengths", 5, 15, 30, 45))
164 | res_txt_file.write("{0: <16} | {1:6.2f} | {2:6.2f} | {3:6.2f} | {4:6.2f}\n".format("Zero-V", *avg_zerov_quat_losses))
165 | res_txt_file.write("{0: <16} | {1:6.2f} | {2:6.2f} | {3:6.2f} | {4:6.2f}\n".format("Interp.", *avg_interp_quat_losses))
166 | res_txt_file.write("\n\n")
167 | res_txt_file.write("=== Global pos losses ===\n")
168 | res_txt_file.write("{0: <16} | {1:5d} | {2:5d} | {3:5d} | {4:5d}\n".format("Lengths", 5, 15, 30, 45))
169 | res_txt_file.write("{0: <16} | {1:5.4f} | {2:5.4f} | {3:5.4f} | {4:5.4f}\n".format("Zero-V", *avg_zerov_pos_losses))
170 | res_txt_file.write("{0: <16} | {1:5.4f} | {2:5.4f} | {3:5.4f} | {4:5.4f}\n".format("Interp.", *avg_interp_pos_losses))
171 | res_txt_file.write("\n\n")
172 | res_txt_file.write("=== NPSS on global quats ===\n")
173 | res_txt_file.write("{0: <16} | {1:5d} | {2:5d} | {3:5d} | {4:5d}\n".format("Lengths", 5, 15, 30, 45))
174 | res_txt_file.write("{0: <16} | {1:5.4f} | {2:5.4f} | {3:5.4f} | {4:5.4f}\n".format("Zero-V", *avg_zerov_npss_losses))
175 | res_txt_file.write("{0: <16} | {1:5.4f} | {2:5.4f} | {3:5.4f} | {4:5.4f}\n".format("Interp.", *avg_interp_npss_losses))
176 | res_txt_file.write("\n\n\n\n")
177 | res_txt_file.close()
178 |
179 | return res
180 |
181 |
182 |
--------------------------------------------------------------------------------
/lafan1/utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def length(x, axis=-1, keepdims=True):
5 | """
6 | Computes vector norm along a tensor axis(axes)
7 |
8 | :param x: tensor
9 | :param axis: axis(axes) along which to compute the norm
10 | :param keepdims: indicates if the dimension(s) on axis should be kept
11 | :return: The length or vector of lengths.
12 | """
13 | lgth = np.sqrt(np.sum(x * x, axis=axis, keepdims=keepdims))
14 | return lgth
15 |
16 |
17 | def normalize(x, axis=-1, eps=1e-8):
18 | """
19 | Normalizes a tensor over some axis (axes)
20 |
21 | :param x: data tensor
22 | :param axis: axis(axes) along which to compute the norm
23 | :param eps: epsilon to prevent numerical instabilities
24 | :return: The normalized tensor
25 | """
26 | res = x / (length(x, axis=axis) + eps)
27 | return res
28 |
29 |
30 | def quat_normalize(x, eps=1e-8):
31 | """
32 | Normalizes a quaternion tensor
33 |
34 | :param x: data tensor
35 | :param eps: epsilon to prevent numerical instabilities
36 | :return: The normalized quaternions tensor
37 | """
38 | res = normalize(x, eps=eps)
39 | return res
40 |
41 |
42 | def angle_axis_to_quat(angle, axis):
43 | """
44 |     Converts from an angle-axis representation to a quaternion representation
45 |
46 | :param angle: angles tensor
47 | :param axis: axis tensor
48 | :return: quaternion tensor
49 | """
50 | c = np.cos(angle / 2.0)[..., np.newaxis]
51 | s = np.sin(angle / 2.0)[..., np.newaxis]
52 | q = np.concatenate([c, s * axis], axis=-1)
53 | return q
54 |
55 |
56 | def euler_to_quat(e, order='zyx'):
57 | """
58 |
59 |     Converts from an Euler angle representation to a quaternion representation
60 |
61 | :param e: euler tensor
62 | :param order: order of euler rotations
63 | :return: quaternion tensor
64 | """
65 | axis = {
66 | 'x': np.asarray([1, 0, 0], dtype=np.float32),
67 | 'y': np.asarray([0, 1, 0], dtype=np.float32),
68 | 'z': np.asarray([0, 0, 1], dtype=np.float32)}
69 |
70 | q0 = angle_axis_to_quat(e[..., 0], axis[order[0]])
71 | q1 = angle_axis_to_quat(e[..., 1], axis[order[1]])
72 | q2 = angle_axis_to_quat(e[..., 2], axis[order[2]])
73 |
74 | return quat_mul(q0, quat_mul(q1, q2))
75 |
76 |
77 | def quat_inv(q):
78 | """
79 | Inverts a tensor of quaternions
80 |
81 | :param q: quaternion tensor
82 | :return: tensor of inverted quaternions
83 | """
84 | res = np.asarray([1, -1, -1, -1], dtype=np.float32) * q
85 | return res
86 |
87 |
88 | def quat_fk(lrot, lpos, parents):
89 | """
90 | Performs Forward Kinematics (FK) on local quaternions and local positions to retrieve global representations
91 |
92 | :param lrot: tensor of local quaternions with shape (..., Nb of joints, 4)
93 | :param lpos: tensor of local positions with shape (..., Nb of joints, 3)
94 | :param parents: list of parents indices
95 | :return: tuple of tensors of global quaternion, global positions
96 | """
97 | gp, gr = [lpos[..., :1, :]], [lrot[..., :1, :]]
98 | for i in range(1, len(parents)):
99 | gp.append(quat_mul_vec(gr[parents[i]], lpos[..., i:i+1, :]) + gp[parents[i]])
100 | gr.append(quat_mul (gr[parents[i]], lrot[..., i:i+1, :]))
101 |
102 | res = np.concatenate(gr, axis=-2), np.concatenate(gp, axis=-2)
103 | return res
104 |
105 |
106 | def quat_ik(grot, gpos, parents):
107 | """
108 | Performs Inverse Kinematics (IK) on global quaternions and global positions to retrieve local representations
109 |
110 | :param grot: tensor of global quaternions with shape (..., Nb of joints, 4)
111 | :param gpos: tensor of global positions with shape (..., Nb of joints, 3)
112 | :param parents: list of parents indices
113 | :return: tuple of tensors of local quaternion, local positions
114 | """
115 | res = [
116 | np.concatenate([
117 | grot[..., :1, :],
118 | quat_mul(quat_inv(grot[..., parents[1:], :]), grot[..., 1:, :]),
119 | ], axis=-2),
120 | np.concatenate([
121 | gpos[..., :1, :],
122 | quat_mul_vec(
123 | quat_inv(grot[..., parents[1:], :]),
124 | gpos[..., 1:, :] - gpos[..., parents[1:], :]),
125 | ], axis=-2)
126 | ]
127 |
128 | return res
129 |
130 |
131 | def quat_mul(x, y):
132 | """
133 | Performs quaternion multiplication on arrays of quaternions
134 |
135 | :param x: tensor of quaternions of shape (..., Nb of joints, 4)
136 | :param y: tensor of quaternions of shape (..., Nb of joints, 4)
137 | :return: The resulting quaternions
138 | """
139 | x0, x1, x2, x3 = x[..., 0:1], x[..., 1:2], x[..., 2:3], x[..., 3:4]
140 | y0, y1, y2, y3 = y[..., 0:1], y[..., 1:2], y[..., 2:3], y[..., 3:4]
141 |
142 | res = np.concatenate([
143 | y0 * x0 - y1 * x1 - y2 * x2 - y3 * x3,
144 | y0 * x1 + y1 * x0 - y2 * x3 + y3 * x2,
145 | y0 * x2 + y1 * x3 + y2 * x0 - y3 * x1,
146 | y0 * x3 - y1 * x2 + y2 * x1 + y3 * x0], axis=-1)
147 |
148 | return res
149 |
150 |
151 | def quat_mul_vec(q, x):
152 | """
153 | Performs multiplication of an array of 3D vectors by an array of quaternions (rotation).
154 |
155 | :param q: tensor of quaternions of shape (..., Nb of joints, 4)
156 | :param x: tensor of vectors of shape (..., Nb of joints, 3)
157 | :return: the resulting array of rotated vectors
158 | """
159 | t = 2.0 * np.cross(q[..., 1:], x)
160 | res = x + q[..., 0][..., np.newaxis] * t + np.cross(q[..., 1:], t)
161 |
162 | return res
163 |
164 |
165 | def quat_slerp(x, y, a):
166 | """
167 |     Performs spherical linear interpolation (SLERP) between x and y, with proportion a
168 |
169 | :param x: quaternion tensor
170 | :param y: quaternion tensor
171 | :param a: indicator (between 0 and 1) of completion of the interpolation.
172 | :return: tensor of interpolation results
173 | """
174 |     dot = np.sum(x * y, axis=-1)
175 |
176 |     neg = dot < 0.0
177 |     dot[neg] = -dot[neg]
178 |     y[neg] = -y[neg]
179 |
180 |     a = np.zeros_like(x[..., 0]) + a
181 |     amount0 = np.zeros(a.shape)
182 |     amount1 = np.zeros(a.shape)
183 |
184 |     linear = (1.0 - dot) < 0.01
185 |     omegas = np.arccos(dot[~linear])
186 | sinoms = np.sin(omegas)
187 |
188 | amount0[linear] = 1.0 - a[linear]
189 | amount0[~linear] = np.sin((1.0 - a[~linear]) * omegas) / sinoms
190 |
191 | amount1[linear] = a[linear]
192 | amount1[~linear] = np.sin(a[~linear] * omegas) / sinoms
193 | res = amount0[..., np.newaxis] * x + amount1[..., np.newaxis] * y
194 |
195 | return res
196 |
197 |
198 | def quat_between(x, y):
199 | """
200 | Quaternion rotations between two 3D-vector arrays
201 |
202 | :param x: tensor of 3D vectors
203 |     :param y: tensor of 3D vectors
204 | :return: tensor of quaternions
205 | """
206 | res = np.concatenate([
207 | np.sqrt(np.sum(x * x, axis=-1) * np.sum(y * y, axis=-1))[..., np.newaxis] +
208 | np.sum(x * y, axis=-1)[..., np.newaxis],
209 | np.cross(x, y)], axis=-1)
210 | return res
211 |
212 |
213 | def interpolate_local(lcl_r_mb, lcl_q_mb, n_past, n_future):
214 | """
215 | Performs interpolation between 2 frames of an animation sequence.
216 |
217 | The 2 frames are indirectly specified through n_past and n_future.
218 | SLERP is performed on the quaternions
219 | LERP is performed on the root's positions.
220 |
221 | :param lcl_r_mb: Local/Global root positions (B, T, 1, 3)
222 | :param lcl_q_mb: Local quaternions (B, T, J, 4)
223 | :param n_past: Number of frames of past context
224 | :param n_future: Number of frames of future context
225 | :return: Interpolated root and quats
226 | """
227 | # Extract last past frame and target frame
228 |     start_lcl_r_mb = lcl_r_mb[:, n_past - 1, :, :][:, None, :, :] # (B, 1, 1, 3)
229 | end_lcl_r_mb = lcl_r_mb[:, -n_future, :, :][:, None, :, :]
230 |
231 | start_lcl_q_mb = lcl_q_mb[:, n_past - 1, :, :]
232 | end_lcl_q_mb = lcl_q_mb[:, -n_future, :, :]
233 |
234 | # LERP Local Positions:
235 | n_trans = lcl_r_mb.shape[1] - (n_past + n_future)
236 | interp_ws = np.linspace(0.0, 1.0, num=n_trans + 2, dtype=np.float32)
237 | offset = end_lcl_r_mb - start_lcl_r_mb
238 |
239 | const_trans = np.tile(start_lcl_r_mb, [1, n_trans + 2, 1, 1])
240 | inter_lcl_r_mb = const_trans + (interp_ws)[None, :, None, None] * offset
241 |
242 | # SLERP Local Quats:
243 | interp_ws = np.linspace(0.0, 1.0, num=n_trans + 2, dtype=np.float32)
244 | inter_lcl_q_mb = np.stack(
245 | [(quat_normalize(quat_slerp(quat_normalize(start_lcl_q_mb), quat_normalize(end_lcl_q_mb), w))) for w in
246 | interp_ws], axis=1)
247 |
248 | return inter_lcl_r_mb, inter_lcl_q_mb
249 |
250 |
251 | def remove_quat_discontinuities(rotations):
252 | """
253 |
254 | Removing quat discontinuities on the time dimension (removing flips)
255 |
256 | :param rotations: Array of quaternions of shape (T, J, 4)
257 | :return: The processed array without quaternion inversion.
258 | """
259 | rots_inv = -rotations
260 |
261 | for i in range(1, rotations.shape[0]):
262 | # Compare dot products
263 | replace_mask = np.sum(rotations[i - 1: i] * rotations[i: i + 1], axis=-1) < np.sum(
264 | rotations[i - 1: i] * rots_inv[i: i + 1], axis=-1)
265 | replace_mask = replace_mask[..., np.newaxis]
266 | rotations[i] = replace_mask * rots_inv[i] + (1.0 - replace_mask) * rotations[i]
267 |
268 | return rotations
269 |
270 |
271 | # Orient the data according to the last past keyframe
272 | def rotate_at_frame(X, Q, parents, n_past=10):
273 | """
274 | Re-orients the animation data according to the last frame of past context.
275 |
276 | :param X: tensor of local positions of shape (Batchsize, Timesteps, Joints, 3)
277 | :param Q: tensor of local quaternions (Batchsize, Timesteps, Joints, 4)
278 | :param parents: list of parents' indices
279 | :param n_past: number of frames in the past context
280 | :return: The rotated positions X and quaternions Q
281 | """
282 | # Get global quats and global poses (FK)
283 | global_q, global_x = quat_fk(Q, X, parents)
284 |
285 | key_glob_Q = global_q[:, n_past - 1: n_past, 0:1, :] # (B, 1, 1, 4)
286 | forward = np.array([1, 0, 1])[np.newaxis, np.newaxis, np.newaxis, :] \
287 | * quat_mul_vec(key_glob_Q, np.array([0, 1, 0])[np.newaxis, np.newaxis, np.newaxis, :])
288 | forward = normalize(forward)
289 | yrot = quat_normalize(quat_between(np.array([1, 0, 0]), forward))
290 | new_glob_Q = quat_mul(quat_inv(yrot), global_q)
291 | new_glob_X = quat_mul_vec(quat_inv(yrot), global_x)
292 |
293 | # back to local quat-pos
294 | Q, X = quat_ik(new_glob_Q, new_glob_X, parents)
295 |
296 | return X, Q
297 |
298 |
299 | def extract_feet_contacts(pos, lfoot_idx, rfoot_idx, velfactor=0.02):
300 | """
301 | Extracts binary tensors of feet contacts
302 |
303 | :param pos: tensor of global positions of shape (Timesteps, Joints, 3)
304 | :param lfoot_idx: indices list of left foot joints
305 | :param rfoot_idx: indices list of right foot joints
306 | :param velfactor: velocity threshold to consider a joint moving or not
307 | :return: binary tensors of left foot contacts and right foot contacts
308 | """
309 | lfoot_xyz = (pos[1:, lfoot_idx, :] - pos[:-1, lfoot_idx, :]) ** 2
310 | contacts_l = (np.sum(lfoot_xyz, axis=-1) < velfactor)
311 |
312 | rfoot_xyz = (pos[1:, rfoot_idx, :] - pos[:-1, rfoot_idx, :]) ** 2
313 | contacts_r = (np.sum(rfoot_xyz, axis=-1) < velfactor)
314 |
315 | # Duplicate the last frame for shape consistency
316 | contacts_l = np.concatenate([contacts_l, contacts_l[-1:]], axis=0)
317 | contacts_r = np.concatenate([contacts_r, contacts_r[-1:]], axis=0)
318 |
319 | return contacts_l, contacts_r
320 |
321 |
322 |
--------------------------------------------------------------------------------
/license.txt:
--------------------------------------------------------------------------------
1 | Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International Public License
2 |
3 | By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions.
4 |
5 |
6 | Section 1 – Definitions.
7 |
8 | Adapted Material means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image.
9 | Copyright and Similar Rights means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights.
10 | Effective Technological Measures means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements.
11 | Exceptions and Limitations means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material.
12 | Licensed Material means the artistic or literary work, database, or other material to which the Licensor applied this Public License.
13 | Licensed Rights means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license.
14 | Licensor means the individual(s) or entity(ies) granting rights under this Public License.
15 | NonCommercial means not primarily intended for or directed towards commercial advantage or monetary compensation. For purposes of this Public License, the exchange of the Licensed Material for other material subject to Copyright and Similar Rights by digital file-sharing or similar means is NonCommercial provided there is no payment of monetary compensation in connection with the exchange.
16 | Share means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them.
17 | Sui Generis Database Rights means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world.
18 | You means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning.
19 |
20 |
21 | Section 2 – Scope.
22 |
23 | License grant.
24 | Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to:
25 | reproduce and Share the Licensed Material, in whole or in part, for NonCommercial purposes only; and
26 | produce and reproduce, but not Share, Adapted Material for NonCommercial purposes only.
27 | Exceptions and Limitations. For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions.
28 | Term. The term of this Public License is specified in Section 6(a).
29 | Media and formats; technical modifications allowed. The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a)(4) never produces Adapted Material.
30 | Downstream recipients.
31 | Offer from the Licensor – Licensed Material. Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License.
32 | No downstream restrictions. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material.
33 | No endorsement. Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i).
34 | Other rights.
35 |
36 | Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise.
37 | Patent and trademark rights are not licensed under this Public License.
38 | To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties, including when the Licensed Material is used other than for NonCommercial purposes.
39 |
40 |
41 | Section 3 – License Conditions.
42 |
43 | Your exercise of the Licensed Rights is expressly made subject to the following conditions.
44 |
45 | Attribution.
46 |
47 | If You Share the Licensed Material, You must:
48 |
49 | retain the following if it is supplied by the Licensor with the Licensed Material:
50 | identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated);
51 | a copyright notice;
52 | a notice that refers to this Public License;
53 | a notice that refers to the disclaimer of warranties;
54 | a URI or hyperlink to the Licensed Material to the extent reasonably practicable;
55 | indicate if You modified the Licensed Material and retain an indication of any previous modifications; and
56 | indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License.
57 | For the avoidance of doubt, You do not have permission under this Public License to Share Adapted Material.
58 | You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information.
59 | If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable.
60 |
61 |
62 | Section 4 – Sui Generis Database Rights.
63 |
64 | Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material:
65 |
66 | for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database for NonCommercial purposes only and provided You do not Share Adapted Material;
67 | if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material; and
68 | You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database.
69 | For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights.
70 |
71 |
72 | Section 5 – Disclaimer of Warranties and Limitation of Liability.
73 |
74 | Unless otherwise separately undertaken by the Licensor, to the extent possible, the Licensor offers the Licensed Material as-is and as-available, and makes no representations or warranties of any kind concerning the Licensed Material, whether express, implied, statutory, or other. This includes, without limitation, warranties of title, merchantability, fitness for a particular purpose, non-infringement, absence of latent or other defects, accuracy, or the presence or absence of errors, whether or not known or discoverable. Where disclaimers of warranties are not allowed in full or in part, this disclaimer may not apply to You.
75 | To the extent possible, in no event will the Licensor be liable to You on any legal theory (including, without limitation, negligence) or otherwise for any direct, special, indirect, incidental, consequential, punitive, exemplary, or other losses, costs, expenses, or damages arising out of this Public License or use of the Licensed Material, even if the Licensor has been advised of the possibility of such losses, costs, expenses, or damages. Where a limitation of liability is not allowed in full or in part, this limitation may not apply to You.
76 | The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability.
77 |
78 |
79 | Section 6 – Term and Termination.
80 |
81 | This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically.
82 | Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates:
83 |
84 | automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or
85 | upon express reinstatement by the Licensor.
86 | For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License.
87 | For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License.
88 | Sections 1, 5, 6, 7, and 8 survive termination of this Public License.
89 |
90 |
91 | Section 7 – Other Terms and Conditions.
92 |
93 | The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed.
94 | Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License.
95 |
96 |
97 | Section 8 – Interpretation.
98 |
99 | For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License.
100 | To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions.
101 | No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor.
102 | Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority.
--------------------------------------------------------------------------------