├── .gitignore ├── PyOpenGL-3.1.6-cp38-cp38-win_amd64.whl ├── PyOpenGL_accelerate-3.1.6-cp38-cp38-win_amd64.whl ├── README.md ├── fairmotion_ops ├── __init__.py ├── conversions.py ├── math.py └── quaternion.py ├── fairmotion_utils ├── __init__.py ├── constants.py └── utils.py ├── fairmotion_vis ├── __init__.py ├── camera.py ├── gl_render.py ├── glut_viewer.py └── utils.py ├── imgs ├── output.png ├── requirehmd.png ├── steam_vr_location.png ├── steamvr_location.png └── vive_tracker.png ├── render_argparse.py ├── requirements.txt ├── run_tracker.py ├── track.py └── vive_visualizer.py /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ -------------------------------------------------------------------------------- /PyOpenGL-3.1.6-cp38-cp38-win_amd64.whl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/snuvclab/Vive_Tracker/705c990d07f9aaccf34681a61f85a76974b4c34f/PyOpenGL-3.1.6-cp38-cp38-win_amd64.whl -------------------------------------------------------------------------------- /PyOpenGL_accelerate-3.1.6-cp38-cp38-win_amd64.whl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/snuvclab/Vive_Tracker/705c990d07f9aaccf34681a61f85a76974b4c34f/PyOpenGL_accelerate-3.1.6-cp38-cp38-win_amd64.whl -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Vive_Tracker 2 | Vive Tracker 6 DOF Tracking 3 |

4 | *(figure: pipeline)*
5 |

6 |
7 | > - HTC Base Station and Vive Tracker 3.0
8 | > - Tracking: Supports SteamVR Base Station 1.0 and 2.0
9 | > - Weight: 75 g
10 | > - Dimensions: 70.9 x 79.0 x 44.1 mm
11 | > - Battery Life: 7.5 hours
12 | > - Field of View: 240 degrees
13 | > - Components: Vive Tracker, Dongle, Dongle Cradle (USB-C), USB cable
14 |
15 | ## Installing Python Dependencies
16 | > Clone the repository:
17 | ```
18 | git clone https://github.com/snuvclab/Vive_Tracker.git
19 | cd Vive_Tracker
20 | ```
21 | > Note: This code was developed on Ubuntu 20.04 with Python 3.7. Later versions should work, but have not been tested.
22 | > Create and activate a virtual environment to work in, e.g. using Conda:
23 |
24 | ```
25 | conda create -n venv_vive python=3.7
26 | conda activate venv_vive
27 | ```
28 | > Install the OpenVR library:
29 |
30 | ```
31 | pip install openvr
32 | ```
33 |
34 | ## Setting up SteamVR
35 | > Install Steam:
36 | ```
37 | wget https://cdn.cloudflare.steamstatic.com/client/installer/steam.deb
38 | sudo dpkg -i YourDownloadDirectory/steam.deb
39 | sudo apt-get update
40 | sudo apt-get upgrade
41 | ```
42 |
43 | > Install SteamVR:
44 | After logging in with a Steam account, install SteamVR (Store -> search for "SteamVR").
45 |

46 | *(figure: pipeline)*
47 |

48 |
49 | > Remove HMD requirement:
50 | Since we do not have a Vive HMD (Head-Mounted Display), you have to change a settings file.
51 | You can find the default.vrsettings config file at:
52 | ```
53 | /home/yourDirectory/.local/share/Steam/steamapps/common/SteamVR/resources/settings/default.vrsettings
54 | eg. /home/yc4ny/.local/share/Steam/steamapps/common/SteamVR/resources/settings/default.vrsettings
55 | ```
56 | Change "requireHmd": true to "requireHmd": false
57 |
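After the edit, the relevant part of default.vrsettings should look roughly like this (a sketch -- the exact surrounding keys may differ between SteamVR versions):
```
"steamvr": {
   ...
   "requireHmd": false,
   ...
}
```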

58 | *(figure: pipeline)*
59 |

60 |
61 | > Restart SteamVR:
62 | If SteamVR is already running, restart it so the setting takes effect.
63 | You can restart quickly by clicking the VR button in the top-right corner of Steam.
64 |

65 | *(figure: pipeline)*
66 |

67 |
68 | ## Running the Tracker
69 | You can run the tracker with:
70 | ```
71 | python run_tracker.py -f FrequencyValue
72 | eg. python run_tracker.py -f 30
73 | ```
74 | The -f flag sets the frequency of pose updates; for example, if you are recording with a camera at 30 fps, pass -f 30 so the tracker updates once per frame.
75 |
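Internally, run_tracker.py talks to SteamVR through the openvr bindings. As a rough sketch (not the repository's exact implementation, and API details can vary across pyopenvr versions), a fixed-frequency polling loop looks like this:
```
import time
import openvr

openvr.init(openvr.VRApplication_Other)
vr = openvr.VRSystem()
freq = 30.0  # what the -f flag controls

try:
    while True:
        poses = vr.getDeviceToAbsoluteTrackingPose(
            openvr.TrackingUniverseStanding, 0, openvr.k_unMaxTrackedDeviceCount)
        for i in range(openvr.k_unMaxTrackedDeviceCount):
            if vr.getTrackedDeviceClass(i) != openvr.TrackedDeviceClass_GenericTracker:
                continue  # skip base stations, controllers, etc.
            if not poses[i].bPoseIsValid:
                continue
            m = poses[i].mDeviceToAbsoluteTracking  # 3x4 pose matrix
            x, y, z = m[0][3], m[1][3], m[2][3]     # translation column
            print(f"tracker {i}: x={x:.3f} y={y:.3f} z={z:.3f}")
            # yaw/pitch/roll can be recovered from the 3x3 rotation block of m
        time.sleep(1.0 / freq)
finally:
    openvr.shutdown()
```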

76 | *(figure: pipeline)*
77 |

78 |
79 | > - Output: x, y, z, yaw, pitch, roll
80 | ## Visualization
81 | To be updated...
82 |
83 |
84 |
--------------------------------------------------------------------------------
/fairmotion_ops/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/fairmotion_ops/conversions.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
3 | import numpy as np
4 |
5 | from fairmotion_utils import constants, utils
6 | from scipy.spatial.transform import Rotation
7 |
8 | import warnings
9 |
10 | """
11 | Glossary:
12 | p: position (3,)
13 | rad: radians
14 | deg: degrees
15 | A: Axis angle (3,)
16 | E: Euler angle (3,)
17 | Q: Quaternion (4,)
18 | R: Rotation matrix (3,3)
19 | T: Transformation matrix (4,4)
20 |
21 | Quaternions use the xyzw order
22 | Rotation matrices are column-wise
23 | """
24 |
25 | """
26 | TODO:
27 | Euler Angle order correction for Test
28 | """
29 |
30 |
31 | def batch_auto_reshape(x, fn, shape_in, shape_out):
32 |     reshape = x.ndim - len(shape_in) > 1
33 |     xx = x.reshape(-1, *shape_in) if reshape else x
34 |     y = fn(xx)
35 |     return y.reshape(x.shape[: -len(shape_in)] + shape_out) if reshape else y
36 |
37 |
38 | """
39 | Angle conversions
40 | """
41 |
42 |
43 | def rad2deg(rad):
44 |     """Convert from radians to degrees."""
45 |     return rad * 180.0 / np.pi
46 |
47 |
48 | def deg2rad(deg):
49 |     """Convert from degrees to radians."""
50 |     return deg * np.pi / 180.0
51 |
52 |
53 | """
54 | From A to other representations
55 | """
56 |
57 |
58 | def A2A(A):
59 |     """
60 |     The same 3D orientation can be represented by two different axis-angle
61 |     representations -- (axis, angle) and (-axis, 2pi - angle) -- where we
62 |     assume 0 <= angle <= pi. This method forces the representation of the
63 |     orientation to use an angle strictly between 0 and pi.
64 |     """
65 |
66 |     def a2a(a):
67 |         angle = np.linalg.norm(a)
68 |         if angle <= constants.EPSILON:
69 |             return a
70 |         if angle > 2 * np.pi:
71 |             angle = angle % (2 * np.pi)  # wrap into [0, 2pi); was `angle % 2 * np.pi`, a precedence bug
72 |             warnings.warn("!!!Angle is larger than 2PI!!!")
73 |         if angle > np.pi:
74 |             return (-a / angle) * (2 * np.pi - angle)
75 |         else:
76 |             return a
77 |
78 |     return batch_auto_reshape(
79 |         A, lambda x: utils._apply_fn_agnostic_to_vec_mat(x, a2a), (3,), (3,),
80 |     )
81 |
82 |
83 | def A2E(A, order="xyz", degrees=False):
84 |     return batch_auto_reshape(
85 |         A,
86 |         lambda x: Rotation.from_rotvec(x).as_euler(order, degrees=degrees),
87 |         (3,),
88 |         (3,),
89 |     )
90 |
91 |
92 | def A2Q(A):
93 |     return batch_auto_reshape(
94 |         A, lambda x: Rotation.from_rotvec(x).as_quat(), (3,), (4,),
95 |     )
96 |
97 |
98 | def A2R(A):
99 |     return batch_auto_reshape(
100 |         A, lambda x: Rotation.from_rotvec(x).as_matrix(), (3,), (3, 3),
101 |     )
102 |
103 |
104 | def A2T(A):
105 |     return batch_auto_reshape(
106 |         A, lambda x: Rp2T(A2R(x), constants.zero_p()), (3,), (4, 4),
107 |     )
108 |
109 |
110 | def Ax2R(theta):
111 |     """
112 |     Convert (axis) angle along x axis Ax to rotation matrix R
113 |     """
114 |     if isinstance(theta, np.ndarray):
115 |         x = np.zeros((theta.shape + (3,)))
116 |     else:
117 |         x = np.zeros(3)
118 |     x[..., 0] = theta
119 |     return A2R(x)
120 |
121 |
122 | def Ay2R(theta):
123 |     """
124 |     Convert (axis) angle along y axis Ay to rotation matrix R
125 |     """
126 |     if isinstance(theta, np.ndarray):
127 |         R = np.zeros(theta.shape + (3, 3))
128 |         R[...] = constants.eye_R()
129 |     else:
130 |         R = constants.eye_R()
131 |     c = np.cos(theta)
132 |     s = np.sin(theta)
133 |     R[..., 0, 0] = c
134 |     R[..., 0, 2] = s
135 |     R[..., 2, 0] = -s
136 |     R[..., 2, 2] = c
137 |     return R
138 |
139 |
140 | def Az2R(theta):
141 |     """
142 |     Convert (axis) angle along z axis Az to rotation matrix R
143 |     """
144 |     if isinstance(theta, np.ndarray):
145 |         R = np.zeros(theta.shape + (3, 3))
146 |         R[...] = constants.eye_R()
147 |     else:
148 |         R = constants.eye_R()
149 |     c = np.cos(theta)
150 |     s = np.sin(theta)
151 |     R[..., 0, 0] = c
152 |     R[..., 0, 1] = -s
153 |     R[..., 1, 0] = s
154 |     R[..., 1, 1] = c
155 |     return R
156 |
157 |
158 | """
159 | From R to other representations
160 | """
161 |
162 |
163 | def R2A(R):
164 |     return batch_auto_reshape(
165 |         R, lambda x: Rotation.from_matrix(x).as_rotvec(), (3, 3), (3,),
166 |     )
167 |
168 |
169 | def R2E(R, order="XYZ", degrees=False):
170 |     return batch_auto_reshape(
171 |         R,
172 |         lambda x: Rotation.from_matrix(x).as_euler(order, degrees=degrees),
173 |         (3, 3),
174 |         (3,),
175 |     )
176 |
177 |
178 | def R2Q(R):
179 |     return batch_auto_reshape(
180 |         R, lambda x: Rotation.from_matrix(x).as_quat(), (3, 3), (4,),
181 |     )
182 |
183 |
184 | def R2R6D(R):
185 |     return R[..., 0:2]
186 |
187 |
188 | def R6D2R(R6D):
189 |     R3D = np.cross(R6D[..., 0], R6D[..., 1])
190 |     R = np.concatenate((R6D, np.expand_dims(R3D, axis=-1)), axis=-1)
191 |     return R
192 |
193 |
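# --- Added usage sketch (not part of the original file) ---
# Round-trip through the 6D rotation representation: R2R6D keeps the first
# two columns of R, and R6D2R rebuilds the third with a cross product. Note
# that no Gram-Schmidt step is applied, so the two columns are assumed to
# still be orthonormal.
def _demo_R6D_roundtrip():
    R = A2R(np.array([0.1, 0.2, 0.3]))  # rotation matrix from an axis-angle
    R6D = R2R6D(R)                      # shape (3, 2)
    R_back = R6D2R(R6D)                 # shape (3, 3)
    assert np.allclose(R, R_back)
    return R_back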
194 | def R2R(R):
195 |     """
196 |     This returns a valid (corrected) rotation if the input
197 |     rotation is invalid. Otherwise it returns the same values.
198 |     """
199 |     return batch_auto_reshape(
200 |         R, lambda x: Rotation.from_matrix(x).as_matrix(), (3, 3), (3, 3),
201 |     )
202 |
203 |
204 | def R2T(R):
205 |     return Rp2T(R, constants.zero_p())
206 |
207 |
208 | """
209 | From Q to other representations
210 | """
211 |
212 |
213 | def Q2A(Q):
214 |     return batch_auto_reshape(
215 |         Q, lambda x: Rotation.from_quat(x).as_rotvec(), (4,), (3,),
216 |     )
217 |
218 |
219 | def Q2E(Q, order="xyz", degrees=False):
220 |     return batch_auto_reshape(
221 |         Q,
222 |         lambda x: Rotation.from_quat(x).as_euler(order, degrees=degrees),
223 |         (4,),
224 |         (3,),
225 |     )
226 |
227 |
228 | def Q2Q(Q, op, xyzw_in=True):
229 |     """
230 |     This returns a valid (corrected) rotation if the input rotations are
231 |     invalid. Otherwise it returns the same values.
232 |     """
233 |     return batch_auto_reshape(
234 |         Q, lambda x: Rotation.from_quat(x).as_quat(), (4,), (4,),
235 |     )
236 |
237 |
238 | def Q2R(Q):
239 |     return batch_auto_reshape(
240 |         Q, lambda x: Rotation.from_quat(x).as_matrix(), (4,), (3, 3),
241 |     )
242 |
243 |
244 | def Q2T(Q):
245 |     return batch_auto_reshape(
246 |         Q, lambda x: Rp2T(Q2R(x), constants.zero_p()), (4,), (4, 4),
247 |     )
248 |
249 |
250 | """
251 | From T to other representations
252 | """
253 |
254 |
255 | def T2p(T):
256 |     _, p = T2Rp(T)
257 |     return p
258 |
259 |
260 | def T2R(T):
261 |     R, _ = T2Rp(T)
262 |     return R
263 |
264 |
265 | def T2Rp(T):
266 |     R = T[..., :3, :3]
267 |     p = T[..., :3, 3]
268 |     return R, p
269 |
270 |
271 | def T2Qp(T):
272 |     R, p = T2Rp(T)
273 |     Q = R2Q(R)
274 |     return Q, p
275 |
276 |
277 | def Ap2T(A, p):
278 |     return Rp2T(A2R(A), p)
279 |
280 |
281 | def E2R(E, order="xyz", degrees=False):  # accepts order/degrees so Ep2T's call below works
282 |     return Rotation.from_euler(order, E, degrees=degrees).as_matrix()
283 |
284 |
285 | def Ep2T(E, p, order="xyz", degrees=False):
286 |     return Rp2T(E2R(E, order, degrees), p)
287 |
288 |
289 | """
290 | From some representations to T
291 | """
292 |
293 |
294 | def Qp2T(Q, p):
295 |     R = Q2R(Q)
296 |     return Rp2T(R, p)
297 |
298 |
299 | def Rp2T(R, p):
300 |     input_shape = R.shape[:-2] if R.ndim > 2 else p.shape[:-1]
301 |     R_flat = R.reshape((-1, 3, 3))
302 |     p_flat = p.reshape((-1, 3))
303 |     T = np.zeros((int(np.prod(input_shape)), 4, 4))
304 |     T[...] = constants.eye_T()
305 |     T[..., :3, :3] = R_flat
306 |     T[..., :3, 3] = p_flat
307 |     return T.reshape(list(input_shape) + [4, 4])
308 |
309 |
310 | def p2T(p):
311 |     return Rp2T(constants.eye_R(), np.array(p))
312 |
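# --- Added usage sketch (not part of the original file) ---
# Every converter broadcasts over leading batch dimensions through
# batch_auto_reshape, so a whole batch of axis-angles converts in one call.
if __name__ == "__main__":
    A = np.random.uniform(-np.pi, np.pi, size=(10, 4, 3))
    R = A2R(A)                         # -> (10, 4, 3, 3)
    Q = R2Q(R)                         # -> (10, 4, 4), xyzw order
    T = Qp2T(Q, np.zeros((10, 4, 3)))  # -> (10, 4, 4, 4)
    print(R.shape, Q.shape, T.shape)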
--------------------------------------------------------------------------------
/fairmotion_ops/math.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
3 | import math
4 | import numpy as np
5 | import warnings
6 | from scipy import stats
7 |
8 | from fairmotion_ops import conversions, quaternion
9 | from fairmotion_utils import constants, utils
10 |
11 |
12 | def normalize(v):
13 |     """
14 |     Divide vector by its norm. The method handles vectors with type list and
15 |     np.array.
16 |     """
17 |     is_list = type(v) == list
18 |     length = np.linalg.norm(v)
19 |     if length > constants.EPSILON:
20 |         norm_v = np.array(v) / length
21 |         if is_list:
22 |             return list(norm_v)
23 |         else:
24 |             return norm_v
25 |     else:
26 |         warnings.warn("!!!The length of input vector is almost zero!!!")
27 |         return v
28 |
29 |
30 | def slerp(R1, R2, t):
31 |     """
32 |     Spherical linear interpolation (https://en.wikipedia.org/wiki/Slerp)
33 |     between R1 and R2 with parameter t, 0 ≤ t ≤ 1
34 |     """
35 |     return np.dot(
36 |         R1, conversions.A2R(t * conversions.R2A(np.dot(R1.transpose(), R2)))
37 |     )
38 |
39 |
40 | def lerp(v0, v1, t):
41 |     """
42 |     Simple linear interpolation between v0 and v1 with parameter t, 0 ≤ t ≤ 1
43 |     """
44 |     return v0 + (v1 - v0) * t
45 |
46 |
47 | def invertT(T):
48 |     R = T[:3, :3]
49 |     p = T[:3, 3]
50 |     invT = constants.eye_T()
51 |     R_trans = R.transpose()
52 |     R_trans_p = np.dot(R_trans, p)
53 |     invT[:3, :3] = R_trans
54 |     invT[:3, 3] = -R_trans_p
55 |     return invT
56 |
57 |
58 | def componentOnVector(inputVector, directionVector):
59 |     return np.inner(directionVector, inputVector) / np.dot(
60 |         directionVector, directionVector
61 |     )
62 |
63 |
64 | def projectionOnVector(inputVector, directionVector):
65 |     return componentOnVector(inputVector, directionVector) * directionVector
66 |
67 |
68 | def R_from_vectors(vec1, vec2):
69 |     """
70 |     Returns R such that R dot vec1 = vec2
71 |     """
72 |     vec1 = normalize(vec1)
73 |     vec2 = normalize(vec2)
74 |
75 |     rot_axis = normalize(np.cross(vec1, vec2))
76 |     inner = np.inner(vec1, vec2)
77 |     theta = math.acos(inner)
78 |
79 |     if rot_axis[0] == 0 and rot_axis[1] == 0 and rot_axis[2] == 0:
80 |         rot_axis = [0, 1, 0]
81 |
82 |     x, y, z = rot_axis
83 |     c = inner
84 |     s = math.sin(theta)
85 |     R = np.array(
86 |         [
87 |             [
88 |                 c + (1.0 - c) * x * x,
89 |                 (1.0 - c) * x * y - s * z,
90 |                 (1 - c) * x * z + s * y,
91 |             ],
92 |             [
93 |                 (1.0 - c) * x * y + s * z,
94 |                 c + (1.0 - c) * y * y,
95 |                 (1.0 - c) * y * z - s * x,
96 |             ],
97 |             [
98 |                 (1.0 - c) * z * x - s * y,
99 |                 (1.0 - c) * z * y + s * x,
100 |                 c + (1.0 - c) * z * z,
101 |             ],
102 |         ]
103 |     )
104 |     return R
105 |
106 |
107 | def project_rotation_1D(R, axis):
108 |     """
109 |     Project a 3D rotation matrix to the closest 1D rotation
110 |     when a rotational axis is given
111 |     """
112 |     Q, angle = quaternion.Q_closest(
113 |         conversions.R2Q(R), [0.0, 0.0, 0.0, 1.0], axis,  # identity quaternion in xyzw order (was [1, 0, 0, 0])
114 |     )
115 |     return angle
116 |
117 |
118 | def project_rotation_2D(R, axis1, axis2, order="zyx"):
119 |     """
120 |     Project a 3D rotation matrix to the 2D rotation
121 |     when two rotational axes are given
122 |     """
123 |     zyx = conversions.R2E(R, order)
124 |     index1 = utils.axis_to_index(axis1)
125 |     index2 = utils.axis_to_index(axis2)
126 |     if index1 == 0 and index2 == 1:
127 |         return np.array([zyx[2], zyx[1]])
128 |     elif index1 == 0 and index2 == 2:
129 |         return np.array([zyx[2], zyx[0]])
130 |     elif index1 == 1 and index2 == 0:
131 |         return np.array([zyx[1], zyx[2]])
132 |     elif index1 == 1 and index2 == 2:
133 |         return np.array([zyx[1], zyx[0]])
134 |     elif index1 == 2 and index2 == 0:
135 |         return np.array([zyx[0], zyx[2]])
136 |     elif index1 == 2 and index2 == 1:
137 |         return np.array([zyx[0], zyx[1]])
138 |     else:
139 |         raise Exception
140 |
141 |
142 | def project_rotation_3D(R):
143 |     """
144 |     Project a 3D rotation matrix to the 3D rotation.
145 |     It just returns the corresponding axis-angle representation.
146 |     """
147 |     return conversions.R2A(R)
148 |
149 |
150 | def project_angular_vel_1D(w, axis):
151 |     """
152 |     Project a 3D angular velocity to a 1D angular velocity.
153 |     """
154 |     return np.linalg.norm(np.dot(w, axis))
155 |
156 |
157 | def project_angular_vel_2D(w, axis1, axis2):
158 |     """
159 |     Project a 3D angular velocity to a 2D angular velocity.
160 |     """
161 |     index1 = utils.axis_to_index(axis1)
162 |     index2 = utils.axis_to_index(axis2)
163 |     return np.array([w[index1], w[index2]])
164 |
165 |
166 | def project_angular_vel_3D(w):
167 |     """
168 |     Project a 3D angular velocity to a 3D angular velocity.
169 |     """
170 |     return w
171 |
172 |
173 | def truncnorm(mu, sigma, lower, upper):
174 |     """
175 |     Generate a sample from a truncated normal distribution
176 |     """
177 |     return np.atleast_1d(
178 |         stats.truncnorm(
179 |             (lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma
180 |         ).rvs()
181 |     )
182 |
183 |
184 | def random_unit_vector(dim=3):
185 |     """
186 |     Generate a random unit vector (whose length is 1.0)
187 |     """
188 |     while True:
189 |         v = np.random.uniform(-1.0, 1.0, size=dim)
190 |         l = np.linalg.norm(v)
191 |         if l < constants.EPSILON:
192 |             continue
193 |         v = v / l
194 |         break
195 |     return v
196 |
197 |
198 | def random_position(mu_l, sigma_l, lower_l, upper_l, dim=3):
199 |     """
200 |     Generate a random position from a truncated normal distribution
201 |     """
202 |     l = truncnorm(mu=mu_l, sigma=sigma_l, lower=lower_l, upper=upper_l)
203 |     return random_unit_vector(dim) * l
204 |
205 |
206 | def random_rotation(mu_theta, sigma_theta, lower_theta, upper_theta):
207 |     """
208 |     Generate a random rotation from a truncated normal distribution
209 |     """
210 |     theta = truncnorm(
211 |         mu=mu_theta, sigma=sigma_theta, lower=lower_theta, upper=upper_theta
212 |     )
213 |     return conversions.A2R(random_unit_vector() * theta)
214 |
215 |
216 | def lerp_from_paired_list(x, xy_pairs, clamp=True):
217 |     """
218 |     Given a list of data points in the shape of [[x0,y0],[x1,y1],...,[xN,yN]],
219 |     this returns an interpolated y value that corresponds to a given x value
220 |     """
221 |     x0, y0 = xy_pairs[0]
222 |     xN, yN = xy_pairs[-1]
223 |     # If clamp is False, check that x is inside of the given x range
224 |     if not clamp:
225 |         assert x0 <= x <= xN
226 |     # Return the boundary values if the value is outside
227 |     if x <= x0:
228 |         return y0
229 |     elif x >= xN:
230 |         return yN
231 |     else:
232 |         # Otherwise, return linearly interpolated values
233 |         for i in range(len(xy_pairs) - 1):
234 |             x1, y1 = xy_pairs[i]
235 |             x2, y2 = xy_pairs[i + 1]
236 |             if x1 <= x < x2:
237 |                 alpha = (x - x1) / (x2 - x1)
238 |                 return (1.0 - alpha) * y1 + alpha * y2
239 |     raise Exception("This should not be reached!!!")
240 |
241 |
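# --- Added usage sketch (not part of the original file) ---
# lerp_from_paired_list interpolates inside the x-range and clamps to the
# boundary y-values outside it.
def _demo_lerp_from_paired_list():
    pairs = [[0.0, 0.0], [1.0, 10.0], [2.0, 20.0]]
    assert lerp_from_paired_list(0.5, pairs) == 5.0    # interpolated
    assert lerp_from_paired_list(-1.0, pairs) == 0.0   # clamped to y0
    assert lerp_from_paired_list(3.0, pairs) == 20.0   # clamped to yN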
242 | class Normalizer:
243 |     """
244 |     Helper class for normalization between two sets of value ranges:
245 |     (real_val_max, real_val_min) <--> (norm_val_max, norm_val_min)
246 |     """
247 |
248 |     def __init__(
249 |         self,
250 |         real_val_max,
251 |         real_val_min,
252 |         norm_val_max,
253 |         norm_val_min,
254 |         apply_clamp=True,
255 |     ):
256 |         self.set_real_range(real_val_max, real_val_min)
257 |         self.set_norm_range(norm_val_max, norm_val_min)
258 |         self.apply_clamp = apply_clamp
259 |         self.dim = len(real_val_max)
260 |
261 |     def set_real_range(self, real_val_max, real_val_min):
262 |         self.real_val_max = real_val_max
263 |         self.real_val_min = real_val_min
264 |         self.real_val_diff = real_val_max - real_val_min
265 |         self.real_val_diff_inv = 1.0 / self.real_val_diff
266 |         #
267 |         # Check if wrong values exist in the setting,
268 |         # e.g. max <= min or abs(max - min) is too small
269 |         #
270 |         for v in self.real_val_diff:
271 |             if v <= 0.0 or abs(v) < 1.0e-08:
272 |                 raise Exception("Normalizer", "wrong values")
273 |
274 |     def set_norm_range(self, norm_val_max, norm_val_min):
275 |         self.norm_val_max = norm_val_max
276 |         self.norm_val_min = norm_val_min
277 |         self.norm_val_diff = norm_val_max - norm_val_min
278 |         self.norm_val_diff_inv = 1.0 / self.norm_val_diff
279 |         #
280 |         # Check if wrong values exist in the setting,
281 |         # e.g. max <= min or abs(max - min) is too small
282 |         #
283 |         for v in self.norm_val_diff:
284 |             if v <= 0.0 or abs(v) < 1.0e-08:
285 |                 raise Exception("Normalizer", "wrong values")
286 |
287 |     def real_to_norm(self, val):
288 |         val_0_1 = (val - self.real_val_min) * self.real_val_diff_inv
289 |         if self.apply_clamp:
290 |             self._clip(val_0_1)
291 |         return self.norm_val_min + self.norm_val_diff * val_0_1
292 |
293 |     def norm_to_real(self, val):
294 |         val_0_1 = (val - self.norm_val_min) * self.norm_val_diff_inv
295 |         if self.apply_clamp:
296 |             self._clip(val_0_1)
297 |         return self.real_val_min + self.real_val_diff * val_0_1
298 |
299 |     def _clip(self, val):
300 |         for i in range(len(val)):
301 |             val[i] = np.clip(val[i], 0.0, 1.0)
302 |
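# --- Added usage sketch (not part of the original file) ---
# Normalizer maps per-dimension real ranges to normalized ranges and back.
if __name__ == "__main__":
    normalizer = Normalizer(
        real_val_max=np.array([10.0, 100.0]),
        real_val_min=np.array([0.0, -100.0]),
        norm_val_max=np.ones(2),
        norm_val_min=-np.ones(2),
    )
    v = normalizer.real_to_norm(np.array([5.0, 0.0]))  # -> [0.0, 0.0]
    print(v, normalizer.norm_to_real(v))               # round-trips to [5.0, 0.0]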
--------------------------------------------------------------------------------
/fairmotion_ops/quaternion.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
3 | import math
4 | import numpy as np
5 |
6 | from fairmotion_utils import constants, utils
7 | from fairmotion_ops import conversions, math as math_ops
8 |
9 | from scipy.spatial.transform import Rotation
10 |
11 |
12 | def Q_op(Q, op, xyzw_in=True):
13 |     """
14 |     Perform operations on quaternion. The operations currently supported are
15 |     "change_order", "normalize" and "halfspace".
16 |
17 |     `change_order` changes order of quaternion to xyzw if it's in wxyz and
18 |     vice-versa
19 |     `normalize` divides the quaternion by its norm
20 |     `halfspace` negates the quaternion if w < 0
21 |
22 |     Args:
23 |         Q: Numpy array of shape (..., 4)
24 |         op: String; The operation to be performed on the quaternion. `op` can
25 |             take values "change_order", "normalize" and "halfspace"
26 |         xyzw_in: Set to True if input order is "xyzw". Otherwise, the order
27 |             "wxyz" is assumed.
28 |     """
29 |
30 |     def q2q(q):
31 |         result = q.copy()
32 |         if "normalize" in op:
33 |             norm = np.linalg.norm(result)
34 |             if norm < constants.EPSILON:
35 |                 raise Exception("Invalid input with zero length")
36 |             result /= norm
37 |         if "halfspace" in op:
38 |             w_idx = 3 if xyzw_in else 0
39 |             if result[w_idx] < 0.0:
40 |                 result *= -1.0
41 |         if "change_order" in op:
42 |             result = result[[3, 0, 1, 2]] if xyzw_in else result[[1, 2, 3, 0]]
43 |         return result
44 |
45 |     return utils._apply_fn_agnostic_to_vec_mat(Q, q2q)
46 |
47 |
48 | def Q_diff(Q1, Q2):
49 |     raise NotImplementedError
50 |
51 |
52 | def Q_mult(Q1, Q2):
53 |     """
54 |     Multiply two quaternions.
55 |     """
56 |     R1 = Rotation.from_quat(Q1)
57 |     R2 = Rotation.from_quat(Q2)
58 |     return (R1 * R2).as_quat()
59 |
60 |
61 | def Q_closest(Q1, Q2, axis):
62 |     """
63 |     This computes the optimal-in-place orientation given a target orientation
64 |     Q1 and a geodesic curve (Q2, axis). Intuitively speaking, the
65 |     optimal-in-place orientation is the closest orientation to Q1 when we are
66 |     able to rotate Q2 along the given axis. We assume Q is given in the order
67 |     of xyzw.
68 |     """
69 |     ws, vs = Q1[3], Q1[0:3]
70 |     w0, v0 = Q2[3], Q2[0:3]
71 |     u = math_ops.normalize(axis)
72 |
73 |     a = ws * w0 + np.dot(vs, v0)
74 |     b = -ws * np.dot(u, v0) + w0 * np.dot(vs, u) + np.dot(vs, np.cross(u, v0))
75 |     alpha = math.atan2(a, b)
76 |
77 |     theta1 = -2 * alpha + math.pi
78 |     theta2 = -2 * alpha - math.pi
79 |     G1 = conversions.A2Q(theta1 * u)
80 |     G2 = conversions.A2Q(theta2 * u)
81 |
82 |     if np.dot(Q1, G1) > np.dot(Q1, G2):
83 |         theta = theta1
84 |         Qnearest = Q_mult(G1, Q2)
85 |     else:
86 |         theta = theta2
87 |         Qnearest = Q_mult(G2, Q2)  # was Q_mult(G1, Q2); this branch must use G2
88 |
89 |     return Qnearest, theta
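# --- Added usage sketch (not part of the original file) ---
# Q_op applies a list of operations in one pass; here: rescale to unit
# length, then flip into the w >= 0 halfspace (both signs represent the
# same rotation).
def _demo_Q_op():
    Q = np.array([0.0, 0.0, 0.0, -2.0])  # xyzw, un-normalized
    Qn = Q_op(Q, op=["normalize", "halfspace"])
    assert np.allclose(Qn, [0.0, 0.0, 0.0, 1.0])
    return Qn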
--------------------------------------------------------------------------------
/fairmotion_utils/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
--------------------------------------------------------------------------------
/fairmotion_utils/constants.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
3 | import numpy as np
4 |
5 |
6 | EPSILON = np.finfo(float).eps
7 |
8 | EYE_R = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]], float)
9 |
10 | EYE_T = np.array(
11 |     [
12 |         [1.0, 0.0, 0.0, 0.0],
13 |         [0.0, 1.0, 0.0, 0.0],
14 |         [0.0, 0.0, 1.0, 0.0],
15 |         [0.0, 0.0, 0.0, 1.0],
16 |     ],
17 |     float,
18 | )
19 |
20 | ZERO_P = np.array([0.0, 0.0, 0.0], float)
21 |
22 | ZERO_R = np.zeros((3, 3))
23 |
24 |
25 | def eye_T():
26 |     return EYE_T.copy()
27 |
28 |
29 | def eye_R():
30 |     return EYE_R.copy()
31 |
32 |
33 | def zero_p():
34 |     return ZERO_P.copy()
35 |
36 |
37 | def zero_R():
38 |     return ZERO_R.copy()
39 |
--------------------------------------------------------------------------------
/fairmotion_utils/utils.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
3 | import numpy as np
4 | import os
5 | from functools import partial
6 | from multiprocessing import Pool
7 | import random
8 | import copy
9 |
10 | def str_to_axis(s):
11 |     if s == "x":
12 |         return np.array([1.0, 0.0, 0.0])
13 |     elif s == "y":
14 |         return np.array([0.0, 1.0, 0.0])
15 |     elif s == "z":
16 |         return np.array([0.0, 0.0, 1.0])
17 |     else:
18 |         raise Exception
19 |
20 |
21 | def axis_to_str(a):
22 |     if np.array_equal(a, [1.0, 0.0, 0.0]):
23 |         return "x"
24 |     elif np.array_equal(a, [0.0, 1.0, 0.0]):
25 |         return "y"
26 |     elif np.array_equal(a, [0.0, 0.0, 1.0]):
27 |         return "z"
28 |     else:
29 |         raise Exception
30 |
31 |
32 | def get_index(index_dict, key):
33 |     if isinstance(key, int):
34 |         return key
35 |     elif isinstance(key, str):
36 |         return index_dict[key]
37 |     else:
38 |         return index_dict[key.name]
39 |
40 |
41 | def run_parallel(func, iterable, num_cpus=20, **kwargs):
42 |     """
43 |     Run function over multiple cpus. The function must be written such that
44 |     it processes a single input value.
45 |
46 |     Args:
47 |         func: Method that is run in parallel. The first argument of func
48 |             accepts input values from iterable.
49 |         iterable: List of input values that func is executed over.
50 |         num_cpus: Number of cpus used by multiprocessing.
51 |         kwargs: Dictionary of keyword arguments that is passed on to each
52 |             parallel call to the function
53 |
54 |     Returns:
55 |         Flattened list of results from running the function on iterable
56 |         arguments
57 |     """
58 |     func_with_kwargs = partial(func, **kwargs)
59 |     with Pool(processes=num_cpus) as pool:
60 |         results = pool.map(func_with_kwargs, iterable)
61 |     return results
62 |
63 |
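# --- Added usage sketch (not part of the original file) ---
# run_parallel fans the iterable out over worker processes and forwards
# keyword arguments to every call; the worker must be a top-level function
# so that it can be pickled.
def _square_plus(x, offset=0.0):
    return x * x + offset

def _demo_run_parallel():
    return run_parallel(_square_plus, [1.0, 2.0, 3.0], num_cpus=2, offset=1.0)
    # -> [2.0, 5.0, 10.0]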
64 | def files_in_dir(
65 |     path,
66 |     ext=None,
67 |     keyword=None,
68 |     sort=False,
69 |     sample_mode=None,
70 |     sample_num=None,
71 |     keywords_exclude=[],
72 | ):
73 |     """Returns list of files in `path` directory.
74 |
75 |     Args:
76 |         path: Path to directory to list files from
77 |         ext: Extension of files to be listed
78 |         keyword: Return file if filename contains `keyword`
79 |         sort: Sort files by filename in the returned list
80 |         sample_mode: str; Use this option to return a subset of files from
81 |             `path` directory. `sample_mode` takes values 'sequential' to
82 |             return the first `sample_num` files, or 'shuffle' to return
83 |             `sample_num` randomly chosen files
84 |         sample_num: Number of files to return
85 |         keywords_exclude: files whose names contain any of these keywords are excluded
86 |     """
87 |     files = []
88 |     # r=root, d=directories, f=files
89 |     for r, d, f in os.walk(path):
90 |         for file in f:
91 |             add = True
92 |             if ext is not None and not file.endswith(ext):
93 |                 add = False
94 |             if keyword is not None and keyword not in file:
95 |                 add = False
96 |             for ke in keywords_exclude:
97 |                 if ke in file:
98 |                     add = False
99 |                     break
100 |             if add:
101 |                 files.append(os.path.join(r, file))
102 |     if sort:
103 |         files.sort()
104 |
105 |     if sample_num is None:
106 |         sample_num = len(files)
107 |     else:
108 |         sample_num = min(sample_num, len(files))
109 |
110 |     if sample_mode is None:
111 |         pass
112 |     elif sample_mode == "sequential":
113 |         files = files[:sample_num]
114 |     elif sample_mode == "shuffle":
115 |         files = random.sample(files, sample_num)  # random.shuffle returns None; sample gives a random subset
116 |     else:
117 |         raise NotImplementedError
118 |
119 |     return files
120 |
121 |
122 | def _apply_fn_agnostic_to_vec_mat(input, fn):
123 |     output = np.array([input]) if input.ndim == 1 else input
124 |     output = np.apply_along_axis(fn, 1, output)
125 |     return output[0] if input.ndim == 1 else output
126 |
127 |
128 | def create_dir_if_absent(path):
129 |     if not os.path.exists(path):
130 |         os.makedirs(path)
131 |
132 | def parse_motion(motion, start, end):
133 |     cut_motion = copy.deepcopy(motion)
134 |     cut_motion.poses = cut_motion.poses[start:end+1]
135 |     return cut_motion
136 |
--------------------------------------------------------------------------------
/fairmotion_vis/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/snuvclab/Vive_Tracker/705c990d07f9aaccf34681a61f85a76974b4c34f/fairmotion_vis/__init__.py
--------------------------------------------------------------------------------
/fairmotion_vis/camera.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 |
3 | import numpy as np
4 | from fairmotion_utils import constants
5 | from fairmotion_ops import conversions
6 |
7 |
8 | class Camera(object):
9 |     """Camera class for the visualizer.
10 |
11 |     Attributes:
12 |         pos: Position of the camera in global coordinates
13 |         origin: Point in global coordinates that the camera is pointing at
14 |         vup: Vertical axis.
Defaults to y-axis 15 | fov: Field of view in degrees 16 | """ 17 | 18 | def __init__(self, pos, origin, vup=np.array([0.0, 1.0, 0.0]), fov=45.0): 19 | self.pos = pos 20 | self.origin = origin 21 | self.vup = vup 22 | self.fov = fov 23 | 24 | def get_cam_rotation(self): 25 | def _get_cam_rotation(p_cam, p_obj, vup): 26 | z = p_obj - p_cam 27 | z /= np.linalg.norm(z) 28 | x = np.cross(vup, z) 29 | x /= np.linalg.norm(x) 30 | y = np.cross(z, x) 31 | return np.array([x, y, z]).transpose() 32 | 33 | return _get_cam_rotation(self.pos, self.origin, self.vup) 34 | 35 | def translate(self, dp, frame_local=False): 36 | R = self.get_cam_rotation() if frame_local else constants.eye_R() 37 | dt = np.dot(R, dp) 38 | self.pos += dt 39 | self.origin += dt 40 | 41 | def rotate(self, dx, dy, dz): 42 | R = self.get_cam_rotation() 43 | pos_local = np.dot(R.transpose(), self.pos - self.origin) 44 | dR = np.dot( 45 | np.dot(conversions.Ax2R(dx), conversions.Ay2R(dy)), 46 | conversions.Az2R(dz), 47 | ) 48 | R = np.dot(R, dR) 49 | # self.vup = R[:, 1] 50 | pos_new = self.origin + np.dot(R, pos_local) 51 | dp = pos_new - self.origin 52 | dp /= np.linalg.norm(dp) 53 | if ( 54 | np.linalg.norm(dp - self.vup) > 0.2 55 | and np.linalg.norm(dp + self.vup) > 0.2 56 | ): 57 | self.pos = pos_new 58 | 59 | def zoom(self, gamma, l_min=0.5): 60 | vl = self.pos - self.origin 61 | length = np.linalg.norm(vl) 62 | self.pos = self.origin + max(l_min, gamma * length) * (vl / length) 63 | 64 | def get_transform_flat(self): 65 | R = self.get_cam_rotation() 66 | R = R.transpose() 67 | p = self.pos 68 | return list(conversions.Rp2T(R, p).ravel()) 69 | 70 | def update_target_pos(self, pos, ignore_x=False, ignore_y=False, ignore_z=False): 71 | if np.array_equal(pos, self.origin): 72 | return 73 | d = pos - self.origin 74 | if ignore_x: d[0] = 0.0 75 | if ignore_y: d[1] = 0.0 76 | if ignore_z: d[2] = 0.0 77 | self.translate(d) 78 | -------------------------------------------------------------------------------- /fairmotion_vis/gl_render.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
2 | import ctypes; from PIL import Image  # needed by render_text / load_texture; missing in the original
3 | from OpenGL.GL import *
4 | from OpenGL.GLUT import *
5 | from OpenGL.GLU import *
6 | import math
7 | import numpy as np
8 |
9 | from fairmotion_utils import constants
10 | from fairmotion_ops import conversions, math as math_ops
11 |
12 |
13 | def load_texture(file):  # loads an RGBA image file into an OpenGL 2D texture and returns its id
14 |     im = Image.open(file)
15 |     ix, iy, im_data = im.size[0], im.size[1], im.tobytes("raw", "RGBA", 0, -1)
16 |
17 |     tex_id = glGenTextures(1)
18 |     glBindTexture(GL_TEXTURE_2D, tex_id)
19 |     glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
20 |     glTexImage2D(
21 |         GL_TEXTURE_2D, 0, GL_RGB, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, im_data
22 |     )
23 |     glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
24 |     glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
25 |     glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
26 |     glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
27 |     glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL)
28 |
29 |     return tex_id
30 |
31 |
32 | COLOR_SEQUENCE = [
33 |     [1, 0, 0, 1],
34 |     [0, 1, 0, 1],
35 |     [0, 0, 1, 1],
36 |     [1, 1, 0, 1],
37 |     [1, 0, 1, 1],
38 |     [0, 1, 1, 1],
39 |     [1, 0, 0, 0.5],
40 |     [0, 1, 0, 0.5],
41 |     [0, 0, 1, 0.5],
42 |     [1, 1, 0, 0.5],
43 |     [1, 0, 1, 0.5],
44 |     [0, 1, 1, 0.5],
45 | ]
46 |
47 |
48 | def glTransform(T):
49 |     glMultMatrixd(T.transpose().ravel())
50 |
51 |
52 | def glColor(color):
53 |     num_val = len(color)
54 |     if num_val == 3:
55 |         glColor3d(color[0], color[1], color[2])
56 |     elif num_val > 3:
57 |         glColor4d(color[0], color[1], color[2], color[3])
58 |     else:
59 |         raise NotImplementedError  # was `raise NotImplemented`, which is not an exception class
60 |
61 |
62 | def render_cylinder_info(
63 |     T, length, radius, scale=1.0, line_width=2.0, color=[0, 0, 0, 1], slice=10
64 | ):
65 |     glDisable(GL_LIGHTING)
66 |
67 |     glPushMatrix()
68 |     glTransform(T)
69 |     glScalef(scale, scale, scale)
70 |
71 |     glColor(color)
72 |     glPushMatrix()
73 |     glTranslated(0.0, 0.0, -0.5 * length)
74 |
75 |     render_circle(
76 |         constants.eye_T(),
77 |         r=radius,
78 |         slice=slice,
79 |         scale=1.0,
80 |         line_width=line_width,
81 |         color=color,
82 |         draw_plane="xy",
83 |     )
84 |
85 |     glTranslated(0.0, 0.0, length)
86 |
87 |     render_circle(
88 |         constants.eye_T(),
89 |         r=radius,
90 |         slice=slice,
91 |         scale=1.0,
92 |         line_width=line_width,
93 |         color=color,
94 |         draw_plane="xy",
95 |     )
96 |     glPopMatrix()
97 |
98 |     render_line(
99 |         p1=[radius, 0.0, -0.5 * length],
100 |         p2=[radius, 0.0, 0.5 * length],
101 |         color=color,
102 |         line_width=line_width,
103 |     )
104 |
105 |     glPopMatrix()
106 |
107 |     glEnable(GL_LIGHTING)
108 |
109 |
110 | def render_capsule_info(
111 |     T, length, radius, scale=1.0, line_width=2.0, color=[0, 0, 0, 1], slice=10
112 | ):
113 |     render_cylinder_info(T, length, radius, scale, line_width, color, slice)
114 |
115 |
116 | def render_sphere_info(
117 |     T, r=1.0, slice=10, scale=1.0, line_width=2.0, color=[0, 0, 0, 1]
118 | ):
119 |
120 |     render_circle(
121 |         T=T,
122 |         r=r,
123 |         slice=slice,
124 |         scale=scale,
125 |         line_width=line_width,
126 |         color=color,
127 |         draw_plane="xy",
128 |     )
129 |     render_circle(
130 |         T=T,
131 |         r=r,
132 |         slice=slice,
133 |         scale=scale,
134 |         line_width=line_width,
135 |         color=color,
136 |         draw_plane="yz",
137 |     )
138 |     render_circle(
139 |         T=T,
140 |         r=r,
141 |         slice=slice,
142 |         scale=scale,
143 |         line_width=line_width,
144 |         color=color,
145 |         draw_plane="zx",
146 |     )
147 |
148 |
149 | def render_cylinder(
150 |     T, length, radius, scale=1.0, color=[0, 0, 0, 1], slice=16
151 | ):
152 |     quadric = gluNewQuadric()
153 |     gluQuadricDrawStyle(quadric, GLU_FILL)
154 |
gluQuadricNormals(quadric, GLU_SMOOTH) 155 | 156 | glEnable(GL_DEPTH_TEST) 157 | glPushMatrix() 158 | glTransform(T) 159 | glScalef(scale, scale, scale) 160 | 161 | glColor(color) 162 | 163 | glTranslated(0.0, 0.0, -0.5 * length) 164 | gluCylinder(quadric, radius, radius, length, slice, 1) 165 | gluDisk(quadric, 0.0, radius, slice, 1) 166 | glTranslated(0.0, 0.0, length) 167 | gluDisk(quadric, 0.0, radius, slice, 1) 168 | 169 | glPopMatrix() 170 | 171 | 172 | def render_capsule(T, length, radius, scale=1.0, color=[0, 0, 0, 1], slice=16): 173 | quadric = gluNewQuadric() 174 | gluQuadricDrawStyle(quadric, GLU_FILL) 175 | gluQuadricNormals(quadric, GLU_SMOOTH) 176 | 177 | glEnable(GL_DEPTH_TEST) 178 | glPushMatrix() 179 | glTransform(T) 180 | glScalef(scale, scale, scale) 181 | 182 | glColor(color) 183 | 184 | glTranslated(0.0, 0.0, -0.5 * length) 185 | gluSphere(quadric, radius, slice, slice) 186 | gluCylinder(quadric, radius, radius, length, slice, 1) 187 | glTranslated(0.0, 0.0, length) 188 | gluSphere(quadric, radius, slice, slice) 189 | 190 | glPopMatrix() 191 | 192 | 193 | def render_cube( 194 | T, size=[1.0, 1.0, 1.0], color=[0, 0, 0, 1], solid=True, line_width=1.0 195 | ): 196 | glPushMatrix() 197 | glTransform(T) 198 | glScalef(size[0], size[1], size[2]) 199 | 200 | glColor(color) 201 | 202 | if solid: 203 | glutSolidCube(1.0) 204 | else: 205 | glLineWidth(line_width) 206 | glutWireCube(1.0) 207 | 208 | glPopMatrix() 209 | 210 | 211 | def render_sphere(T, r=1.0, slice1=10, slice2=10, scale=1.0, color=[0, 0, 0]): 212 | glPushMatrix() 213 | glTransform(T) 214 | glScalef(scale, scale, scale) 215 | 216 | glColor(color) 217 | 218 | glutSolidSphere(r, slice1, slice2) 219 | 220 | glPopMatrix() 221 | 222 | 223 | def render_disk( 224 | T, 225 | r_inner=0.0, 226 | r_outer=1.0, 227 | slice1=32, 228 | slice2=1, 229 | scale=1.0, 230 | color=[0.8, 0.8, 0.8, 1.0], 231 | ): 232 | quadric = gluNewQuadric() 233 | gluQuadricDrawStyle(quadric, GLU_FILL) 234 | gluQuadricNormals(quadric, GLU_SMOOTH) 235 | 236 | glPushMatrix() 237 | glTransform(T) 238 | glScalef(scale, scale, scale) 239 | 240 | glColor(color) 241 | 242 | gluDisk(quadric, r_inner, r_outer, slice1, slice2) 243 | 244 | glPopMatrix() 245 | 246 | 247 | def render_circle( 248 | T, 249 | r=1.0, 250 | slice=128, 251 | scale=1.0, 252 | line_width=1.0, 253 | color=[0, 0, 0], 254 | draw_plane="xy", 255 | ): 256 | glPushMatrix() 257 | glTransform(T) 258 | glScalef(scale, scale, scale) 259 | 260 | glColor(color) 261 | glLineWidth(line_width) 262 | 263 | glBegin(GL_LINE_LOOP) 264 | for i in range(slice): 265 | theta = 2.0 * i * math.pi / slice 266 | if draw_plane == "xy": 267 | glVertex3f(math.cos(theta) * r, math.sin(theta) * r, 0.0) 268 | elif draw_plane == "yz": 269 | glVertex3f(0.0, math.cos(theta) * r, math.sin(theta) * r) 270 | else: 271 | glVertex3f(math.cos(theta) * r, 0.0, math.sin(theta) * r) 272 | glEnd() 273 | 274 | glPopMatrix() 275 | 276 | 277 | def render_point(p, scale=1.0, radius=1.0, color=[1.0, 1.0, 1.0, 1.0]): 278 | glPushMatrix() 279 | glTranslated(p[0], p[1], p[2]) 280 | glScalef(scale, scale, scale) 281 | 282 | glColor(color) 283 | glutSolidSphere(radius, 10, 10) 284 | 285 | glPopMatrix() 286 | 287 | 288 | def render_line(p1, p2, color=[0.0, 0.0, 0.0, 1.0], line_width=1.0): 289 | glLineWidth(line_width) 290 | 291 | glBegin(GL_LINES) 292 | glColor(color) 293 | glVertex3d(p1[0], p1[1], p1[2]) 294 | glVertex3d(p2[0], p2[1], p2[2]) 295 | glEnd() 296 | 297 | 298 | def render_quad( 299 | p1, 300 | p2, 301 | p3, 302 | p4, 303 | n=None, 
304 | color=[1.0, 1.0, 1.0, 1.0], 305 | tex_id=None, 306 | tex_param1=[0, 0], 307 | tex_param2=[1, 0], 308 | tex_param3=[1, 1], 309 | tex_param4=[0, 1], 310 | ): 311 | 312 | draw_tex = tex_id is not None 313 | if draw_tex: 314 | glEnable(GL_TEXTURE_2D) 315 | glBindTexture(GL_TEXTURE_2D, tex_id) 316 | else: 317 | glColor(color) 318 | 319 | if n is None: 320 | n = math_ops.normalize(np.cross(p3 - p2, p2 - p1)) 321 | 322 | glBegin(GL_QUADS) 323 | 324 | if draw_tex: 325 | glTexCoord2f(tex_param1[0], tex_param1[1]) 326 | glNormal3d(n[0], n[1], n[2]) 327 | glVertex3d(p1[0], p1[1], p1[2]) 328 | 329 | if draw_tex: 330 | glTexCoord2f(tex_param2[0], tex_param2[1]) 331 | glNormal3d(n[0], n[1], n[2]) 332 | glVertex3d(p2[0], p2[1], p2[2]) 333 | 334 | if draw_tex: 335 | glTexCoord2f(tex_param3[0], tex_param3[1]) 336 | glNormal3d(n[0], n[1], n[2]) 337 | glVertex3d(p3[0], p3[1], p3[2]) 338 | 339 | if draw_tex: 340 | glTexCoord2f(tex_param4[0], tex_param4[1]) 341 | glNormal3d(n[0], n[1], n[2]) 342 | glVertex3d(p4[0], p4[1], p4[2]) 343 | 344 | glEnd() 345 | 346 | if draw_tex: 347 | glDisable(GL_TEXTURE_2D) 348 | 349 | 350 | def render_tri(p1, p2, p3, color=[1.0, 1.0, 1.0, 1.0]): 351 | glColor(color) 352 | glBegin(GL_TRIANGLES) 353 | glVertex3d(p1[0], p1[1], p1[2]) 354 | glVertex3d(p2[0], p2[1], p2[2]) 355 | glVertex3d(p3[0], p3[1], p3[2]) 356 | glEnd() 357 | 358 | 359 | def render_tet(p1, p2, p3, p4, color=[1.0, 1.0, 1.0, 1.0]): 360 | render_tri(p1, p2, p3, color) 361 | render_tri(p1, p2, p4, color) 362 | render_tri(p2, p3, p4, color) 363 | render_tri(p3, p1, p4, color) 364 | 365 | 366 | def render_tet_line( 367 | p1, p2, p3, p4, color=[0.0, 0.0, 0.0, 1.0], line_width=1.0 368 | ): 369 | render_line(p1, p2, color, line_width) 370 | render_line(p2, p3, color, line_width) 371 | render_line(p3, p1, color, line_width) 372 | render_line(p1, p4, color, line_width) 373 | render_line(p2, p4, color, line_width) 374 | render_line(p3, p4, color, line_width) 375 | 376 | 377 | def render_ground_texture( 378 | tex_id, 379 | size=[20.0, 20.0], 380 | dsize=[1.0, 1.0], 381 | axis="y", 382 | origin=True, 383 | use_arrow=True, 384 | circle_cut=False, 385 | circle_color=[1, 1, 1, 1], 386 | circle_offset=0.001, 387 | ): 388 | assert tex_id > 0 389 | 390 | lx = size[0] 391 | lz = size[1] 392 | dx = dsize[0] 393 | dz = dsize[1] 394 | nx = int(lx / dx) + 1 395 | nz = int(lz / dz) + 1 396 | 397 | if axis is "x": 398 | raise NotImplementedError 399 | elif axis is "y": 400 | up_vec = np.array([0.0, 1.0, 0.0]) 401 | p1 = np.array([-0.5 * size[0], 0, -0.5 * size[0]]) 402 | p2 = np.array([0.5 * size[0], 0, -0.5 * size[0]]) 403 | p3 = np.array([0.5 * size[0], 0, 0.5 * size[0]]) 404 | p4 = np.array([-0.5 * size[0], 0, 0.5 * size[0]]) 405 | elif axis is "z": 406 | up_vec = np.array([0.0, 0.0, 1.0]) 407 | p1 = np.array([-0.5 * size[0], -0.5 * size[0], 0]) 408 | p2 = np.array([0.5 * size[0], -0.5 * size[0], 0]) 409 | p3 = np.array([0.5 * size[0], 0.5 * size[0], 0]) 410 | p4 = np.array([-0.5 * size[0], 0.5 * size[0], 0]) 411 | 412 | render_quad( 413 | p1, 414 | p2, 415 | p3, 416 | p4, 417 | tex_id=tex_id, 418 | tex_param1=[0, 0], 419 | tex_param2=[size[0] / dsize[0], 0], 420 | tex_param3=[size[0] / dsize[0], size[1] / dsize[1]], 421 | tex_param4=[0, size[1] / dsize[0]], 422 | ) 423 | 424 | if origin: 425 | render_transform(constants.eye_T(), use_arrow=use_arrow) 426 | 427 | if circle_cut: 428 | r_inner = min(0.5 * size[0], 0.5 * size[1]) 429 | r_outer = 1.5 * max(0.5 * size[0], 0.5 * size[1]) 430 | offset = circle_offset * up_vec 431 | 
glDisable(GL_LIGHTING) 432 | glPushMatrix() 433 | glTranslatef(offset[0], offset[1], offset[2]) 434 | if axis is "y": 435 | glRotated(-90.0, 1, 0, 0) 436 | render_disk( 437 | constants.eye_T(), 438 | r_inner=r_inner, 439 | r_outer=r_outer, 440 | slice1=64, 441 | slice2=32, 442 | scale=1.0, 443 | color=circle_color, 444 | ) 445 | glPopMatrix() 446 | 447 | 448 | def render_path( 449 | data, color=[0.0, 0.0, 0.0], scale=1.0, line_width=1.0, point_size=1.0 450 | ): 451 | glColor(color) 452 | glLineWidth(line_width) 453 | glBegin(GL_LINE_STRIP) 454 | for d in data: 455 | R, p = conversions.T2Rp(d) 456 | glVertex3d(p[0], p[1], p[2]) 457 | glEnd() 458 | 459 | for d in data: 460 | render_transform(d, scale, line_width, point_size) 461 | 462 | 463 | def render_arrow(p1, p2, D=0.1, color=[1.0, 0.5, 0.0], closed=False): 464 | quadric = gluNewQuadric() 465 | gluQuadricDrawStyle(quadric, GLU_FILL) 466 | gluQuadricNormals(quadric, GLU_SMOOTH) 467 | 468 | glColor(color) 469 | RADPERDEG = 0.0174533 470 | d = p2 - p1 471 | x = d[0] 472 | y = d[1] 473 | z = d[2] 474 | L = np.linalg.norm(d) 475 | 476 | glPushMatrix() 477 | 478 | glTranslated(p1[0], p1[1], p1[2]) 479 | 480 | if x != 0.0 or y != 0.0: 481 | glRotated(math.atan2(y, x) / RADPERDEG, 0.0, 0.0, 1.0) 482 | glRotated( 483 | math.atan2(math.sqrt(x * x + y * y), z) / RADPERDEG, 0.0, 1.0, 0.0 484 | ) 485 | elif z < 0: 486 | glRotated(180, 1.0, 0.0, 0.0) 487 | 488 | glTranslatef(0, 0, L - 4 * D) 489 | 490 | gluCylinder(quadric, 2 * D, 0.0, 4 * D, 32, 1) 491 | if closed: 492 | gluDisk(quadric, 0.0, 2 * D, 32, 1) 493 | 494 | glTranslatef(0, 0, -L + 4 * D) 495 | 496 | gluCylinder(quadric, D, D, L - 4 * D, 32, 1) 497 | if closed: 498 | gluDisk(quadric, 0.0, D, 32, 1) 499 | 500 | glPopMatrix() 501 | 502 | 503 | def render_transform( 504 | T, 505 | scale=0.1, 506 | line_width=1.0, 507 | point_size=0.05, 508 | render_pos=True, 509 | render_ori=[True, True, True], 510 | color_pos=[0, 0, 0, 1], 511 | color_ori=[[1, 0, 0], [0, 1, 0], [0, 0, 1]], 512 | use_arrow=False, 513 | ): 514 | glLineWidth(line_width) 515 | 516 | R, p = conversions.T2Rp(T) 517 | 518 | glPushMatrix() 519 | glTranslated(p[0], p[1], p[2]) 520 | glScalef(scale, scale, scale) 521 | 522 | if render_pos: 523 | glColor(color_pos) 524 | glutSolidSphere(0.5 * point_size, 10, 10) 525 | 526 | if render_ori: 527 | o = np.zeros(3) 528 | if use_arrow: 529 | if render_ori[0]: 530 | render_arrow( 531 | o, o + R[:, 0], D=line_width * 0.02, color=color_ori[0] 532 | ) 533 | if render_ori[1]: 534 | render_arrow( 535 | o, o + R[:, 1], D=line_width * 0.02, color=color_ori[1] 536 | ) 537 | if render_ori[2]: 538 | render_arrow( 539 | o, o + R[:, 2], D=line_width * 0.02, color=color_ori[2] 540 | ) 541 | else: 542 | if render_ori[0]: 543 | render_line(o, o + R[:, 0], color=color_ori[0]) 544 | if render_ori[1]: 545 | render_line(o, o + R[:, 1], color=color_ori[1]) 546 | if render_ori[2]: 547 | render_line(o, o + R[:, 2], color=color_ori[2]) 548 | 549 | glPopMatrix() 550 | 551 | 552 | # def render_ground( 553 | # size=[20.0, 20.0], 554 | # dsize=[1.0, 1.0], 555 | # color=[0.0, 0.0, 0.0, 1.0], 556 | # line_width=1.0, 557 | # axis="y", 558 | # origin=True, 559 | # use_arrow=False, 560 | # lighting=False, 561 | # ): 562 | # lx = size[0] 563 | # lz = size[1] 564 | # dx = dsize[0] 565 | # dz = dsize[1] 566 | # nx = int(lx / dx) + 1 567 | # nz = int(lz / dz) + 1 568 | 569 | # glColor(color) 570 | # glLineWidth(line_width) 571 | # if lighting: 572 | # glEnable(GL_LIGHTING) 573 | 574 | # if axis is "x": 575 | # for i in 
np.linspace(-0.5 * lx, 0.5 * lx, nx): 576 | # glBegin(GL_LINES) 577 | # glVertex3d(0, i, -0.5 * lz) 578 | # glVertex3d(0, i, 0.5 * lz) 579 | # glEnd() 580 | # for i in np.linspace(-0.5 * lz, 0.5 * lz, nz): 581 | # glBegin(GL_LINES) 582 | # glVertex3d(0, -0.5 * lx, i) 583 | # glVertex3d(0, 0.5 * lx, i) 584 | # glEnd() 585 | # elif axis is "y": 586 | # for i in np.linspace(-0.5 * lx, 0.5 * lx, nx): 587 | # glBegin(GL_LINES) 588 | # glVertex3d(i, 0, -0.5 * lz) 589 | # glVertex3d(i, 0, 0.5 * lz) 590 | # glEnd() 591 | # for i in np.linspace(-0.5 * lz, 0.5 * lz, nz): 592 | # glBegin(GL_LINES) 593 | # glVertex3d(-0.5 * lx, 0, i) 594 | # glVertex3d(0.5 * lx, 0, i) 595 | # glEnd() 596 | # elif axis is "z": 597 | # for i in np.linspace(-0.5 * lx, 0.5 * lx, nx): 598 | # glBegin(GL_LINES) 599 | # glVertex3d(i, -0.5 * lz, 0) 600 | # glVertex3d(i, 0.5 * lz, 0) 601 | # glEnd() 602 | # for i in np.linspace(-0.5 * lz, 0.5 * lz, nz): 603 | # glBegin(GL_LINES) 604 | # glVertex3d(-0.5 * lx, i, 0) 605 | # glVertex3d(0.5 * lx, i, 0) 606 | # glEnd() 607 | 608 | # if origin: 609 | # render_transform(constants.eye_T(), use_arrow=use_arrow) 610 | 611 | 612 | def render_ground( 613 | size=[20.0, 20.0], 614 | dsize=[1.0, 1.0], 615 | color=[0.0, 0.0, 0.0, 1.0], 616 | line_width=1.0, 617 | axis="y", 618 | origin=True, 619 | use_arrow=False, 620 | lighting=False, 621 | fillIn= False, 622 | ): 623 | lx = size[0] 624 | lz = size[1] 625 | dx = dsize[0] 626 | dz = dsize[1] 627 | nx = int(lx / dx) + 1 628 | nz = int(lz / dz) + 1 629 | 630 | glColor(color) 631 | glLineWidth(line_width) 632 | if lighting: 633 | glEnable(GL_LIGHTING) 634 | 635 | if fillIn: 636 | glBegin(GL_QUADS) 637 | cnt = 0 638 | if axis is "y": 639 | for i in np.linspace(-0.5 * lx, 0.5 * lx, nx): 640 | for j in np.linspace(-0.5 * lz, 0.5 * lz, nz): 641 | if cnt % 2 == 0: glColor3d(0.83, 0.83, 0.83) 642 | else: glColor3d(0.95, 0.95, 0.95) 643 | glNormal3d(0, 1, 0) 644 | glVertex3d(i, 0, j) 645 | glVertex3d(i, 0, j+dz) 646 | glVertex3d(i+dx, 0, j+dz) 647 | glVertex3d(i+dx, 0, j) 648 | cnt += 1 649 | glEnd() 650 | else: 651 | if axis is "x": 652 | for i in np.linspace(-0.5 * lx, 0.5 * lx, nx): 653 | glBegin(GL_LINES) 654 | glVertex3d(0, i, -0.5 * lz) 655 | glVertex3d(0, i, 0.5 * lz) 656 | glEnd() 657 | for i in np.linspace(-0.5 * lz, 0.5 * lz, nz): 658 | glBegin(GL_LINES) 659 | glVertex3d(0, -0.5 * lx, i) 660 | glVertex3d(0, 0.5 * lx, i) 661 | glEnd() 662 | elif axis is "y": 663 | for i in np.linspace(-0.5 * lx, 0.5 * lx, nx): 664 | glBegin(GL_LINES) 665 | glVertex3d(i, 0, -0.5 * lz) 666 | glVertex3d(i, 0, 0.5 * lz) 667 | glEnd() 668 | for i in np.linspace(-0.5 * lz, 0.5 * lz, nz): 669 | glBegin(GL_LINES) 670 | glVertex3d(-0.5 * lx, 0, i) 671 | glVertex3d(0.5 * lx, 0, i) 672 | glEnd() 673 | elif axis is "z": 674 | for i in np.linspace(-0.5 * lx, 0.5 * lx, nx): 675 | glBegin(GL_LINES) 676 | glVertex3d(i, -0.5 * lz, 0) 677 | glVertex3d(i, 0.5 * lz, 0) 678 | glEnd() 679 | for i in np.linspace(-0.5 * lz, 0.5 * lz, nz): 680 | glBegin(GL_LINES) 681 | glVertex3d(-0.5 * lx, i, 0) 682 | glVertex3d(0.5 * lx, i, 0) 683 | glEnd() 684 | 685 | 686 | if origin: 687 | render_transform(constants.eye_T(), use_arrow=use_arrow, scale=0.5) 688 | 689 | def render_line_2D(p1, p2, line_width=1.0, color=[0, 0, 0, 1]): 690 | glColor(color) 691 | glLineWidth(line_width) 692 | glBegin(GL_LINES) 693 | glVertex2f(p1[0], p1[1]) 694 | glVertex2f(p2[0], p2[1]) 695 | glEnd() 696 | 697 | 698 | def render_point_2D(p, size=5, color=[0, 0, 0, 1]): 699 | glColor(color) 700 | glPointSize(size) 701 | 
glBegin(GL_POINTS) 702 | glVertex2f(p[0], p[1]) 703 | glEnd() 704 | 705 | 706 | def render_quad_2D(p1, p2, p3, p4, color=[0, 0, 0, 1]): 707 | glColor(color) 708 | glBegin(GL_QUADS) 709 | glVertex2d(p1[0], p1[1]) 710 | glVertex2d(p2[0], p2[1]) 711 | glVertex2d(p3[0], p3[1]) 712 | glVertex2d(p4[0], p4[1]) 713 | glEnd() 714 | 715 | 716 | def render_text( 717 | text, pos, font=GLUT_BITMAP_TIMES_ROMAN_10, color=[0, 0, 0, 1] 718 | ): 719 | glPushAttrib(GL_DEPTH_TEST | GL_LIGHTING) 720 | 721 | glDisable(GL_DEPTH_TEST) 722 | glDisable(GL_LIGHTING) 723 | glColor(color) 724 | 725 | glRasterPos2f(pos[0], pos[1]) 726 | for ch in text: 727 | glutBitmapCharacter(font, ctypes.c_int(ord(ch))) 728 | 729 | glPopAttrib() 730 | 731 | 732 | def render_pyramid( 733 | T=None, scale=1.0, base_x=1.0, base_z=1.0, height=1.0, color=[0, 0, 0, 1] 734 | ): 735 | glPushMatrix() 736 | 737 | if T is not None: 738 | glTransform(T) 739 | 740 | glScalef(scale, scale, scale) 741 | glColor(color) 742 | 743 | p1 = np.array([0.5 * base_x, 0, 0.5 * base_z]) 744 | p2 = np.array([-0.5 * base_x, 0, 0.5 * base_z]) 745 | p3 = np.array([-0.5 * base_x, 0, -0.5 * base_z]) 746 | p4 = np.array([0.5 * base_x, 0, -0.5 * base_z]) 747 | p5 = np.array([0, height, 0]) 748 | 749 | render_quad(p1, p2, p3, p4, color=color) 750 | render_tri(p1, p2, p5, color=color) 751 | render_tri(p2, p3, p5, color=color) 752 | render_tri(p3, p4, p5, color=color) 753 | render_tri(p4, p1, p5, color=color) 754 | 755 | glPopMatrix() 756 | 757 | 758 | def render_graph_base_2D(origin=(0, 0), axis_len=150, pad_len=30): 759 | p1 = (origin[0] - pad_len, origin[1] + pad_len) 760 | p2 = (origin[0] + pad_len + axis_len, origin[1] + pad_len) 761 | p3 = (origin[0] + pad_len + axis_len, origin[1] - pad_len - axis_len) 762 | p4 = (origin[0] - pad_len, origin[1] - pad_len - axis_len) 763 | render_quad_2D(p1, p2, p3, p4, color=[0.9, 0.9, 0.9, 0.8]) 764 | 765 | # X-axis 766 | p1 = origin 767 | p2 = (origin[0] + axis_len, origin[1]) 768 | render_line_2D(p1, p2, line_width=3.0) 769 | 770 | # Y-axis 771 | p1 = origin 772 | p2 = (origin[0], origin[1] - axis_len) 773 | render_line_2D(p1, p2, line_width=3.0) 774 | 775 | 776 | def render_graph_data_point_2D( 777 | x_data, 778 | y_data, 779 | x_range=(0, 1), 780 | y_range=(0, 1), 781 | color=[0, 0, 0, 1], 782 | point_size=1.0, 783 | origin=(0, 0), 784 | axis_len=150, 785 | pad_len=30, 786 | ): 787 | assert len(x_data) == len(y_data) 788 | num_data = len(x_data) 789 | if num_data == 0: 790 | return 791 | x_range_len = x_range[1] - x_range[0] 792 | y_range_len = y_range[1] - y_range[0] 793 | for i in range(num_data): 794 | x_cur, y_cur = x_data[i], y_data[i] 795 | x = origin[0] + axis_len * (x_cur - x_range[0]) / x_range_len 796 | y = origin[1] - axis_len * (y_cur - y_range[0]) / y_range_len 797 | render_point_2D(p=(x, y), size=point_size, color=color) 798 | 799 | 800 | def render_graph_data_line_2D( 801 | x_data, 802 | y_data, 803 | x_range=(0, 1), 804 | y_range=(0, 1), 805 | color=[0, 0, 0, 1], 806 | line_width=1.0, 807 | origin=(0, 0), 808 | axis_len=150, 809 | pad_len=30, 810 | multiple_data=False, 811 | ): 812 | if multiple_data: 813 | x = x_data 814 | y = y_data 815 | x_r = x_range 816 | y_r = y_range 817 | c = color 818 | l_w = line_width 819 | else: 820 | x = [x_data] 821 | y = [y_data] 822 | x_r = [x_range] 823 | y_r = [y_range] 824 | c = [color] 825 | l_w = [line_width] 826 | 827 | assert len(x) == len(y) 828 | for i in range(len(x)): 829 | num_data = len(x[i]) 830 | if num_data <= 1: 831 | return 832 | x_prev, y_prev = x[i][0], 
y[i][0] 833 | x_range_len = x_r[i][1] - x_r[i][0] 834 | y_range_len = y_r[i][1] - y_r[i][0] 835 | for j in range(1, num_data): 836 | x_cur, y_cur = x[i][j], y[i][j] 837 | x0 = origin[0] + axis_len * (x_prev - x_r[i][0]) / x_range_len 838 | y0 = origin[1] - axis_len * (y_prev - y_r[i][0]) / y_range_len 839 | x1 = origin[0] + axis_len * (x_cur - x_r[i][0]) / x_range_len 840 | y1 = origin[1] - axis_len * (y_cur - y_r[i][0]) / y_range_len 841 | render_line_2D( 842 | p1=(x0, y0), p2=(x1, y1), line_width=l_w[i], color=c[i] 843 | ) 844 | x_prev, y_prev = x_cur, y_cur 845 | 846 | 847 | def render_progress_bar_2D_horizontal( 848 | progress, 849 | origin=(0, 0), 850 | width=100, 851 | height=10, 852 | line_width=2.0, 853 | color_base=[0.5, 0.5, 0.5, 1], 854 | color_input=[0, 0, 0, 1], 855 | ): 856 | progress = np.clip(progress, 0.0, 1.0) 857 | p1 = (origin[0], origin[1]) 858 | p2 = (origin[0] + progress * width, origin[1]) 859 | p3 = (origin[0] + progress * width, origin[1] + height) 860 | p4 = (origin[0], origin[1] + height) 861 | p22 = (origin[0] + width, origin[1]) 862 | p33 = (origin[0] + width, origin[1] + height) 863 | render_quad_2D(p1, p2, p3, p4, color=color_input) 864 | render_line_2D(p1=p1, p2=p22, line_width=line_width, color=color_base) 865 | render_line_2D(p1=p22, p2=p33, line_width=line_width, color=color_base) 866 | render_line_2D(p1=p33, p2=p4, line_width=line_width, color=color_base) 867 | render_line_2D(p1=p4, p2=p1, line_width=line_width, color=color_base) 868 | 869 | 870 | def render_progress_bar_2D_vertical( 871 | progress, 872 | origin=(0, 0), 873 | width=10, 874 | height=100, 875 | line_width=2.0, 876 | color_base=[0.5, 0.5, 0.5, 1], 877 | color_input=[0, 0, 0, 1], 878 | ): 879 | progress = np.clip(progress, 0.0, 1.0) 880 | p1 = (origin[0], origin[1]) 881 | p2 = (origin[0] + width, origin[1]) 882 | p3 = (origin[0] + width, origin[1] + progress * height) 883 | p4 = (origin[0], origin[1] + progress * height) 884 | p33 = (origin[0] + width, origin[1] + height) 885 | p44 = (origin[0], origin[1] + height) 886 | render_quad_2D(p1, p2, p3, p4, color=color_input) 887 | render_line_2D(p1=p1, p2=p2, line_width=line_width, color=color_base) 888 | render_line_2D(p1=p2, p2=p33, line_width=line_width, color=color_base) 889 | render_line_2D(p1=p33, p2=p44, line_width=line_width, color=color_base) 890 | render_line_2D(p1=p44, p2=p1, line_width=line_width, color=color_base) 891 | 892 | 893 | def render_progress_circle_2D( 894 | progress, 895 | origin=(0, 0), 896 | radius=100, 897 | line_width=2.0, 898 | color_base=[0.5, 0.5, 0.5, 1], 899 | color_input=[1, 0, 0, 1], 900 | scale_input=0.1, 901 | ): 902 | p = np.array([origin[0], origin[1], 0]) 903 | T = conversions.p2T(p) 904 | render_circle(T=T, r=radius, line_width=line_width, color=color_base) 905 | theta = 2 * math.pi * progress 906 | p += radius * np.array([math.cos(theta), math.sin(theta), 0]) 907 | render_point_2D((p[0], p[1]), size=scale_input*radius, color=color_input) 908 | 909 | 910 | def render_direction_input_2D( 911 | val, 912 | val_max, 913 | origin=(0, 0), 914 | radius=100, 915 | line_width=2.0, 916 | color_base=[0.5, 0.5, 0.5, 1], 917 | color_input=[1, 0, 0, 1], 918 | scale_input=0.1, 919 | ): 920 | v = np.array([val[0] / val_max[0], val[1] / val_max[1]]) 921 | v *= radius 922 | p = np.array([origin[0], origin[1], 0]) 923 | T = conversions.p2T(p) 924 | render_circle(T=T, r=radius, line_width=line_width, color=color_base) 925 | render_line_2D( 926 | p1=origin, p2=origin + v, line_width=line_width, color=color_input 927 | ) 928 | 
render_point_2D(origin, size=scale_input*radius, color=[0.5, 0.5, 0.5, 1]) 929 | 930 | 931 | def render_matrix( 932 | m, 933 | min_val=0.0, 934 | max_val=1.0, 935 | origin=(0, 0), 936 | width=100, 937 | height=100, 938 | min_color=[1, 1, 1], 939 | max_color=[1, 0, 0], 940 | line_width=1.0, 941 | ): 942 | assert min_val != max_val 943 | glPushMatrix() 944 | glTranslatef(origin[0], origin[1], 0) 945 | 946 | dim_x, dim_y = m.shape 947 | dx = width / dim_x 948 | dy = height / dim_y 949 | _min_color = np.array(min_color) 950 | _max_color = np.array(max_color) 951 | d_val = max_val - min_val 952 | d_color = _max_color - _min_color 953 | for i in range(dim_x): 954 | for j in range(dim_y): 955 | p1 = (dx * i, dy * j) 956 | p2 = (dx * i + dx, dy * j) 957 | p3 = (dx * i + dx, dy * j + dy) 958 | p4 = (dx * i, dy * j + dy) 959 | val = (m[i][j] - min_val) / d_val 960 | color = val * d_color + _min_color 961 | render_quad_2D(p1, p2, p3, p4, color=color) 962 | for i in range(dim_x): 963 | p1 = (dx * i, 0) 964 | p2 = (dx * i, height) 965 | render_line_2D(p1, p2, line_width=line_width) 966 | render_line_2D((width, 0), (width, height), line_width=line_width) 967 | for j in range(dim_y): 968 | p1 = (0, dy * j) 969 | p2 = (width, dy * j) 970 | render_line_2D(p1, p2, line_width=line_width) 971 | render_line_2D((0, height), (width, height), line_width=line_width) 972 | 973 | glPopMatrix() 974 | -------------------------------------------------------------------------------- /fairmotion_vis/glut_viewer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | import os 3 | # os.environ['PYOPENGL_PLATFORM'] = 'egl' 4 | from OpenGL.GL import * 5 | from OpenGL.GLU import * 6 | from OpenGL.GLUT import * 7 | # from OpenGL.raw.GL.ARB.vertex_array_object import glGenVertexArrays, \ 8 | # glBindVertexArray 9 | import sys 10 | import numpy as np 11 | from fairmotion_vis import camera, utils 12 | 13 | 14 | 15 | class Viewer: 16 | """Viewer class builds general infrastructure to implement visualizer 17 | class for motion sequences. 
18 | 19 | Attributes: 20 | title: Title displayed on visualizer window 21 | cam: Camera object for the scene 22 | size: Tuple; Visualizer window dimensions 23 | mouse_last_pos: Tuple; Stores last recorded position of mouse on the 24 | screen 25 | pressed_button: int; Stores the mouse button currently held down, or None 26 | time_checker: Object of utils.TimeChecker class to keep track of UNIX 27 | time and playback time 28 | 29 | To create a custom visualizer, extend this class and implement the 30 | following methods: 31 | - render_callback 32 | - idle_callback 33 | - keyboard_callback (optional) 34 | - overlay_callback (optional) 35 | 36 | Once the viewer is initialized, call the `run` method to display the visualization 37 | """ 38 | 39 | def __init__( 40 | self, title="glutgui_base", cam=None, size=(800, 600), 41 | bgcolor=[1.0, 1.0, 1.0, 1.0], use_msaa=False, mouse_sensitivity=0.005, 42 | ): 43 | self.title = title 44 | self.window = None 45 | self.window_size = size 46 | self.mouse_last_pos = None 47 | self.pressed_button = None 48 | self.bgcolor = bgcolor 49 | self.use_msaa = use_msaa 50 | self.mouse_sensitivity = mouse_sensitivity 51 | 52 | self.time_checker = utils.TimeChecker() 53 | if cam is None: 54 | self.cam_cur = camera.Camera( 55 | pos=np.array([0.0, 2.0, 4.0]), 56 | origin=np.array([0.0, 0.0, 0.0]), 57 | vup=np.array([0.0, 1.0, 0.0]), 58 | fov=45.0, 59 | ) 60 | else: 61 | self.cam_cur = cam 62 | 63 | def idle_callback(self): 64 | pass 65 | 66 | def overlay_callback(self): 67 | pass 68 | 69 | def keyboard_callback(self, key): 70 | return True 71 | 72 | def render_callback(self): 73 | return 74 | 75 | def _init_GL(self, w, h): 76 | glDisable(GL_CULL_FACE) 77 | glEnable(GL_DEPTH_TEST) 78 | 79 | glEnable(GL_BLEND) 80 | glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) 81 | 82 | glEnable(GL_DITHER) 83 | glShadeModel(GL_SMOOTH) 84 | glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST) 85 | 86 | glDepthFunc(GL_LEQUAL) 87 | glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST) 88 | 89 | glClearColor( 90 | self.bgcolor[0], 91 | self.bgcolor[1], 92 | self.bgcolor[2], 93 | self.bgcolor[3]) 94 | glClear(GL_COLOR_BUFFER_BIT) 95 | 96 | ambient = [0.2, 0.2, 0.2, 1.0] 97 | diffuse = [0.6, 0.6, 0.6, 1.0] 98 | front_mat_shininess = [60.0] 99 | front_mat_specular = [0.2, 0.2, 0.2, 1.0] 100 | front_mat_diffuse = [0.5, 0.28, 0.38, 1.0] 101 | lmodel_ambient = [0.2, 0.2, 0.2, 1.0] 102 | lmodel_twoside = [GL_FALSE] 103 | 104 | position = [1.0, 0.0, 0.0, 0.0] 105 | position1 = [-1.0, 1.0, 1.0, 0.0] 106 | 107 | glEnable(GL_LIGHT0) 108 | glLightfv(GL_LIGHT0, GL_AMBIENT, ambient) 109 | glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuse) 110 | glLightfv(GL_LIGHT0, GL_POSITION, position) 111 | 112 | glLightModelfv(GL_LIGHT_MODEL_AMBIENT, lmodel_ambient) 113 | glLightModelfv(GL_LIGHT_MODEL_TWO_SIDE, lmodel_twoside) 114 | 115 | glEnable(GL_LIGHT1) 116 | glLightfv(GL_LIGHT1, GL_DIFFUSE, diffuse) 117 | glLightfv(GL_LIGHT1, GL_POSITION, position1) 118 | glDisable(GL_LIGHTING) 119 | glEnable(GL_COLOR_MATERIAL) 120 | 121 | glMaterialfv(GL_FRONT_AND_BACK, GL_SHININESS, front_mat_shininess) 122 | glMaterialfv(GL_FRONT_AND_BACK, GL_SPECULAR, front_mat_specular) 123 | glMaterialfv(GL_FRONT_AND_BACK, GL_DIFFUSE, front_mat_diffuse) 124 | 125 | glEnable(GL_DEPTH_TEST) 126 | glDepthFunc(GL_LEQUAL) 127 | glDisable(GL_CULL_FACE) 128 | glEnable(GL_NORMALIZE) 129 | 130 | glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE) 131 | glEnable(GL_COLOR_MATERIAL) 132 | 133 | if self.use_msaa: 134 | self._init_msaa() 135 | 136 | glEnable(GL_LIGHTING) 137 | 
glEnable(GL_DEPTH_TEST) 138 | glEnable(GL_LIGHTING) 139 | 140 | 141 | def resize_GL(self, w, h): 142 | self.window_size = (w, h) 143 | glViewport(0, 0, w, h) 144 | glMatrixMode(GL_PROJECTION) 145 | glLoadIdentity() 146 | gluPerspective(self.cam_cur.fov, float(w) / float(h), 0.1, 100.0) 147 | glMatrixMode(GL_MODELVIEW) 148 | 149 | def draw_GL(self, swap_buffer=True): 150 | 151 | if self.use_msaa: 152 | glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self.msaa_fbo) 153 | 154 | # Clear The Screen And The Depth Buffer 155 | glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) 156 | 157 | glMatrixMode(GL_MODELVIEW) 158 | glLoadIdentity() 159 | gluLookAt( 160 | *self.cam_cur.pos, *self.cam_cur.origin, *self.cam_cur.vup, 161 | ) 162 | 163 | self.render_callback() 164 | 165 | if self.overlay_callback is not None: 166 | glClear(GL_DEPTH_BUFFER_BIT) 167 | glPushAttrib(GL_DEPTH_BUFFER_BIT) # GL_DEPTH_BUFFER_BIT saves the depth-test enable state; GL_DEPTH_TEST is not a valid attribute mask 168 | glDisable(GL_DEPTH_TEST) 169 | glMatrixMode(GL_PROJECTION) 170 | glPushMatrix() 171 | glLoadIdentity() 172 | glOrtho( 173 | 0.0, self.window_size[0], self.window_size[1], 0.0, 0.0, 1.0 174 | ) 175 | 176 | glMatrixMode(GL_MODELVIEW) 177 | glPushMatrix() 178 | glLoadIdentity() 179 | self.overlay_callback() 180 | glPopMatrix() 181 | 182 | glMatrixMode(GL_PROJECTION) 183 | glPopMatrix() 184 | glPopAttrib() 185 | 186 | if self.use_msaa: 187 | self._post_process_msaa() 188 | 189 | if swap_buffer: 190 | glutSwapBuffers() 191 | 192 | def _init_msaa(self): 193 | num_samples = glGetIntegerv(GL_MAX_SAMPLES) 194 | 195 | print('num_samples_for_msaa:', num_samples) 196 | 197 | self.msaa_tex = glGenTextures(1) 198 | print('msaa_tex:', self.msaa_tex) 199 | glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, self.msaa_tex) 200 | glTexImage2DMultisample( 201 | GL_TEXTURE_2D_MULTISAMPLE, num_samples, GL_RGBA8, self.window_size[0], self.window_size[1], False) 202 | 203 | self.msaa_rbo_color = glGenRenderbuffers(1) 204 | print('msaa_rbo_color:', self.msaa_rbo_color) 205 | glBindRenderbuffer(GL_RENDERBUFFER, self.msaa_rbo_color) 206 | glRenderbufferStorageMultisample( 207 | GL_RENDERBUFFER, num_samples, GL_RGBA8, self.window_size[0], self.window_size[1]) 208 | 209 | self.msaa_rbo_depth = glGenRenderbuffers(1) 210 | print('msaa_rbo_depth:', self.msaa_rbo_depth) 211 | glBindRenderbuffer(GL_RENDERBUFFER, self.msaa_rbo_depth) 212 | glRenderbufferStorageMultisample( 213 | GL_RENDERBUFFER, num_samples, GL_DEPTH_COMPONENT, self.window_size[0], self.window_size[1]) 214 | 215 | self.msaa_fbo = glGenFramebuffers(1) 216 | glBindFramebuffer(GL_FRAMEBUFFER, self.msaa_fbo) 217 | 218 | glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D_MULTISAMPLE, self.msaa_tex, 0) # color attachment: the multisample texture 219 | glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, self.msaa_rbo_color) 220 | glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, self.msaa_rbo_depth) 221 | 222 | status = glCheckFramebufferStatus(GL_FRAMEBUFFER) 223 | print("frame_buffer_status:", status) 224 | 225 | glBindFramebuffer(GL_FRAMEBUFFER, self.msaa_fbo) 226 | 227 | iMultiSample = glGetIntegerv(GL_SAMPLE_BUFFERS) 228 | iNumSamples = glGetIntegerv(GL_SAMPLES) 229 | print("MSAA on, GL_SAMPLE_BUFFERS = %d, GL_SAMPLES = %d\n"%(iMultiSample, iNumSamples)) 230 | 231 | def _post_process_msaa(self): 232 | x, y, w, h = glGetIntegerv(GL_VIEWPORT) 233 | 234 | # Bind the multisampled FBO for reading 235 | glBindFramebuffer(GL_READ_FRAMEBUFFER, self.msaa_fbo) 236 | glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0) 237 | glDrawBuffer(GL_BACK) 238 | glBlitFramebuffer( 239 | x, y, w, h, 
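# source rectangle above, destination rectangle below: a 1:1 resolve of the multisampled FBO into the back buffer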
x, y, w, h, 240 | GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT, 241 | GL_NEAREST) 242 | 243 | # The function called whenever a key is pressed. 244 | # Note the use of Python tuples to pass in: (key, x, y) 245 | def key_pressed(self, *args): 246 | handled = self.keyboard_callback(args[0]) 247 | if handled: 248 | return 249 | if args[0] == b"\x1b": 250 | print("ESC pressed: exiting viewer.") 251 | glutDestroyWindow(self.window) 252 | sys.exit() 253 | 254 | def mouse_func(self, button, state, x, y): 255 | if state == 0: 256 | self.pressed_button = button 257 | else: 258 | self.pressed_button = None 259 | 260 | if state == 0: # Mouse pressed 261 | self.mouse_last_pos = np.array([x, y]) 262 | elif state == 1: 263 | self.mouse_last_pos = None 264 | 265 | if button == 3: # GLUT reports the scroll wheel as buttons 3 (up) and 4 (down) 266 | self.cam_cur.zoom(0.95) 267 | elif button == 4: 268 | self.cam_cur.zoom(1.05) 269 | 270 | def motion_func(self, x, y): 271 | newPos = np.array([x, y]) 272 | d = self.mouse_sensitivity * (newPos - self.mouse_last_pos) 273 | if self.pressed_button == 0: 274 | self.cam_cur.rotate(d[1], -d[0], 0) 275 | elif self.pressed_button == 2: 276 | self.cam_cur.translate(np.array([d[0], d[1], 0]), frame_local=True) 277 | self.mouse_last_pos = newPos 278 | 279 | def render_timer(self, timer): 280 | glutPostRedisplay() 281 | glutTimerFunc(10, self.render_timer, 1) 282 | 283 | def run(self): 284 | # Init glut 285 | glutInit(()) 286 | glutInitDisplayMode( 287 | GLUT_RGBA 288 | | GLUT_DOUBLE 289 | | GLUT_ALPHA 290 | | GLUT_DEPTH 291 | # | GLUT_MULTISAMPLE 292 | ) 293 | glutInitWindowSize(*self.window_size) 294 | glutInitWindowPosition(0, 0) 295 | 296 | self.window = glutCreateWindow(self.title) 297 | 298 | # Init functions 299 | # glutFullScreen() 300 | glutDisplayFunc(self.draw_GL) 301 | glutIdleFunc(self.idle_callback) 302 | glutReshapeFunc(self.resize_GL) 303 | glutKeyboardFunc(self.key_pressed) 304 | glutMouseFunc(self.mouse_func) 305 | glutMotionFunc(self.motion_func) 306 | glutTimerFunc(10, self.render_timer, 1) 307 | self._init_GL(*self.window_size) 308 | self.time_checker.begin() 309 | 310 | # Run 311 | glutMainLoop() 312 | 313 | def save_screen(self, dir, name, format="png", render=False, save_alpha_channel=False): 314 | image = self.get_screen(render, save_alpha_channel) 315 | image.save(os.path.join(dir, "%s.%s" % (name, format)), format=format) 316 | 317 | def get_screen(self, render=False, save_alpha_channel=False): 318 | if render: 319 | self.draw_GL() 320 | 321 | x, y, width, height = glGetIntegerv(GL_VIEWPORT) 322 | glPixelStorei(GL_PACK_ALIGNMENT, 1) 323 | if save_alpha_channel: 324 | data = glReadPixels(x, y, width, height, GL_RGBA, GL_UNSIGNED_BYTE) 325 | image = Image.frombytes("RGBA", (width, height), data) 326 | else: 327 | data = glReadPixels(x, y, width, height, GL_RGB, GL_UNSIGNED_BYTE) 328 | image = Image.frombytes("RGB", (width, height), data) 329 | image = image.transpose(Image.FLIP_TOP_BOTTOM) 330 | 331 | return image 332 | -------------------------------------------------------------------------------- /fairmotion_vis/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | 3 | import time 4 | 5 | 6 | class TimeChecker: 7 | """Utility class that provides playback time related functionality. 8 | TimeChecker starts running the clock when it is initialized. `get_time()` 9 | method can be used to query current playback time. 
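By default `get_time()` restarts the clock after each query; pass restart=False to read the elapsed time without resetting it.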
10 | 11 | Attributes: 12 | start: Stores UNIX time at the start of visualization 13 | data: List of [timestamp, message] pairs. Use `save()` to 14 | record messages and `get_data()` or `print_data()` to retrieve 15 | messages 16 | """ 17 | 18 | def __init__(self): 19 | self.start = 0.0 20 | self.data = [] 21 | self.begin() 22 | 23 | def begin(self): 24 | self.start = time.time() 25 | del self.data[:] 26 | 27 | def print_time(self, restart=True): 28 | print(f"Time elapsed: {self.get_time(restart)}") 29 | 30 | def get_time(self, restart=True): 31 | t = time.time() - self.start 32 | if restart: 33 | self.begin() 34 | return t 35 | 36 | def print_data(self): 37 | print(self.data) 38 | 39 | def get_data(self): 40 | return self.data 41 | 42 | def save(self, msg=" "): 43 | self.data.append([self.get_time(), msg]) 44 | -------------------------------------------------------------------------------- /imgs/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/snuvclab/Vive_Tracker/705c990d07f9aaccf34681a61f85a76974b4c34f/imgs/output.png -------------------------------------------------------------------------------- /imgs/requirehmd.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/snuvclab/Vive_Tracker/705c990d07f9aaccf34681a61f85a76974b4c34f/imgs/requirehmd.png -------------------------------------------------------------------------------- /imgs/steam_vr_location.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/snuvclab/Vive_Tracker/705c990d07f9aaccf34681a61f85a76974b4c34f/imgs/steam_vr_location.png -------------------------------------------------------------------------------- /imgs/steamvr_location.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/snuvclab/Vive_Tracker/705c990d07f9aaccf34681a61f85a76974b4c34f/imgs/steamvr_location.png -------------------------------------------------------------------------------- /imgs/vive_tracker.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/snuvclab/Vive_Tracker/705c990d07f9aaccf34681a61f85a76974b4c34f/imgs/vive_tracker.png -------------------------------------------------------------------------------- /render_argparse.py: -------------------------------------------------------------------------------- 1 | 2 | import argparse 3 | def get_render_args(): 4 | parser = argparse.ArgumentParser( 5 | description="Visualize BVH file with block body" 6 | ) 7 | parser.add_argument("--config", type=str, default="") 8 | 9 | ########################### render options ############################## 10 | parser.add_argument("--scale", type=float, default=1.0) 11 | parser.add_argument( 12 | "--thickness", type=float, default=1.0, 13 | help="Thickness (radius) of character body" 14 | ) 15 | parser.add_argument("--speed", type=float, default=1.0) 16 | parser.add_argument( 17 | "--axis-up", type=str, choices=["x", "y", "z"], default="y" 18 | ) 19 | parser.add_argument( 20 | "--axis-face", type=str, choices=["x", "y", "z"], default="z" 21 | ) 22 | parser.add_argument( 23 | "--camera-position", 24 | nargs="+", 25 | type=float, 26 | required=False, 27 | default=(10.0, 10.0, 10.0), 28 | ) 29 | parser.add_argument( 30 | "--camera-origin", 31 | nargs="+", 32 | type=float, 33 | required=False, 34 | default=(0.0, 0.0, 
0.0), 35 | ) 36 | parser.add_argument("--hide-origin", action="store_true") 37 | parser.add_argument("--render-overlay", action="store_true") 38 | return parser -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | openvr 2 | PyOpenGL-3.1.6-cp38-cp38-win_amd64.whl 3 | PyOpenGL_accelerate-3.1.6-cp38-cp38-win_amd64.whl 4 | scipy 5 | ipython -------------------------------------------------------------------------------- /run_tracker.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import time 3 | import argparse 4 | from track import ViveTrackerModule 5 | from IPython import embed 6 | from render_argparse import * 7 | from vive_visualizer import ViveTrackerViewer 8 | from fairmotion_vis import camera 9 | from fairmotion_ops import conversions, math as fairmotion_math 10 | import numpy as np 11 | 12 | def parse_arguments(): 13 | parser = argparse.ArgumentParser(description="Vive Tracker Pose Data Display") 14 | parser.add_argument("-f", "--frequency", type=float, default=30.0, 15 | help="Frequency of tracker data updates (in Hz). Default: 30 Hz") 16 | return parser.parse_args() 17 | 18 | def print_tracker_data(tracker, interval): 19 | # Continuously print tracker pose data at the specified interval 20 | while True: 21 | start_time = time.time() 22 | 23 | # Get pose data for the tracker device and format as a string 24 | pose_data = " ".join(["%.4f" % val for val in (tracker.get_pose_euler() or [])]) # get_pose_euler() returns None while the pose is invalid 25 | 26 | # Print pose data in the same line 27 | print("\r" + pose_data, end="") 28 | 29 | # Calculate sleep time to maintain the desired interval 30 | sleep_time = interval - (time.time() - start_time) 31 | 32 | # Sleep if necessary 33 | if sleep_time > 0: 34 | time.sleep(sleep_time) 35 | 36 | class ViveTrackerUpdater(): 37 | def __init__(self): 38 | self.vive_tracker_module = ViveTrackerModule() 39 | self.vive_tracker_module.print_discovered_objects() 40 | 41 | self.fps = 30 42 | self.device_key = "tracker" 43 | self.tracking_devices = self.vive_tracker_module.return_selected_devices(self.device_key) 44 | self.tracking_result = [] 45 | 46 | # TODO connect this to config (arb. 
set) 47 | self.base_station_origin = conversions.p2T(np.array([3, -2.8, -3.0])) 48 | self.origin_inv = fairmotion_math.invertT(self.base_station_origin) 49 | 50 | # TODO add fps (not sleeping) 51 | def update(self, verbose=False): # verbose: print each pose; must not shadow the built-in print used below 52 | self.tracking_result = [self.origin_inv @ self.tracking_devices[key].get_T() for key in self.tracking_devices] 53 | if verbose: 54 | for r in self.tracking_result: 55 | # embed() 56 | print("\r" + str(r), end="") # r is a 4x4 numpy transform 57 | 58 | def main(args): 59 | # Parse command line arguments 60 | # args = parse_arguments() 61 | 62 | # # Calculate interval based on the specified frequency 63 | # interval = 1 / args.frequency 64 | 65 | # # Initialize Vive Tracker and print discovered objects 66 | # v_tracker = ViveTrackerModule() 67 | # v_tracker.print_discovered_objects() 68 | 69 | # # Print tracker data 70 | # tracker_1 = v_tracker.devices["tracker_1"] 71 | # print_tracker_data(tracker_1, interval) 72 | 73 | cam = camera.Camera( 74 | pos=np.array(args.camera_position), 75 | origin=np.array(args.camera_origin), 76 | vup=np.array([0,1,0]), 77 | fov=45.0, 78 | ) 79 | viewer = ViveTrackerViewer( 80 | v_track_updater=ViveTrackerUpdater(), 81 | play_speed=args.speed, 82 | scale=args.scale, 83 | thickness=args.thickness, 84 | render_overlay=args.render_overlay, 85 | hide_origin=args.hide_origin, 86 | title="Vive Viewer", 87 | cam=cam, 88 | size=(1920, 1280), 89 | ) 90 | viewer.run() 91 | 92 | if __name__ == "__main__": 93 | args = get_render_args().parse_args() 94 | main(args) 95 | -------------------------------------------------------------------------------- /track.py: -------------------------------------------------------------------------------- 1 | import time 2 | import sys 3 | import openvr 4 | import math 5 | import json 6 | from IPython import embed 7 | from fairmotion_utils import constants 8 | import numpy as np 9 | 10 | class ViveTrackerModule(): 11 | def __init__(self, configfile_path=None): 12 | self.vr = openvr.init(openvr.VRApplication_Other) 13 | self.vrsystem = openvr.VRSystem() 14 | self.object_names = {"Tracking Reference":[],"HMD":[],"Controller":[],"Tracker":[]} 15 | self.devices = {} 16 | self.device_index_map = {} 17 | poses = self.vr.getDeviceToAbsoluteTrackingPose(openvr.TrackingUniverseStanding, 0, 18 | openvr.k_unMaxTrackedDeviceCount) 19 | for i in range(openvr.k_unMaxTrackedDeviceCount): 20 | if poses[i].bDeviceIsConnected: 21 | self.add_tracked_device(i) 22 | 23 | def __del__(self): 24 | openvr.shutdown() 25 | 26 | def return_selected_devices(self, device_key=""): 27 | selected_devices = {} 28 | for key in self.devices: 29 | if device_key in key: 30 | selected_devices[key] = self.devices[key] 31 | return selected_devices 32 | 33 | def get_pose(self): 34 | return get_pose(self.vr) 35 | 36 | def poll_vr_events(self): 37 | event = openvr.VREvent_t() 38 | while self.vrsystem.pollNextEvent(event): 39 | if event.eventType == openvr.VREvent_TrackedDeviceActivated: 40 | self.add_tracked_device(event.trackedDeviceIndex) 41 | elif event.eventType == openvr.VREvent_TrackedDeviceDeactivated: 42 | if event.trackedDeviceIndex in self.device_index_map: 43 | self.remove_tracked_device(event.trackedDeviceIndex) 44 | 45 | def add_tracked_device(self, tracked_device_index): 46 | i = tracked_device_index 47 | device_class = self.vr.getTrackedDeviceClass(i) 48 | if (device_class == openvr.TrackedDeviceClass_Controller): 49 | device_name = "controller_"+str(len(self.object_names["Controller"])+1) 50 | self.object_names["Controller"].append(device_name) 51 | 
self.devices[device_name] = vr_tracked_device(self.vr,i,"Controller") 52 | self.device_index_map[i] = device_name 53 | elif (device_class == openvr.TrackedDeviceClass_HMD): 54 | device_name = "hmd_"+str(len(self.object_names["HMD"])+1) 55 | self.object_names["HMD"].append(device_name) 56 | self.devices[device_name] = vr_tracked_device(self.vr,i,"HMD") 57 | self.device_index_map[i] = device_name 58 | elif (device_class == openvr.TrackedDeviceClass_GenericTracker): 59 | device_name = "tracker_"+str(len(self.object_names["Tracker"])+1) 60 | self.object_names["Tracker"].append(device_name) 61 | self.devices[device_name] = vr_tracked_device(self.vr,i,"Tracker") 62 | self.device_index_map[i] = device_name 63 | elif (device_class == openvr.TrackedDeviceClass_TrackingReference): 64 | device_name = "tracking_reference_"+str(len(self.object_names["Tracking Reference"])+1) 65 | self.object_names["Tracking Reference"].append(device_name) 66 | self.devices[device_name] = vr_tracking_reference(self.vr,i,"Tracking Reference") 67 | self.device_index_map[i] = device_name 68 | 69 | def remove_tracked_device(self, tracked_device_index): 70 | if tracked_device_index in self.device_index_map: 71 | device_name = self.device_index_map[tracked_device_index] 72 | self.object_names[self.devices[device_name].device_class].remove(device_name) 73 | del self.device_index_map[tracked_device_index] 74 | del self.devices[device_name] 75 | else: 76 | raise Exception("Tracked device index {} not valid. Not removing.".format(tracked_device_index)) 77 | 78 | def rename_device(self,old_device_name,new_device_name): 79 | self.devices[new_device_name] = self.devices.pop(old_device_name) 80 | for i in range(len(self.object_names[self.devices[new_device_name].device_class])): 81 | if self.object_names[self.devices[new_device_name].device_class][i] == old_device_name: 82 | self.object_names[self.devices[new_device_name].device_class][i] = new_device_name 83 | 84 | def print_discovered_objects(self): 85 | for device_type in self.object_names: 86 | plural = device_type 87 | if len(self.object_names[device_type])!=1: 88 | plural+="s" 89 | print("Found "+str(len(self.object_names[device_type]))+" "+plural) 90 | for device in self.object_names[device_type]: 91 | if device_type == "Tracking Reference": 92 | print(" "+device+" ("+self.devices[device].get_serial()+ 93 | ", Mode "+self.devices[device].get_mode()+ 94 | ", "+self.devices[device].get_model()+ 95 | ")") 96 | else: 97 | print(" "+device+" ("+self.devices[device].get_serial()+ 98 | ", "+self.devices[device].get_model()+")") 99 | 100 | def update_text(txt): 101 | 102 | """Update the text in the same line on the console. 103 | 104 | Args: 105 | txt (str): The text to display. 106 | """ 107 | sys.stdout.write('\r' + txt) 108 | sys.stdout.flush() 109 | 110 | def convert_to_euler(pose_mat): 111 | """Convert a 3x4 position/rotation matrix to an x, y, z location and the corresponding Euler angles (in degrees). 112 | 113 | Args: 114 | pose_mat (list): A 3x4 position/rotation matrix. 115 | 116 | Returns: 117 | list: A list containing x, y, z, yaw, pitch, and roll values. 
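Angles are in degrees.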
118 | """ 119 | yaw = 180 / math.pi * math.atan2(pose_mat[1][0], pose_mat[0][0]) 120 | pitch = 180 / math.pi * math.atan2(-pose_mat[2][0], math.sqrt(pose_mat[2][1] ** 2 + pose_mat[2][2] ** 2)) # standard Tait-Bryan pitch, consistent with pose_sample_buffer.append below 121 | roll = 180 / math.pi * math.atan2(pose_mat[2][1], pose_mat[2][2]) 122 | x = pose_mat[0][3] 123 | y = pose_mat[1][3] 124 | z = pose_mat[2][3] 125 | return [x, y, z, yaw, pitch, roll] 126 | 127 | def convert_to_quaternion(pose_mat): 128 | 129 | """Convert a 3x4 position/rotation matrix to an x, y, z location and the corresponding quaternion. 130 | 131 | Args: 132 | pose_mat (list): A 3x4 position/rotation matrix. 133 | 134 | Returns: 135 | list: A list containing x, y, z, r_w, r_x, r_y, and r_z values. 136 | """ 137 | # Calculate quaternion values 138 | r_w = math.sqrt(abs(1 + pose_mat[0][0] + pose_mat[1][1] + pose_mat[2][2])) / 2 139 | r_x = (pose_mat[2][1] - pose_mat[1][2]) / (4 * r_w) 140 | r_y = (pose_mat[0][2] - pose_mat[2][0]) / (4 * r_w) 141 | r_z = (pose_mat[1][0] - pose_mat[0][1]) / (4 * r_w) 142 | 143 | # Get position values 144 | x = pose_mat[0][3] 145 | y = pose_mat[1][3] 146 | z = pose_mat[2][3] 147 | 148 | return [x, y, z, r_w, r_x, r_y, r_z] 149 | 150 | # Define a class to make it easy to append pose matrices and convert to both Euler and Quaternion for plotting 151 | class pose_sample_buffer(): 152 | def __init__(self): 153 | self.i = 0 154 | self.index = [] 155 | self.time = [] 156 | self.x = [] 157 | self.y = [] 158 | self.z = [] 159 | self.yaw = [] 160 | self.pitch = [] 161 | self.roll = [] 162 | self.r_w = [] 163 | self.r_x = [] 164 | self.r_y = [] 165 | self.r_z = [] 166 | 167 | def append(self,pose_mat,t): 168 | self.time.append(t) 169 | self.x.append(pose_mat[0][3]) 170 | self.y.append(pose_mat[1][3]) 171 | self.z.append(pose_mat[2][3]) 172 | self.yaw.append(180 / math.pi * math.atan2(pose_mat[1][0], pose_mat[0][0])) # atan2 avoids division by zero and keeps the quadrant 173 | self.pitch.append(180 / math.pi * math.atan(-1 * pose_mat[2][0] / math.sqrt(pow(pose_mat[2][1], 2) + math.pow(pose_mat[2][2], 2)))) 174 | self.roll.append(180 / math.pi * math.atan2(pose_mat[2][1], pose_mat[2][2])) 175 | r_w = math.sqrt(abs(1+pose_mat[0][0]+pose_mat[1][1]+pose_mat[2][2]))/2 176 | self.r_w.append(r_w) 177 | self.r_x.append((pose_mat[2][1]-pose_mat[1][2])/(4*r_w)) 178 | self.r_y.append((pose_mat[0][2]-pose_mat[2][0])/(4*r_w)) 179 | self.r_z.append((pose_mat[1][0]-pose_mat[0][1])/(4*r_w)) 180 | 181 | def get_pose(vr_obj): 182 | 183 | """Get the pose of a tracked device in the virtual reality system. 184 | 185 | Args: 186 | vr_obj (openvr object): An instance of the openvr object. 187 | 188 | Returns: 189 | list: A list of poses for each tracked device in the system. 
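The list is indexed by OpenVR device index, so a given device's entry is at poses[device.index].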
190 | """ 191 | return vr_obj.getDeviceToAbsoluteTrackingPose(openvr.TrackingUniverseStanding, 0, openvr.k_unMaxTrackedDeviceCount) 192 | 193 | 194 | class vr_tracked_device(): 195 | 196 | def __init__(self, vr_obj, index, device_class): 197 | self.device_class = device_class 198 | self.index = index 199 | self.vr = vr_obj 200 | self.T = constants.eye_T() 201 | 202 | 203 | def get_serial(self): 204 | """Get the serial number of the tracked device.""" 205 | return self.vr.getStringTrackedDeviceProperty(self.index, openvr.Prop_SerialNumber_String) 206 | 207 | def get_model(self): 208 | """Get the model number of the tracked device.""" 209 | return self.vr.getStringTrackedDeviceProperty(self.index, openvr.Prop_ModelNumber_String) 210 | 211 | def get_battery_percent(self): 212 | """Get the battery percentage of the tracked device.""" 213 | return self.vr.getFloatTrackedDeviceProperty(self.index, openvr.Prop_DeviceBatteryPercentage_Float) 214 | 215 | def is_charging(self): 216 | """Check if the tracked device is charging.""" 217 | return self.vr.getBoolTrackedDeviceProperty(self.index, openvr.Prop_DeviceIsCharging_Bool) 218 | 219 | 220 | def sample(self, num_samples, sample_rate): 221 | """Sample the pose of the tracked device. 222 | 223 | Args: 224 | num_samples (int): Number of samples to collect. 225 | sample_rate (float): Rate at which to collect samples. 226 | 227 | Returns: 228 | PoseSampleBuffer: A buffer containing the collected pose samples. 229 | """ 230 | interval = 1 / sample_rate 231 | rtn = pose_sample_buffer() 232 | sample_start = time.time() 233 | for i in range(num_samples): 234 | start = time.time() 235 | pose = get_pose(self.vr) 236 | rtn.append(pose[self.index].mDeviceToAbsoluteTracking, time.time() - sample_start) 237 | sleep_time = interval - (time.time() - start) 238 | if sleep_time > 0: 239 | time.sleep(sleep_time) 240 | return rtn 241 | 242 | def get_T(self, pose=None): # returns a 4x4 homogeneous transform; keeps the last valid pose while tracking is lost 243 | pose_mat = self.get_pose_matrix() 244 | if pose_mat is not None: 245 | np_pose_mat = np.array(pose_mat)['m'] 246 | self.T[:3, :] = np_pose_mat 247 | return self.T 248 | 249 | def get_pose_euler(self, pose=None): 250 | """Get the pose of the tracked device in Euler angles. 251 | 252 | Args: 253 | pose (list, optional): The current pose of the device. If not provided, get_pose is called. 254 | 255 | Returns: 256 | tuple: Euler angles representing the pose, or None if the pose is not valid. 257 | """ 258 | if pose is None: 259 | pose = get_pose(self.vr) 260 | if pose[self.index].bPoseIsValid: 261 | return convert_to_euler(pose[self.index].mDeviceToAbsoluteTracking) 262 | else: 263 | return None 264 | 265 | def get_pose_matrix(self, pose=None): 266 | """Get the pose matrix of the tracked device. 267 | 268 | Args: 269 | pose (list, optional): The current pose of the device. If not provided, get_pose is called. 270 | 271 | Returns: 272 | list: The pose matrix of the device, or None if the pose is not valid. 273 | """ 274 | if pose is None: 275 | pose = get_pose(self.vr) 276 | if pose[self.index].bPoseIsValid: 277 | return pose[self.index].mDeviceToAbsoluteTracking 278 | else: 279 | return None 280 | 281 | def get_velocity(self, pose=None): 282 | """Get the linear velocity of the tracked device. 283 | 284 | Args: 285 | pose (list, optional): The current pose of the device. If not provided, get_pose is called. 286 | 287 | Returns: 288 | tuple: The linear velocity of the device, or None if the pose is not valid. 
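OpenVR reports linear velocity in meters per second.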
289 | """ 290 | if pose is None: 291 | pose = get_pose(self.vr) 292 | if pose[self.index].bPoseIsValid: 293 | return pose[self.index].vVelocity 294 | else: 295 | return None 296 | 297 | def get_angular_velocity(self, pose=None): 298 | # Get the angular velocity of the tracked device if its pose is valid. 299 | if pose is None: 300 | pose = get_pose(self.vr) 301 | if pose[self.index].bPoseIsValid: 302 | return pose[self.index].vAngularVelocity 303 | else: 304 | return None 305 | 306 | def get_pose_quaternion(self, pose=None): 307 | # Get the pose of the tracked device in the form of a quaternion if its pose is valid. 308 | if pose is None: 309 | pose = get_pose(self.vr) 310 | if pose[self.index].bPoseIsValid: 311 | return convert_to_quaternion(pose[self.index].mDeviceToAbsoluteTracking) 312 | else: 313 | return None 314 | 315 | def controller_state_to_dict(self, pControllerState): 316 | # Convert controller state data to a dictionary for easier use. 317 | d = {} 318 | # Fill dictionary with controller state data 319 | ... 320 | return d 321 | 322 | def get_controller_inputs(self): 323 | # Get the current state of the controller inputs. 324 | result, state = self.vr.getControllerState(self.index) 325 | return self.controller_state_to_dict(state) 326 | 327 | def trigger_haptic_pulse(self, duration_micros=1000, axis_id=0): 328 | # Trigger a haptic pulse on the controller. 329 | self.vr.triggerHapticPulse(self.index, axis_id, duration_micros) 330 | 331 | class vr_tracking_reference(vr_tracked_device): 332 | def get_mode(self): 333 | # Get the mode label of the tracking reference. 334 | mode = self.vr.getStringTrackedDeviceProperty(self.index, openvr.Prop_ModeLabel_String); return mode.decode('utf-8').upper() if isinstance(mode, bytes) else mode.upper() # pyopenvr returns bytes or str depending on version 335 | 336 | def sample(self,num_samples,sample_rate): 337 | # Warn the user that sampling a tracking reference is not useful, as they do not move. 338 | print("Tracking references are static; sampling them is not useful.") 339 | 340 | -------------------------------------------------------------------------------- /vive_visualizer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
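# Live visualizer for Vive tracker poses, built on fairmotion_vis.glut_viewer.
# The usual entry point is run_tracker.py, which constructs a ViveTrackerUpdater
# and hands it to ViveTrackerViewer below (press S to start tracking, space to
# pause/resume).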
2 | 3 | import argparse 4 | import numpy as np 5 | import os 6 | from OpenGL.GL import * 7 | from OpenGL.GLU import * 8 | from OpenGL.GLUT import * 9 | from fairmotion_vis import camera, gl_render, glut_viewer 10 | from fairmotion_utils import constants 11 | from fairmotion_ops import conversions 12 | # from fairmotion.data import bvh, asfamc 13 | # from fairmotion.ops import conversions, math, motion as motion_ops 14 | # from fairmotion.utils import utils 15 | 16 | from IPython import embed 17 | # import yaml 18 | 19 | class ViveTrackerViewer(glut_viewer.Viewer): 20 | 21 | def __init__( 22 | self, 23 | v_track_updater, 24 | # config, 25 | play_speed=1.0, 26 | scale=1.0, 27 | thickness=1.0, 28 | render_overlay=False, 29 | hide_origin=False, 30 | **kwargs, 31 | ): 32 | # Load vive tracker updater 33 | self.vive_updater = v_track_updater 34 | # vis related info 35 | self.update_tracker = False 36 | self.cur_time = 0.0 37 | self.play_speed = play_speed 38 | super().__init__(**kwargs) 39 | 40 | def keyboard_callback(self, key): 41 | if key == b"s": 42 | self.update_tracker = True 43 | self.cur_time = 0.0 44 | self.time_checker.begin() 45 | elif key == b" ": 46 | self.update_tracker = not self.update_tracker 47 | else: 48 | return False 49 | return True 50 | 51 | def render_tracker(self): 52 | for t in self.vive_updater.tracking_result: 53 | gl_render.render_sphere(t, r=0.01) 54 | gl_render.render_transform(t, scale=0.07, use_arrow=True) 55 | 56 | def render_callback(self): 57 | if self.update_tracker: 58 | self.vive_updater.update() 59 | self.render_tracker() 60 | 61 | gl_render.render_ground( 62 | size=[100, 100], 63 | color=[0.8, 0.8, 0.8], 64 | axis="y", 65 | origin=True, 66 | use_arrow=True, 67 | fillIn=True 68 | ) 69 | 70 | def idle_callback(self): 71 | if not self.update_tracker: 72 | return 73 | time_elapsed = self.time_checker.get_time(restart=False) 74 | self.cur_time += self.play_speed * time_elapsed 75 | self.time_checker.begin() 76 | 77 | def overlay_callback(self): 78 | # if self.render_overlay: 79 | w, h = self.window_size 80 | gl_render.render_text( 81 | "Press S to start tracking, space to pause/resume", 82 | pos=[0.05 * w, 0.05 * h], 83 | font=GLUT_BITMAP_TIMES_ROMAN_24, 84 | ) 85 | 86 | gl_render.render_text( 87 | f"Time: {self.cur_time:.2f}", 88 | pos=[0.05 * w, 0.95 * h], 89 | font=GLUT_BITMAP_TIMES_ROMAN_24, 90 | ) 91 | 92 | 93 | def main(args): 94 | 95 | cam = camera.Camera( 96 | pos=np.array(args.camera_position), 97 | origin=np.array(args.camera_origin), 98 | vup=np.array([0,1,0]), 99 | fov=45.0, 100 | ) 101 | viewer = ViveTrackerViewer( 102 | # NOTE: a v_track_updater (e.g. ViveTrackerUpdater from run_tracker.py) must be supplied here; the config option is disabled 103 | play_speed=args.speed, 104 | scale=args.scale, 105 | thickness=args.thickness, 106 | render_overlay=args.render_overlay, 107 | hide_origin=args.hide_origin, 108 | title="Vive Tracker Viewer", 109 | cam=cam, 110 | size=(1920, 1280), 111 | ) 112 | viewer.run() 113 | 114 | 115 | # if __name__ == "__main__": 116 | # parser = argparse.ArgumentParser( 117 | # description="Visualize BVH file with block body" 118 | # ) 119 | # parser.add_argument("--config", type=str, default="") 120 | 121 | # # ########################### render options ############################## 122 | # # parser.add_argument("--scale", type=float, default=1.0) 123 | # # parser.add_argument( 124 | # # "--thickness", type=float, default=1.0, 125 | # # help="Thickness (radius) of character body" 126 | # # ) 127 | # # parser.add_argument("--speed", type=float, default=1.0) 128 | # # parser.add_argument( 129 | # # "--axis-up", type=str, choices=["x", "y", "z"], default="y" 130 | # 
# ) 131 | # # parser.add_argument( 132 | # # "--axis-face", type=str, choices=["x", "y", "z"], default="z" 133 | # # ) 134 | # # parser.add_argument( 135 | # # "--camera-position", 136 | # # nargs="+", 137 | # # type=float, 138 | # # required=False, 139 | # # default=(10.0, 10.0, 10.0), 140 | # # ) 141 | # # parser.add_argument( 142 | # # "--camera-origin", 143 | # # nargs="+", 144 | # # type=float, 145 | # # required=False, 146 | # # default=(0.0, 0.0, 0.0), 147 | # # ) 148 | # # parser.add_argument("--hide-origin", action="store_true") 149 | # # parser.add_argument("--render-overlay", action="store_true") 150 | # # args = parser.parse_args() 151 | # # assert len(args.camera_position) == 3 and len(args.camera_origin) == 3, ( 152 | # # "Provide x, y and z coordinates for camera position/origin like " 153 | # # "--camera-position x y z" 154 | # # ) 155 | # main(args) 156 | --------------------------------------------------------------------------------
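A minimal end-to-end usage sketch, assuming SteamVR is running with requireHmd disabled and at least one tracker is paired; it uses only the track.py API shown above:

import time
from track import ViveTrackerModule

vive = ViveTrackerModule()                          # connects to SteamVR and enumerates devices
vive.print_discovered_objects()
trackers = vive.return_selected_devices("tracker")  # e.g. {"tracker_1": ...}
while True:
    for name, device in trackers.items():
        pose = device.get_pose_euler()              # None while tracking is lost
        if pose is not None:
            print(name, " ".join("%.4f" % v for v in pose))
    time.sleep(1 / 30.0)                            # ~30 Hz, matching run_tracker's default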