└── Body Language Decoder Tutorial.ipynb /Body Language Decoder Tutorial.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# 0. Install and Import Dependencies" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": null, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "!pip install mediapipe opencv-python pandas scikit-learn" 17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": null, 22 | "metadata": {}, 23 | "outputs": [], 24 | "source": [ 25 | "import mediapipe as mp # Import mediapipe\n", 26 | "import cv2 # Import opencv" 27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": null, 32 | "metadata": {}, 33 | "outputs": [], 34 | "source": [ 35 | "mp_drawing = mp.solutions.drawing_utils # Drawing helpers\n", 36 | "mp_holistic = mp.solutions.holistic # Mediapipe Solutions" 37 | ] 38 | }, 39 | { 40 | "cell_type": "markdown", 41 | "metadata": {}, 42 | "source": [ 43 | "# 1. Make Some Detections" 44 | ] 45 | }, 46 | { 47 | "cell_type": "code", 48 | "execution_count": null, 49 | "metadata": {}, 50 | "outputs": [], 51 | "source": [ 52 | "cap = cv2.VideoCapture(0)\n", 53 | "# Initiate holistic model\n", 54 | "with mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.5) as holistic:\n", 55 | " \n", 56 | " while cap.isOpened():\n", 57 | " ret, frame = cap.read()\n", 58 | " \n", 59 | " # Recolor Feed\n", 60 | " image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n", 61 | " image.flags.writeable = False \n", 62 | " \n", 63 | " # Make Detections\n", 64 | " results = holistic.process(image)\n", 65 | " # print(results.face_landmarks)\n", 66 | " \n", 67 | " # face_landmarks, pose_landmarks, left_hand_landmarks, right_hand_landmarks\n", 68 | " \n", 69 | " # Recolor image back to BGR for rendering\n", 70 | " image.flags.writeable = True \n", 71 | " image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n", 72 | " \n", 73 | " # 1. Draw face landmarks\n", 74 | " mp_drawing.draw_landmarks(image, results.face_landmarks, mp_holistic.FACE_CONNECTIONS, \n", 75 | " mp_drawing.DrawingSpec(color=(80,110,10), thickness=1, circle_radius=1),\n", 76 | " mp_drawing.DrawingSpec(color=(80,256,121), thickness=1, circle_radius=1)\n", 77 | " )\n", 78 | " \n", 79 | " # 2. Right hand\n", 80 | " mp_drawing.draw_landmarks(image, results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS, \n", 81 | " mp_drawing.DrawingSpec(color=(80,22,10), thickness=2, circle_radius=4),\n", 82 | " mp_drawing.DrawingSpec(color=(80,44,121), thickness=2, circle_radius=2)\n", 83 | " )\n", 84 | "\n", 85 | " # 3. Left Hand\n", 86 | " mp_drawing.draw_landmarks(image, results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS, \n", 87 | " mp_drawing.DrawingSpec(color=(121,22,76), thickness=2, circle_radius=4),\n", 88 | " mp_drawing.DrawingSpec(color=(121,44,250), thickness=2, circle_radius=2)\n", 89 | " )\n", 90 | "\n", 91 | " # 4. Pose Detections\n", 92 | " mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS, \n", 93 | " mp_drawing.DrawingSpec(color=(245,117,66), thickness=2, circle_radius=4),\n", 94 | " mp_drawing.DrawingSpec(color=(245,66,230), thickness=2, circle_radius=2)\n", 95 | " )\n", 96 | " \n", 97 | " cv2.imshow('Raw Webcam Feed', image)\n", 98 | "\n", 99 | " if cv2.waitKey(10) & 0xFF == ord('q'):\n", 100 | " break\n", 101 | "\n", 102 | "cap.release()\n", 103 | "cv2.destroyAllWindows()" 104 | ] 105 | }, 106 | { 107 | "cell_type": "code", 108 | "execution_count": null, 109 | "metadata": {}, 110 | "outputs": [], 111 | "source": [ 112 | "results.face_landmarks.landmark[0].visibility" 113 | ] 114 | }, 115 | { 116 | "cell_type": "markdown", 117 | "metadata": {}, 118 | "source": [ 119 | "# 2. Capture Landmarks & Export to CSV\n", 120 | "\n", 121 | "" 122 | ] 123 | }, 124 | { 125 | "cell_type": "code", 126 | "execution_count": null, 127 | "metadata": {}, 128 | "outputs": [], 129 | "source": [ 130 | "import csv\n", 131 | "import os\n", 132 | "import numpy as np" 133 | ] 134 | }, 135 | { 136 | "cell_type": "code", 137 | "execution_count": null, 138 | "metadata": {}, 139 | "outputs": [], 140 | "source": [ 141 | "num_coords = len(results.pose_landmarks.landmark)+len(results.face_landmarks.landmark)\n", 142 | "num_coords" 143 | ] 144 | }, 145 | { 146 | "cell_type": "code", 147 | "execution_count": null, 148 | "metadata": {}, 149 | "outputs": [], 150 | "source": [ 151 | "landmarks = ['class']\n", 152 | "for val in range(1, num_coords+1):\n", 153 | " landmarks += ['x{}'.format(val), 'y{}'.format(val), 'z{}'.format(val), 'v{}'.format(val)]" 154 | ] 155 | }, 156 | { 157 | "cell_type": "code", 158 | "execution_count": null, 159 | "metadata": {}, 160 | "outputs": [], 161 | "source": [ 162 | "landmarks" 163 | ] 164 | }, 165 | { 166 | "cell_type": "code", 167 | "execution_count": null, 168 | "metadata": {}, 169 | "outputs": [], 170 | "source": [ 171 | "with open('coords.csv', mode='w', newline='') as f:\n", 172 | " csv_writer = csv.writer(f, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n", 173 | " csv_writer.writerow(landmarks)" 174 | ] 175 | }, 176 | { 177 | "cell_type": "code", 178 | "execution_count": null, 179 | "metadata": {}, 180 | "outputs": [], 181 | "source": [ 182 | "class_name = \"Wakanda Forever\"" 183 | ] 184 | }, 185 | { 186 | "cell_type": "code", 187 | "execution_count": null, 188 | "metadata": {}, 189 | "outputs": [], 190 | "source": [ 191 | "cap = cv2.VideoCapture(0)\n", 192 | "# Initiate holistic model\n", 193 | "with mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.5) as holistic:\n", 194 | " \n", 195 | " while cap.isOpened():\n", 196 | " ret, frame = cap.read()\n", 197 | " \n", 198 | " # Recolor Feed\n", 199 | " image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n", 200 | " image.flags.writeable = False \n", 201 | " \n", 202 | " # Make Detections\n", 203 | " results = holistic.process(image)\n", 204 | " # print(results.face_landmarks)\n", 205 | " \n", 206 | " # face_landmarks, pose_landmarks, left_hand_landmarks, right_hand_landmarks\n", 207 | " \n", 208 | " # Recolor image back to BGR for rendering\n", 209 | " image.flags.writeable = True \n", 210 | " image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n", 211 | " \n", 212 | " # 1. Draw face landmarks\n", 213 | " mp_drawing.draw_landmarks(image, results.face_landmarks, mp_holistic.FACE_CONNECTIONS, \n", 214 | " mp_drawing.DrawingSpec(color=(80,110,10), thickness=1, circle_radius=1),\n", 215 | " mp_drawing.DrawingSpec(color=(80,256,121), thickness=1, circle_radius=1)\n", 216 | " )\n", 217 | " \n", 218 | " # 2. Right hand\n", 219 | " mp_drawing.draw_landmarks(image, results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS, \n", 220 | " mp_drawing.DrawingSpec(color=(80,22,10), thickness=2, circle_radius=4),\n", 221 | " mp_drawing.DrawingSpec(color=(80,44,121), thickness=2, circle_radius=2)\n", 222 | " )\n", 223 | "\n", 224 | " # 3. Left Hand\n", 225 | " mp_drawing.draw_landmarks(image, results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS, \n", 226 | " mp_drawing.DrawingSpec(color=(121,22,76), thickness=2, circle_radius=4),\n", 227 | " mp_drawing.DrawingSpec(color=(121,44,250), thickness=2, circle_radius=2)\n", 228 | " )\n", 229 | "\n", 230 | " # 4. Pose Detections\n", 231 | " mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS, \n", 232 | " mp_drawing.DrawingSpec(color=(245,117,66), thickness=2, circle_radius=4),\n", 233 | " mp_drawing.DrawingSpec(color=(245,66,230), thickness=2, circle_radius=2)\n", 234 | " )\n", 235 | " # Export coordinates\n", 236 | " try:\n", 237 | " # Extract Pose landmarks\n", 238 | " pose = results.pose_landmarks.landmark\n", 239 | " pose_row = list(np.array([[landmark.x, landmark.y, landmark.z, landmark.visibility] for landmark in pose]).flatten())\n", 240 | " \n", 241 | " # Extract Face landmarks\n", 242 | " face = results.face_landmarks.landmark\n", 243 | " face_row = list(np.array([[landmark.x, landmark.y, landmark.z, landmark.visibility] for landmark in face]).flatten())\n", 244 | " \n", 245 | " # Concate rows\n", 246 | " row = pose_row+face_row\n", 247 | " \n", 248 | " # Append class name \n", 249 | " row.insert(0, class_name)\n", 250 | " \n", 251 | " # Export to CSV\n", 252 | " with open('coords.csv', mode='a', newline='') as f:\n", 253 | " csv_writer = csv.writer(f, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n", 254 | " csv_writer.writerow(row) \n", 255 | " \n", 256 | " except:\n", 257 | " pass\n", 258 | " \n", 259 | " cv2.imshow('Raw Webcam Feed', image)\n", 260 | "\n", 261 | " if cv2.waitKey(10) & 0xFF == ord('q'):\n", 262 | " break\n", 263 | "\n", 264 | "cap.release()\n", 265 | "cv2.destroyAllWindows()" 266 | ] 267 | }, 268 | { 269 | "cell_type": "markdown", 270 | "metadata": {}, 271 | "source": [ 272 | "# 3. Train Custom Model Using Scikit Learn" 273 | ] 274 | }, 275 | { 276 | "cell_type": "markdown", 277 | "metadata": {}, 278 | "source": [ 279 | "## 3.1 Read in Collected Data and Process" 280 | ] 281 | }, 282 | { 283 | "cell_type": "code", 284 | "execution_count": null, 285 | "metadata": {}, 286 | "outputs": [], 287 | "source": [ 288 | "import pandas as pd\n", 289 | "from sklearn.model_selection import train_test_split" 290 | ] 291 | }, 292 | { 293 | "cell_type": "code", 294 | "execution_count": null, 295 | "metadata": {}, 296 | "outputs": [], 297 | "source": [ 298 | "df = pd.read_csv('coords.csv')" 299 | ] 300 | }, 301 | { 302 | "cell_type": "code", 303 | "execution_count": null, 304 | "metadata": {}, 305 | "outputs": [], 306 | "source": [ 307 | "df.head()" 308 | ] 309 | }, 310 | { 311 | "cell_type": "code", 312 | "execution_count": null, 313 | "metadata": {}, 314 | "outputs": [], 315 | "source": [ 316 | "df.tail()" 317 | ] 318 | }, 319 | { 320 | "cell_type": "code", 321 | "execution_count": null, 322 | "metadata": {}, 323 | "outputs": [], 324 | "source": [ 325 | "df[df['class']=='Sad']" 326 | ] 327 | }, 328 | { 329 | "cell_type": "code", 330 | "execution_count": null, 331 | "metadata": {}, 332 | "outputs": [], 333 | "source": [ 334 | "X = df.drop('class', axis=1) # features\n", 335 | "y = df['class'] # target value" 336 | ] 337 | }, 338 | { 339 | "cell_type": "code", 340 | "execution_count": null, 341 | "metadata": {}, 342 | "outputs": [], 343 | "source": [ 344 | "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1234)" 345 | ] 346 | }, 347 | { 348 | "cell_type": "code", 349 | "execution_count": null, 350 | "metadata": {}, 351 | "outputs": [], 352 | "source": [ 353 | "y_test" 354 | ] 355 | }, 356 | { 357 | "cell_type": "markdown", 358 | "metadata": {}, 359 | "source": [ 360 | "## 3.2 Train Machine Learning Classification Model" 361 | ] 362 | }, 363 | { 364 | "cell_type": "code", 365 | "execution_count": null, 366 | "metadata": {}, 367 | "outputs": [], 368 | "source": [ 369 | "from sklearn.pipeline import make_pipeline \n", 370 | "from sklearn.preprocessing import StandardScaler \n", 371 | "\n", 372 | "from sklearn.linear_model import LogisticRegression, RidgeClassifier\n", 373 | "from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier" 374 | ] 375 | }, 376 | { 377 | "cell_type": "code", 378 | "execution_count": null, 379 | "metadata": {}, 380 | "outputs": [], 381 | "source": [ 382 | "pipelines = {\n", 383 | " 'lr':make_pipeline(StandardScaler(), LogisticRegression()),\n", 384 | " 'rc':make_pipeline(StandardScaler(), RidgeClassifier()),\n", 385 | " 'rf':make_pipeline(StandardScaler(), RandomForestClassifier()),\n", 386 | " 'gb':make_pipeline(StandardScaler(), GradientBoostingClassifier()),\n", 387 | "}" 388 | ] 389 | }, 390 | { 391 | "cell_type": "code", 392 | "execution_count": null, 393 | "metadata": {}, 394 | "outputs": [], 395 | "source": [ 396 | "fit_models = {}\n", 397 | "for algo, pipeline in pipelines.items():\n", 398 | " model = pipeline.fit(X_train, y_train)\n", 399 | " fit_models[algo] = model" 400 | ] 401 | }, 402 | { 403 | "cell_type": "code", 404 | "execution_count": null, 405 | "metadata": {}, 406 | "outputs": [], 407 | "source": [ 408 | "fit_models" 409 | ] 410 | }, 411 | { 412 | "cell_type": "code", 413 | "execution_count": null, 414 | "metadata": {}, 415 | "outputs": [], 416 | "source": [ 417 | "fit_models['rc'].predict(X_test)" 418 | ] 419 | }, 420 | { 421 | "cell_type": "markdown", 422 | "metadata": {}, 423 | "source": [ 424 | "## 3.3 Evaluate and Serialize Model " 425 | ] 426 | }, 427 | { 428 | "cell_type": "code", 429 | "execution_count": null, 430 | "metadata": {}, 431 | "outputs": [], 432 | "source": [ 433 | "from sklearn.metrics import accuracy_score # Accuracy metrics \n", 434 | "import pickle " 435 | ] 436 | }, 437 | { 438 | "cell_type": "code", 439 | "execution_count": null, 440 | "metadata": {}, 441 | "outputs": [], 442 | "source": [ 443 | "for algo, model in fit_models.items():\n", 444 | " yhat = model.predict(X_test)\n", 445 | " print(algo, accuracy_score(y_test, yhat))" 446 | ] 447 | }, 448 | { 449 | "cell_type": "code", 450 | "execution_count": null, 451 | "metadata": {}, 452 | "outputs": [], 453 | "source": [ 454 | "fit_models['rf'].predict(X_test)" 455 | ] 456 | }, 457 | { 458 | "cell_type": "code", 459 | "execution_count": null, 460 | "metadata": {}, 461 | "outputs": [], 462 | "source": [ 463 | "y_test" 464 | ] 465 | }, 466 | { 467 | "cell_type": "code", 468 | "execution_count": null, 469 | "metadata": {}, 470 | "outputs": [], 471 | "source": [ 472 | "with open('body_language.pkl', 'wb') as f:\n", 473 | " pickle.dump(fit_models['rf'], f)" 474 | ] 475 | }, 476 | { 477 | "cell_type": "markdown", 478 | "metadata": {}, 479 | "source": [ 480 | "# 4. Make Detections with Model" 481 | ] 482 | }, 483 | { 484 | "cell_type": "code", 485 | "execution_count": null, 486 | "metadata": {}, 487 | "outputs": [], 488 | "source": [ 489 | "with open('body_language.pkl', 'rb') as f:\n", 490 | " model = pickle.load(f)" 491 | ] 492 | }, 493 | { 494 | "cell_type": "code", 495 | "execution_count": null, 496 | "metadata": {}, 497 | "outputs": [], 498 | "source": [ 499 | "model" 500 | ] 501 | }, 502 | { 503 | "cell_type": "code", 504 | "execution_count": null, 505 | "metadata": {}, 506 | "outputs": [], 507 | "source": [ 508 | "cap = cv2.VideoCapture(0)\n", 509 | "# Initiate holistic model\n", 510 | "with mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.5) as holistic:\n", 511 | " \n", 512 | " while cap.isOpened():\n", 513 | " ret, frame = cap.read()\n", 514 | " \n", 515 | " # Recolor Feed\n", 516 | " image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n", 517 | " image.flags.writeable = False \n", 518 | " \n", 519 | " # Make Detections\n", 520 | " results = holistic.process(image)\n", 521 | " # print(results.face_landmarks)\n", 522 | " \n", 523 | " # face_landmarks, pose_landmarks, left_hand_landmarks, right_hand_landmarks\n", 524 | " \n", 525 | " # Recolor image back to BGR for rendering\n", 526 | " image.flags.writeable = True \n", 527 | " image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n", 528 | " \n", 529 | " # 1. Draw face landmarks\n", 530 | " mp_drawing.draw_landmarks(image, results.face_landmarks, mp_holistic.FACE_CONNECTIONS, \n", 531 | " mp_drawing.DrawingSpec(color=(80,110,10), thickness=1, circle_radius=1),\n", 532 | " mp_drawing.DrawingSpec(color=(80,256,121), thickness=1, circle_radius=1)\n", 533 | " )\n", 534 | " \n", 535 | " # 2. Right hand\n", 536 | " mp_drawing.draw_landmarks(image, results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS, \n", 537 | " mp_drawing.DrawingSpec(color=(80,22,10), thickness=2, circle_radius=4),\n", 538 | " mp_drawing.DrawingSpec(color=(80,44,121), thickness=2, circle_radius=2)\n", 539 | " )\n", 540 | "\n", 541 | " # 3. Left Hand\n", 542 | " mp_drawing.draw_landmarks(image, results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS, \n", 543 | " mp_drawing.DrawingSpec(color=(121,22,76), thickness=2, circle_radius=4),\n", 544 | " mp_drawing.DrawingSpec(color=(121,44,250), thickness=2, circle_radius=2)\n", 545 | " )\n", 546 | "\n", 547 | " # 4. Pose Detections\n", 548 | " mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS, \n", 549 | " mp_drawing.DrawingSpec(color=(245,117,66), thickness=2, circle_radius=4),\n", 550 | " mp_drawing.DrawingSpec(color=(245,66,230), thickness=2, circle_radius=2)\n", 551 | " )\n", 552 | " # Export coordinates\n", 553 | " try:\n", 554 | " # Extract Pose landmarks\n", 555 | " pose = results.pose_landmarks.landmark\n", 556 | " pose_row = list(np.array([[landmark.x, landmark.y, landmark.z, landmark.visibility] for landmark in pose]).flatten())\n", 557 | " \n", 558 | " # Extract Face landmarks\n", 559 | " face = results.face_landmarks.landmark\n", 560 | " face_row = list(np.array([[landmark.x, landmark.y, landmark.z, landmark.visibility] for landmark in face]).flatten())\n", 561 | " \n", 562 | " # Concate rows\n", 563 | " row = pose_row+face_row\n", 564 | " \n", 565 | "# # Append class name \n", 566 | "# row.insert(0, class_name)\n", 567 | " \n", 568 | "# # Export to CSV\n", 569 | "# with open('coords.csv', mode='a', newline='') as f:\n", 570 | "# csv_writer = csv.writer(f, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n", 571 | "# csv_writer.writerow(row) \n", 572 | "\n", 573 | " # Make Detections\n", 574 | " X = pd.DataFrame([row])\n", 575 | " body_language_class = model.predict(X)[0]\n", 576 | " body_language_prob = model.predict_proba(X)[0]\n", 577 | " print(body_language_class, body_language_prob)\n", 578 | " \n", 579 | " # Grab ear coords\n", 580 | " coords = tuple(np.multiply(\n", 581 | " np.array(\n", 582 | " (results.pose_landmarks.landmark[mp_holistic.PoseLandmark.LEFT_EAR].x, \n", 583 | " results.pose_landmarks.landmark[mp_holistic.PoseLandmark.LEFT_EAR].y))\n", 584 | " , [640,480]).astype(int))\n", 585 | " \n", 586 | " cv2.rectangle(image, \n", 587 | " (coords[0], coords[1]+5), \n", 588 | " (coords[0]+len(body_language_class)*20, coords[1]-30), \n", 589 | " (245, 117, 16), -1)\n", 590 | " cv2.putText(image, body_language_class, coords, \n", 591 | " cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)\n", 592 | " \n", 593 | " # Get status box\n", 594 | " cv2.rectangle(image, (0,0), (250, 60), (245, 117, 16), -1)\n", 595 | " \n", 596 | " # Display Class\n", 597 | " cv2.putText(image, 'CLASS'\n", 598 | " , (95,12), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)\n", 599 | " cv2.putText(image, body_language_class.split(' ')[0]\n", 600 | " , (90,40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)\n", 601 | " \n", 602 | " # Display Probability\n", 603 | " cv2.putText(image, 'PROB'\n", 604 | " , (15,12), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)\n", 605 | " cv2.putText(image, str(round(body_language_prob[np.argmax(body_language_prob)],2))\n", 606 | " , (10,40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)\n", 607 | " \n", 608 | " except:\n", 609 | " pass\n", 610 | " \n", 611 | " cv2.imshow('Raw Webcam Feed', image)\n", 612 | "\n", 613 | " if cv2.waitKey(10) & 0xFF == ord('q'):\n", 614 | " break\n", 615 | "\n", 616 | "cap.release()\n", 617 | "cv2.destroyAllWindows()" 618 | ] 619 | }, 620 | { 621 | "cell_type": "code", 622 | "execution_count": null, 623 | "metadata": {}, 624 | "outputs": [], 625 | "source": [ 626 | "tuple(np.multiply(np.array((results.pose_landmarks.landmark[mp_holistic.PoseLandmark.LEFT_EAR].x, \n", 627 | "results.pose_landmarks.landmark[mp_holistic.PoseLandmark.LEFT_EAR].y)), [640,480]).astype(int))" 628 | ] 629 | }, 630 | { 631 | "cell_type": "code", 632 | "execution_count": null, 633 | "metadata": {}, 634 | "outputs": [], 635 | "source": [] 636 | }, 637 | { 638 | "cell_type": "code", 639 | "execution_count": null, 640 | "metadata": {}, 641 | "outputs": [], 642 | "source": [] 643 | }, 644 | { 645 | "cell_type": "code", 646 | "execution_count": null, 647 | "metadata": {}, 648 | "outputs": [], 649 | "source": [] 650 | } 651 | ], 652 | "metadata": { 653 | "kernelspec": { 654 | "display_name": "mediapipe", 655 | "language": "python", 656 | "name": "mediapipe" 657 | }, 658 | "language_info": { 659 | "codemirror_mode": { 660 | "name": "ipython", 661 | "version": 3 662 | }, 663 | "file_extension": ".py", 664 | "mimetype": "text/x-python", 665 | "name": "python", 666 | "nbconvert_exporter": "python", 667 | "pygments_lexer": "ipython3", 668 | "version": "3.7.3" 669 | } 670 | }, 671 | "nbformat": 4, 672 | "nbformat_minor": 2 673 | } 674 | --------------------------------------------------------------------------------