├── .gitignore
├── BUILD
├── README.md
├── build.bat
├── build_app.bat
├── classification_packet.cc
├── classification_packet.h
├── deploy.cmd
├── dummy_packet_callback.cc
├── dummy_packet_callback.h
├── face_geometry_packet.cc
├── face_geometry_packet.h
├── graphs
│   ├── face_geometry_from_landmarks.pbtxt
│   ├── face_mesh_desktop_live.pbtxt
│   ├── holistic_landmark_cpu.pbtxt
│   ├── holistic_tracking_cpu.pbtxt
│   └── pose_tracking_cpu.pbtxt
├── graphs_back
│   ├── face_landmarks_with_iris.pbtxt
│   ├── holistic_landmarks.pbtxt
│   ├── holistic_with_iris.pbtxt
│   ├── multi_face_geometry.pbtxt
│   ├── multi_face_landmarks.pbtxt
│   ├── multi_hand_landmarks.pbtxt
│   ├── objectron_landmarks.pbtxt
│   └── pose_landmarks.pbtxt
├── landmarks_packet.cc
├── landmarks_packet.h
├── packet.cc
├── packet.h
├── packet_api.cc
├── packet_api.h
├── protobuf.cc
├── protobuf.h
├── side_packet.cc
├── side_packet.h
├── ump_api.h
├── ump_app.cc
├── ump_commons.h
├── ump_context.cc
├── ump_context.h
├── ump_dll.cc
├── ump_dll.h
├── ump_frame.h
├── ump_object.h
├── ump_observer.h
├── ump_packet.h
├── ump_pipeline.cc
├── ump_pipeline.h
├── ump_profiler.h
├── ump_shared.h
├── update_proto_models.bat
└── vs
    ├── unreal_mediapipe.sln
    ├── unreal_mediapipe.vcxproj
    ├── unreal_mediapipe.vcxproj.filters
    └── unreal_mediapipe.vcxproj.user

/.gitignore:
--------------------------------------------------------------------------------
.vs/
x64/
--------------------------------------------------------------------------------
/BUILD:
--------------------------------------------------------------------------------
licenses(["notice"])

load("//mediapipe/framework/port:build_config.bzl", "mediapipe_cc_proto_library")

package(default_visibility = [
    "//visibility:public",
])



cc_library(
    name = "headers",
    hdrs = glob(["*.h"]),
    visibility = ["//visibility:public"],
)



cc_library(
    name = "ump_core",
    srcs = [
        "ump_context.cc",
        "ump_pipeline.cc",
        "packet.cc",
        "landmarks_packet.cc",
        "face_geometry_packet.cc",
        "classification_packet.cc",
        "packet_api.cc"
    ],
    deps = [
        "@com_google_absl//absl/flags:flag",
        "@com_google_absl//absl/flags:parse",

        "//mediapipe/framework:calculator_framework",
        "//mediapipe/framework/formats:image_frame",
        "//mediapipe/framework/formats:image_frame_opencv",
        "//mediapipe/framework/port:opencv_imgproc",
        "//mediapipe/framework/port:opencv_video",
        "//mediapipe/framework/port:opencv_highgui",
        "//mediapipe/framework/port:parse_text_proto",
        "//mediapipe/framework/port:file_helpers",
        "//mediapipe/framework/port:status",

        "//mediapipe/calculators/core:mux_calculator",
        "//mediapipe/graphs/face_mesh:desktop_live_calculators",
        "//mediapipe/modules/face_geometry:env_generator_calculator",
        "//mediapipe/modules/face_geometry:face_geometry_from_landmarks",
        "//mediapipe/graphs/hand_tracking:desktop_tflite_calculators",
        "//mediapipe/graphs/pose_tracking:pose_tracking_cpu_deps",
        "//mediapipe/graphs/holistic_tracking:holistic_tracking_cpu_graph_deps",
        "//mediapipe/graphs/iris_tracking:iris_tracking_cpu_video_input_deps",
        "//mediapipe/graphs/iris_tracking:iris_tracking_cpu_deps",
        "//mediapipe/graphs/iris_tracking:iris_depth_cpu_deps",
        "//mediapipe/graphs/object_detection_3d:desktop_cpu_calculators",

        "//mediapipe/framework/formats/motion:optical_flow_field_data_cc_proto",
        ":headers",
    ]
)

cc_binary(
    name = "mediapipe_api",
    srcs = ["ump_dll.cc"],
    defines = ["MP_EXPORTS"],
    linkshared = True,
    deps = [
        ":ump_core",
    ],
)

cc_binary(
    name = "ump_app",
    srcs = ["ump_app.cc", "dummy_packet_callback.cc"],
    deps = [
        ":ump_core"
    ],
)
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# mediapipe-ue-export
MediaPipe library export for Unreal Engine.


Based on: https://github.com/wongfei/ue4-mediapipe-plugin

Copy this project into /mediapipe/.

Run build.bat.

The DLL will be output to "/bazel-bin/mediapipe/".
--------------------------------------------------------------------------------
/build.bat:
--------------------------------------------------------------------------------
@echo off
set "SOURCE_DIR=%~dp0"
pushd %~dp0
cd..
cd..
set BAZEL_VS="C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise"
set BAZEL_VC="C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC"
set BAZEL_VC_FULL_VERSION=14.29.30133
@rem C:\Program Files (x86)\Windows Kits
set BAZEL_WINSDK_FULL_VERSION=10.0.19041.0

set FN=%~p0
set FN=%FN:~0,-1%
:getfolder
FOR /F "tokens=1,* delims=\/" %%i in ("%FN%") do (
    if not "%%j"=="" (
        set FN=%%j
        goto getfolder
    )
)

FOR /F %%i in ('where python') do (
    set PY=%%i
    goto GET_PY
)
:GET_PY

set "PY=%PY:\=\\%"

echo on
@rem bazel build -c opt --define MEDIAPIPE_DISABLE_GPU=1 --action_env PYTHON_BIN_PATH="C:\\Users\\endink\\AppData\\Local\\Programs\\Python\\Python310\\python.exe" mediapipe/examples/desktop/hello_world

bazel build -c opt --define MEDIAPIPE_DISABLE_GPU=1 --action_env PYTHON_BIN_PATH="%PY%" mediapipe/%FN%:mediapipe_api --verbose_failures

pause
--------------------------------------------------------------------------------
/build_app.bat:
--------------------------------------------------------------------------------
@echo off
set "SCRIPTS_DIR=%~dp0"

FOR /F %%i in ('where python') do (
    set PY=%%i
    goto GET_PY
)
:GET_PY

set "PY=%PY:\=\\%"

set FN=%~p0
set FN=%FN:~0,-1%
:getfolder
FOR /F "tokens=1,* delims=\/" %%i in ("%FN%") do (
    if not "%%j"=="" (
        set FN=%%j
        goto getfolder
    )
)


cd "%SCRIPTS_DIR%..\..\"

echo on

bazel build -c opt --define MEDIAPIPE_DISABLE_GPU=1 --action_env PYTHON_BIN_PATH="%PY%" mediapipe/%FN%:ump_app

@echo off
cd "%SCRIPTS_DIR%"

pause
--------------------------------------------------------------------------------
/classification_packet.cc:
--------------------------------------------------------------------------------
#include "classification_packet.h"
#include "packet.h"
#include "mediapipe/framework/formats/classification.pb.h"

MP_API int mp_Packet__GetClassificationListVector(void* packet, SerializedProtoArray* value_out)
{
    auto p = (mediapipe::Packet*)packet;
    return mp_Packet__GetSerializedProtoVector<mediapipe::ClassificationList>(p, value_out);
}

MP_API int mp_Packet__GetClassificationList(void* packet, SerializedProto* value_out)
{
    auto p = (mediapipe::Packet*)packet;
    return mp_Packet__GetSerializedProto<mediapipe::ClassificationList>(p, value_out);
}
--------------------------------------------------------------------------------
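The extern "C" wrappers above are compiled into mediapipe_api.dll by the mediapipe_api target in BUILD. Below is a minimal sketch of how a Win32 host could resolve and call one export; the SerializedProto layout shown is an assumption (its real definition lives in ump_commons.h, outside this excerpt), and no real packet is constructed here.

// Hypothetical DLL consumer sketch. Assumptions: SerializedProto has roughly
// the layout below (see ump_commons.h, not shown in this listing); the export
// name is unmangled because the wrapper is declared extern "C".
#include <windows.h>
#include <cstdio>

struct SerializedProto { const char* str; int length; };  // assumed layout

using GetClassificationListFn = int (*)(void* packet, SerializedProto* value_out);

int main() {
    HMODULE dll = ::LoadLibraryA("mediapipe_api.dll");
    if (!dll) { std::printf("mediapipe_api.dll not found\n"); return 1; }

    auto get_list = reinterpret_cast<GetClassificationListFn>(
        ::GetProcAddress(dll, "mp_Packet__GetClassificationList"));
    if (!get_list) { ::FreeLibrary(dll); return 1; }

    // A real 'packet' pointer would arrive through an IUmpPacketCallback at
    // runtime (see dummy_packet_callback.cc later in this listing).
    ::FreeLibrary(dll);
    return 0;
}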
/classification_packet.h:
--------------------------------------------------------------------------------
#ifndef CLASSIFICATION_PACKET_H
#define CLASSIFICATION_PACKET_H

#include "ump_commons.h"

extern "C" {
    MP_API int mp_Packet__GetClassificationListVector(void* packet, SerializedProtoArray* value_out);
    MP_API int mp_Packet__GetClassificationList(void* packet, SerializedProto* value_out);
} // extern "C"

#endif // CLASSIFICATION_PACKET_H
--------------------------------------------------------------------------------
/deploy.cmd:
--------------------------------------------------------------------------------
:: TODO => USE BAZEL TO DEPLOY?

@echo off

set "UNREAL_PLUGIN_DIR=D:\3D_Works\UE\MediaPipe4U\MediaPipe4U"
set "INITIAL_DIR=%cd%"

set "SCRIPTS_DIR=%~dp0"

set FN=%~p0
set FN=%FN:~0,-1%
:getfolder
FOR /F "tokens=1,* delims=\/" %%i in ("%FN%") do (
    if not "%%j"=="" (
        set FN=%%j
        goto getfolder
    )
)
echo Current folder: %FN%


FOR /F %%i in ('where python') do (
    set PYTHON_EXE=%%i
    goto GET_PY
)
:GET_PY

set "PYTHON_EXE=%PYTHON_EXE:\=\\%"

cd "%SCRIPTS_DIR%..\..\"

set "MP_ROOT=%cd%"

if not exist mediapipe ( echo "invalid directory" && exit )

if not exist bazel-bin\mediapipe\modules\face_geometry\data\geometry_pipeline_metadata_landmarks.binarypb (
    bazel build -c opt --define MEDIAPIPE_DISABLE_GPU=1 --action_env PYTHON_BIN_PATH="%PYTHON_EXE%" mediapipe/modules/face_geometry/data:geometry_pipeline_metadata_landmarks
)

set "DEPLOY_ROOT=%MP_ROOT%\bazel-bin\mediapipe\%FN%"

:: face

md "%DEPLOY_ROOT%\mediapipe\modules\face_detection"
md "%DEPLOY_ROOT%\mediapipe\modules\face_landmark"
md "%DEPLOY_ROOT%\mediapipe\modules\face_geometry\data"

copy /Y "%MP_ROOT%\mediapipe\modules\face_detection\face_detection_short_range.tflite" "%DEPLOY_ROOT%\mediapipe\modules\face_detection\"
copy /Y "%MP_ROOT%\mediapipe\modules\face_landmark\face_landmark.tflite" "%DEPLOY_ROOT%\mediapipe\modules\face_landmark\"
copy /Y "%MP_ROOT%\mediapipe\modules\face_landmark\face_landmark_with_attention.tflite" "%DEPLOY_ROOT%\mediapipe\modules\face_landmark\"
copy /Y "%MP_ROOT%\bazel-bin\mediapipe\modules\face_geometry\data\geometry_pipeline_metadata_landmarks.binarypb" "%DEPLOY_ROOT%\mediapipe\modules\face_geometry\data\"

:: iris

md "%DEPLOY_ROOT%\mediapipe\modules\iris_landmark"

copy /Y "%MP_ROOT%\mediapipe\modules\iris_landmark\iris_landmark.tflite" "%DEPLOY_ROOT%\mediapipe\modules\iris_landmark\"

:: hands

md "%DEPLOY_ROOT%\mediapipe\modules\palm_detection"
md "%DEPLOY_ROOT%\mediapipe\modules\hand_landmark"

copy /Y "%MP_ROOT%\mediapipe\modules\palm_detection\palm_detection.tflite" "%DEPLOY_ROOT%\mediapipe\modules\palm_detection\"
copy /Y "%MP_ROOT%\mediapipe\modules\hand_landmark\handedness.txt" "%DEPLOY_ROOT%\mediapipe\modules\hand_landmark\"
copy /Y "%MP_ROOT%\mediapipe\modules\hand_landmark\hand_landmark_full.tflite" "%DEPLOY_ROOT%\mediapipe\modules\hand_landmark\"

:: pose

md "%DEPLOY_ROOT%\mediapipe\modules\pose_detection"
md "%DEPLOY_ROOT%\mediapipe\modules\pose_landmark"

copy /Y "%MP_ROOT%\mediapipe\modules\pose_detection\pose_detection.tflite" "%DEPLOY_ROOT%\mediapipe\modules\pose_detection\"
"%DEPLOY_ROOT%\mediapipe\modules\pose_detection\" 74 | copy /Y "%MP_ROOT%\mediapipe\modules\pose_landmark\pose_landmark_full.tflite" "%DEPLOY_ROOT%\mediapipe\modules\pose_landmark\" 75 | copy /Y "%MP_ROOT%\mediapipe\modules\pose_landmark\pose_landmark_lite.tflite" "%DEPLOY_ROOT%\mediapipe\modules\pose_landmark\" 76 | copy /Y "%MP_ROOT%\mediapipe\modules\pose_landmark\pose_landmark_heavy.tflite" "%DEPLOY_ROOT%\mediapipe\modules\pose_landmark\" 77 | 78 | :: holistic 79 | 80 | md "%DEPLOY_ROOT%\mediapipe\modules\holistic_landmark" 81 | 82 | copy /Y "%MP_ROOT%\mediapipe\modules\holistic_landmark\hand_recrop.tflite" "%DEPLOY_ROOT%\mediapipe\modules\holistic_landmark\" 83 | copy /Y "%MP_ROOT%\mediapipe\modules\holistic_landmark\handedness.txt" "%DEPLOY_ROOT%\mediapipe\modules\holistic_landmark\" 84 | 85 | :: objectron 86 | 87 | md "%DEPLOY_ROOT%\mediapipe\modules\objectron" 88 | 89 | copy /Y "%MP_ROOT%\mediapipe\modules\objectron\object_detection_ssd_mobilenetv2_oidv4_fp16.tflite" "%DEPLOY_ROOT%\mediapipe\modules\objectron\" 90 | copy /Y "%MP_ROOT%\mediapipe\modules\objectron\object_detection_oidv4_labelmap.txt" "%DEPLOY_ROOT%\mediapipe\modules\objectron\" 91 | copy /Y "%MP_ROOT%\mediapipe\modules\objectron\object_detection_3d_cup.tflite" "%DEPLOY_ROOT%\mediapipe\modules\objectron\" 92 | 93 | :: graphs 94 | copy /Y "%MP_ROOT%\mediapipe\graphs\holistic_tracking\holistic_tracking_cpu.pbtxt" "%SCRIPTS_DIR%\graphs\" 95 | copy /Y "%MP_ROOT%\mediapipe\graphs\pose_tracking\pose_tracking_cpu.pbtxt" "%SCRIPTS_DIR%\graphs\" 96 | copy /Y "%MP_ROOT%\mediapipe\graphs\face_mesh\face_mesh_desktop_live.pbtxt" "%SCRIPTS_DIR%\graphs\" 97 | copy /Y "%MP_ROOT%\mediapipe\modules\face_geometry\face_geometry_from_landmarks.pbtxt" "%SCRIPTS_DIR%\graphs\" 98 | 99 | copy /Y "%MP_ROOT%\mediapipe\modules\holistic_landmark\holistic_landmark_cpu.pbtxt" "%SCRIPTS_DIR%\graphs\" 100 | 101 | 102 | 103 | xcopy /Y /E "%SCRIPTS_DIR%\graphs" "%DEPLOY_ROOT%\mediapipe\graphs\" 104 | 105 | set /p answer="DEPLOY FILE TO %UNREAL_PLUGIN_DIR% ? 
(y/n)[n]" : 106 | 107 | if not %answer% == y goto QUIT 108 | 109 | :: UE4 110 | 111 | SET "DLL_DIR=%UNREAL_PLUGIN_DIR%\Source\ThirdParty\mediapipe\DLL\Win64" 112 | 113 | md "%DLL_DIR%" 114 | copy /Y "%DEPLOY_ROOT%\mediapipe_api.dll" "%DLL_DIR%\" 115 | :: copy /Y "%DEPLOY_ROOT%\mediapipe_api.dll" "%UNREAL_PLUGIN_DIR%\Binaries\ThirdParty\Win64\" 116 | copy /Y "%DEPLOY_ROOT%\opencv_world3410.dll" "%DLL_DIR%\" 117 | :: copy /Y "%DEPLOY_ROOT%\opencv_world3410.dll" "%UNREAL_PLUGIN_DIR%\ThirdParty\mediapipe\Binaries\Win64\" 118 | copy /Y "%SCRIPTS_DIR%\ump_commons.h" "%UNREAL_PLUGIN_DIR%\Source\MediaPipe\Public\Core\" 119 | copy /Y "%SCRIPTS_DIR%\ump_api.h" "%UNREAL_PLUGIN_DIR%\Source\MediaPipe\Public\Core\" 120 | copy /Y "%SCRIPTS_DIR%\ump_packet.h" "%UNREAL_PLUGIN_DIR%\Source\MediaPipe\Public\Core\" 121 | xcopy /Y /E "%MP_ROOT%\bazel-bin\mediapipe\%FN%\mediapipe" "%UNREAL_PLUGIN_DIR%\Source\ThirdParty\mediapipe\Data\mediapipe\" 122 | 123 | cd "%INITIAL_DIR%" 124 | 125 | :QUIT 126 | pause -------------------------------------------------------------------------------- /dummy_packet_callback.cc: -------------------------------------------------------------------------------- 1 | #include "dummy_packet_callback.h" 2 | #include 3 | 4 | DummyPacketCallback::DummyPacketCallback() 5 | { 6 | } 7 | 8 | inline bool DummyPacketCallback::OnUmpPacket(IUmpObserver* observer, void* packet) 9 | { 10 | auto api = observer->GetPacketAPI(); 11 | SerializedProto proto; 12 | api->GetNormalizedLandmarkList(packet, &proto); 13 | 14 | std::cout << proto.str << std::endl; 15 | return true; 16 | } 17 | -------------------------------------------------------------------------------- /dummy_packet_callback.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "ump_api.h" 4 | 5 | class DummyPacketCallback : public IUmpPacketCallback 6 | { 7 | public: 8 | DummyPacketCallback(); 9 | virtual void OnUmpPresence(class IUmpObserver* observer, bool present) override { } 10 | virtual bool OnUmpPacket(class IUmpObserver* observer, void* packet) override; 11 | private: 12 | 13 | }; 14 | 15 | 16 | -------------------------------------------------------------------------------- /face_geometry_packet.cc: -------------------------------------------------------------------------------- 1 | #include "face_geometry_packet.h" 2 | #include "packet.h" 3 | #include "mediapipe/modules/face_geometry/protos/face_geometry.pb.h" 4 | 5 | MP_API int mp_Packet__GetFaceGeometryVector(void* packet, SerializedProtoArray* value_out) 6 | { 7 | auto p = (mediapipe::Packet*)packet; 8 | return mp_Packet__GetSerializedProtoVector(p, value_out); 9 | } 10 | 11 | MP_API int mp_Packet__GetFaceGeometry(void* packet, SerializedProto* value_out) 12 | { 13 | auto p = (mediapipe::Packet*)packet; 14 | return mp_Packet__GetSerializedProto(p, value_out); 15 | } 16 | -------------------------------------------------------------------------------- /face_geometry_packet.h: -------------------------------------------------------------------------------- 1 | #ifndef FACE_GEOMETRY_PACKET_H 2 | #define FACE_GEOMETRY_PACKET_H 3 | #include "ump_commons.h" 4 | 5 | extern "C" { 6 | MP_API int mp_Packet__GetFaceGeometryVector(void* packet, SerializedProtoArray* value_out); 7 | MP_API int mp_Packet__GetFaceGeometry(void* packet, SerializedProto* value_out); 8 | } // extern "C" 9 | 10 | #endif // LANDMARK_PACKET_H_ -------------------------------------------------------------------------------- 
/graphs/face_geometry_from_landmarks.pbtxt:
--------------------------------------------------------------------------------
# MediaPipe graph to extract 3D transform from face landmarks for multiple faces.
#
# It is required that "geometry_pipeline_metadata_landmarks.binarypb" is
# available at
# "mediapipe/modules/face_geometry/data/geometry_pipeline_metadata_landmarks.binarypb"
# path during execution.
#
# EXAMPLE:
#   node {
#     calculator: "FaceGeometryFromLandmarks"
#     input_stream: "IMAGE_SIZE:image_size"
#     input_stream: "MULTI_FACE_LANDMARKS:multi_face_landmarks"
#     input_side_packet: "ENVIRONMENT:environment"
#     output_stream: "MULTI_FACE_GEOMETRY:multi_face_geometry"
#   }

type: "FaceGeometryFromLandmarks"

# The size of the input frame. The first element of the pair is the frame width;
# the other one is the frame height.
#
# The face landmarks should have been detected on a frame with the same
# ratio. If used as-is, the resulting face geometry visualization should be
# happening on a frame with the same ratio as well.
#
# (std::pair<int, int>)
input_stream: "IMAGE_SIZE:image_size"

# Collection of detected/predicted faces, each represented as a list of face
# landmarks. (std::vector<NormalizedLandmarkList>)
input_stream: "MULTI_FACE_LANDMARKS:multi_face_landmarks"

# Environment that describes the current virtual scene.
# (face_geometry::Environment)
input_side_packet: "ENVIRONMENT:environment"

# A list of 3D transform data for each detected face.
# (std::vector<face_geometry::FaceGeometry>)
output_stream: "MULTI_FACE_GEOMETRY:multi_face_geometry"

# Extracts face 3D transform for multiple faces from a vector of face landmark
# lists.
node {
  calculator: "FaceGeometryPipelineCalculator"
  input_side_packet: "ENVIRONMENT:environment"
  input_stream: "IMAGE_SIZE:image_size"
  input_stream: "MULTI_FACE_LANDMARKS:multi_face_landmarks"
  output_stream: "MULTI_FACE_GEOMETRY:multi_face_geometry"
  options: {
    [mediapipe.FaceGeometryPipelineCalculatorOptions.ext] {
      metadata_path: "mediapipe/modules/face_geometry/data/geometry_pipeline_metadata_landmarks.binarypb"
    }
  }
}
--------------------------------------------------------------------------------
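Each of these .pbtxt files is a CalculatorGraphConfig in text form. A minimal sketch of loading one at runtime follows; this is plain MediaPipe framework usage, not a copy of ump_pipeline.cc (which is outside this excerpt). The file_helpers and parse_text_proto ports it uses are already dependencies of the ump_core target in BUILD.

// Minimal graph-loading sketch using standard MediaPipe framework calls.
#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/port/file_helpers.h"
#include "mediapipe/framework/port/parse_text_proto.h"
#include "mediapipe/framework/port/status.h"

absl::Status LoadAndStartGraph(const std::string& pbtxt_path,
                               mediapipe::CalculatorGraph* graph) {
    std::string config_text;
    MP_RETURN_IF_ERROR(mediapipe::file::GetContents(pbtxt_path, &config_text));
    auto config =
        mediapipe::ParseTextProtoOrDie<mediapipe::CalculatorGraphConfig>(config_text);
    MP_RETURN_IF_ERROR(graph->Initialize(config));
    return graph->StartRun({});  // stream observers must be attached before this call
}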
/graphs/face_mesh_desktop_live.pbtxt:
--------------------------------------------------------------------------------
# MediaPipe graph that performs face mesh with TensorFlow Lite on CPU.

# Input image. (ImageFrame)
input_stream: "input_video"

# Output image with rendered results. (ImageFrame)
output_stream: "output_video"
# Collection of detected/processed faces, each represented as a list of
# landmarks. (std::vector<NormalizedLandmarkList>)
output_stream: "multi_face_landmarks"

# Throttles the images flowing downstream for flow control. It passes through
# the very first incoming image unaltered, and waits for downstream nodes
# (calculators and subgraphs) in the graph to finish their tasks before it
# passes through another image. All images that come in while waiting are
# dropped, limiting the number of in-flight images in most part of the graph to
# 1. This prevents the downstream nodes from queuing up incoming images and data
# excessively, which leads to increased latency and memory usage, unwanted in
# real-time mobile applications. It also eliminates unnecessary computation,
# e.g., the output produced by a node may get dropped downstream if the
# subsequent nodes are still busy processing previous inputs.
node {
  calculator: "FlowLimiterCalculator"
  input_stream: "input_video"
  input_stream: "FINISHED:output_video"
  input_stream_info: {
    tag_index: "FINISHED"
    back_edge: true
  }
  output_stream: "throttled_input_video"
}

# Defines side packets for further use in the graph.
node {
  calculator: "ConstantSidePacketCalculator"
  output_side_packet: "PACKET:0:num_faces"
  output_side_packet: "PACKET:1:with_attention"
  node_options: {
    [type.googleapis.com/mediapipe.ConstantSidePacketCalculatorOptions]: {
      packet { int_value: 1 }
      packet { bool_value: true }
    }
  }
}

# Subgraph that detects faces and corresponding landmarks.
node {
  calculator: "FaceLandmarkFrontCpu"
  input_stream: "IMAGE:throttled_input_video"
  input_side_packet: "NUM_FACES:num_faces"
  input_side_packet: "WITH_ATTENTION:with_attention"
  output_stream: "LANDMARKS:multi_face_landmarks"
  output_stream: "ROIS_FROM_LANDMARKS:face_rects_from_landmarks"
  output_stream: "DETECTIONS:face_detections"
  output_stream: "ROIS_FROM_DETECTIONS:face_rects_from_detections"
}

# Subgraph that renders face-landmark annotation onto the input image.
node {
  calculator: "FaceRendererCpu"
  input_stream: "IMAGE:throttled_input_video"
  input_stream: "LANDMARKS:multi_face_landmarks"
  input_stream: "NORM_RECTS:face_rects_from_landmarks"
  input_stream: "DETECTIONS:face_detections"
  output_stream: "IMAGE:output_video"
}
--------------------------------------------------------------------------------
/graphs/holistic_landmark_cpu.pbtxt:
--------------------------------------------------------------------------------
# Predicts pose + left/right hand + face landmarks.
#
# It is required that:
# - "face_detection_short_range.tflite" is available at
#   "mediapipe/modules/face_detection/face_detection_short_range.tflite"
#
# - "face_landmark.tflite" is available at
#   "mediapipe/modules/face_landmark/face_landmark.tflite"
#
# - "hand_landmark_full.tflite" is available at
#   "mediapipe/modules/hand_landmark/hand_landmark_full.tflite"
#
# - "hand_recrop.tflite" is available at
#   "mediapipe/modules/holistic_landmark/hand_recrop.tflite"
#
# - "handedness.txt" is available at
#   "mediapipe/modules/hand_landmark/handedness.txt"
#
# - "pose_detection.tflite" is available at
#   "mediapipe/modules/pose_detection/pose_detection.tflite"
#
# - "pose_landmark_lite.tflite" or "pose_landmark_full.tflite" or
#   "pose_landmark_heavy.tflite" is available at
#   "mediapipe/modules/pose_landmark/pose_landmark_lite.tflite" or
#   "mediapipe/modules/pose_landmark/pose_landmark_full.tflite" or
#   "mediapipe/modules/pose_landmark/pose_landmark_heavy.tflite"
#   path respectively during execution, depending on the specification in the
#   MODEL_COMPLEXITY input side packet.
#
# EXAMPLE:
#   node {
#     calculator: "HolisticLandmarkCpu"
#     input_stream: "IMAGE:input_video"
#     input_side_packet: "MODEL_COMPLEXITY:model_complexity"
#     input_side_packet: "SMOOTH_LANDMARKS:smooth_landmarks"
#     input_side_packet: "ENABLE_SEGMENTATION:enable_segmentation"
#     input_side_packet: "SMOOTH_SEGMENTATION:smooth_segmentation"
#     input_side_packet: "REFINE_FACE_LANDMARKS:refine_face_landmarks"
#     input_side_packet: "USE_PREV_LANDMARKS:use_prev_landmarks"
#     output_stream: "POSE_LANDMARKS:pose_landmarks"
#     output_stream: "FACE_LANDMARKS:face_landmarks"
#     output_stream: "LEFT_HAND_LANDMARKS:left_hand_landmarks"
#     output_stream: "RIGHT_HAND_LANDMARKS:right_hand_landmarks"
#   }
#
# NOTE: if a pose/hand/face output is not present in the image, for this
# particular timestamp there will not be an output packet in the corresponding
# output stream below. However, the MediaPipe framework will internally inform
# the downstream calculators of the absence of this packet so that they don't
# wait for it unnecessarily.

type: "HolisticLandmarkCpu"

# CPU image. (ImageFrame)
input_stream: "IMAGE:image"

# Complexity of the pose landmark model: 0, 1 or 2. Landmark accuracy as well as
# inference latency generally go up with the model complexity. If unspecified,
# functions as set to 1. (int)
input_side_packet: "MODEL_COMPLEXITY:model_complexity"

# Whether to filter landmarks across different input images to reduce jitter.
# If unspecified, functions as set to true. (bool)
input_side_packet: "SMOOTH_LANDMARKS:smooth_landmarks"

# Whether to predict the segmentation mask. If unspecified, functions as set to
# false. (bool)
input_side_packet: "ENABLE_SEGMENTATION:enable_segmentation"

# Whether to filter segmentation mask across different input images to reduce
# jitter. If unspecified, functions as set to true. (bool)
input_side_packet: "SMOOTH_SEGMENTATION:smooth_segmentation"

# Whether to run the face landmark model with attention on lips and eyes to
# provide more accuracy, and additionally output iris landmarks. If unspecified,
# functions as set to false. (bool)
input_side_packet: "REFINE_FACE_LANDMARKS:refine_face_landmarks"

# Whether landmarks on the previous image should be used to help localize
# landmarks on the current image. (bool)
input_side_packet: "USE_PREV_LANDMARKS:use_prev_landmarks"

# Pose landmarks. (NormalizedLandmarkList)
# 33 pose landmarks.
output_stream: "POSE_LANDMARKS:pose_landmarks"
# 33 pose world landmarks. (LandmarkList)
output_stream: "WORLD_LANDMARKS:pose_world_landmarks"
# 21 left hand landmarks. (NormalizedLandmarkList)
output_stream: "LEFT_HAND_LANDMARKS:left_hand_landmarks"
# 21 right hand landmarks. (NormalizedLandmarkList)
output_stream: "RIGHT_HAND_LANDMARKS:right_hand_landmarks"
# 468 face landmarks. (NormalizedLandmarkList)
output_stream: "FACE_LANDMARKS:face_landmarks"

# Segmentation mask. (ImageFrame in ImageFormat::VEC32F1)
output_stream: "SEGMENTATION_MASK:segmentation_mask"

# Debug outputs
output_stream: "POSE_ROI:pose_landmarks_roi"
output_stream: "POSE_DETECTION:pose_detection"

# Predicts pose landmarks.
node {
  calculator: "PoseLandmarkCpu"
  input_stream: "IMAGE:image"
  input_side_packet: "MODEL_COMPLEXITY:model_complexity"
  input_side_packet: "SMOOTH_LANDMARKS:smooth_landmarks"
  input_side_packet: "ENABLE_SEGMENTATION:enable_segmentation"
  input_side_packet: "SMOOTH_SEGMENTATION:smooth_segmentation"
  input_side_packet: "USE_PREV_LANDMARKS:use_prev_landmarks"
  output_stream: "LANDMARKS:pose_landmarks"
  output_stream: "WORLD_LANDMARKS:pose_world_landmarks"
  output_stream: "SEGMENTATION_MASK:segmentation_mask"
  output_stream: "ROI_FROM_LANDMARKS:pose_landmarks_roi"
  output_stream: "DETECTION:pose_detection"
}

# Predicts left and right hand landmarks based on the initial pose landmarks.
node {
  calculator: "HandLandmarksLeftAndRightCpu"
  input_stream: "IMAGE:image"
  input_stream: "POSE_LANDMARKS:pose_landmarks"
  output_stream: "LEFT_HAND_LANDMARKS:left_hand_landmarks"
  output_stream: "RIGHT_HAND_LANDMARKS:right_hand_landmarks"
}

# Extracts face-related pose landmarks.
node {
  calculator: "SplitNormalizedLandmarkListCalculator"
  input_stream: "pose_landmarks"
  output_stream: "face_landmarks_from_pose"
  options: {
    [mediapipe.SplitVectorCalculatorOptions.ext] {
      ranges: { begin: 0 end: 11 }
    }
  }
}

# Predicts face landmarks based on the initial pose landmarks.
node {
  calculator: "FaceLandmarksFromPoseCpu"
  input_stream: "IMAGE:image"
  input_stream: "FACE_LANDMARKS_FROM_POSE:face_landmarks_from_pose"
  input_side_packet: "REFINE_LANDMARKS:refine_face_landmarks"
  output_stream: "FACE_LANDMARKS:face_landmarks"
}
--------------------------------------------------------------------------------
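The NOTE in the holistic subgraph above matters on the host side: when no pose, hand, or face is present, no packet is emitted at that timestamp, so a stream observer simply never fires for that frame. A minimal sketch of observing one of these streams with standard MediaPipe API (this is generic framework usage, not the repo's ump_observer implementation, which is outside this excerpt):

// Attach before CalculatorGraph::StartRun; 'graph' is an initialized graph.
#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/formats/landmark.pb.h"
#include "mediapipe/framework/port/status.h"

absl::Status ObservePose(mediapipe::CalculatorGraph& graph) {
    return graph.ObserveOutputStream(
        "pose_landmarks",
        [](const mediapipe::Packet& packet) -> absl::Status {
            // Only ever called when a pose was actually detected, per the
            // absence semantics described in the NOTE above.
            const auto& landmarks =
                packet.Get<mediapipe::NormalizedLandmarkList>();
            (void)landmarks;  // forward landmarks.landmark(i).x()/.y()/.z() here
            return absl::OkStatus();
        });
}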
/graphs/holistic_tracking_cpu.pbtxt:
--------------------------------------------------------------------------------
# Tracks and renders pose + hands + face landmarks.

# CPU image. (ImageFrame)
input_stream: "input_video"

# CPU image with rendered results. (ImageFrame)
output_stream: "output_video"

# Throttles the images flowing downstream for flow control. It passes through
# the very first incoming image unaltered, and waits for downstream nodes
# (calculators and subgraphs) in the graph to finish their tasks before it
# passes through another image. All images that come in while waiting are
# dropped, limiting the number of in-flight images in most part of the graph to
# 1. This prevents the downstream nodes from queuing up incoming images and data
# excessively, which leads to increased latency and memory usage, unwanted in
# real-time mobile applications. It also eliminates unnecessary computation,
# e.g., the output produced by a node may get dropped downstream if the
# subsequent nodes are still busy processing previous inputs.
node {
  calculator: "FlowLimiterCalculator"
  input_stream: "input_video"
  input_stream: "FINISHED:output_video"
  input_stream_info: {
    tag_index: "FINISHED"
    back_edge: true
  }
  output_stream: "throttled_input_video"
  node_options: {
    [type.googleapis.com/mediapipe.FlowLimiterCalculatorOptions] {
      max_in_flight: 1
      max_in_queue: 1
      # Timeout is disabled (set to 0) as first frame processing can take more
      # than 1 second.
      in_flight_timeout: 0
    }
  }
}

node {
  calculator: "HolisticLandmarkCpu"
  input_stream: "IMAGE:throttled_input_video"
  output_stream: "POSE_LANDMARKS:pose_landmarks"
  output_stream: "POSE_ROI:pose_roi"
  output_stream: "POSE_DETECTION:pose_detection"
  output_stream: "FACE_LANDMARKS:face_landmarks"
  output_stream: "LEFT_HAND_LANDMARKS:left_hand_landmarks"
  output_stream: "RIGHT_HAND_LANDMARKS:right_hand_landmarks"
}

# Gets image size.
node {
  calculator: "ImagePropertiesCalculator"
  input_stream: "IMAGE:throttled_input_video"
  output_stream: "SIZE:image_size"
}

# Converts pose, hands and face landmarks to a render data vector.
node {
  calculator: "HolisticTrackingToRenderData"
  input_stream: "IMAGE_SIZE:image_size"
  input_stream: "POSE_LANDMARKS:pose_landmarks"
  input_stream: "POSE_ROI:pose_roi"
  input_stream: "LEFT_HAND_LANDMARKS:left_hand_landmarks"
  input_stream: "RIGHT_HAND_LANDMARKS:right_hand_landmarks"
  input_stream: "FACE_LANDMARKS:face_landmarks"
  output_stream: "RENDER_DATA_VECTOR:render_data_vector"
}

# Draws annotations and overlays them on top of the input images.
node {
  calculator: "AnnotationOverlayCalculator"
  input_stream: "IMAGE:throttled_input_video"
  input_stream: "VECTOR:render_data_vector"
  output_stream: "IMAGE:output_video"
}
--------------------------------------------------------------------------------
/graphs/pose_tracking_cpu.pbtxt:
--------------------------------------------------------------------------------
# MediaPipe graph that performs pose tracking with TensorFlow Lite on CPU.

# CPU buffer. (ImageFrame)
input_stream: "input_video"

# Output image with rendered results. (ImageFrame)
output_stream: "output_video"
# Pose landmarks. (NormalizedLandmarkList)
output_stream: "pose_landmarks"

# Generates side packet to enable segmentation.
node {
  calculator: "ConstantSidePacketCalculator"
  output_side_packet: "PACKET:enable_segmentation"
  node_options: {
    [type.googleapis.com/mediapipe.ConstantSidePacketCalculatorOptions]: {
      packet { bool_value: true }
    }
  }
}

# Throttles the images flowing downstream for flow control. It passes through
# the very first incoming image unaltered, and waits for downstream nodes
# (calculators and subgraphs) in the graph to finish their tasks before it
# passes through another image. All images that come in while waiting are
# dropped, limiting the number of in-flight images in most part of the graph to
# 1. This prevents the downstream nodes from queuing up incoming images and data
# excessively, which leads to increased latency and memory usage, unwanted in
# real-time mobile applications. It also eliminates unnecessary computation,
# e.g., the output produced by a node may get dropped downstream if the
# subsequent nodes are still busy processing previous inputs.
node {
  calculator: "FlowLimiterCalculator"
  input_stream: "input_video"
  input_stream: "FINISHED:output_video"
  input_stream_info: {
    tag_index: "FINISHED"
    back_edge: true
  }
  output_stream: "throttled_input_video"
}

# Subgraph that detects poses and corresponding landmarks.
node {
  calculator: "PoseLandmarkCpu"
  input_side_packet: "ENABLE_SEGMENTATION:enable_segmentation"
  input_stream: "IMAGE:throttled_input_video"
  output_stream: "LANDMARKS:pose_landmarks"
  output_stream: "SEGMENTATION_MASK:segmentation_mask"
  output_stream: "DETECTION:pose_detection"
  output_stream: "ROI_FROM_LANDMARKS:roi_from_landmarks"
}

# Subgraph that renders pose-landmark annotation onto the input image.
node {
  calculator: "PoseRendererCpu"
  input_stream: "IMAGE:throttled_input_video"
  input_stream: "LANDMARKS:pose_landmarks"
  input_stream: "SEGMENTATION_MASK:segmentation_mask"
  input_stream: "DETECTION:pose_detection"
  input_stream: "ROI:roi_from_landmarks"
  output_stream: "IMAGE:output_video"
}
--------------------------------------------------------------------------------
/graphs_back/face_landmarks_with_iris.pbtxt:
--------------------------------------------------------------------------------

input_stream: "input_video"
output_stream: "output_video"

output_stream: "multi_face_landmarks"
output_stream: "multi_face_landmarks_presence"

output_stream: "left_eye_contour_landmarks"
output_stream: "left_iris_landmarks"
output_stream: "left_eye_rect_from_landmarks"

output_stream: "right_eye_contour_landmarks"
output_stream: "right_iris_landmarks"
output_stream: "right_eye_rect_from_landmarks"

node {
  calculator: "FlowLimiterCalculator"
  input_stream: "input_video"
  input_stream: "FINISHED:face_landmarks_with_iris"
  input_stream_info: {
    tag_index: "FINISHED"
    back_edge: true
  }
  output_stream: "throttled_input_video"
}

node {
  calculator: "ImagePropertiesCalculator"
  input_stream: "IMAGE:throttled_input_video"
  output_stream: "SIZE:input_image_size"
}

node {
  calculator: "ConstantSidePacketCalculator"
  output_side_packet: "PACKET:num_faces"
  node_options: {
    [type.googleapis.com/mediapipe.ConstantSidePacketCalculatorOptions]: {
      packet { int_value: 1 }
    }
  }
}

node {
  calculator: "FaceLandmarkFrontCpu"
  input_stream: "IMAGE:throttled_input_video"
  input_side_packet: "NUM_FACES:num_faces"
  output_stream: "LANDMARKS:multi_face_landmarks"
  output_stream: "ROIS_FROM_LANDMARKS:face_rects_from_landmarks"
  output_stream: "DETECTIONS:face_detections"
  output_stream: "ROIS_FROM_DETECTIONS:face_rects_from_detections"
}

node {
  calculator: "PacketPresenceCalculator"
  input_stream: "PACKET:multi_face_landmarks"
  output_stream: "PRESENCE:multi_face_landmarks_presence"
}

node {
  calculator: "SplitNormalizedLandmarkListVectorCalculator"
  input_stream: "multi_face_landmarks"
  output_stream: "face_landmarks"
  node_options: {
    [type.googleapis.com/mediapipe.SplitVectorCalculatorOptions] {
      ranges: { begin: 0 end: 1 }
      element_only: true
    }
  }
}

node {
  calculator: "SplitNormalizedRectVectorCalculator"
  input_stream: "face_rects_from_landmarks"
  output_stream: "face_rect"
  node_options: {
    [type.googleapis.com/mediapipe.SplitVectorCalculatorOptions] {
      ranges: { begin: 0 end: 1 }
      element_only: true
    }
  }
}

node {
  calculator: "SplitNormalizedLandmarkListCalculator"
  input_stream: "face_landmarks"
  output_stream: "left_eye_boundary_landmarks"
  node_options: {
    [type.googleapis.com/mediapipe.SplitVectorCalculatorOptions] {
      ranges: { begin: 33 end: 34 }
      ranges: { begin: 133 end: 134 }
      combine_outputs: true
    }
  }
}

node {
  calculator: "SplitNormalizedLandmarkListCalculator"
  input_stream: "face_landmarks"
  output_stream: "right_eye_boundary_landmarks"
  node_options: {
    [type.googleapis.com/mediapipe.SplitVectorCalculatorOptions] {
      ranges: { begin: 362 end: 363 }
      ranges: { begin: 263 end: 264 }
      combine_outputs: true
    }
  }
}

node {
  calculator: "IrisLandmarkLeftAndRightCpu"
  input_stream: "IMAGE:input_video"
  input_stream: "LEFT_EYE_BOUNDARY_LANDMARKS:left_eye_boundary_landmarks"
  input_stream: "RIGHT_EYE_BOUNDARY_LANDMARKS:right_eye_boundary_landmarks"
  output_stream: "LEFT_EYE_CONTOUR_LANDMARKS:left_eye_contour_landmarks"
  output_stream: "LEFT_EYE_IRIS_LANDMARKS:left_iris_landmarks"
  output_stream: "LEFT_EYE_ROI:left_eye_rect_from_landmarks"
  output_stream: "RIGHT_EYE_CONTOUR_LANDMARKS:right_eye_contour_landmarks"
  output_stream: "RIGHT_EYE_IRIS_LANDMARKS:right_iris_landmarks"
  output_stream: "RIGHT_EYE_ROI:right_eye_rect_from_landmarks"
}

node {
  calculator: "ConcatenateNormalizedLandmarkListCalculator"
  input_stream: "left_eye_contour_landmarks"
  input_stream: "right_eye_contour_landmarks"
  output_stream: "refined_eye_landmarks"
}

node {
  calculator: "UpdateFaceLandmarksCalculator"
  input_stream: "NEW_EYE_LANDMARKS:refined_eye_landmarks"
  input_stream: "FACE_LANDMARKS:face_landmarks"
  output_stream: "UPDATED_FACE_LANDMARKS:updated_face_landmarks"
}

node {
  calculator: "IrisRendererCpu"
  input_stream: "IMAGE:input_video"
  input_stream: "FACE_LANDMARKS:updated_face_landmarks"
  input_stream: "EYE_LANDMARKS_LEFT:left_eye_contour_landmarks"
  input_stream: "EYE_LANDMARKS_RIGHT:right_eye_contour_landmarks"
  input_stream: "IRIS_LANDMARKS_LEFT:left_iris_landmarks"
  input_stream: "IRIS_LANDMARKS_RIGHT:right_iris_landmarks"
  input_stream: "NORM_RECT:face_rect"
  input_stream: "LEFT_EYE_RECT:left_eye_rect_from_landmarks"
  input_stream: "RIGHT_EYE_RECT:right_eye_rect_from_landmarks"
  input_stream: "DETECTIONS:face_detections"
  output_stream: "IRIS_LANDMARKS:iris_landmarks"
  output_stream: "IMAGE:output_video"
}

node {
  calculator: "ConcatenateNormalizedLandmarkListCalculator"
  input_stream: "updated_face_landmarks"
  input_stream: "iris_landmarks"
  output_stream: "face_landmarks_with_iris"
}

node {
  calculator: "PacketPresenceCalculator"
  input_stream: "PACKET:face_landmarks_with_iris"
  output_stream: "PRESENCE:face_landmarks_with_iris_presence"
}
--------------------------------------------------------------------------------
/graphs_back/holistic_landmarks.pbtxt:
--------------------------------------------------------------------------------

input_stream: "input_video"
output_stream: "output_video"

output_stream: "POSE_LANDMARKS:pose_landmarks"
output_stream: "WORLD_LANDMARKS:pose_world_landmarks"
output_stream: "LEFT_HAND_LANDMARKS:left_hand_landmarks"
output_stream: "RIGHT_HAND_LANDMARKS:right_hand_landmarks"
output_stream: "FACE_LANDMARKS:face_landmarks"

node {
"FlowLimiterCalculator" 13 | input_stream: "input_video" 14 | input_stream: "FINISHED:pose_landmarks" 15 | input_stream_info: { 16 | tag_index: "FINISHED" 17 | back_edge: true 18 | } 19 | output_stream: "throttled_input_video" 20 | node_options: { 21 | [type.googleapis.com/mediapipe.FlowLimiterCalculatorOptions] { 22 | max_in_flight: 1 23 | max_in_queue: 1 24 | # Timeout is disabled (set to 0) as first frame processing can take more 25 | # than 1 second. 26 | in_flight_timeout: 0 27 | } 28 | } 29 | } 30 | 31 | node { 32 | calculator: "ImagePropertiesCalculator" 33 | input_stream: "IMAGE:throttled_input_video" 34 | output_stream: "SIZE:image_size" 35 | } 36 | 37 | node { 38 | calculator: "HolisticLandmarkCpu" 39 | input_stream: "IMAGE:throttled_input_video" 40 | output_stream: "POSE_LANDMARKS:pose_landmarks" 41 | output_stream: "WORLD_LANDMARKS:pose_world_landmarks" 42 | output_stream: "POSE_ROI:pose_roi" 43 | output_stream: "POSE_DETECTION:pose_detection" 44 | output_stream: "FACE_LANDMARKS:face_landmarks" 45 | output_stream: "LEFT_HAND_LANDMARKS:left_hand_landmarks" 46 | output_stream: "RIGHT_HAND_LANDMARKS:right_hand_landmarks" 47 | } 48 | 49 | node { 50 | calculator: "PacketPresenceCalculator" 51 | input_stream: "PACKET:pose_landmarks" 52 | output_stream: "PRESENCE:pose_landmarks_presence" 53 | } 54 | 55 | node { 56 | calculator: "PacketPresenceCalculator" 57 | input_stream: "PACKET:face_landmarks" 58 | output_stream: "PRESENCE:face_landmarks_presence" 59 | } 60 | 61 | node { 62 | calculator: "PacketPresenceCalculator" 63 | input_stream: "PACKET:left_hand_landmarks" 64 | output_stream: "PRESENCE:left_hand_landmarks_presence" 65 | } 66 | 67 | node { 68 | calculator: "PacketPresenceCalculator" 69 | input_stream: "PACKET:right_hand_landmarks" 70 | output_stream: "PRESENCE:right_hand_landmarks_presence" 71 | } 72 | 73 | node { 74 | calculator: "HolisticTrackingToRenderData" 75 | input_stream: "IMAGE_SIZE:image_size" 76 | input_stream: "POSE_LANDMARKS:pose_landmarks" 77 | input_stream: "POSE_ROI:pose_roi" 78 | input_stream: "LEFT_HAND_LANDMARKS:left_hand_landmarks" 79 | input_stream: "RIGHT_HAND_LANDMARKS:right_hand_landmarks" 80 | input_stream: "FACE_LANDMARKS:face_landmarks" 81 | output_stream: "RENDER_DATA_VECTOR:render_data_vector" 82 | } 83 | 84 | node { 85 | calculator: "AnnotationOverlayCalculator" 86 | input_stream: "IMAGE:throttled_input_video" 87 | input_stream: "VECTOR:render_data_vector" 88 | output_stream: "IMAGE:output_video" 89 | } 90 | -------------------------------------------------------------------------------- /graphs_back/holistic_with_iris.pbtxt: -------------------------------------------------------------------------------- 1 | 2 | input_stream: "input_video" 3 | output_stream: "output_video" 4 | 5 | output_stream: "POSE_LANDMARKS:pose_landmarks" 6 | output_stream: "WORLD_LANDMARKS:pose_world_landmarks" 7 | output_stream: "LEFT_HAND_LANDMARKS:left_hand_landmarks" 8 | output_stream: "RIGHT_HAND_LANDMARKS:right_hand_landmarks" 9 | output_stream: "FACE_LANDMARKS:face_landmarks" 10 | output_stream: "IRIS_LANDMARKS:face_landmarks_with_iris" 11 | 12 | ## INPUT ## 13 | 14 | node { 15 | calculator: "FlowLimiterCalculator" 16 | input_stream: "input_video" 17 | input_stream: "FINISHED:face_landmarks_with_iris" 18 | input_stream_info: { 19 | tag_index: "FINISHED" 20 | back_edge: true 21 | } 22 | output_stream: "throttled_input_video" 23 | node_options: { 24 | [type.googleapis.com/mediapipe.FlowLimiterCalculatorOptions] { 25 | max_in_flight: 1 26 | max_in_queue: 1 27 | # Timeout 
      # Timeout is disabled (set to 0) as first frame processing can take more
      # than 1 second.
      in_flight_timeout: 0
    }
  }
}

node {
  calculator: "ImagePropertiesCalculator"
  input_stream: "IMAGE:throttled_input_video"
  output_stream: "SIZE:image_size"
}

## HOLISTIC ##

node {
  calculator: "HolisticLandmarkCpu"
  input_stream: "IMAGE:throttled_input_video"
  output_stream: "POSE_LANDMARKS:pose_landmarks"
  output_stream: "WORLD_LANDMARKS:pose_world_landmarks"
  output_stream: "POSE_ROI:pose_roi"
  output_stream: "POSE_DETECTION:pose_detection"
  output_stream: "FACE_LANDMARKS:face_landmarks"
  output_stream: "LEFT_HAND_LANDMARKS:left_hand_landmarks"
  output_stream: "RIGHT_HAND_LANDMARKS:right_hand_landmarks"
}

node {
  calculator: "PacketPresenceCalculator"
  input_stream: "PACKET:pose_landmarks"
  output_stream: "PRESENCE:pose_landmarks_presence"
}

node {
  calculator: "PacketPresenceCalculator"
  input_stream: "PACKET:face_landmarks"
  output_stream: "PRESENCE:face_landmarks_presence"
}

node {
  calculator: "PacketPresenceCalculator"
  input_stream: "PACKET:left_hand_landmarks"
  output_stream: "PRESENCE:left_hand_landmarks_presence"
}

node {
  calculator: "PacketPresenceCalculator"
  input_stream: "PACKET:right_hand_landmarks"
  output_stream: "PRESENCE:right_hand_landmarks_presence"
}

node {
  calculator: "HolisticTrackingToRenderData"
  input_stream: "IMAGE_SIZE:image_size"
  input_stream: "POSE_LANDMARKS:pose_landmarks"
  input_stream: "POSE_ROI:pose_roi"
  input_stream: "LEFT_HAND_LANDMARKS:left_hand_landmarks"
  input_stream: "RIGHT_HAND_LANDMARKS:right_hand_landmarks"
  input_stream: "FACE_LANDMARKS:face_landmarks"
  output_stream: "RENDER_DATA_VECTOR:render_data_vector"
}

node {
  calculator: "AnnotationOverlayCalculator"
  input_stream: "IMAGE:throttled_input_video"
  input_stream: "VECTOR:render_data_vector"
  output_stream: "IMAGE:output_video_holistic"
}

## IRIS ##

node {
  calculator: "FaceLandmarksFromPoseToRecropRoi"
  input_stream: "FACE_LANDMARKS_FROM_POSE:face_landmarks"
  input_stream: "IMAGE_SIZE:image_size"
  output_stream: "ROI:face_roi_from_pose"
}

node {
  calculator: "FaceDetectionShortRangeByRoiCpu"
  input_stream: "IMAGE:throttled_input_video"
  input_stream: "ROI:face_roi_from_pose"
  output_stream: "DETECTIONS:face_detections"
}

node {
  calculator: "SplitNormalizedLandmarkListCalculator"
  input_stream: "face_landmarks"
  output_stream: "left_eye_boundary_landmarks"
  node_options: {
    [type.googleapis.com/mediapipe.SplitVectorCalculatorOptions] {
      ranges: { begin: 33 end: 34 }
      ranges: { begin: 133 end: 134 }
      combine_outputs: true
    }
  }
}

node {
  calculator: "SplitNormalizedLandmarkListCalculator"
  input_stream: "face_landmarks"
  output_stream: "right_eye_boundary_landmarks"
  node_options: {
    [type.googleapis.com/mediapipe.SplitVectorCalculatorOptions] {
      ranges: { begin: 362 end: 363 }
      ranges: { begin: 263 end: 264 }
      combine_outputs: true
    }
  }
}

node {
  calculator: "IrisLandmarkLeftAndRightCpu"
  input_stream: "IMAGE:input_video"
"LEFT_EYE_BOUNDARY_LANDMARKS:left_eye_boundary_landmarks" 142 | input_stream: "RIGHT_EYE_BOUNDARY_LANDMARKS:right_eye_boundary_landmarks" 143 | output_stream: "LEFT_EYE_CONTOUR_LANDMARKS:left_eye_contour_landmarks" 144 | output_stream: "LEFT_EYE_IRIS_LANDMARKS:left_iris_landmarks" 145 | output_stream: "LEFT_EYE_ROI:left_eye_rect_from_landmarks" 146 | output_stream: "RIGHT_EYE_CONTOUR_LANDMARKS:right_eye_contour_landmarks" 147 | output_stream: "RIGHT_EYE_IRIS_LANDMARKS:right_iris_landmarks" 148 | output_stream: "RIGHT_EYE_ROI:right_eye_rect_from_landmarks" 149 | } 150 | 151 | node { 152 | calculator: "ConcatenateNormalizedLandmarkListCalculator" 153 | input_stream: "left_eye_contour_landmarks" 154 | input_stream: "right_eye_contour_landmarks" 155 | output_stream: "refined_eye_landmarks" 156 | } 157 | 158 | node { 159 | calculator: "UpdateFaceLandmarksCalculator" 160 | input_stream: "NEW_EYE_LANDMARKS:refined_eye_landmarks" 161 | input_stream: "FACE_LANDMARKS:face_landmarks" 162 | output_stream: "UPDATED_FACE_LANDMARKS:updated_face_landmarks" 163 | } 164 | 165 | node { 166 | calculator: "IrisRendererCpu" 167 | input_stream: "IMAGE:output_video_holistic" 168 | input_stream: "FACE_LANDMARKS:updated_face_landmarks" 169 | input_stream: "EYE_LANDMARKS_LEFT:left_eye_contour_landmarks" 170 | input_stream: "EYE_LANDMARKS_RIGHT:right_eye_contour_landmarks" 171 | input_stream: "IRIS_LANDMARKS_LEFT:left_iris_landmarks" 172 | input_stream: "IRIS_LANDMARKS_RIGHT:right_iris_landmarks" 173 | input_stream: "NORM_RECT:face_roi_from_pose" 174 | input_stream: "LEFT_EYE_RECT:left_eye_rect_from_landmarks" 175 | input_stream: "RIGHT_EYE_RECT:right_eye_rect_from_landmarks" 176 | input_stream: "DETECTIONS:face_detections" 177 | output_stream: "IRIS_LANDMARKS:iris_landmarks" 178 | output_stream: "IMAGE:output_video" 179 | } 180 | 181 | node { 182 | calculator: "ConcatenateNormalizedLandmarkListCalculator" 183 | input_stream: "updated_face_landmarks" 184 | input_stream: "iris_landmarks" 185 | output_stream: "face_landmarks_with_iris" 186 | } 187 | 188 | node { 189 | calculator: "PacketPresenceCalculator" 190 | input_stream: "PACKET:face_landmarks_with_iris" 191 | output_stream: "PRESENCE:face_landmarks_with_iris_presence" 192 | } 193 | -------------------------------------------------------------------------------- /graphs_back/multi_face_geometry.pbtxt: -------------------------------------------------------------------------------- 1 | 2 | output_stream: "multi_face_geometry" 3 | output_stream: "multi_face_geometry_presence" 4 | 5 | node { 6 | calculator: "FaceGeometryEnvGeneratorCalculator" 7 | output_side_packet: "ENVIRONMENT:environment" 8 | node_options: { 9 | [type.googleapis.com/mediapipe.FaceGeometryEnvGeneratorCalculatorOptions] { 10 | environment: { 11 | origin_point_location: TOP_LEFT_CORNER 12 | perspective_camera: { 13 | vertical_fov_degrees: 63.0 14 | near: 1.0 # 1cm 15 | far: 10000.0 # 100m 16 | } 17 | } 18 | } 19 | } 20 | } 21 | 22 | node { 23 | calculator: "FaceGeometryFromLandmarks" 24 | input_stream: "IMAGE_SIZE:input_image_size" 25 | input_side_packet: "ENVIRONMENT:environment" 26 | input_stream: "MULTI_FACE_LANDMARKS:multi_face_landmarks" 27 | output_stream: "MULTI_FACE_GEOMETRY:multi_face_geometry" 28 | } 29 | 30 | node { 31 | calculator: "PacketPresenceCalculator" 32 | input_stream: "PACKET:multi_face_geometry" 33 | output_stream: "PRESENCE:multi_face_geometry_presence" 34 | } 35 | -------------------------------------------------------------------------------- 
/graphs_back/multi_face_landmarks.pbtxt:
--------------------------------------------------------------------------------

input_stream: "input_video"
output_stream: "output_video"

output_stream: "multi_face_landmarks"
output_stream: "multi_face_landmarks_presence"

node {
  calculator: "FlowLimiterCalculator"
  input_stream: "input_video"
  input_stream: "FINISHED:multi_face_landmarks"
  input_stream_info: {
    tag_index: "FINISHED"
    back_edge: true
  }
  output_stream: "throttled_input_video"
}

node {
  calculator: "ImagePropertiesCalculator"
  input_stream: "IMAGE:throttled_input_video"
  output_stream: "SIZE:input_image_size"
}

node {
  calculator: "ConstantSidePacketCalculator"
  output_side_packet: "PACKET:num_faces"
  node_options: {
    [type.googleapis.com/mediapipe.ConstantSidePacketCalculatorOptions]: {
      packet { int_value: 1 }
    }
  }
}

node {
  calculator: "FaceLandmarkFrontCpu"
  input_stream: "IMAGE:throttled_input_video"
  input_side_packet: "NUM_FACES:num_faces"
  output_stream: "LANDMARKS:multi_face_landmarks"
  output_stream: "ROIS_FROM_LANDMARKS:face_rects_from_landmarks"
  output_stream: "DETECTIONS:face_detections"
  output_stream: "ROIS_FROM_DETECTIONS:face_rects_from_detections"
}

node {
  calculator: "PacketPresenceCalculator"
  input_stream: "PACKET:multi_face_landmarks"
  output_stream: "PRESENCE:multi_face_landmarks_presence"
}

node {
  calculator: "FaceRendererCpu"
  input_stream: "IMAGE:throttled_input_video"
  input_stream: "LANDMARKS:multi_face_landmarks"
  input_stream: "NORM_RECTS:face_rects_from_landmarks"
  input_stream: "DETECTIONS:face_detections"
  output_stream: "IMAGE:output_video"
}
--------------------------------------------------------------------------------
/graphs_back/multi_hand_landmarks.pbtxt:
--------------------------------------------------------------------------------

input_stream: "input_video"
output_stream: "output_video"

output_stream: "multi_hand_landmarks"
output_stream: "multi_hand_landmarks_presence"
output_stream: "multi_handedness"
output_stream: "multi_handedness_presence"

node {
  calculator: "FlowLimiterCalculator"
  input_stream: "input_video"
  input_stream: "FINISHED:multi_hand_landmarks"
  input_stream_info: {
    tag_index: "FINISHED"
    back_edge: true
  }
  output_stream: "throttled_input_video"
}

node {
  calculator: "ImagePropertiesCalculator"
  input_stream: "IMAGE:throttled_input_video"
  output_stream: "SIZE:input_image_size"
}

node {
  calculator: "ConstantSidePacketCalculator"
  output_side_packet: "PACKET:num_hands"
  node_options: {
    [type.googleapis.com/mediapipe.ConstantSidePacketCalculatorOptions]: {
      packet { int_value: 2 }
    }
  }
}

node {
  calculator: "HandLandmarkTrackingCpu"
  input_stream: "IMAGE:throttled_input_video"
  input_side_packet: "NUM_HANDS:num_hands"
  output_stream: "LANDMARKS:multi_hand_landmarks"
  output_stream: "HANDEDNESS:multi_handedness"
  output_stream: "PALM_DETECTIONS:multi_palm_detections"
  output_stream: "HAND_ROIS_FROM_LANDMARKS:multi_hand_rects"
  output_stream: "HAND_ROIS_FROM_PALM_DETECTIONS:multi_palm_rects"
}

node {
  calculator: "PacketPresenceCalculator"
"PACKET:multi_hand_landmarks" 51 | output_stream: "PRESENCE:multi_hand_landmarks_presence" 52 | } 53 | 54 | node { 55 | calculator: "PacketPresenceCalculator" 56 | input_stream: "PACKET:multi_handedness" 57 | output_stream: "PRESENCE:multi_handedness_presence" 58 | } 59 | 60 | node { 61 | calculator: "HandRendererSubgraph" 62 | input_stream: "IMAGE:throttled_input_video" 63 | input_stream: "DETECTIONS:multi_palm_detections" 64 | input_stream: "LANDMARKS:multi_hand_landmarks" 65 | input_stream: "HANDEDNESS:multi_handedness" 66 | input_stream: "NORM_RECTS:0:multi_palm_rects" 67 | input_stream: "NORM_RECTS:1:multi_hand_rects" 68 | output_stream: "IMAGE:output_video" 69 | } 70 | -------------------------------------------------------------------------------- /graphs_back/objectron_landmarks.pbtxt: -------------------------------------------------------------------------------- 1 | input_stream: "input_video" 2 | output_stream: "output_video" 3 | 4 | output_stream: "objectron_landmarks" 5 | output_stream: "objectron_rects" 6 | 7 | ### CONFIG ### 8 | 9 | node { 10 | calculator: "ConstantSidePacketCalculator" 11 | output_side_packet: "PACKET:max_num_objects" 12 | node_options: { 13 | [type.googleapis.com/mediapipe.ConstantSidePacketCalculatorOptions]: { 14 | packet { int_value: 5 } 15 | } 16 | } 17 | } 18 | 19 | node { 20 | calculator: "ConstantSidePacketCalculator" 21 | output_side_packet: "PACKET:landmark_model_path" 22 | node_options: { 23 | [type.googleapis.com/mediapipe.ConstantSidePacketCalculatorOptions]: { 24 | packet { string_value: "mediapipe/modules/objectron/object_detection_3d_cup.tflite" } 25 | } 26 | } 27 | } 28 | 29 | node { 30 | calculator: "ConstantSidePacketCalculator" 31 | output_side_packet: "PACKET:allowed_labels" 32 | node_options: { 33 | [type.googleapis.com/mediapipe.ConstantSidePacketCalculatorOptions]: { 34 | packet { string_value: "Mug" } 35 | } 36 | } 37 | } 38 | 39 | #### 40 | 41 | node { 42 | calculator: "FlowLimiterCalculator" 43 | input_stream: "input_video" 44 | input_stream: "FINISHED:objectron_landmarks" 45 | input_stream_info: { 46 | tag_index: "FINISHED" 47 | back_edge: true 48 | } 49 | output_stream: "throttled_input_video" 50 | } 51 | 52 | node { 53 | calculator: "ImagePropertiesCalculator" 54 | input_stream: "IMAGE:throttled_input_video" 55 | output_stream: "SIZE:input_image_size" 56 | } 57 | 58 | node { 59 | calculator: "ObjectronCpuSubgraph" 60 | input_stream: "IMAGE:throttled_input_video" 61 | input_side_packet: "MODEL_PATH:landmark_model_path" 62 | input_side_packet: "LABELS_CSV:allowed_labels" 63 | input_side_packet: "MAX_NUM_OBJECTS:max_num_objects" 64 | output_stream: "MULTI_LANDMARKS:objectron_landmarks" 65 | output_stream: "NORM_RECTS:objectron_rects" 66 | } 67 | 68 | node { 69 | calculator: "PacketPresenceCalculator" 70 | input_stream: "PACKET:objectron_landmarks" 71 | output_stream: "PRESENCE:objectron_landmarks_presence" 72 | } 73 | 74 | node { 75 | calculator: "RendererSubgraph" 76 | input_stream: "IMAGE:throttled_input_video" 77 | input_stream: "MULTI_LANDMARKS:objectron_landmarks" 78 | input_stream: "NORM_RECTS:objectron_rects" 79 | output_stream: "IMAGE:output_video" 80 | } 81 | -------------------------------------------------------------------------------- /graphs_back/pose_landmarks.pbtxt: -------------------------------------------------------------------------------- 1 | 2 | input_stream: "input_video" 3 | output_stream: "output_video" 4 | 5 | output_stream: "pose_landmarks" 6 | output_stream: "pose_world_landmarks" 7 | 8 | node { 9 | 
calculator: "FlowLimiterCalculator" 10 | input_stream: "input_video" 11 | input_stream: "FINISHED:pose_landmarks" 12 | input_stream_info: { 13 | tag_index: "FINISHED" 14 | back_edge: true 15 | } 16 | output_stream: "throttled_input_video" 17 | } 18 | 19 | node { 20 | calculator: "ImagePropertiesCalculator" 21 | input_stream: "IMAGE:throttled_input_video" 22 | output_stream: "SIZE:input_image_size" 23 | } 24 | 25 | node { 26 | calculator: "PoseLandmarkCpu" 27 | input_stream: "IMAGE:throttled_input_video" 28 | output_stream: "LANDMARKS:pose_landmarks" 29 | output_stream: "DETECTION:pose_detections" 30 | output_stream: "ROI_FROM_LANDMARKS:pose_rects_from_landmarks" 31 | output_stream: "WORLD_LANDMARKS:pose_world_landmarks" 32 | } 33 | 34 | node { 35 | calculator: "PacketPresenceCalculator" 36 | input_stream: "PACKET:pose_landmarks" 37 | output_stream: "PRESENCE:pose_landmarks_presence" 38 | } 39 | 40 | node { 41 | calculator: "PacketPresenceCalculator" 42 | input_stream: "PACKET:pose_world_landmarks" 43 | output_stream: "PRESENCE:pose_world_landmarks_presence" 44 | } 45 | 46 | node { 47 | calculator: "PoseRendererCpu" 48 | input_stream: "IMAGE:throttled_input_video" 49 | input_stream: "LANDMARKS:pose_landmarks" 50 | input_stream: "ROI:pose_rects_from_landmarks" 51 | input_stream: "DETECTION:pose_detections" 52 | output_stream: "IMAGE:output_video" 53 | } 54 | -------------------------------------------------------------------------------- /landmarks_packet.cc: -------------------------------------------------------------------------------- 1 | #include "landmarks_packet.h" 2 | #include "packet.h" 3 | #include "mediapipe/framework/formats/landmark.pb.h" 4 | 5 | int mp_Packet__GetLandmarkList(void* packet, SerializedProto* value_out) 6 | { 7 | auto p = (mediapipe::Packet*)packet; 8 | return mp_Packet__GetSerializedProto(p, value_out); 9 | } 10 | 11 | int mp_Packet__GetLandmarkListVector(void* packet, SerializedProtoArray* value_out) 12 | { 13 | auto p = (mediapipe::Packet*)packet; 14 | return mp_Packet__GetSerializedProtoVector(p, value_out); 15 | } 16 | 17 | int mp_Packet__GetNormalizedLandmarkList(void* packet, SerializedProto* value_out) 18 | { 19 | auto p = (mediapipe::Packet*)packet; 20 | return mp_Packet__GetSerializedProto(p, value_out); 21 | } 22 | 23 | int mp_Packet__GetNormalizedLandmarkListVector(void* packet, SerializedProtoArray* value_out) 24 | { 25 | auto p = (mediapipe::Packet*)packet; 26 | return mp_Packet__GetSerializedProtoVector(p, value_out); 27 | } 28 | -------------------------------------------------------------------------------- /landmarks_packet.h: -------------------------------------------------------------------------------- 1 | #ifndef LANDMARK_PACKET_H_ 2 | #define LANDMARK_PACKET_H_ 3 | #include "ump_commons.h" 4 | 5 | extern "C" { 6 | 7 | MP_API int mp_Packet__GetLandmarkList(void* packet, SerializedProto* value_out); 8 | MP_API int mp_Packet__GetLandmarkListVector(void* packet, SerializedProtoArray* value_out); 9 | MP_API int mp_Packet__GetNormalizedLandmarkList(void* packet, SerializedProto* value_out); 10 | MP_API int mp_Packet__GetNormalizedLandmarkListVector(void* packet, SerializedProtoArray* value_out); 11 | 12 | } // extern "C" 13 | 14 | #endif // LANDMARK_PACKET_H_ -------------------------------------------------------------------------------- /packet.cc: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021 homuler 2 | // 3 | // Use of this source code is governed by an MIT-style 4 | // license 
-------------------------------------------------------------------------------- /packet.cc: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021 homuler 2 | // 3 | // Use of this source code is governed by an MIT-style 4 | // license that can be found in the LICENSE file or at 5 | // https://opensource.org/licenses/MIT. 6 | 7 | #include "packet.h" 8 | #include "ump_shared.h" 9 | 10 | #include <cstring> 11 | #include <string> 12 | 13 | 14 | void mp_Packet__delete(mediapipe::Packet* packet) { delete packet; } 15 | 16 | mediapipe::Packet* mp_Packet__At__Rt(mediapipe::Packet* packet, mediapipe::Timestamp* timestamp) { 17 | TRY 18 | // not move but copy 19 | return new mediapipe::Packet{packet->At(*timestamp)}; 20 | CATCH_ONLY 21 | return nullptr; 22 | } 23 | 24 | bool mp_Packet__IsEmpty(mediapipe::Packet* packet) { return packet->IsEmpty(); } 25 | 26 | int mp_Packet__ValidateAsProtoMessageLite(mediapipe::Packet* packet, absl::Status** status_out) { 27 | TRY 28 | *status_out = new absl::Status{packet->ValidateAsProtoMessageLite()}; 29 | CATCH_EXCEPTION 30 | } 31 | 32 | int mp_Packet__Timestamp(mediapipe::Packet* packet, mediapipe::Timestamp** timestamp_out) { 33 | TRY 34 | *timestamp_out = new mediapipe::Timestamp{packet->Timestamp()}; 35 | CATCH_EXCEPTION 36 | } 37 | 38 | int mp_Packet__DebugString(mediapipe::Packet* packet, const char** str_out) { 39 | TRY 40 | *str_out = strcpy_to_heap(packet->DebugString()); 41 | CATCH_EXCEPTION 42 | } 43 | 44 | int mp_Packet__RegisteredTypeName(mediapipe::Packet* packet, const char** str_out) { 45 | TRY 46 | *str_out = strcpy_to_heap(packet->RegisteredTypeName()); 47 | CATCH_EXCEPTION 48 | } 49 | 50 | int mp_Packet__DebugTypeName(mediapipe::Packet* packet, const char** str_out) { 51 | TRY 52 | *str_out = strcpy_to_heap(packet->DebugTypeName()); 53 | CATCH_EXCEPTION 54 | } 55 | 56 | // BoolPacket 57 | mediapipe::Packet* mp__MakeBoolPacket__b(bool value) { 58 | TRY 59 | return new mediapipe::Packet{mediapipe::MakePacket<bool>(value)}; 60 | CATCH_ONLY 61 | return nullptr; 62 | } 63 | 64 | mediapipe::Packet* mp__MakeBoolPacket_At__b_Rt(bool value, mediapipe::Timestamp* timestamp) { 65 | TRY 66 | return new mediapipe::Packet{mediapipe::MakePacket<bool>(value).At(*timestamp)}; 67 | CATCH_ONLY 68 | return nullptr; 69 | } 70 | 71 | int mp_Packet__GetBool(mediapipe::Packet* packet, bool* value_out) { 72 | TRY 73 | *value_out = packet->Get<bool>(); 74 | CATCH_EXCEPTION 75 | } 76 | 77 | int mp_Packet__ValidateAsBool(mediapipe::Packet* packet, absl::Status** status_out) { 78 | TRY 79 | *status_out = new absl::Status{packet->ValidateAsType<bool>()}; 80 | CATCH_EXCEPTION 81 | } 82 | 83 | // FloatPacket 84 | mediapipe::Packet* mp__MakeFloatPacket__f(float value) { 85 | TRY 86 | return new mediapipe::Packet{mediapipe::MakePacket<float>(value)}; 87 | CATCH_ONLY 88 | return nullptr; 89 | } 90 | 91 | mediapipe::Packet* mp__MakeFloatPacket_At__f_Rt(float value, mediapipe::Timestamp* timestamp) { 92 | TRY 93 | return new mediapipe::Packet{mediapipe::MakePacket<float>(value).At(*timestamp)}; 94 | CATCH_ONLY 95 | return nullptr; 96 | } 97 | 98 | int mp_Packet__GetFloat(mediapipe::Packet* packet, float* value_out) { 99 | TRY 100 | *value_out = packet->Get<float>(); 101 | CATCH_EXCEPTION 102 | } 103 | 104 | int mp_Packet__ValidateAsFloat(mediapipe::Packet* packet, absl::Status** status_out) { 105 | TRY 106 | *status_out = new absl::Status{packet->ValidateAsType<float>()}; 107 | CATCH_EXCEPTION 108 | } 109 | 110 | // IntPacket 111 | mediapipe::Packet* mp__MakeIntPacket__i(int value) { 112 | TRY 113 | return new mediapipe::Packet{mediapipe::MakePacket<int>(value)}; 114 | CATCH_ONLY 115 | return nullptr; 116 | } 117 | 118 | mediapipe::Packet* mp__MakeIntPacket_At__i_Rt(int value, mediapipe::Timestamp* timestamp) { 119 | TRY 120 | return new mediapipe::Packet{mediapipe::MakePacket<int>(value).At(*timestamp)};
121 | CATCH_ONLY 122 | return nullptr; 123 | } 124 | 125 | int mp_Packet__GetInt(mediapipe::Packet* packet, int* value_out) { 126 | TRY 127 | *value_out = packet->Get<int>(); 128 | CATCH_EXCEPTION 129 | } 130 | 131 | int mp_Packet__ValidateAsInt(mediapipe::Packet* packet, absl::Status** status_out) { 132 | TRY 133 | *status_out = new absl::Status{packet->ValidateAsType<int>()}; 134 | CATCH_EXCEPTION 135 | } 136 | 137 | // FloatArrayPacket 138 | mediapipe::Packet* mp__MakeFloatArrayPacket__Pf_i(float* value, int size) { 139 | TRY 140 | float* array = new float[size]; 141 | std::memcpy(array, value, size * sizeof(float)); 142 | return new mediapipe::Packet{mediapipe::Adopt(reinterpret_cast<float(*)[]>(array))}; 143 | CATCH_ONLY 144 | return nullptr; 145 | } 146 | 147 | mediapipe::Packet* mp__MakeFloatArrayPacket_At__Pf_i_Rt(float* value, int size, mediapipe::Timestamp* timestamp) { 148 | TRY 149 | float* array = new float[size]; 150 | std::memcpy(array, value, size * sizeof(float)); 151 | return new mediapipe::Packet{mediapipe::Adopt(reinterpret_cast<float(*)[]>(array)).At(*timestamp)}; 152 | CATCH_ONLY 153 | return nullptr; 154 | } 155 | 156 | int mp_Packet__GetFloatArray(mediapipe::Packet* packet, const float** value_out) { 157 | TRY 158 | *value_out = packet->Get<float[]>(); 159 | CATCH_EXCEPTION 160 | } 161 | 162 | int mp_Packet__ValidateAsFloatArray(mediapipe::Packet* packet, absl::Status** status_out) { 163 | TRY 164 | *status_out = new absl::Status{packet->ValidateAsType<float[]>()}; 165 | CATCH_EXCEPTION 166 | } 167 | 168 | // StringPacket 169 | mediapipe::Packet* mp__MakeStringPacket__PKc(const char* str) { 170 | TRY 171 | return new mediapipe::Packet{mediapipe::MakePacket<std::string>(std::string(str))}; 172 | CATCH_ONLY 173 | return nullptr; 174 | } 175 | 176 | mediapipe::Packet* mp__MakeStringPacket_At__PKc_Rt(const char* str, mediapipe::Timestamp* timestamp) { 177 | TRY 178 | return new mediapipe::Packet{mediapipe::MakePacket<std::string>(std::string(str)).At(*timestamp)}; 179 | CATCH_ONLY 180 | return nullptr; 181 | } 182 | 183 | mediapipe::Packet* mp__MakeStringPacket__PKc_i(const char* str, int size) { 184 | TRY 185 | return new mediapipe::Packet{mediapipe::MakePacket<std::string>(std::string(str, size))}; 186 | CATCH_ONLY 187 | return nullptr; 188 | } 189 | 190 | mediapipe::Packet* mp__MakeStringPacket_At__PKc_i_Rt(const char* str, int size, mediapipe::Timestamp* timestamp) { 191 | TRY 192 | return new mediapipe::Packet{mediapipe::MakePacket<std::string>(std::string(str, size)).At(*timestamp)}; 193 | CATCH_ONLY 194 | return nullptr; 195 | } 196 | 197 | int mp_Packet__GetString(mediapipe::Packet* packet, const char** value_out) { 198 | TRY 199 | *value_out = strcpy_to_heap(packet->Get<std::string>()); 200 | CATCH_EXCEPTION 201 | } 202 | 203 | int mp_Packet__GetByteString(mediapipe::Packet* packet, const char** value_out, int* size_out) { 204 | TRY 205 | auto& str = packet->Get<std::string>(); 206 | auto length = str.size(); 207 | auto bytes = new char[length]; 208 | memcpy(bytes, str.c_str(), length); 209 | 210 | *value_out = bytes; 211 | *size_out = static_cast<int>(length); 212 | CATCH_EXCEPTION 213 | } 214 | 215 | int mp_Packet__ConsumeString(mediapipe::Packet* packet, absl::StatusOr<std::string>** status_or_value_out) { 216 | TRY 217 | auto status_or_string = packet->Consume<std::string>(); 218 | 219 | if (status_or_string.ok()) { 220 | *status_or_value_out = new absl::StatusOr<std::string>{std::move(*status_or_string.value())}; // move out of the unique_ptr instead of release()ing (and leaking) it 221 | } else { 222 | *status_or_value_out = new absl::StatusOr<std::string>{status_or_string.status()}; 223 | } 224 | CATCH_EXCEPTION 225 | } 226 |
227 | int mp_Packet__ValidateAsString(mediapipe::Packet* packet, absl::Status** status_out) { 228 | TRY 229 | *status_out = new absl::Status{packet->ValidateAsType<std::string>()}; 230 | CATCH_EXCEPTION 231 | } 232 | 233 | 234 | void mp_SidePacket__delete(SidePacket* side_packet) { delete side_packet; } 235 | 236 | int mp_SidePacket__emplace__PKc_Rp(SidePacket* side_packet, const char* key, mediapipe::Packet* packet) { 237 | TRY 238 | side_packet->emplace(std::string(key), std::move(*packet)); 239 | CATCH_EXCEPTION 240 | } 241 | 242 | int mp_SidePacket__at__PKc(SidePacket* side_packet, const char* key, mediapipe::Packet** packet_out) { 243 | TRY 244 | auto packet = side_packet->at(std::string(key)); 245 | // copy 246 | *packet_out = new mediapipe::Packet{packet}; 247 | CATCH_EXCEPTION 248 | } 249 | 250 | int mp_SidePacket__erase__PKc(SidePacket* side_packet, const char* key, int* count_out) { 251 | TRY 252 | *count_out = static_cast<int>(side_packet->erase(std::string(key))); 253 | CATCH_EXCEPTION 254 | } 255 | 256 | void mp_SidePacket__clear(SidePacket* side_packet) { side_packet->clear(); } 257 | 258 | int mp_SidePacket__size(SidePacket* side_packet) { return static_cast<int>(side_packet->size()); } 259 |
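Usage note: mp_SidePacket__emplace__PKc_Rp moves the payload out of the packet wrapper, so the heap wrapper itself must still be deleted by the caller; PacketAPI::SidePacketAddInt (packet_api.cc) follows exactly this pattern. A minimal sketch ("num_faces" is an illustrative key, not one this repo defines):

#include "packet.h"

void BuildSidePacket() {
  SidePacket* sp = new SidePacket();
  mediapipe::Packet* p = mp__MakeIntPacket__i(2);      // heap-allocated wrapper
  mp_SidePacket__emplace__PKc_Rp(sp, "num_faces", p);  // moves the payload into the map
  mp_Packet__delete(p);                                // the wrapper is still owned by the caller
  mp_SidePacket__delete(sp);
}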
-------------------------------------------------------------------------------- /packet.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021 homuler 2 | // 3 | // Use of this source code is governed by an MIT-style 4 | // license that can be found in the LICENSE file or at 5 | // https://opensource.org/licenses/MIT. 6 | 7 | #ifndef MP_PACKET_H_ 8 | #define MP_PACKET_H_ 9 | 10 | #include <map> 11 | #include <memory> 12 | #include <string> 13 | #include <utility> 14 | #include <vector> 15 | 16 | #include "mediapipe/framework/packet.h" 17 | #include "ump_commons.h" 18 | #include "ump_shared.h" 19 | #include "protobuf.h" 20 | 21 | 22 | 23 | template <typename T> 24 | inline int mp_Packet__Consume(mediapipe::Packet* packet, absl::StatusOr<T>** status_or_value_out) { 25 | TRY 26 | auto status_or_unique_ptr = packet->Consume<T>(); 27 | 28 | if (status_or_unique_ptr.ok()) { 29 | *status_or_value_out = new absl::StatusOr<T>{ std::move(*status_or_unique_ptr.value()) }; // move out of the unique_ptr instead of release()ing (and leaking) it 30 | } 31 | else { 32 | *status_or_value_out = new absl::StatusOr<T>{ status_or_unique_ptr.status() }; 33 | } 34 | CATCH_EXCEPTION 35 | } 36 | 37 | 38 | 39 | template <typename T> 40 | inline int mp_Packet__GetSerializedProto(mediapipe::Packet* packet, SerializedProto* value_out) { 41 | TRY 42 | const auto& proto = packet->Get<T>(); 43 | SerializeProto(proto, value_out); 44 | CATCH_EXCEPTION 45 | } 46 | 47 | template <typename T> 48 | inline int mp_Packet__GetSerializedProtoVector(mediapipe::Packet* packet, SerializedProtoArray* value_out) { 49 | TRY 50 | const auto& proto_vec = packet->Get<std::vector<T>>(); 51 | SerializeProtoVector(proto_vec, value_out); 52 | CATCH_EXCEPTION 53 | } 54 | 55 | extern "C" { 56 | typedef std::map<std::string, mediapipe::Packet> SidePacket; 57 | 58 | /** mediapipe::Packet API */ 59 | MP_API void mp_Packet__delete(mediapipe::Packet* packet); 60 | MP_API mediapipe::Packet* mp_Packet__At__Rt(mediapipe::Packet* packet, mediapipe::Timestamp* timestamp); // matches the definition in packet.cc 61 | MP_API bool mp_Packet__IsEmpty(mediapipe::Packet* packet); 62 | MP_API int mp_Packet__ValidateAsProtoMessageLite(mediapipe::Packet* packet, absl::Status** status_out); 63 | MP_API int mp_Packet__Timestamp(mediapipe::Packet* packet, mediapipe::Timestamp** timestamp_out); 64 | MP_API int mp_Packet__DebugString(mediapipe::Packet* packet, const char** str_out); 65 | MP_API int mp_Packet__RegisteredTypeName(mediapipe::Packet* packet, const char** str_out); 66 | MP_API int mp_Packet__DebugTypeName(mediapipe::Packet* packet, const char** str_out); 67 | 68 | // Boolean 69 | MP_API mediapipe::Packet* mp__MakeBoolPacket__b(bool value); 70 | MP_API mediapipe::Packet* mp__MakeBoolPacket_At__b_Rt(bool value, mediapipe::Timestamp* timestamp); 71 | MP_API int mp_Packet__GetBool(mediapipe::Packet* packet, bool* value_out); 72 | MP_API int mp_Packet__ValidateAsBool(mediapipe::Packet* packet, absl::Status** status_out); 73 | 74 | // Float 75 | MP_API mediapipe::Packet* mp__MakeFloatPacket__f(float value); 76 | MP_API mediapipe::Packet* mp__MakeFloatPacket_At__f_Rt(float value, mediapipe::Timestamp* timestamp); 77 | MP_API int mp_Packet__GetFloat(mediapipe::Packet* packet, float* value_out); 78 | MP_API int mp_Packet__ValidateAsFloat(mediapipe::Packet* packet, absl::Status** status_out); 79 | 80 | // Int 81 | MP_API mediapipe::Packet* mp__MakeIntPacket__i(int value); 82 | MP_API mediapipe::Packet* mp__MakeIntPacket_At__i_Rt(int value, mediapipe::Timestamp* timestamp); // matches the definition in packet.cc 83 | MP_API int mp_Packet__GetInt(mediapipe::Packet* packet, int* value_out); 84 | MP_API int mp_Packet__ValidateAsInt(mediapipe::Packet* packet, absl::Status** status_out); 85 | 86 | // Float Array 87 | MP_API mediapipe::Packet* mp__MakeFloatArrayPacket__Pf_i(float* value, int size); 88 | MP_API mediapipe::Packet* mp__MakeFloatArrayPacket_At__Pf_i_Rt(float* value, int size, mediapipe::Timestamp* timestamp); 89 | MP_API int mp_Packet__GetFloatArray(mediapipe::Packet* packet, const float** value_out); 90 | MP_API int mp_Packet__ValidateAsFloatArray(mediapipe::Packet* packet, absl::Status** status_out); 91 | 92 | // String 93 | MP_API mediapipe::Packet* mp__MakeStringPacket__PKc(const char* str); 94 | MP_API mediapipe::Packet* mp__MakeStringPacket_At__PKc_Rt(const char* str, mediapipe::Timestamp* timestamp); 95 | MP_API mediapipe::Packet* mp__MakeStringPacket__PKc_i(const char* str, int size); 96 | MP_API mediapipe::Packet* mp__MakeStringPacket_At__PKc_i_Rt(const char* str, int size, mediapipe::Timestamp* timestamp); 97 | MP_API int mp_Packet__GetString(mediapipe::Packet* packet, const char** value_out); 98 | MP_API int mp_Packet__GetByteString(mediapipe::Packet* packet, const char** value_out, int* size_out); 99 | MP_API int mp_Packet__ConsumeString(mediapipe::Packet* packet, absl::StatusOr<std::string>** status_or_value_out); 100 | MP_API int mp_Packet__ValidateAsString(mediapipe::Packet* packet, absl::Status** status_out); 101 | 102 | /** SidePacket API */ 103 | MP_API void mp_SidePacket__delete(SidePacket* side_packet); 104 | MP_API int mp_SidePacket__emplace__PKc_Rp(SidePacket* side_packet, const char* key, mediapipe::Packet* packet); 105 | MP_API int mp_SidePacket__at__PKc(SidePacket* side_packet, const char* key, mediapipe::Packet** packet_out); 106 | MP_API int mp_SidePacket__erase__PKc(SidePacket* side_packet, const char* key, int* count_out); 107 | MP_API void mp_SidePacket__clear(SidePacket* side_packet); 108 | MP_API int mp_SidePacket__size(SidePacket* side_packet); 109 | 110 | } // extern "C" 111 | 112 | 113 | 114 | #endif // MP_PACKET_H_ 115 |
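Usage note: supporting another proto type is a two-line affair on top of the templates above; landmarks_packet.cc is the canonical example. A hypothetical detection getter (mediapipe::Detection and the function name are illustrative, not part of this repo):

#include "packet.h"
#include "mediapipe/framework/formats/detection.pb.h"

int mp_Packet__GetDetectionVector(void* packet, SerializedProtoArray* value_out) {
  auto p = (mediapipe::Packet*)packet;
  return mp_Packet__GetSerializedProtoVector<mediapipe::Detection>(p, value_out);
}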
-------------------------------------------------------------------------------- /packet_api.cc: -------------------------------------------------------------------------------- 1 | #include "packet_api.h" 2 | #include "landmarks_packet.h" 3 | #include "face_geometry_packet.h" 4 | #include "classification_packet.h" 5 | #include "packet.h" 6 | #include "ump_shared.h" 7 | 8 | int PacketAPI::GetBoolean(void* packet, bool* value_out) 9 | { 10 | auto p = (mediapipe::Packet*)packet; 11 | return mp_Packet__GetBool(p, value_out); 12 | } 13 | 14 | int PacketAPI::GetLandmarkList(void* packet, SerializedProto* value_out) 15 | { 16 | return mp_Packet__GetLandmarkList(packet, value_out); 17 | } 18 | 19 | int PacketAPI::GetLandmarkListVector(void* packet, SerializedProtoArray* value_out) 20 | { 21 | return mp_Packet__GetLandmarkListVector(packet, value_out); 22 | } 23 | 24 | int PacketAPI::GetNormalizedLandmarkList(void* packet, SerializedProto* value_out) 25 | { 26 | return mp_Packet__GetNormalizedLandmarkList(packet, value_out); 27 | } 28 | 29 | int PacketAPI::GetNormalizedLandmarkListVector(void* packet, SerializedProtoArray* value_out) 30 | { 31 | return mp_Packet__GetNormalizedLandmarkListVector(packet, value_out); 32 | } 33 | 34 | int PacketAPI::GetFaceGeometry(void* packet, SerializedProto* value_out) 35 | { 36 | return mp_Packet__GetFaceGeometry(packet, value_out); 37 | } 38 | 39 | int PacketAPI::GetFaceGeometryVector(void* packet, SerializedProtoArray* value_out) 40 | { 41 | return mp_Packet__GetFaceGeometryVector(packet, value_out); 42 | } 43 | 44 | int PacketAPI::GetClassificationList(void* packet, SerializedProto* value_out) 45 | { 46 | return mp_Packet__GetClassificationList(packet, value_out); 47 | } 48 | 49 | int PacketAPI::GetClassificationListVector(void* packet, SerializedProtoArray* value_out) 50 | { 51 | return mp_Packet__GetClassificationListVector(packet, value_out); 52 | } 53 | 54 | void PacketAPI::ClearProtoArray(SerializedProtoArray* proto_array) 55 | { 56 | if(proto_array != nullptr) 57 | { 58 | DeleteSerializedProtoArray(proto_array->data, proto_array->size); 59 | } 60 | } 61 | 62 | void* PacketAPI::NewSidePacket() 63 | { 64 | return new SidePacket(); 65 | } 66 | 67 | void PacketAPI::DeleteSidePacket(void* side_packet) 68 | { 69 | mp_SidePacket__delete((SidePacket*)side_packet); 70 | } 71 | 72 | 73 | int PacketAPI::SidePacketAddInt(void* side_packet, const char* key, int value) 74 | { 75 | TRY 76 | SidePacket* sp = (SidePacket*)side_packet; 77 | mediapipe::Packet* p = mp__MakeIntPacket__i(value); 78 | if (p != nullptr) 79 | { 80 | mp_SidePacket__emplace__PKc_Rp(sp, key, p); 81 | mp_Packet__delete(p); 82 | return 0; 83 | } 84 | CATCH_ONLY 85 | return -1; 86 | } 87 | 88 | int PacketAPI::SidePacketAddFloat(void* side_packet, const char* key, float value) 89 | { 90 | TRY 91 | SidePacket* sp = (SidePacket*)side_packet; 92 | mediapipe::Packet* p = mp__MakeFloatPacket__f(value); 93 | if (p != nullptr) 94 | { 95 | mp_SidePacket__emplace__PKc_Rp(sp, key, p); 96 | mp_Packet__delete(p); 97 | return 0; 98 | } 99 | CATCH_ONLY 100 | return -1; 101 | } 102 | 103 | int PacketAPI::SidePacketAddBoolean(void* side_packet, const char* key, bool value) 104 | { 105 | TRY 106 | SidePacket* sp = static_cast<SidePacket*>(side_packet); 107 | mediapipe::Packet* p = mp__MakeBoolPacket__b(value); 108 | if (p != nullptr) 109 | { 110 | mp_SidePacket__emplace__PKc_Rp(sp, key, p); 111 | mp_Packet__delete(p); 112 | return 0; 113 | } 114 | CATCH_ONLY 115 | return -1; 116 | } 117 | -------------------------------------------------------------------------------- /packet_api.h: -------------------------------------------------------------------------------- 1 | #ifndef UMP_PACKET_H 2 | #define UMP_PACKET_H 3 | 4 | #include "ump_commons.h" 5 | #include "ump_packet.h" 6 | #include "packet.h" 7 | 8 | class PacketAPI : public IPacketAPI 9 | { 10 | public: 11 | virtual int GetBoolean(void* packet, bool* value_out) override; 12 | virtual int
GetLandmarkList(void* packet, SerializedProto* value_out) override; 13 | virtual int GetLandmarkListVector(void* packet, SerializedProtoArray* value_out) override; 14 | virtual int GetNormalizedLandmarkList(void* packet, SerializedProto* value_out) override; 15 | virtual int GetNormalizedLandmarkListVector(void* packet, SerializedProtoArray* value_out) override; 16 | virtual int GetFaceGeometry(void* packet, SerializedProto* value_out) override; 17 | virtual int GetFaceGeometryVector(void* packet, SerializedProtoArray* value_out) override; 18 | virtual int GetClassificationList(void* packet, SerializedProto* value_out) override; 19 | virtual int GetClassificationListVector(void* packet, SerializedProtoArray* value_out) override; 20 | 21 | virtual void ClearProtoArray(SerializedProtoArray* proto_array) override; 22 | 23 | virtual void* NewSidePacket() override; 24 | virtual void DeleteSidePacket(void* side_packet) override; 25 | virtual int SidePacketAddBoolean(void* side_packet, const char* key, bool value) override; 26 | virtual int SidePacketAddFloat(void* side_packet, const char* key, float value) override; 27 | virtual int SidePacketAddInt(void* side_packet, const char* key, int value) override; 28 | }; 29 | 30 | #endif // ! UMP_PACKET_H 31 | -------------------------------------------------------------------------------- /protobuf.cc: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021 homuler 2 | // 3 | // Use of this source code is governed by an MIT-style 4 | // license that can be found in the LICENSE file or at 5 | // https://opensource.org/licenses/MIT. 6 | 7 | #include "protobuf.h" 8 | 9 | #include 10 | #include 11 | 12 | -------------------------------------------------------------------------------- /protobuf.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021 homuler 2 | // 3 | // Use of this source code is governed by an MIT-style 4 | // license that can be found in the LICENSE file or at 5 | // https://opensource.org/licenses/MIT. 
6 | 7 | #ifndef MEDIAPIPE_API_EXTERNAL_PROTOBUF_H_ 8 | #define MEDIAPIPE_API_EXTERNAL_PROTOBUF_H_ 9 | 10 | #include <cstring> 11 | #include <string> 12 | #include <vector> 13 | 14 | #include "ump_commons.h" 15 | #include "mediapipe/framework/port/parse_text_proto.h" 16 | 17 | template <typename T> 18 | struct StructArray { 19 | T* data; 20 | int size; 21 | }; 22 | 23 | template <typename T> 24 | inline void SerializeProto(const T& proto, SerializedProto* serialized_proto) { 25 | auto str = proto.SerializeAsString(); 26 | auto size = str.size(); 27 | auto bytes = new char[size + 1]; 28 | memcpy(bytes, str.c_str(), size); bytes[size] = '\0'; // the +1 byte is for this terminator 29 | 30 | serialized_proto->str = bytes; 31 | serialized_proto->length = static_cast<int>(size); 32 | } 33 | 34 | template <typename T> 35 | inline void SerializeProtoVector(const std::vector<T>& proto_vec, SerializedProtoArray* serialized_proto_vector) { 36 | auto vec_size = proto_vec.size(); 37 | auto data = new SerializedProto[vec_size]; 38 | 39 | for (size_t i = 0; i < vec_size; ++i) { 40 | SerializeProto(proto_vec[i], &data[i]); 41 | } 42 | serialized_proto_vector->data = data; 43 | serialized_proto_vector->size = static_cast<int>(vec_size); 44 | } 45 | 46 | template <typename T> 47 | inline bool ConvertFromTextFormat(const char* str, SerializedProto* output) { 48 | T proto; 49 | auto result = google::protobuf::TextFormat::ParseFromString(str, &proto); 50 | 51 | if (result) { 52 | SerializeProto(proto, output); 53 | } 54 | return result; 55 | } 56 | 57 | inline void DeleteSerializedProtoArray(SerializedProto* serialized_proto_vector_data, int size) { 58 | auto serialized_proto = serialized_proto_vector_data; 59 | for (auto i = 0; i < size; ++i) { 60 | delete[] (serialized_proto++)->str; // array form: the buffers come from new char[] 61 | } 62 | delete[] serialized_proto_vector_data; 63 | } 64 | 65 | 66 | #endif // MEDIAPIPE_API_EXTERNAL_PROTOBUF_H_
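Usage note: SerializeProtoVector and DeleteSerializedProtoArray form an ownership pair: every element buffer and the array itself are new[]-allocated, so both must go back through the deleter. A round-trip sketch:

#include "protobuf.h"
#include "mediapipe/framework/formats/landmark.pb.h"

void RoundTrip(const std::vector<mediapipe::NormalizedLandmarkList>& lists) {
  SerializedProtoArray arr{};
  SerializeProtoVector(lists, &arr);
  for (int i = 0; i < arr.size; ++i) {
    mediapipe::NormalizedLandmarkList copy;
    copy.ParseFromArray(arr.data[i].str, arr.data[i].length);  // bytes -> proto
  }
  DeleteSerializedProtoArray(arr.data, arr.size);  // frees element buffers, then the array
}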
-------------------------------------------------------------------------------- /side_packet.cc: -------------------------------------------------------------------------------- 1 | #include "side_packet.h" 2 | 3 | void* mp_SidePacket__new() { 4 | try 5 | { 6 | auto sp = new SidePacket(); 7 | return sp; // was "return &sp;" - returning the address of the local pointer would dangle 8 | } 9 | catch (...) 10 | { 11 | return nullptr; 12 | } 13 | } 14 | 15 | void mp_SidePacket__delete(void* side_packet) { delete (SidePacket*)side_packet; } 16 | 17 | int mp_SidePacket__emplace__PKc_Rp(void* side_packet, const char* key, void* packet) 18 | { 19 | TRY 20 | ((SidePacket*)side_packet)->emplace(std::string(key), std::move(*((mediapipe::Packet*)packet))); 21 | CATCH_EXCEPTION 22 | } 23 | 24 | void* mp_SidePacket__at__PKc(void* side_packet, const char* key) 25 | { 26 | try 27 | { 28 | auto packet = ((SidePacket*)side_packet)->at(std::string(key)); 29 | auto p = new mediapipe::Packet{ packet }; 30 | return p; 31 | } 32 | catch(...) 33 | { 34 | return nullptr; 35 | } 36 | } 37 | 38 | int mp_SidePacket__erase__PKc(void* side_packet, const char* key, int* count_out) { 39 | TRY 40 | *count_out = static_cast<int>(((SidePacket*)side_packet)->erase(std::string(key))); 41 | CATCH_EXCEPTION 42 | } 43 | 44 | void mp_SidePacket__clear(void* side_packet) { ((SidePacket*)side_packet)->clear(); } 45 | 46 | int mp_SidePacket__size(void* side_packet) { return static_cast<int>(((SidePacket*)side_packet)->size()); } 47 | 48 | -------------------------------------------------------------------------------- /side_packet.h: -------------------------------------------------------------------------------- 1 | #ifndef U_SIDE_PACKET_H 2 | #define U_SIDE_PACKET_H 3 | #include "ump_shared.h" 4 | 5 | extern "C" { 6 | typedef std::map<std::string, mediapipe::Packet> SidePacket; 7 | 8 | MP_API void* mp_SidePacket__new(); 9 | MP_API void mp_SidePacket__delete(void* side_packet); 10 | MP_API int mp_SidePacket__emplace__PKc_Rp(void* side_packet, const char* key, void* packet); 11 | MP_API void* mp_SidePacket__at__PKc(void* side_packet, const char* key); 12 | MP_API int mp_SidePacket__erase__PKc(void* side_packet, const char* key, int* count_out); 13 | MP_API void mp_SidePacket__clear(void* side_packet); 14 | MP_API int mp_SidePacket__size(void* side_packet); 15 | } 16 | #endif // U_SIDE_PACKET_H -------------------------------------------------------------------------------- /ump_api.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include <cstdint> 4 | #include <cstddef> 5 | 6 | #include "ump_commons.h" 7 | #include "ump_packet.h" 8 | 9 | // 10 | // Log 11 | // 12 | 13 | enum class EUmpVerbosity 14 | { 15 | Error = 0, 16 | Warning, 17 | Info, 18 | Debug 19 | }; 20 | 21 | class IUmpLog 22 | { 23 | protected: 24 | virtual ~IUmpLog() {} 25 | 26 | public: 27 | virtual void Println(EUmpVerbosity verbosity, const char* msg) const { (void)verbosity; (void)msg; } 28 | }; 29 | 30 | 31 | 32 | // 33 | // Object 34 | // 35 | 36 | class IMediaPipeTexture 37 | { 38 | public: 39 | virtual ~IMediaPipeTexture() = default; 40 | virtual long GetImageId() const = 0; 41 | //get uint8* 42 | virtual void* GetData() const = 0; 43 | virtual MediaPipeImageFormat GetFormat() const = 0; 44 | virtual int GetWidthStep() const = 0; 45 | virtual int GetWidth() const = 0; 46 | virtual int GetHeight() const = 0; 47 | virtual void Release() = 0; 48 | }; 49 | 50 | class IImageSource 51 | { 52 | public: 53 | virtual ~IImageSource() = default; 54 | virtual bool GetTexture(IMediaPipeTexture*& outTexture) = 0; 55 | virtual bool IsStatic() const = 0; 56 | }; 57 | 58 | class IUmpObject 59 | { 60 | protected: 61 | virtual ~IUmpObject() {} 62 | 63 | public: 64 | struct Dtor { void operator()(IUmpObject* obj) { obj->Release(); } }; 65 | virtual void Release() = 0; 66 | virtual void AddRef() = 0; 67 | }; 68 | 69 | // 70 | // Context 71 | // 72 | 73 | class IUmpContext : public IUmpObject 74 | { 75 | public: 76 | virtual void SetLog(IUmpLog* log) = 0; 77 | virtual void SetResourceDir(const char* resource_dir) = 0; 78 | virtual class IUmpPipeline* CreatePipeline() = 0; 79 | }; 80 | 81 | extern "C" 82 | { 83 | MP_API IUmpContext* UmpCreateContext(); 84 | typedef IUmpContext* UmpCreateContext_Proto(); 85 | } 86 | // 87 | // Pipeline 88 | // 89 | 90 | class IUmpPipeline : public IUmpObject 91 | { 92 | public: 93 | virtual void SetGraphConfiguration(const char* filename) = 0; 94 | virtual void SetCaptureFromFile(const char* filename) = 0; 95 | virtual void SetCaptureFromCamera(int cam_id, int cam_api, int cam_resx, int cam_resy, int cam_fps) =
0; 96 | virtual void ShowVideoWindow(bool show) = 0; 97 | virtual void EnableFrameCallback(bool enabled) = 0; 98 | virtual bool IsFrameCallbackEnabled() = 0; 99 | virtual class IUmpObserver* CreateObserver(const char* stream_name, long timeoutMillisecond = 2000) = 0; 100 | virtual void SetFrameCallback(class IUmpFrameCallback* callback) = 0; 101 | virtual void SetListener(class IUmpPipelineListener* listener) = 0; 102 | virtual bool Start(void* side_packet = nullptr) = 0; 103 | virtual bool StartImageSource(IImageSource* image_source, void* side_packet = nullptr) = 0; 104 | virtual void Stop() = 0; 105 | virtual IPacketAPI* GetPacketAPI() = 0; 106 | virtual void ClearObservers() = 0; 107 | 108 | // debug 109 | virtual void LogProfilerStats() = 0; 110 | virtual uint64_t GetLastFrameId() = 0; 111 | virtual double GetLastFrameTimestamp() = 0; 112 | }; 113 | 114 | // 115 | // Observer 116 | // 117 | 118 | class IUmpPipelineListener 119 | { 120 | public: 121 | virtual void OnImageSizeReceived(int width, int height) = 0; 122 | virtual void OnPipelineWorkThreadFault() = 0; 123 | virtual void OnEnterPipelineWorkThread() = 0; 124 | virtual void OnExitPipelineWorkThread() = 0; 125 | }; 126 | 127 | class IUmpPacketCallback 128 | { 129 | public: 130 | virtual bool OnUmpPacket(class IUmpObserver* observer, void* packet) = 0; 131 | }; 132 | 133 | class IUmpObserver : public IUmpObject 134 | { 135 | public: 136 | virtual void SetPacketCallback(IUmpPacketCallback* callback) = 0; 137 | virtual class IPacketAPI* GetPacketAPI() = 0; 138 | virtual IUmpPacketCallback* GetPacketCallback() = 0; 139 | }; 140 | 141 | 142 | // 143 | // Frame callback 144 | // 145 | 146 | 147 | enum class EUmpPixelFormat 148 | { 149 | Unknown = 0, 150 | B8G8R8A8, 151 | R8G8B8A8, 152 | NUM_ITEMS // last 153 | }; 154 | 155 | class IUmpFrame : public IUmpObject 156 | { 157 | public: 158 | virtual const void* GetData() const = 0; 159 | virtual EUmpPixelFormat GetFormat() const = 0; 160 | virtual int GetPitch() const = 0; 161 | virtual int GetWidth() const = 0; 162 | virtual int GetHeight() const = 0; 163 | }; 164 | 165 | class IUmpFrameCallback 166 | { 167 | public: 168 | virtual void OnUmpFrame(IUmpFrame* frame) = 0; 169 | }; 170 | 171 | 172 | 173 | 174 | -------------------------------------------------------------------------------- /ump_app.cc: -------------------------------------------------------------------------------- 1 | #include "ump_pipeline.h" 2 | #include "mediapipe/framework/port/opencv_video_inc.h" 3 | #include "dummy_packet_callback.h" 4 | 5 | #define UMP_UNIQ(_type) std::unique_ptr<_type, IUmpObject::Dtor> 6 | 7 | class UmpStdoutLog : public IUmpLog 8 | { 9 | public: 10 | void Println(EUmpVerbosity verbosity, const char* msg) const override { std::cout << msg << std::endl; } 11 | }; 12 | 13 | int main(int argc, char* argv[]) 14 | { 15 | std::cout << "== INIT ==" << std::endl; 16 | 17 | google::InitGoogleLogging(argv[0]); 18 | absl::ParseCommandLine(argc, argv); 19 | 20 | { 21 | UmpStdoutLog log; 22 | UMP_UNIQ(IUmpContext) context(UmpCreateContext()); 23 | context->SetLog(&log); 24 | context->SetResourceDir(""); 25 | 26 | UMP_UNIQ(IUmpPipeline) pipe(context->CreatePipeline()); 27 | pipe->SetCaptureFromCamera(0, cv::CAP_DSHOW, 0, 0, 0); // CAP_DSHOW | CAP_MSMF 28 | pipe->ShowVideoWindow(true); 29 | 30 | std::vector<IUmpObserver*> observers; 31 | 32 | pipe->SetGraphConfiguration("mediapipe/graphs/holistic_tracking_cpu.pbtxt"); 33 | auto ob = pipe->CreateObserver("pose_landmarks"); 34 | observers.emplace_back(ob);
35 | observers.emplace_back(pipe->CreateObserver("face_landmarks")); 36 | observers.emplace_back(pipe->CreateObserver("left_hand_landmarks")); 37 | observers.emplace_back(pipe->CreateObserver("right_hand_landmarks")); 38 | 39 | auto callback = new DummyPacketCallback(); 40 | ob->SetPacketCallback(callback); 41 | 42 | pipe->Start(); 43 | getchar(); 44 | pipe->Stop(); 45 | delete callback; 46 | pipe->LogProfilerStats(); 47 | } 48 | 49 | std::cout << "press enter to exit" << std::endl; 50 | getchar(); 51 | return 0; 52 | } 53 | -------------------------------------------------------------------------------- /ump_commons.h: -------------------------------------------------------------------------------- 1 | #ifndef COMMONS_H 2 | #define COMMONS_H 3 | 4 | #include <cstdint> 5 | 6 | #if defined(_MSC_VER) // MSVC 7 | #define DLL_EXPORT __declspec(dllexport) 8 | #define DLL_IMPORT __declspec(dllimport) 9 | #elif defined(__GNUC__) // GCC 10 | #define DLL_EXPORT __attribute__((visibility("default"))) 11 | #define DLL_IMPORT 12 | #else 13 | #define DLL_EXPORT 14 | #define DLL_IMPORT 15 | #pragma warning Unknown dynamic link import/export semantics. 16 | #endif 17 | 18 | #if defined(MP_EXPORTS) 19 | #define MP_API DLL_EXPORT 20 | #elif defined(MP_IMPORTS) 21 | #define MP_API DLL_IMPORT 22 | #else 23 | #define MP_API 24 | #endif 25 | 26 | struct SerializedProto { 27 | const char* str; 28 | int length; 29 | }; 30 | 31 | struct SerializedProtoArray { 32 | SerializedProto* data; 33 | int size; 34 | }; 35 | 36 | enum class MediaPipeImageFormat : int { 37 | UNKNOWN = 0, 38 | SRGB = 1, 39 | SRGBA = 2, 40 | GRAY8 = 3, 41 | GRAY16 = 4, 42 | YCBCR420P = 5, 43 | YCBCR420P10 = 6, 44 | SRGB48 = 7, 45 | SRGBA64 = 8, 46 | VEC32F1 = 9, 47 | VEC32F2 = 12, 48 | LAB8 = 10, 49 | SBGRA = 11 50 | }; 51 | 52 | 53 | 54 | 55 | #endif // ! COMMONS_H
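Usage note: MediaPipeImageFormat mirrors mediapipe::ImageFormat::Format numerically; AddImageFrameIntoStream (ump_pipeline.cc) relies on that when it static_casts between the two. A compile-time guard sketch, assuming it lives in a TU that can include both headers:

#include "ump_commons.h"
#include "mediapipe/framework/formats/image_format.pb.h"

static_assert(static_cast<int>(MediaPipeImageFormat::SRGB) == static_cast<int>(mediapipe::ImageFormat::SRGB),
              "format enums must stay in sync");
static_assert(static_cast<int>(MediaPipeImageFormat::SRGBA) == static_cast<int>(mediapipe::ImageFormat::SRGBA),
              "format enums must stay in sync");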
56 | -------------------------------------------------------------------------------- /ump_context.cc: -------------------------------------------------------------------------------- 1 | #include "ump_context.h" 2 | #include "ump_pipeline.h" 3 | #include "packet_api.h" 4 | 5 | ABSL_DECLARE_FLAG(std::string, resource_root_dir); 6 | 7 | IUmpLog* _ump_log = nullptr; 8 | 9 | UmpContext::UmpContext() { 10 | log_d("+UmpContext"); 11 | } 12 | 13 | UmpContext::~UmpContext() { 14 | log_d("~UmpContext"); 15 | } 16 | 17 | void UmpContext::SetLog(IUmpLog* log) { 18 | _ump_log = log; 19 | } 20 | 21 | void UmpContext::SetResourceDir(const char* resource_dir) { 22 | log_i(strf("SetResourceDir: %s", resource_dir)); 23 | absl::SetFlag(&FLAGS_resource_root_dir, resource_dir); 24 | } 25 | 26 | IUmpPipeline* UmpContext::CreatePipeline() { 27 | return new UmpPipeline(); 28 | } 29 | 30 | IUmpContext* UmpCreateContext() { 31 | return new UmpContext(); 32 | } 33 | -------------------------------------------------------------------------------- /ump_context.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "ump_object.h" 4 | #include "ump_packet.h" 5 | 6 | using UmpContextBase = UmpObject<IUmpContext>; 7 | 8 | class UmpContext : public UmpContextBase 9 | { 10 | public: 11 | UmpContext(); 12 | virtual ~UmpContext() override; 13 | 14 | virtual void SetLog(class IUmpLog* log) override; 15 | virtual void SetResourceDir(const char* resource_dir) override; 16 | virtual class IUmpPipeline* CreatePipeline() override; 17 | }; 18 | -------------------------------------------------------------------------------- /ump_dll.cc: -------------------------------------------------------------------------------- 1 | #include "ump_pipeline.h" 2 | #include "ump_dll.h" 3 | 4 | IUmpContext* CreateMediapipeContext() 5 | { 6 | return UmpCreateContext(); 7 | } 8 | 9 | #if defined(_WIN32) 10 | #include <windows.h> 11 | BOOL WINAPI DllMain(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpReserved) { return TRUE; } 12 | #endif 13 | -------------------------------------------------------------------------------- /ump_dll.h: -------------------------------------------------------------------------------- 1 | #ifndef UMP_DLL_H 2 | #define UMP_DLL_H 3 | 4 | #include "ump_commons.h" 5 | #include "ump_api.h" 6 | #include "ump_packet.h" 7 | 8 | extern "C" 9 | { 10 | MP_API IUmpContext* CreateMediapipeContext(); 11 | } 12 | 13 | #endif 14 | 15 | -------------------------------------------------------------------------------- /ump_frame.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "ump_object.h" 4 | #include "opencv2/core/mat.hpp" 5 | 6 | using UmpFrameBase = UmpObject<IUmpFrame>; 7 | 8 | class UmpFrame : public UmpFrameBase 9 | { 10 | friend class UmpPipeline; 11 | 12 | protected: 13 | virtual ~UmpFrame() override { log_d(strf("~UmpFrame %p", this)); } 14 | 15 | public: 16 | UmpFrame(UmpCustomDtor& dtor) : UmpFrameBase(dtor) { log_d(strf("+UmpFrame %p", this)); } 17 | 18 | virtual const void* GetData() const override { return _cvmat.data; } 19 | virtual EUmpPixelFormat GetFormat() const override { return _format; } 20 | virtual int GetPitch() const override { return static_cast<int>(_cvmat.step); } 21 | virtual int GetWidth() const override { return _cvmat.cols; } 22 | virtual int GetHeight() const override { return _cvmat.rows; } 23 | 24 | inline cv::Mat& GetMatrixRef() { return _cvmat; } 25 | 26 | protected: 27 | cv::Mat _cvmat; 28 | EUmpPixelFormat _format = EUmpPixelFormat::Unknown; 29 | }; 30 |
-------------------------------------------------------------------------------- /ump_object.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "ump_shared.h" 4 | 5 | class IExtDummy {}; 6 | 7 | using UmpCustomDtor = std::function<void(IUmpObject*)>; 8 | 9 | template <class TBase, class TExt = IExtDummy> 10 | class UmpObject : public TBase, public TExt 11 | { 12 | protected: 13 | virtual ~UmpObject() override {} 14 | 15 | public: 16 | UmpObject() : _ref_count(1) {} 17 | UmpObject(UmpCustomDtor& dtor) : _ref_count(1), _dtor(dtor) {} 18 | 19 | // non copyable 20 | UmpObject(const UmpObject&) = delete; 21 | UmpObject(UmpObject&&) = delete; 22 | UmpObject& operator=(const UmpObject&) = delete; 23 | UmpObject& operator=(UmpObject&&) = delete; 24 | 25 | virtual void Release() override { ReleaseImpl(); } 26 | virtual void AddRef() override { AddRefImpl(); } 27 | 28 | inline void log_e(const char* msg) const { log(EUmpVerbosity::Error, msg); } 29 | inline void log_w(const char* msg) const { log(EUmpVerbosity::Warning, msg); } 30 | inline void log_i(const char* msg) const { log(EUmpVerbosity::Info, msg); } 31 | inline void log_d(const char* msg) const { log(EUmpVerbosity::Debug, msg); } 32 | inline void log_e(const std::string& msg) const { log(EUmpVerbosity::Error, *msg); } 33 | inline void log_w(const std::string& msg) const { log(EUmpVerbosity::Warning, *msg); } 34 | inline void log_i(const std::string& msg) const { log(EUmpVerbosity::Info, *msg); } 35 | inline void log_d(const std::string& msg) const { log(EUmpVerbosity::Debug, *msg); } 36 | 37 | protected: 38 | inline int ReleaseImpl() 39 | { 40 | const int n = _ref_count.fetch_sub(1); // TODO: better memory_order? 41 | if (n == 1) 42 | { 43 | if (_dtor) 44 | _dtor(static_cast<IUmpObject*>(this)); 45 | else 46 | delete this; 47 | } 48 | return n; 49 | } 50 | 51 | inline int AddRefImpl() 52 | { 53 | return _ref_count.fetch_add(1); // TODO: better memory_order? 54 | } 55 | 56 | protected: 57 | std::atomic<int> _ref_count; 58 | UmpCustomDtor _dtor; 59 | }; 60 | -------------------------------------------------------------------------------- /ump_observer.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "ump_object.h" 4 | #include "packet_api.h" 5 | #include "mediapipe/framework/calculator_graph.h" 6 | 7 | using UmpObserverBase = UmpObject<IUmpObserver>; 8 | 9 | class UmpObserver : public UmpObserverBase 10 | { 11 | protected: 12 | virtual ~UmpObserver() override 13 | { 14 | log_d(strf("~UmpObserver %s", *_stream_name)); 15 | } 16 | 17 | public: 18 | UmpObserver(const char* in_stream_name, const std::shared_ptr<IPacketAPI>& packet_api, long timeoutMillisecond) : 19 | _stream_name(in_stream_name), 20 | _packet_api{ packet_api } 21 | { 22 | log_d(strf("+UmpObserver %s", *_stream_name)); 23 | _timeout = timeoutMillisecond * 1000; 24 | } 25 | 26 | 27 | absl::Status ObserveOutput(mediapipe::CalculatorGraph* graph) 28 | { 29 | 30 | /*graph->ObserveOutputStream(*presence_name, [this](const mediapipe::Packet& pk) 31 | { 32 | _presence = pk.Get<bool>(); 33 | 34 | if (_callback) 35 | _callback->OnUmpPresence(this, _presence); 36 | 37 | return absl::OkStatus(); 38 | }, true);*/ 39 | 40 | RET_CHECK_OK(graph->ObserveOutputStream(*_stream_name, [this](const mediapipe::Packet& pk) 41 | { 42 | if(!_callback) 43 | {
44 | log_w(strf("IUmpObserver::OnUmpPacket entered, but the callback pointer is null; skipping packet. (out stream : %s)", _stream_name.c_str())); return absl::OkStatus(); // guard: without this early return the null callback would be dereferenced below 45 | } 46 | if (!pk.IsEmpty()) 47 | { 48 | auto current = pk.Timestamp().Microseconds(); 49 | if(_lastTimestampMicrosec <= 0) 50 | { 51 | _lastTimestampMicrosec = current; 52 | } 53 | bool hasTimedOut = (current - _lastTimestampMicrosec) >= _timeout; 54 | if(!hasTimedOut) 55 | { 56 | const void* p = &pk; 57 | const auto succeed = _callback->OnUmpPacket(this, const_cast<void*>(p)); 58 | if (!succeed) 59 | { 60 | return absl::AbortedError(strf("IUmpObserver::OnUmpPacket returned false (out stream : %s), pipeline will be stopped.", _stream_name.c_str())); 61 | } 62 | } 63 | else 64 | { 65 | log_w(strf("IUmpObserver::OnUmpPacket packet timeout. (out stream : %s, current: %lld, last: %lld, timeout: %lld)", _stream_name.c_str(), static_cast<long long>(current), static_cast<long long>(_lastTimestampMicrosec), static_cast<long long>(_timeout))); 66 | } 67 | _lastTimestampMicrosec = current; 68 | } 69 | else 70 | { 71 | _lastTimestampMicrosec = -1; 72 | } 73 | return absl::OkStatus(); 74 | }, true)); 75 | 76 | return absl::OkStatus(); 77 | } 78 | 79 | virtual void SetPacketCallback(IUmpPacketCallback* in_callback) override { _callback = in_callback; } 80 | 81 | virtual IUmpPacketCallback* GetPacketCallback() override { return _callback; } 82 | 83 | virtual class IPacketAPI* GetPacketAPI() override 84 | { 85 | return _packet_api.get(); 86 | } 87 | 88 | 89 | 90 | protected: 91 | std::string _stream_name; 92 | IUmpPacketCallback* _callback = nullptr; 93 | bool _presence = false; 94 | std::shared_ptr<IPacketAPI> _packet_api; 95 | int64_t _timeout = -1; // microseconds; widened from long to avoid overflow on 32-bit long 96 | int64_t _lastTimestampMicrosec = -1; 97 | }; 98 | -------------------------------------------------------------------------------- /ump_packet.h: -------------------------------------------------------------------------------- 1 | #ifndef IPACKET_API_H 2 | #define IPACKET_API_H 3 | 4 | #include "ump_commons.h" 5 | 6 | class IPacketAPI 7 | { 8 | public: 9 | virtual ~IPacketAPI() = default; 10 | virtual int GetBoolean(void* packet, bool* value_out) = 0; 11 | virtual int GetLandmarkList(void* packet, SerializedProto* value_out) = 0; 12 | virtual int GetLandmarkListVector(void* packet, SerializedProtoArray* value_out) = 0; 13 | virtual int GetNormalizedLandmarkList(void* packet, SerializedProto* value_out) = 0; 14 | virtual int GetNormalizedLandmarkListVector(void* packet, SerializedProtoArray* value_out) = 0; 15 | virtual int GetFaceGeometry(void* packet, SerializedProto* value_out) = 0; 16 | virtual int GetFaceGeometryVector(void* packet, SerializedProtoArray* value_out) = 0; 17 | virtual int GetClassificationList(void* packet, SerializedProto* value_out) = 0; 18 | virtual int GetClassificationListVector(void* packet, SerializedProtoArray* value_out) = 0; 19 | 20 | virtual void ClearProtoArray(SerializedProtoArray* proto_array) = 0; 21 | 22 | virtual void* NewSidePacket() = 0; 23 | virtual void DeleteSidePacket(void* side_packet) = 0; 24 | 25 | virtual int SidePacketAddBoolean(void* side_packet, const char* key, bool value) = 0; 26 | virtual int SidePacketAddFloat(void* side_packet, const char* key, float value) = 0; 27 | virtual int SidePacketAddInt(void* side_packet, const char* key, int value) = 0; 28 | }; 29 | 30 | #endif // ! IPACKET_API_H
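Usage note: a packet callback that pulls landmarks through IPacketAPI, mirroring how ump_app.cc wires a DummyPacketCallback onto an observer. A minimal sketch, assuming the getters return 0 on success and that the blob buffer is new[]-allocated (see SerializeProto in protobuf.h):

#include "ump_api.h"
#include "mediapipe/framework/formats/landmark.pb.h"

class PoseLandmarksCallback : public IUmpPacketCallback {
public:
  bool OnUmpPacket(IUmpObserver* observer, void* packet) override {
    SerializedProto blob{};
    if (observer->GetPacketAPI()->GetNormalizedLandmarkList(packet, &blob) != 0)
      return true;  // keep the pipeline alive on a bad packet
    mediapipe::NormalizedLandmarkList list;
    list.ParseFromArray(blob.str, blob.length);
    delete[] blob.str;  // assumption: caller frees the new[]-allocated buffer
    return true;        // returning false aborts the graph (see UmpObserver::ObserveOutput)
  }
};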
-------------------------------------------------------------------------------- /ump_pipeline.cc: -------------------------------------------------------------------------------- 1 | #include "ump_pipeline.h" 2 | #include "ump_observer.h" 3 | #include "ump_frame.h" 4 | 5 | #include "mediapipe/framework/formats/image_frame.h" 6 | #include "mediapipe/framework/formats/image_frame_opencv.h" 7 | #include "mediapipe/framework/port/opencv_video_inc.h" 8 | #include "mediapipe/framework/port/opencv_highgui_inc.h" 9 | 10 | #include "mediapipe/framework/output_stream_poller.h" 11 | 12 | #include "mediapipe/framework/port/parse_text_proto.h" 13 | #include "mediapipe/framework/port/file_helpers.h" 14 | #include "mediapipe/util/resource_util.h" 15 | 16 | #include <chrono> 17 | #include <mutex> 18 | #include <thread> 19 | 20 | inline double get_timestamp_us() // microseconds 21 | { 22 | return static_cast<double>(cv::getTickCount()) / (double)cv::getTickFrequency() * 1e6; 23 | } 24 | 25 | UmpPipeline::UmpPipeline() 26 | { 27 | log_d("+UmpPipeline"); 28 | _packet_api.reset(new PacketAPI()); 29 | } 30 | 31 | absl::Status UmpPipeline::AddImageFrameIntoStream(const char* stream_name, IMediaPipeTexture* texture) const 32 | { 33 | TRY 34 | 35 | auto image_frame_out = absl::make_unique<mediapipe::ImageFrame>( 36 | static_cast<mediapipe::ImageFormat::Format>(static_cast<int>(texture->GetFormat())), 37 | texture->GetWidth(), 38 | texture->GetHeight(), 39 | texture->GetWidthStep(), 40 | static_cast<uint8*>(texture->GetData()), 41 | [texture](uint8*) 42 | { 43 | texture->Release(); 44 | } 45 | ); 46 | const auto packet_in = Adopt(image_frame_out.release()).At(mediapipe::Timestamp(static_cast<int64_t>(get_timestamp_us()))); 47 | auto status = _graph->AddPacketToInputStream(stream_name, packet_in); 48 | //no delete here: ownership of the image frame moved into the packet 49 | return status; 50 | CATCH_RETURN_STATUS 51 | } 52 | 53 | UmpPipeline::~UmpPipeline() 54 | { 55 | log_d("~UmpPipeline"); 56 | UmpPipeline::Stop(); 57 | UmpPipeline::ClearObservers(); 58 | } 59 | 60 | void UmpPipeline::SetGraphConfiguration(const char* filename) 61 | { 62 | log_i(strf("SetGraphConfiguration: %s", filename)); 63 | _config_filename = filename; 64 | } 65 | 66 | void UmpPipeline::SetCaptureFromFile(const char* filename) 67 | { 68 | log_i(strf("SetCaptureFromFile: %s", filename)); 69 | _input_filename = filename; 70 | } 71 | 72 | void UmpPipeline::SetCaptureFromCamera(int cam_id, int cam_api, int cam_resx, int cam_resy, int cam_fps) 73 | { 74 | log_i(strf("SetCaptureParams: cam=%d api=%d w=%d h=%d fps=%d", cam_id, cam_api, cam_resx, cam_resy, cam_fps)); 75 | _cam_id = cam_id; 76 | _cam_api = cam_api; 77 | _cam_resx = cam_resx; 78 | _cam_resy = cam_resy; 79 | _cam_fps = cam_fps; 80 | } 81 | 82 | void UmpPipeline::ShowVideoWindow(bool show) 83 | { 84 | log_i(strf("ShowVideo: %d", (show ? 1 : 0))); 85 | _show_video_winow = show; 86 | } 87 | 88 |
89 | IUmpObserver* UmpPipeline::CreateObserver(const char* stream_name, long timeoutMillisecond) 90 | { 91 | log_i(strf("CreateObserver: %s", stream_name)); 92 | if (_run_flag) 93 | { 94 | log_e("Invalid state: pipeline running"); 95 | return nullptr; 96 | } 97 | auto* observer = new UmpObserver(stream_name, _packet_api, timeoutMillisecond); 98 | observer->AddRef(); 99 | _observers.emplace_back(observer); 100 | return observer; 101 | } 102 | 103 | void UmpPipeline::SetListener(IUmpPipelineListener* listener) 104 | { 105 | _listener = listener; 106 | } 107 | 108 | void UmpPipeline::SetFrameCallback(class IUmpFrameCallback* callback) 109 | { 110 | log_i(strf("SetFrameCallback: %p", callback)); 111 | _frame_callback = callback; 112 | } 113 | 114 | bool UmpPipeline::Start(void* side_packet) 115 | { 116 | Stop(); 117 | try 118 | { 119 | log_i("UmpPipeline::Start"); 120 | _frame_id = 0; 121 | _frame_ts = 0; 122 | _run_flag = true; 123 | SidePacket packet = side_packet != nullptr ? *static_cast<SidePacket*>(side_packet) : SidePacket(); 124 | if(side_packet == nullptr) 125 | { 126 | log_w("Start called with a null side packet, using an empty one"); 127 | } 128 | _worker = std::make_unique<std::thread>([this, packet]() { this->WorkerThread(packet, nullptr); }); 129 | log_i("UmpPipeline::Start OK"); 130 | return true; 131 | } 132 | catch (const std::exception& ex) 133 | { 134 | log_e(ex.what()); 135 | } 136 | return false; 137 | } 138 | 139 | bool UmpPipeline::StartImageSource(IImageSource* image_source, void* side_packet) 140 | { 141 | Stop(); 142 | try 143 | { 144 | log_i("UmpPipeline::StartImageSource"); 145 | _frame_id = 0; 146 | _frame_ts = 0; 147 | _run_flag = true; 148 | SidePacket packet = side_packet != nullptr ? *static_cast<SidePacket*>(side_packet) : SidePacket(); 149 | if(side_packet == nullptr) 150 | { 151 | log_w("StartImageSource called with a null side packet, using an empty one"); 152 | } 153 | _worker = std::make_unique<std::thread>([this, packet, image_source]() { this->WorkerThread(packet, image_source); }); 154 | log_i("UmpPipeline::StartImageSource OK"); 155 | return true; 156 | } 157 | catch (const std::exception& ex) 158 | { 159 | log_e(ex.what()); 160 | } 161 | return false; 162 | } 163 | 164 | void UmpPipeline::Stop() 165 | { 166 | try 167 | { 168 | _run_flag = false; 169 | if (_worker) 170 | { 171 | log_i("UmpPipeline::Stop"); 172 | _worker->join(); 173 | _worker.reset(); 174 | _frame_id = 0; 175 | log_i("UmpPipeline::Stop OK"); 176 | } 177 | } 178 | catch (const std::exception& ex) 179 | { 180 | log_e(ex.what()); 181 | } 182 | } 183 | 184 | IPacketAPI* UmpPipeline::GetPacketAPI() 185 | { 186 | return _packet_api.get(); 187 | } 188 | 189 | void UmpPipeline::ClearObservers() 190 | { 191 | _observers.clear(); 192 | } 193 |
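Usage note: side packets built through IPacketAPI reach the graph via Start(); the map is copied before the worker thread launches, so the caller can delete it right after Start() returns. "refine_face_landmarks" is the key OptimizeGraphConfig below looks for. A minimal sketch:

#include "ump_api.h"

bool StartWithOptions(IUmpPipeline* pipe) {
  IPacketAPI* api = pipe->GetPacketAPI();
  void* sp = api->NewSidePacket();
  api->SidePacketAddBoolean(sp, "refine_face_landmarks", true);
  const bool ok = pipe->Start(sp);  // Start() copies the map into the worker thread
  api->DeleteSidePacket(sp);
  return ok;
}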
194 | void UmpPipeline::WorkerThread(SidePacket side_packet, IImageSource* image_source) 195 | { 196 | _image_size_known = false; 197 | log_i("Enter WorkerThread"); 198 | // RUN 199 | if(_listener) 200 | { 201 | _listener->OnEnterPipelineWorkThread(); 202 | } 203 | TRY 204 | auto status = image_source != nullptr ? this->RunImageImpl(side_packet, image_source) : this->RunCaptureImpl(side_packet); 205 | if (!status.ok()) 206 | { 207 | std::string msg(status.message()); 208 | log_e(msg); 209 | if(_listener) 210 | { 211 | _listener->OnPipelineWorkThreadFault(); 212 | } 213 | } 214 | CATCH_ONLY 215 | // SHUTDOWN 216 | 217 | TRY 218 | ShutdownImpl(); 219 | CATCH_ONLY 220 | log_i("Leave WorkerThread"); 221 | if(_listener) 222 | { 223 | _listener->OnExitPipelineWorkThread(); 224 | } 225 | } 226 | 227 | void UmpPipeline::OptimizeGraphConfig(SidePacket& side_packet, mediapipe::CalculatorGraphConfig& config) 228 | { 229 | //fix: https://github.com/google/mediapipe/issues/3003 230 | const google::protobuf::RepeatedPtrField<mediapipe::CalculatorGraphConfig::Node>& nodes = config.node(); 231 | for (int i = 0; i < config.node_size(); i++) 232 | { 233 | const mediapipe::CalculatorGraphConfig_Node& node = nodes[i]; 234 | auto findIndex = static_cast<int>(node.calculator().find("FaceGeometry")); 235 | if(findIndex >= 0 && side_packet.find("refine_face_landmarks") != side_packet.end()) 236 | { 237 | side_packet["refine_face_landmarks"] = mediapipe::MakePacket<bool>(false); 238 | log_w("FaceGeometry is enabled, auto-disabling the refine_face_landmarks option."); 239 | break; 240 | } 241 | } 242 | } 243 | 244 | absl::Status UmpPipeline::ShutdownImpl() 245 | { 246 | _frame_id = 0; 247 | absl::Status status; 248 | _run_flag = false; 249 | if (_graph) // guard: Run*Impl may have failed before the graph was created 250 | { 251 | status = _graph->CloseAllPacketSources(); 252 | log_i(strf("CalculatorGraph::CloseAllPacketSources: %d", status.raw_code())); 253 | status = _graph->WaitUntilDone(); 254 | log_i(strf("CalculatorGraph::WaitUntilDone: %d", status.raw_code())); 255 | } 256 | _graph.reset(); 257 | if (_show_video_winow) 258 | { 259 | cv::destroyAllWindows(); 260 | } 261 | ReleaseFramePool(); 262 | log_i("UmpPipeline::Shutdown OK"); 263 | 264 | return absl::OkStatus(); 265 | } 266 | 267 | absl::Status UmpPipeline::RunImageImpl(SidePacket& side_packet, IImageSource* image_source) 268 | { 269 | constexpr char kInputStream[] = "input_video"; 270 | 271 | log_i("UmpPipeline::RunImageImpl"); 272 | 273 | // init mediapipe 274 | 275 | std::string config_str; 276 | RET_CHECK_OK(LoadGraphConfig(_config_filename, config_str)); 277 | 278 | log_i("Parse Graph Proto"); 279 | mediapipe::CalculatorGraphConfig config{}; 280 | RET_CHECK(mediapipe::ParseTextProto<mediapipe::CalculatorGraphConfig>(config_str, &config)); 281 | 282 | OptimizeGraphConfig(side_packet, config); 283 | 284 | log_i("CalculatorGraph::Initialize"); 285 | _graph.reset(new mediapipe::CalculatorGraph()); 286 | RET_CHECK_OK(_graph->Initialize(config)); 287 | 288 | for (auto& iter : _observers) 289 | { 290 | RET_CHECK_OK(iter->ObserveOutput(_graph.get())); 291 | } 292 | 293 | 294 | std::string str = "CalculatorGraph::StartRun\n"; 295 | if (side_packet.size() > 0) 296 | { 297 | for (auto& value : side_packet) { 298 | str += strf("%s : %s\n", value.first.c_str(), value.second.DebugString().c_str()); 299 | } 300 | } 301 | else 302 | { 303 | str += "Empty side packet used. "; 304 | } 305 | log_i(str); 306 | 307 | if(image_source->IsStatic()) 308 | { 309 | std::string key("static_image_mode"); 310 | side_packet[key] = mediapipe::MakePacket<bool>(true); 311 | log_i("Static mode used."); 312 | } 313 | RET_CHECK_OK(_graph->StartRun(side_packet)); 314 | 315 | log_i("------------> Start Loop Work Thread <------------"); 316 | bool first_loop = true; 317 | bool is_static = image_source->IsStatic(); 318 | auto mills = !is_static ? 33 : 1000;
319 | while (_run_flag) 320 | { 321 | IMediaPipeTexture* image = nullptr; 322 | if(!image_source->GetTexture(image)) 323 | { 324 | std::this_thread::sleep_for(std::chrono::milliseconds(mills)); 325 | continue; 326 | } 327 | if(_listener && !_image_size_known) 328 | { 329 | _listener->OnImageSizeReceived(image->GetWidth(), image->GetHeight()); 330 | _image_size_known = true; 331 | } 332 | auto status = AddImageFrameIntoStream(kInputStream, image); 333 | if(!status.ok()) 334 | { 335 | log_e(strf("AddImageFrameIntoStream failed: %.*s", static_cast<int>(status.message().size()), status.message().data())); 336 | //image->Release(); // not needed: the packet's deleter releases the texture 337 | std::this_thread::sleep_for(std::chrono::milliseconds(mills)); 338 | continue; 339 | } 340 | if(first_loop) 341 | { 342 | log_i("UmpPipeline::AddImageFrameIntoStream (in loop) OK."); 343 | first_loop = false; 344 | } 345 | _frame_id++; 346 | if(is_static) 347 | { 348 | std::this_thread::sleep_for(std::chrono::milliseconds(mills)); 349 | } 350 | } 351 | return absl::OkStatus(); 352 | } 353 | 354 |
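Usage note: RunImageImpl above only needs an IImageSource; a toy single-image source might look like the sketch below. Assumptions: the cv::Mat is already 3-channel RGB (matching MediaPipeImageFormat::SRGB), it outlives the pipeline, and Release() can be a no-op because AddImageFrameIntoStream's deleter only signals that the graph is done with the buffer:

#include "ump_api.h"
#include "opencv2/core/mat.hpp"
#include <utility>

class MatTexture : public IMediaPipeTexture {
public:
  explicit MatTexture(cv::Mat rgb) : _rgb(std::move(rgb)) {}
  long GetImageId() const override { return 1; }
  void* GetData() const override { return _rgb.data; }
  MediaPipeImageFormat GetFormat() const override { return MediaPipeImageFormat::SRGB; }
  int GetWidthStep() const override { return (int)_rgb.step; }
  int GetWidth() const override { return _rgb.cols; }
  int GetHeight() const override { return _rgb.rows; }
  void Release() override {}  // the mat outlives every frame wrapped around it
private:
  cv::Mat _rgb;
};

class StaticImageSource : public IImageSource {
public:
  explicit StaticImageSource(cv::Mat rgb) : _tex(std::move(rgb)) {}
  bool GetTexture(IMediaPipeTexture*& outTexture) override { outTexture = &_tex; return true; }
  bool IsStatic() const override { return true; }
private:
  MatTexture _tex;
};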
380 | 	std::unique_ptr<mediapipe::OutputStreamPoller> output_poller;
381 | 	if (_show_video_winow || (_frame_callback && _frame_callback_enabled))
382 | 	{
383 | 		//ASSIGN_OR_RETURN(mediapipe::OutputStreamPoller poller, graph->AddOutputStreamPoller(kOutputStream));
384 | 		auto output_poller_sop = _graph->AddOutputStreamPoller(kOutputStream);
385 | 		RET_CHECK(output_poller_sop.ok());
386 | 		output_poller = std::make_unique<mediapipe::OutputStreamPoller>(std::move(output_poller_sop.value()));
387 | 	}
388 | 
389 | 	// init opencv
390 | 
391 | 	log_i("VideoCapture::open");
392 | 	cv::VideoCapture capture;
393 | 	_use_camera = _input_filename.empty();
394 | 
395 | 	if (_use_camera)
396 | 	{
397 | #if defined(_WIN32)
398 | 		if (_cam_api == cv::CAP_ANY)
399 | 		{
400 | 			// CAP_MSMF is broken on Windows; use CAP_DSHOW by default.
401 | 			// See: https://github.com/opencv/opencv/issues/17687
402 | 			_cam_api = cv::CAP_DSHOW;
403 | 		}
404 | #endif
405 | 
406 | 		capture.open(_cam_id, _cam_api);
407 | 	}
408 | 	else
409 | 	{
410 | 		capture.open(*_input_filename);
411 | 	}
412 | 
413 | 	RET_CHECK(capture.isOpened());
414 | 
415 | 	if (_use_camera)
416 | 	{
417 | 		if (_cam_resx > 0 && _cam_resy > 0)
418 | 		{
419 | 			capture.set(cv::CAP_PROP_FRAME_WIDTH, _cam_resx);
420 | 			capture.set(cv::CAP_PROP_FRAME_HEIGHT, _cam_resy);
421 | 		}
422 | 
423 | 		if (_cam_fps > 0)
424 | 			capture.set(cv::CAP_PROP_FPS, _cam_fps);
425 | 	}
426 | 
427 | 	const int cap_resx = (int)capture.get(cv::CAP_PROP_FRAME_WIDTH);
428 | 	const int cap_resy = (int)capture.get(cv::CAP_PROP_FRAME_HEIGHT);
429 | 	const double cap_fps = capture.get(cv::CAP_PROP_FPS);
430 | 	log_i(strf("capture: w=%d h=%d fps=%f, overlay: %s", cap_resx, cap_resy, cap_fps, _show_video_winow ? "true" : "false"));
431 | 	if (_show_video_winow)
432 | 	{
433 | 		cv::namedWindow(kWindowName, cv::WINDOW_AUTOSIZE);
434 | 	}
435 | 
436 | 	// start
437 | 
438 | 	cv::Mat cvmat_bgr;
439 | 	cv::Mat cvmat_rgb;
440 | 
441 | 	RET_CHECK_OK(_graph->StartRun(side_packet));
442 | 
443 | 	std::string str = "CalculatorGraph::StartRun\n";
444 | 	if (side_packet.size() > 0)
445 | 	{
446 | 		for (auto& value : side_packet)
447 | 		{
448 | 			str += strf("%s : %s\n", value.first.c_str(), value.second.DebugString().c_str());
449 | 		}
450 | 	}
451 | 	log_i(str);
452 | 
453 | 	double t0 = get_timestamp_us();
454 | 
455 | 	log_i("------------> Start Loop Work Thread <------------");
456 | 	bool firstLoop = true;
457 | 	int maxEmptyCount = 60;
458 | 	while (_run_flag)
459 | 	{
460 | 		double t1 = get_timestamp_us();
461 | 		double dt = t1 - t0;
462 | 		t0 = t1;
463 | 
464 | 		PROF_NAMED("pipeline_tick");
465 | 
466 | 		{
467 | 			PROF_NAMED("capture_frame");
468 | 			capture >> cvmat_bgr;
469 | 		}
470 | 
471 | 		if (!_use_camera && cvmat_bgr.empty())
472 | 		{
473 | 			maxEmptyCount--;
474 | 			if (maxEmptyCount > 0)
475 | 			{
476 | 				continue;
477 | 			}
478 | 			else
479 | 			{
480 | 				log_e("VideoCapture: frame is empty!");
481 | 				break;
482 | 			}
483 | 		}
484 | 		const double frame_timestamp_us = get_timestamp_us();
485 | 		_frame_ts = frame_timestamp_us;
486 | 
487 | 		{
488 | 			PROF_NAMED("enque_frame");
489 | 
490 | 			cv::cvtColor(cvmat_bgr, cvmat_rgb, cv::COLOR_BGR2RGB);
491 | 			/*if (_use_camera)
492 | 				cv::flip(cvmat_rgb, cvmat_rgb, 1);*/
493 | 
494 | 			auto input_mif = absl::make_unique<mediapipe::ImageFrame>(
495 | 				mediapipe::ImageFormat::SRGB, cvmat_rgb.cols, cvmat_rgb.rows,
496 | 				mediapipe::ImageFrame::kDefaultAlignmentBoundary);
497 | 
498 | 			// TODO: zero copy
499 | 			cv::Mat input_mif_view = mediapipe::formats::MatView(input_mif.get());
500 | 			cvmat_rgb.copyTo(input_mif_view);
501 | 
502 | 			RET_CHECK_OK(_graph->AddPacketToInputStream(
503 | 				kInputStream,
504 | 				mediapipe::Adopt(input_mif.release())
505 | 					.At(mediapipe::Timestamp((size_t)frame_timestamp_us))));
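			// Note: Adopt() above moves ownership of the ImageFrame into the
			// packet, and At() stamps it with a strictly increasing microsecond
			// timestamp, which MediaPipe requires for stream ordering.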
506 | 
507 | 			if (_listener && !_image_size_known)
508 | 			{
509 | 				_listener->OnImageSizeReceived(cvmat_rgb.cols, cvmat_rgb.rows);
510 | 				_image_size_known = true;
511 | 			}
512 | 
513 | 			if (firstLoop)
514 | 			{
515 | 				log_i("CalculatorGraph::AddPacketToInputStream OK");
516 | 			}
517 | 		}
518 | 
519 | 		if (output_poller)
520 | 		{
521 | 			PROF_NAMED("poll_output");
522 | 
523 | 			mediapipe::Packet packet;
524 | 			if (!output_poller->Next(&packet))
525 | 			{
526 | 				log_w("OutputStreamPoller::Next failed");
527 | 				continue;
528 | 			}
529 | 
530 | 			// TODO: zero copy
531 | 			auto& output_mif = packet.Get<mediapipe::ImageFrame>();
532 | 			cv::Mat output_mif_view = mediapipe::formats::MatView(&output_mif);
533 | 
534 | 			if (_frame_callback)
535 | 			{
536 | 				UmpFrame* frame = AllocFrame();
537 | 				auto& dst_mat = frame->GetMatrixRef();
538 | 				cv::cvtColor(output_mif_view, dst_mat, cv::COLOR_RGB2BGRA); // unreal requires BGRA8 or RGBA8
539 | 				frame->_format = EUmpPixelFormat::B8G8R8A8;
540 | 				_frame_callback->OnUmpFrame(frame); // unreal should call frame->Release()
541 | 			}
542 | 
543 | 			if (_show_video_winow)
544 | 			{
545 | 				auto stat = strf("%.0f | %.4f | %" PRIu64 "", _frame_ts, dt * 0.001, _frame_id);
546 | 				cv::putText(output_mif_view, *stat, cv::Point(10, 20), cv::FONT_HERSHEY_PLAIN, 1, cv::Scalar(0, 255, 0));
547 | 
548 | 				cv::cvtColor(output_mif_view, output_mif_view, cv::COLOR_RGB2BGR);
549 | 				cv::imshow(kWindowName, output_mif_view);
550 | 				cv::waitKey(1); // required for cv::imshow
551 | 			}
552 | 		}
553 | 
554 | 		// wait for next frame (when playing from file):
555 | 		// one frame lasts 1e6 / cap_fps microseconds (e.g. ~33333 us at 30 fps),
556 | 		// so sleep until that much wall time has passed since the frame was captured
557 | 		if (!_use_camera && cap_fps > 0.0)
558 | 		{
559 | 			PROF_NAMED("wait_next_frame");
560 | 
561 | 			const double frame_us = (1.0 / cap_fps) * 1e6;
562 | 			for (;;)
563 | 			{
564 | 				const double cur_timestamp_us = get_timestamp_us();
565 | 				const double delta = fabs(cur_timestamp_us - frame_timestamp_us);
566 | 				if (delta >= frame_us)
567 | 					break;
568 | 				std::this_thread::sleep_for(std::chrono::microseconds((size_t)(frame_us - delta)));
569 | 			}
570 | 		}
571 | 
572 | 		_frame_id++;
573 | 		if (firstLoop)
574 | 		{
575 | 			firstLoop = false;
576 | 		}
577 | 	}
578 | 	_frame_id = 0;
579 | 	absl::Status status;
580 | 	_run_flag = false;
581 | 	status = _graph->CloseInputStream(kInputStream);
582 | 	log_i(strf("CalculatorGraph::CloseInputStream: %d", status.raw_code()));
583 | 	status = _graph->CloseAllPacketSources();
584 | 	log_i(strf("CalculatorGraph::CloseAllPacketSources: %d", status.raw_code()));
585 | 	status = _graph->WaitUntilDone();
586 | 	log_i(strf("CalculatorGraph::WaitUntilDone: %d", status.raw_code()));
587 | 	return absl::OkStatus();
588 | }
589 | 
590 | UmpFrame* UmpPipeline::AllocFrame()
591 | {
592 | 	PROF_NAMED("alloc_frame");
593 | 	UmpFrame* frame = nullptr;
594 | 
595 | 	if (!_frame_pool.empty())
596 | 	{
597 | 		std::lock_guard lock(_frame_mux);
598 | 		if (!_frame_pool.empty())
599 | 		{
600 | 			frame = _frame_pool.back();
601 | 			_frame_pool.pop_back();
602 | 
603 | 			//log_d(strf("reuse UmpFrame %p", frame));
604 | 			return frame;
605 | 		}
606 | 	}
607 | 
608 | 	auto* context = this;
609 | 	UmpCustomDtor dtor = [context](IUmpObject* obj) { context->ReturnFrameToPool(static_cast<UmpFrame*>(obj)); };
610 | 	frame = new UmpFrame(dtor); // frame->Release() triggers custom dtor
611 | 
612 | 	log_d(strf("new UmpFrame %p", frame));
613 | 	return frame;
614 | }
615 | 
616 | void UmpPipeline::ReturnFrameToPool(UmpFrame* frame)
617 | {
618 | 	//log_d(strf("pool UmpFrame %p", frame));
619 | 	frame->AddRef(); // keep ref counter alive
620 | 	std::lock_guard lock(_frame_mux);
621 | 	_frame_pool.push_back(frame);
622 | }
623 | 
624 | void UmpPipeline::ReleaseFramePool()
625 | {
626 | 	// manual delete because frame->Release() triggers ReturnFrameToPool()
627 | 	for (auto* frame : _frame_pool)
628 | 	{
629 | 		log_d(strf("delete UmpFrame %p", frame));
630 | 		delete frame;
631 | 	}
632 | 	_frame_pool.clear();
633 | }
634 | 
635 | // allows multiple files separated by ';'
636 | absl::Status UmpPipeline::LoadGraphConfig(const std::string& filename, std::string& out_str)
637 | {
638 | 	log_i(strf("LoadGraphConfig: %s", filename.c_str()));
639 | 
640 | 	out_str.clear();
641 | 	out_str.reserve(4096);
642 | 
643 | 	std::string sub_str;
644 | 	sub_str.reserve(1024);
645 | 
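	// Illustrative example (hypothetical file names): passing
	// SetGraphConfiguration("a.pbtxt;b.pbtxt") splits on ';' below and
	// concatenates both files into a single graph config string.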
646 | 	std::stringstream filename_ss(filename);
647 | 	std::string sub_name;
648 | 
649 | 	while (std::getline(filename_ss, sub_name, ';'))
650 | 	{
651 | 		sub_str.clear();
652 | 		RET_CHECK_OK(LoadResourceFile(sub_name, sub_str));
653 | 		out_str.append(sub_str);
654 | 	}
655 | 
656 | 	return absl::OkStatus();
657 | }
658 | 
659 | absl::Status UmpPipeline::LoadResourceFile(const std::string& filename, std::string& out_str)
660 | {
661 | 	out_str.clear();
662 | 
663 | 	std::string path;
664 | 	ASSIGN_OR_RETURN(path, mediapipe::PathToResourceAsFile(filename));
665 | 
666 | 	RET_CHECK_OK(mediapipe::file::GetContents(path, &out_str));
667 | 
668 | 	return absl::OkStatus();
669 | }
670 | 
671 | void UmpPipeline::LogProfilerStats() {
672 | #if defined(PROF_ENABLE)
673 | 	log_i(std::string(PROF_SUMMARY));
674 | #endif
675 | }
--------------------------------------------------------------------------------
/ump_pipeline.h:
--------------------------------------------------------------------------------
1 | #ifndef UMP_PIPELINE_H
2 | #define UMP_PIPELINE_H
3 | 
4 | #include "ump_object.h"
5 | #include "mediapipe/framework/calculator_graph.h"
6 | 
7 | using UmpPipelineBase = UmpObject<IUmpPipeline>;
8 | 
9 | class UmpPipeline : public UmpPipelineBase
10 | {
11 | 	typedef std::map<std::string, mediapipe::Packet> SidePacket;
12 | protected:
13 | 	virtual ~UmpPipeline() override;
14 | 
15 | public:
16 | 	UmpPipeline();
17 | 
18 | 	absl::Status AddImageFrameIntoStream(const char* stream_name, IMediaPipeTexture* texture) const;
19 | 	virtual void SetGraphConfiguration(const char* filename) override;
20 | 	virtual void SetCaptureFromFile(const char* filename) override;
21 | 	virtual void SetCaptureFromCamera(int cam_id, int cam_api, int cam_resx, int cam_resy, int cam_fps) override;
22 | 	virtual void ShowVideoWindow(bool show) override;
23 | 	inline virtual void EnableFrameCallback(bool enabled) override { _frame_callback_enabled = enabled; }
24 | 	inline virtual bool IsFrameCallbackEnabled() override { return _frame_callback_enabled; }
25 | 	virtual IUmpObserver* CreateObserver(const char* stream_name, long timeoutMillisecond = 2000) override;
26 | 	virtual void SetListener(IUmpPipelineListener* listener) override;
27 | 	virtual void SetFrameCallback(class IUmpFrameCallback* callback) override;
28 | 	virtual bool Start(void* side_packet) override;
29 | 	virtual bool StartImageSource(IImageSource* image_source, void* side_packet) override;
30 | 	virtual void Stop() override;
31 | 	virtual IPacketAPI* GetPacketAPI() override;
32 | 	virtual void ClearObservers() override;
33 | 
34 | 	virtual void LogProfilerStats() override;
35 | 	virtual uint64_t GetLastFrameId() override { return _frame_id; }
36 | 	virtual double GetLastFrameTimestamp() override { return _frame_ts; }
37 | 
38 | private:
39 | 	void WorkerThread(SidePacket side_packet, IImageSource* image_source);
40 | 	void OptimizeGraphConfig(SidePacket& side_packet, mediapipe::CalculatorGraphConfig& config);
41 | 	absl::Status ShutdownImpl();
42 | 	absl::Status RunImageImpl(SidePacket& side_packet, IImageSource* image_source);
43 | 	absl::Status RunCaptureImpl(SidePacket& side_packet);
44 | 	absl::Status LoadGraphConfig(const std::string& filename, std::string& out_str);
45 | 	absl::Status LoadResourceFile(const std::string& filename, std::string& out_str);
46 | 	class UmpFrame* AllocFrame();
47 | 	void ReturnFrameToPool(class UmpFrame* frame);
48 | 	void ReleaseFramePool();
49 | 
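	// Frame pooling: AllocFrame() reuses pooled frames; UmpFrame::Release()
	// routes frames back through ReturnFrameToPool() via a custom destructor,
	// so they are only truly deleted in ReleaseFramePool().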
50 | private:
51 | 	std::string resource_dir;
52 | 	std::string _config_filename;
53 | 	std::string _input_filename;
54 | 	int _cam_id = -1;
55 | 	int _cam_api = 0;
56 | 	int _cam_resx = 0;
57 | 	int _cam_resy = 0;
58 | 	int _cam_fps = 0;
59 | 	bool _use_camera = false;
60 | 	bool _show_video_winow = false;
61 | 	bool _frame_callback_enabled = true;
62 | 	bool _image_size_known = false;
63 | 
64 | 	using ObserverPtr = std::unique_ptr<class UmpObserver>;
65 | 	std::list<ObserverPtr> _observers{};
66 | 
67 | 	std::list<class UmpFrame*> _frame_pool{};
68 | 	class IUmpFrameCallback* _frame_callback = nullptr;
69 | 	class IUmpPipelineListener* _listener = nullptr;
70 | 	std::mutex _frame_mux;
71 | 
72 | 	std::shared_ptr<mediapipe::CalculatorGraph> _graph;
73 | 	std::shared_ptr<IPacketAPI> _packet_api;
74 | 
75 | 	std::unique_ptr<std::thread> _worker{};
76 | 	std::atomic<bool> _run_flag;
77 | 
78 | 	uint64_t _frame_id = 0;
79 | 	double _frame_ts = 0;
80 | };
81 | 
82 | #endif
83 | 
--------------------------------------------------------------------------------
/ump_profiler.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | 
3 | #if !defined(PROF_ENABLE)
4 | 
5 | #include <string>
6 | 
7 | #define PROF_NAMED(name)
8 | #define PROF_SCOPED
9 | #define PROF_SUMMARY std::string()
10 | 
11 | #else
12 | 
13 | #include <string>
14 | #include <vector>
15 | #include <unordered_map>
16 | #include <algorithm>
17 | #include <chrono>
18 | #include <mutex>
19 | #include <utility>
20 | #include <cstdio>
21 | 
22 | #define PROF_STRINGIZE(x) PROF_STRINGIZE_INNER(x)
23 | #define PROF_STRINGIZE_INNER(x) #x
24 | 
25 | #define PROF_CONCAT(a, b) PROF_CONCAT_INNER(a, b)
26 | #define PROF_CONCAT_INNER(a, b) a ## b
27 | 
28 | #define PROF_UNIQ_VAR(base) PROF_CONCAT(base, __LINE__)
29 | #define PROF_UNIQ_NAME __FILE__ ":" PROF_STRINGIZE(__LINE__)
30 | 
31 | #define PROF_NAMED(name) bicycle::ScopedProfiler PROF_UNIQ_VAR(prof_named_) (name, bicycle::file_name(PROF_UNIQ_NAME))
32 | #define PROF_SCOPED bicycle::ScopedProfiler PROF_UNIQ_VAR(prof_scoped_) (__FUNCTION__, bicycle::file_name(PROF_UNIQ_NAME))
33 | #define PROF_SUMMARY bicycle::Profiler::GetInstance().LogSummary()
34 | 
35 | namespace bicycle {
36 | 
37 | constexpr const char* file_name(const char* path) {
38 | 	const char* file = path;
39 | 	while (*path) {
40 | 		if (*path == '/' || *path == '\\') {
41 | 			file = path + 1;
42 | 		}
43 | 		path++;
44 | 	}
45 | 	return file;
46 | }
47 | 
48 | class NonCopyable
49 | {
50 | private:
51 | 	NonCopyable(const NonCopyable&) = delete;
52 | 	NonCopyable& operator=(const NonCopyable&) = delete;
53 | public:
54 | 	NonCopyable() {}
55 | };
56 | 
57 | class ProfilerSection : public NonCopyable
58 | {
59 | public:
60 | 	ProfilerSection(const char* name, const char* uniq) :
61 | 		m_name(name), m_uniq(uniq), m_sum(0), m_count(0)
62 | 	{}
63 | 
64 | 	void Begin() {
65 | 		m_ticks = std::chrono::high_resolution_clock::now();
66 | 	}
67 | 
68 | 	void End() {
69 | 		const auto ticks = std::chrono::high_resolution_clock::now();
70 | 		const double delta = std::chrono::duration_cast<std::chrono::nanoseconds>(ticks - m_ticks).count() * 1e-6; // milliseconds
71 | 		m_sum += delta;
72 | 		++m_count;
73 | 	}
74 | 
75 | 	void ResetCounters() {
76 | 		m_sum = 0;
77 | 		m_count = 0;
78 | 	}
79 | 
80 | 	const char* GetName() const { return m_name; }
81 | 	const char* GetUniq() const { return m_uniq; }
82 | 	size_t GetCount() const { return m_count; }
83 | 	double GetSum() const { return m_sum; }
84 | 	double GetAvg() const { return (m_count > 0 ? (m_sum / (double)m_count) : 0); }
85 | 
86 | private:
87 | 	const char* m_name;
88 | 	const char* m_uniq;
89 | 	std::chrono::time_point<std::chrono::high_resolution_clock> m_ticks;
90 | 	double m_sum;
91 | 	size_t m_count;
92 | };
93 | 
94 | class Profiler : public NonCopyable
95 | {
96 | public:
97 | 	typedef std::unordered_map<std::string, ProfilerSection*> SectionMap;
98 | 
99 | 	Profiler() {}
100 | 	~Profiler() { Release(); }
101 | 
102 | 	static Profiler& GetInstance() {
103 | 		static Profiler instance;
104 | 		return instance;
105 | 	}
106 | 
107 | 	void Release() {
108 | 		auto l = std::lock_guard(m_lock);
109 | 		for (auto& iter : m_sections) {
110 | 			delete iter.second;
111 | 		}
112 | 		m_sections.clear();
113 | 	}
114 | 
115 | 	ProfilerSection* GetSection(const char* name, const char* uniq) {
116 | 		auto l = std::lock_guard(m_lock);
117 | 		auto iter = m_sections.find(uniq);
118 | 		if (iter != m_sections.end()) {
119 | 			return iter->second;
120 | 		}
121 | 		else {
122 | 			auto* node = new ProfilerSection(name, uniq);
123 | 			m_sections.insert({ uniq, node });
124 | 			return node;
125 | 		}
126 | 	}
127 | 
128 | 	std::string LogSummary() {
129 | 		auto l = std::lock_guard(m_lock);
130 | 		if (m_sections.empty()) {
131 | 			return std::string();
132 | 		}
133 | 
134 | 		std::vector<ProfilerSection*> sections;
135 | 		sections.reserve(m_sections.size());
136 | 
137 | 		for (auto& iter : m_sections) {
138 | 			sections.push_back(iter.second);
139 | 		}
140 | 
141 | 		struct SectionComparatorSum {
142 | 			inline bool operator() (ProfilerSection* a, ProfilerSection* b) const { return a->GetSum() > b->GetSum(); }
143 | 		};
144 | 		struct SectionComparatorAvg {
145 | 			inline bool operator() (ProfilerSection* a, ProfilerSection* b) const { return a->GetAvg() > b->GetAvg(); }
146 | 		};
147 | 		std::sort(sections.begin(), sections.end(), SectionComparatorSum());
148 | 
149 | 		constexpr int buf_size = 2048;
150 | 		char buffer[buf_size];
151 | 
152 | 		std::string log;
153 | 		log.append("== PROFILER SUMMARY ==\n");
154 | 
155 | 		snprintf(buffer, buf_size - 1,
156 | 			"%10s %10s %10s %10s %-30s %s\n",
157 | 			"sum(s)", "sum(ms)", "count", "avg(ms)", "name", "location");
158 | 		log.append(buffer);
159 | 
160 | 		for (auto* section : sections) {
161 | 			snprintf(buffer, buf_size - 1,
162 | 				"%10.3f %10.3f %10u %10.4f %-30s %s\n",
163 | 				(section->GetSum() * 0.001), section->GetSum(), (unsigned int)section->GetCount(), section->GetAvg(), section->GetName(), section->GetUniq());
164 | 			log.append(buffer);
165 | 		}
166 | 
167 | 		log.append("\n");
168 | 		return log;
169 | 	}
170 | 
171 | 	void ResetCounters() {
172 | 		auto l = std::lock_guard(m_lock);
173 | 		for (auto& iter : m_sections) {
174 | 			iter.second->ResetCounters();
175 | 		}
176 | 	}
177 | 
178 | private:
179 | 	SectionMap m_sections;
180 | 	std::mutex m_lock;
181 | };
182 | 
183 | class ScopedProfiler : public NonCopyable
184 | {
185 | public:
186 | 	ScopedProfiler(const char* name, const char* uniq) {
187 | 		m_section = Profiler::GetInstance().GetSection(name, uniq);
188 | 		m_section->Begin();
189 | 	}
190 | 
191 | 	~ScopedProfiler() {
192 | 		m_section->End();
193 | 	}
194 | 
195 | private:
196 | 	ProfilerSection* m_section;
197 | };
198 | 
199 | } // namespace
200 | 
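// Usage sketch (illustrative; DoWork() is a placeholder): define PROF_ENABLE
// before including this header, then time any scope and dump the totals:
//
//   void Tick() {
//       PROF_NAMED("pipeline_tick"); // accumulates one section per call site
//       DoWork();
//   }
//   ...
//   log_i(PROF_SUMMARY); // "== PROFILER SUMMARY ==" table, sorted by total time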
#include "mediapipe/framework/calculator_framework.h" 24 | 25 | #include "ump_commons.h" 26 | 27 | //#define PROF_ENABLE 28 | #include "ump_profiler.h" 29 | 30 | // Macro 31 | 32 | #define UMP_LOCK(_mux) auto _lock = std::lock_guard(_mux) 33 | 34 | #define TRY \ 35 | try { 36 | 37 | #define CATCH_EXCEPTION \ 38 | return 0; \ 39 | } \ 40 | catch (std::exception & e) { \ 41 | log(EUmpVerbosity::Error, e.what()); \ 42 | } \ 43 | catch (...) { \ 44 | log(EUmpVerbosity::Error, "Unknown exception occured"); \ 45 | } \ 46 | return -1; 47 | 48 | #define CATCH_RETURN(RET_VALUE, FAILED) \ 49 | return RET_VALUE; \ 50 | } \ 51 | catch (std::exception & e) { \ 52 | log(EUmpVerbosity::Error, e.what()); \ 53 | } \ 54 | catch (...) { \ 55 | log(EUmpVerbosity::Error, "Unknown exception occured"); \ 56 | } \ 57 | return FAILED; 58 | 59 | #define CATCH_RETURN_STATUS \ 60 | } \ 61 | catch (std::exception & e) { \ 62 | return absl::Status(absl::StatusCode::kAborted, e.what()); \ 63 | } \ 64 | catch (...) { \ 65 | return absl::Status(absl::StatusCode::kAborted,"Unknown exception occured"); \ 66 | } 67 | 68 | 69 | #define CATCH_ONLY \ 70 | } \ 71 | catch (std::exception & e) { \ 72 | log(EUmpVerbosity::Error, e.what()); \ 73 | } \ 74 | catch (...) { \ 75 | log(EUmpVerbosity::Error, "Unknown exception occured"); \ 76 | } 77 | 78 | 79 | 80 | 81 | // Globals 82 | extern IUmpLog* _ump_log; 83 | 84 | inline void log(EUmpVerbosity verbosity, const char* msg) { if (_ump_log) { _ump_log->Println(verbosity, msg); } } 85 | 86 | // String 87 | 88 | inline const char* operator*(const std::string& str) { return str.c_str(); } 89 | 90 | inline std::string strf(const char* format, ...) 91 | { 92 | const size_t buf_size = 2048; 93 | char buf[buf_size]; 94 | va_list args; 95 | va_start(args, format); 96 | int n = vsnprintf_s(buf, buf_size - 1, _TRUNCATE, format, args); 97 | va_end(args); 98 | if (n >= 0) 99 | buf[n] = 0; 100 | else 101 | buf[0] = 0; 102 | return std::move(std::string(buf)); 103 | } 104 | 105 | 106 | extern inline const char* strcpy_to_heap(const std::string& str) { 107 | if (str.empty()) { 108 | return nullptr; 109 | } 110 | 111 | auto str_ptr = new char[str.length() + 1]; 112 | snprintf(str_ptr, str.length() + 1, str.c_str()); 113 | 114 | return str_ptr; 115 | } -------------------------------------------------------------------------------- /update_proto_models.bat: -------------------------------------------------------------------------------- 1 | :: TODO => USE BAZEL TO DEPLOY? 
2 | @echo off
3 | chcp 65001
4 | setlocal
5 | 
6 | set "UNREAL_PLUGIN_DIR=D:\3D_Works\UE\MediaPipe4U"
7 | set "INITIAL_DIR=%cd%"
8 | 
9 | set "SCRIPTS_DIR=%~dp0"
:: capture the ESC control character (0x1B) into ESC_CHAR for ANSI color output
10 | for /f "delims=#" %%i in ('prompt #$E#^&echo on^&for %%a in ^(1^) do rem') do set "ESC_CHAR=%%i"
11 | 
12 | set FN=%~p0
13 | set FN=%FN:~0,-1%
14 | 
15 | :getfolder
16 | FOR /F "tokens=1,* delims=\/" %%i in ("%FN%") do (
17 | 	if not "%%j"=="" (
18 | 		set FN=%%j
19 | 		goto getfolder
20 | 	)
21 | )
22 | echo Current folder: %FN%
23 | 
24 | FOR /F %%i in ('where python') do (
25 | 	set PYTHON_EXE=%%i
26 | 	goto GET_PY
27 | )
28 | :GET_PY
29 | 
30 | set "PYTHON_EXE=%PYTHON_EXE:\=\\%"
31 | 
32 | cd "%SCRIPTS_DIR%..\..\"
33 | 
34 | SET "ROOT=%CD%"
35 | 
36 | if not exist mediapipe ( echo "invalid directory" && exit /b 1 )
37 | 
38 | if not exist bazel-bin\mediapipe\modules\face_geometry\data\geometry_pipeline_metadata_landmarks.binarypb (
39 | 	bazel build -c opt --define MEDIAPIPE_DISABLE_GPU=1 --action_env PYTHON_BIN_PATH="%PYTHON_EXE%" mediapipe/modules/face_geometry/data:geometry_pipeline_metadata_landmarks
40 | )
41 | 
42 | set "BIN_DIR=%ROOT%\bazel-bin"
43 | 
44 | set "FORMAT=%UNREAL_PLUGIN_DIR%\Source\MediaPipe\Private\mediapipe\framework\formats"
45 | set "MODULES=%UNREAL_PLUGIN_DIR%\Source\MediaPipe\Private\mediapipe\modules"
46 | 
47 | set "S_FORMAT=%BIN_DIR%\mediapipe\framework\formats"
48 | set "S_MODULES=%BIN_DIR%\mediapipe\modules"
49 | 
50 | setlocal enabledelayedexpansion
51 | 
52 | for /R %FORMAT% %%f in (*.*) do (
53 | 	@rem echo %%f
54 | 	@rem echo %%~nxf
55 | 	set "FPATH=%%f"
56 | 	set "FPATH=!FPATH:%FORMAT%=%S_FORMAT%!"
57 | 
58 | 	set "RR=%%f"
59 | 	set "RR=!RR:%FORMAT%\=!"
60 | 	if exist !FPATH! (
61 | 		copy /Y "!FPATH!" "%%f"
62 | 		echo !RR! COPIED
63 | 	) else (
64 | 		echo !ESC_CHAR![31m!RR! does not exist!ESC_CHAR![m
65 | 	)
66 | )
67 | 
68 | for /R %MODULES% %%f in (*.*) do (
69 | 	@rem echo %%f
70 | 	@rem echo %%~nxf
71 | 	set "FPATH=%%f"
72 | 	set "FPATH=!FPATH:%MODULES%=%S_MODULES%!"
73 | 
74 | 	set "RR=%%f"
75 | 	set "RR=!RR:%MODULES%\=!"
76 | 	if exist !FPATH! (
77 | 		copy /Y "!FPATH!" "%%f"
78 | 		echo !RR! COPIED
79 | 	) else (
80 | 		echo !ESC_CHAR![31m!RR! does not exist!ESC_CHAR![m
81 | 	)
82 | )
83 | 
84 | pause
--------------------------------------------------------------------------------
/vs/unreal_mediapipe.sln:
--------------------------------------------------------------------------------
1 | 
2 | Microsoft Visual Studio Solution File, Format Version 12.00
3 | # Visual Studio Version 16
4 | VisualStudioVersion = 16.0.31112.23
5 | MinimumVisualStudioVersion = 10.0.40219.1
6 | Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "unreal_mediapipe", "unreal_mediapipe.vcxproj", "{D1EFB5F2-2EB7-408D-B571-BB4969063A1F}"
7 | EndProject
8 | Global
9 | 	GlobalSection(SolutionConfigurationPlatforms) = preSolution
10 | 		Debug|x64 = Debug|x64
11 | 		Debug|x86 = Debug|x86
12 | 		Release|x64 = Release|x64
13 | 		Release|x86 = Release|x86
14 | 	EndGlobalSection
15 | 	GlobalSection(ProjectConfigurationPlatforms) = postSolution
16 | 		{D1EFB5F2-2EB7-408D-B571-BB4969063A1F}.Debug|x64.ActiveCfg = Debug|x64
17 | 		{D1EFB5F2-2EB7-408D-B571-BB4969063A1F}.Debug|x64.Build.0 = Debug|x64
18 | 		{D1EFB5F2-2EB7-408D-B571-BB4969063A1F}.Debug|x86.ActiveCfg = Debug|Win32
19 | 		{D1EFB5F2-2EB7-408D-B571-BB4969063A1F}.Debug|x86.Build.0 = Debug|Win32
20 | 		{D1EFB5F2-2EB7-408D-B571-BB4969063A1F}.Release|x64.ActiveCfg = Release|x64
21 | 		{D1EFB5F2-2EB7-408D-B571-BB4969063A1F}.Release|x64.Build.0 = Release|x64
22 | 		{D1EFB5F2-2EB7-408D-B571-BB4969063A1F}.Release|x86.ActiveCfg = Release|Win32
23 | 		{D1EFB5F2-2EB7-408D-B571-BB4969063A1F}.Release|x86.Build.0 = Release|Win32
24 | 	EndGlobalSection
25 | 	GlobalSection(SolutionProperties) = preSolution
26 | 		HideSolutionNode = FALSE
27 | 	EndGlobalSection
28 | 	GlobalSection(ExtensibilityGlobals) = postSolution
29 | 		SolutionGuid = {11BAD89A-BD08-43FF-B4E8-31F615BD7CC4}
30 | 	EndGlobalSection
31 | EndGlobal
32 | 
--------------------------------------------------------------------------------
/vs/unreal_mediapipe.vcxproj:
--------------------------------------------------------------------------------
1 | 
2 | 
3 | 
4 | 
5 | Debug
6 | Win32
7 | 
8 | 
9 | Release
10 | Win32
11 | 
12 | 
13 | Debug
14 | x64
15 | 
16 | 
17 | Release
18 | x64
19 | 
20 | 
21 | 
22 | 
23 | 
24 | 
25 | 
26 | 
27 | 
28 | 
29 | 
30 | 
31 | 
32 | 
33 | 
34 | 
35 | 
36 | 
37 | 
38 | 
39 | 
40 | 
41 | 
42 | 
43 | 
44 | 
45 | 
46 | 
47 | 
48 | 
49 | 
50 | 
51 | 
52 | 
53 | 
54 | 
55 | 
56 | 
57 | 
58 | 
59 | 
60 | 
61 | 
62 | 
63 | 16.0
64 | Win32Proj
65 | {d1efb5f2-2eb7-408d-b571-bb4969063a1f}
66 | mptest
67 | 10.0
68 | 
69 | 
70 | 
71 | Application
72 | true
73 | v142
74 | Unicode
75 | 
76 | 
77 | Application
78 | false
79 | v142
80 | true
81 | Unicode
82 | 
83 | 
84 | Makefile
85 | true
86 | v142
87 | Unicode
88 | 
89 | 
90 | Makefile
91 | false
92 | v142
93 | true
94 | Unicode
95 | 
96 | 
97 | 
98 | 
99 | 
100 | 
101 | 
102 | 
103 | 
104 | 
105 | 
106 | 
107 | 
108 | 
109 | 
110 | 
111 | 
112 | 
113 | 
114 | 
115 | true
116 | 
117 | 
118 | false
119 | 
120 | 
121 | true
122 | 
123 | 
124 | ..\scripts\build_app.cmd
125 | 
126 | $(VC_IncludePath);$(SolutionDir);..\..\..\bazel-mediapipe\external\windows_opencv\include\;..\..\..\bazel-mediapipe\external\windows_opencv\include\opencv2\;..\..\..\bazel-mediapipe\external\com_google_absl\;..\..\..\bazel-mediapipe\external\com_google_protobuf\;..\..\..\bazel-mediapipe\external\com_github_gflags_gflags\;..\..\..\bazel-mediapipe\external\com_github_glog_glog\;..\..\..\bazel-bin\;..\..\..\;..\;..\..\..\bazel-mediapipe\
127 | 
128 | 
129 | false
130 | ..\scripts\build_app.cmd
131 | 
132 | 
133 | 
134 | Level3
135 | true
136 | WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)
137 | true
138 | 
139 | 
140 | Console
141 | true
142 | 
143 | 
144 | 
145 | 
146 | Level3
147 | true
148 | true
149 | true
150 | WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)
151 | true
152 | 
153 | 
154 | Console
155 | true
156 | true
157 | true
158 | 
159 | 
160 | 
161 | 
162 | Level3
163 | true
164 | _DEBUG;_CONSOLE;%(PreprocessorDefinitions)
165 | true
166 | 
167 | 
168 | Console
169 | true
170 | 
171 | 
172 | 
173 | 
174 | 
175 | 
176 | 
177 | 
178 | 
179 | 
180 | 
181 | 
182 | Level3
183 | true
184 | true
185 | true
186 | NDEBUG;_CONSOLE;%(PreprocessorDefinitions)
187 | true
188 | 
189 | 
190 | Console
191 | true
192 | true
193 | true
194 | 
195 | 
196 | 
197 | 
198 | 
199 | 
--------------------------------------------------------------------------------
/vs/unreal_mediapipe.vcxproj.filters:
--------------------------------------------------------------------------------
1 | 
2 | 
3 | 
4 | 
5 | {c66b3cff-ac02-4260-b08b-c4ee8ef19020}
6 | 
7 | 
8 | {20ff5d45-3275-405c-9698-ac290758b1ef}
9 | 
10 | 
11 | {7da886f8-92d9-4e1b-bc4b-b4cd6f0fc1c8}
12 | 
13 | 
14 | {cdaae859-b792-49eb-8046-af5ea4c637f4}
15 | 
16 | 
17 | 
18 | 
19 | 
20 | graph
21 | 
22 | 
23 | graph
24 | 
25 | 
26 | graph
27 | 
28 | 
29 | graph
30 | 
31 | 
32 | graph
33 | 
34 | 
35 | graph
36 | 
37 | 
38 | 
39 | 
40 | core
41 | 
42 | 
43 | core
44 | 
45 | 
46 | core
47 | 
48 | 
49 | core
50 | 
51 | 
52 | core
53 | 
54 | 
55 | core
56 | 
57 | 
58 | core
59 | 
60 | 
61 | core
62 | 
63 | 
64 | app
65 | 
66 | 
67 | core
68 | 
69 | 
70 | core
71 | 
72 | 
73 | exports
74 | 
75 | 
76 | exports
77 | 
78 | 
79 | core
80 | 
81 | 
82 | core
83 | 
84 | 
85 | core
86 | 
87 | 
88 | exports
89 | 
90 | 
91 | app
92 | 
93 | 
94 | 
95 | 
96 | core
97 | 
98 | 
99 | app
100 | 
101 | 
102 | app
103 | 
104 | 
105 | core
106 | 
107 | 
108 | core
109 | 
110 | 
111 | core
112 | 
113 | 
114 | core
115 | 
116 | 
117 | core
118 | 
119 | 
120 | core
121 | 
122 | 
123 | core
124 | 
125 | 
126 | app
127 | 
128 | 
129 | 
--------------------------------------------------------------------------------
/vs/unreal_mediapipe.vcxproj.user:
--------------------------------------------------------------------------------
1 | 
2 | 
3 | 
4 | 
5 | 
6 | WindowsLocalDebugger
7 | ..\scripts\run_app.cmd
8 | $(SolutionDir)
9 | 
10 | 
11 | ..\scripts\run_app.cmd
12 | WindowsLocalDebugger
13 | $(SolutionDir)
14 | 
15 | 
16 | false
17 | 
18 | 
--------------------------------------------------------------------------------