├── CMakeLists.txt ├── README.md ├── citation.bib ├── cmake └── FindSSE.cmake ├── dataset ├── CMU_Sample_05_01.bvh ├── README.md ├── axis.bvh ├── gestures │ ├── doubleclap.bvh │ ├── handsup.bvh │ ├── help.bvh │ ├── lefthandcircle.bvh │ ├── leftkick.bvh │ ├── push.bvh │ ├── righthandcircle.bvh │ ├── rightkick.bvh │ ├── test.bvh │ ├── tpose.bvh │ ├── waveleft.bvh │ └── waveright.bvh ├── headerAndOneMotion.bvh ├── headerWithHeadAndOneMotion.bvh ├── human3D.conf ├── lhand.bvh ├── lhand.qbvh ├── rhand.bvh ├── rhand.qbvh ├── sample.csv ├── shuffle.csv └── test.jpg ├── dependencies ├── InputParser │ ├── InputParser_C.cpp │ ├── InputParser_C.h │ └── README └── README.md ├── doc ├── BMVC21YoutubeVideo.png ├── BVHGUI2.jpg ├── CSVClusterPlot.jpg ├── DoxygenMainpage.h ├── ICPR2020_posterYoutubeVideoLink.png ├── OpenPose.md ├── ablation_bmvc21 │ └── README.md ├── benchmarknew.png ├── benchmarkold.png ├── benchmarkview.png ├── blender.png ├── blenderscript.jpg ├── blenderytb.png ├── bonsappshackathon.jpg ├── breakingnews.jpeg ├── bvh.png ├── createDoxygen.sh ├── demogesture.png ├── demojson.png ├── demoogl.png ├── demoperjoint.png ├── demoview.jpg ├── doxyfile ├── figureA.png ├── figureB.png ├── figureC.png ├── iccv23.jpg ├── icon.png ├── leedsDataset.jpg ├── mediapipeConverter.jpg ├── mnet2.png ├── mocapnetBannerImages.png ├── phd_thesis_compressed_signed.pdf ├── ren2022.jpg ├── show0.jpg ├── show0ogl.jpg ├── show1.jpg ├── show3.jpg ├── shuffle.gif ├── thesisdefence.jpg ├── transparentTab.png ├── youtube.png ├── youtubevideolink.jpg ├── youtubevideolink2.jpg └── youtubevideolinkdemo.jpg ├── docker ├── Dockerfile └── build_and_deploy.sh ├── initialize.sh ├── license.txt ├── revert.sh ├── scripts ├── createRandomHandDataset.sh ├── createRandomizedDataset.sh ├── createTestDataset.sh ├── debug.sh ├── dump_and_process_video.sh ├── dump_video.sh ├── experimentWithDifferentIterationNumbers.sh ├── getOpenCV.sh ├── processDatasetWithOpenpose.sh ├── splitStereo.sh ├── tensorflow2Build.sh └── tensorflowBuild.sh ├── src ├── GroundTruthGenerator │ ├── CMakeLists.txt │ └── README.md ├── HelloWorld │ ├── CMakeLists.txt │ ├── main.cpp │ └── testCMake.sh ├── JointEstimator2D │ ├── CMakeLists.txt │ ├── cameraControl.cpp │ ├── cameraControl.hpp │ ├── jointEstimator2D.cpp │ ├── jointEstimator2D.hpp │ ├── visualization.cpp │ ├── visualization.hpp │ └── webcamAnd2DJoints.cpp ├── MocapNET2 │ ├── BVHGUI2 │ │ ├── CMakeLists.txt │ │ └── bvhGUI2.cpp │ ├── BVHTemplate │ │ ├── CMakeLists.txt │ │ └── main.c │ ├── CSVClusterPlot │ │ ├── CMakeLists.txt │ │ ├── csvClusterPlot.cpp │ │ ├── listOfFilesToCluster │ │ ├── listOfFilesToClusterMocapNET1 │ │ ├── perform2DClustering.cpp │ │ ├── perform2DClustering.hpp │ │ ├── perform3DClustering.cpp │ │ └── perform3DClustering.hpp │ ├── Converters │ │ ├── H36M │ │ │ ├── CMakeLists.txt │ │ │ └── convertH36GroundTruthToMocapNETInput.cpp │ │ ├── Openpose │ │ │ ├── CMakeLists.txt │ │ │ └── convertOpenPoseJSONToCSV.cpp │ │ └── convertCSV3D │ │ │ ├── CMakeLists.txt │ │ │ ├── convertCSV3DToMocapNETInput.cpp │ │ │ ├── plot2D.py │ │ │ └── plot3D.py │ ├── HandOnlyTest │ │ ├── CMakeLists.txt │ │ └── handTest.cpp │ ├── MocapNET2LiveWebcamDemo │ │ ├── CMakeLists.txt │ │ └── livedemo.cpp │ ├── MocapNETFromCSV │ │ ├── CMakeLists.txt │ │ └── mocapnet2CSV.cpp │ ├── MocapNETLib2 │ │ ├── CMakeLists.txt │ │ ├── IO │ │ │ ├── bvh.cpp │ │ │ ├── bvh.hpp │ │ │ ├── bvhJointList │ │ │ ├── commonSkeleton.hpp │ │ │ ├── conversions.cpp │ │ │ ├── conversions.hpp │ │ │ ├── csvRead.cpp │ │ │ ├── csvRead.hpp │ │ │ ├── 
csvWrite.cpp │ │ │ ├── csvWrite.hpp │ │ │ ├── jsonMocapNETHelpers.cpp │ │ │ ├── jsonMocapNETHelpers.hpp │ │ │ ├── jsonRead.cpp │ │ │ ├── jsonRead.hpp │ │ │ ├── skeletonAbstraction.cpp │ │ │ ├── skeletonAbstraction.hpp │ │ │ └── skeletonSerializedToBVHTransform.hpp │ │ ├── NSDM │ │ │ ├── generated_body.hpp │ │ │ ├── generated_lowerbody.hpp │ │ │ └── generated_upperbody.hpp │ │ ├── applicationLogic │ │ │ ├── artifactRecognition.cpp │ │ │ ├── artifactRecognition.hpp │ │ │ ├── gestureRecognition.cpp │ │ │ ├── gestureRecognition.hpp │ │ │ ├── parseCommandlineOptions.cpp │ │ │ ├── parseCommandlineOptions.hpp │ │ │ ├── poseRecognition.cpp │ │ │ └── poseRecognition.hpp │ │ ├── config.h │ │ ├── core │ │ │ ├── core.cpp │ │ │ ├── core.hpp │ │ │ ├── multiThreaded.cpp │ │ │ ├── multiThreaded.hpp │ │ │ ├── singleThreaded.cpp │ │ │ └── singleThreaded.hpp │ │ ├── mocapnet2.cpp │ │ ├── mocapnet2.hpp │ │ ├── postProcessing │ │ │ └── outputFiltering.hpp │ │ ├── qualityControl │ │ │ ├── qualityControl.cpp │ │ │ └── qualityControl.hpp │ │ ├── remoteExecution.cpp │ │ ├── remoteExecution.hpp │ │ ├── solutionParts │ │ │ ├── body.cpp │ │ │ ├── body.hpp │ │ │ ├── lowerBody.cpp │ │ │ ├── lowerBody.hpp │ │ │ ├── upperBody.cpp │ │ │ └── upperBody.hpp │ │ ├── tools.cpp │ │ ├── tools.hpp │ │ └── visualization │ │ │ ├── allInOne.cpp │ │ │ ├── allInOne.hpp │ │ │ ├── camera_ready.cpp │ │ │ ├── camera_ready.hpp │ │ │ ├── drawSkeleton.cpp │ │ │ ├── drawSkeleton.hpp │ │ │ ├── map.cpp │ │ │ ├── map.hpp │ │ │ ├── opengl.cpp │ │ │ ├── opengl.hpp │ │ │ ├── rgb.cpp │ │ │ ├── rgb.hpp │ │ │ ├── template.cpp │ │ │ ├── template.hpp │ │ │ ├── visualization.cpp │ │ │ ├── visualization.hpp │ │ │ ├── widgets.cpp │ │ │ └── widgets.hpp │ ├── drawCSV │ │ ├── CMakeLists.txt │ │ └── drawCSV.cpp │ ├── reshapeCSVFileToMakeClassification │ │ ├── CMakeLists.txt │ │ └── reshapeCSV.cpp │ └── testCSV │ │ ├── CMakeLists.txt │ │ └── testCSV.cpp ├── NeuralNetworkAbstractionLayer │ ├── README.md │ ├── neuralNetworkAbstraction.cpp │ └── neuralNetworkAbstraction.hpp ├── Tensorflow │ ├── createTensorflowConfigurationForC.py │ ├── tensorflow.cpp │ ├── tensorflow.hpp │ ├── tf_utils.cpp │ └── tf_utils.hpp ├── Tensorflow2 │ ├── CMakeLists.txt │ ├── tensorflow2.h │ └── testtf2.cpp ├── Webcam │ ├── CMakeLists.txt │ └── webcam.cpp └── python │ ├── blender │ ├── blender_mocapnet.py │ └── downloadAndInstallBlender.sh │ ├── compareUtility │ ├── aT.sh │ ├── align2DPoints.py │ ├── compareUtility.py │ ├── drawPointClouds.py │ └── writeCSVResults.py │ ├── ctypes │ ├── build.sh │ ├── c.c │ ├── c.h │ ├── libcalci.so │ └── p.py │ ├── datasetAnnotations │ └── annotate.py │ ├── mediapipe │ ├── C_Parser.py │ ├── README.md │ ├── RHD.py │ ├── handsWebcam.py │ ├── holisticPartNames.py │ ├── holisticWebcam.py │ ├── mediapipe.jpeg │ ├── mediapipeHolistic2CSV.py │ ├── mediapipehand.png │ ├── mediapipehead.png │ ├── setup.sh │ └── tools.py │ └── vae_hands_3d │ ├── mnet.py │ ├── plot.py │ ├── prepareVE.sh │ └── rename_files.sh └── update.sh /citation.bib: -------------------------------------------------------------------------------- 1 | @inproceedings{Qammaz2021, 2 | author = {Qammaz, Ammar and Argyros, Antonis A}, 3 | title = {Towards Holistic Real-time Human 3D Pose Estimation using MocapNETs}, 4 | booktitle = {British Machine Vision Conference (BMVC 2021)}, 5 | publisher = {BMVA}, 6 | year = {2021}, 7 | month = {November}, 8 | projects = {I.C.HUMANS}, 9 | videolink = {https://www.youtube.com/watch?v=aaLOSY_p6Zc} 10 | } 11 | 12 | @inproceedings{Qammaz2020, 13 | author = {Ammar 
Qammaz and Antonis A. Argyros}, 14 | title = {Occlusion-tolerant and personalized 3D human pose estimation in RGB images}, 15 | booktitle = {IEEE International Conference on Pattern Recognition (ICPR 2020), (to appear)}, 16 | year = {2021}, 17 | month = {January}, 18 | url = {http://users.ics.forth.gr/argyros/res_mocapnet_II.html}, 19 | projects = {Co4Robots}, 20 | pdflink = {http://users.ics.forth.gr/argyros/mypapers/2021_01_ICPR_Qammaz.pdf}, 21 | videolink = {https://youtu.be/Jgz1MRq-I-k} 22 | } 23 | 24 | @inproceedings{Qammaz2019, 25 | author = {Qammaz, Ammar and Argyros, Antonis A}, 26 | title = {MocapNET: Ensemble of SNN Encoders for 3D Human Pose Estimation in RGB Images}, 27 | booktitle = {British Machine Vision Conference (BMVC 2019)}, 28 | publisher = {BMVA}, 29 | year = {2019}, 30 | month = {September}, 31 | address = {Cardiff, UK}, 32 | url = {http://users.ics.forth.gr/argyros/res_mocapnet.html}, 33 | projects = {CO4ROBOTS,MINGEI}, 34 | pdflink = {http://users.ics.forth.gr/argyros/mypapers/2019_09_BMVC_mocapnet.pdf}, 35 | videolink = {https://youtu.be/fH5e-KMBvM0} 36 | } 37 | -------------------------------------------------------------------------------- /dataset/README.md: -------------------------------------------------------------------------------- 1 | # MocapNET Project 2 | 3 | ## Dataset generation 4 | ------------------------------------------------------------------ 5 | 6 | If you are interested in generating the data to train a MocapNET please use the scripts [createRandomizedDataset.sh](https://github.com/FORTH-ModelBasedTracker/MocapNET/blob/master/createRandomizedDataset.sh) and [createTestDataset.sh](https://github.com/FORTH-ModelBasedTracker/MocapNET/blob/master/createTestDataset.sh) after building the GroundTruthDumper tool. 
(It will be automatically generated after running the initialization script) 7 | 8 | 9 | -------------------------------------------------------------------------------- /dataset/axis.bvh: -------------------------------------------------------------------------------- 1 | HIERARCHY 2 | ROOT root 3 | { 4 | OFFSET 0 0 0 5 | CHANNELS 6 Xposition Yposition Zposition Zrotation Yrotation Xrotation 6 | JOINT X_Axis 7 | { 8 | OFFSET 10.0 0.0 0.0 9 | CHANNELS 3 Zrotation Xrotation Yrotation 10 | JOINT X_Axis_Ornament 11 | { 12 | OFFSET 1.0 0.0 0.0 13 | CHANNELS 3 Zrotation Xrotation Yrotation 14 | JOINT X_Ornament_A 15 | { 16 | OFFSET 0.0 0.0 0.0 17 | CHANNELS 3 Zrotation Xrotation Yrotation 18 | End Site 19 | { 20 | OFFSET 0.0 -5.0 -5.0 21 | } 22 | } 23 | JOINT X_Ornament_B 24 | { 25 | OFFSET 0.0 0.0 0.0 26 | CHANNELS 3 Zrotation Xrotation Yrotation 27 | End Site 28 | { 29 | OFFSET 0.0 5.0 -5.0 30 | } 31 | } 32 | JOINT X_Ornament_C 33 | { 34 | OFFSET 0.0 0.0 0.0 35 | CHANNELS 3 Zrotation Xrotation Yrotation 36 | End Site 37 | { 38 | OFFSET 0.0 5.0 5.0 39 | } 40 | } 41 | JOINT X_Ornament_D 42 | { 43 | OFFSET 0.0 0.0 0.0 44 | CHANNELS 3 Zrotation Xrotation Yrotation 45 | End Site 46 | { 47 | OFFSET 0.0 -5.0 5.0 48 | } 49 | } 50 | } 51 | } 52 | JOINT Y_Axis 53 | { 54 | OFFSET 0.0 10.0 0.0 55 | CHANNELS 3 Zrotation Xrotation Yrotation 56 | JOINT Y_Axis_Ornament 57 | { 58 | OFFSET 0.0 1.0 0.0 59 | CHANNELS 3 Zrotation Xrotation Yrotation 60 | JOINT Y_Ornament_A 61 | { 62 | OFFSET 0.0 0.0 0.0 63 | CHANNELS 3 Zrotation Xrotation Yrotation 64 | End Site 65 | { 66 | OFFSET -5.0 0.0 -5.0 67 | } 68 | } 69 | JOINT Y_Ornament_B 70 | { 71 | OFFSET 0.0 0.0 0.0 72 | CHANNELS 3 Zrotation Xrotation Yrotation 73 | End Site 74 | { 75 | OFFSET 5.0 0.0 -5.0 76 | } 77 | } 78 | JOINT Y_Ornament_C 79 | { 80 | OFFSET 0.0 0.0 0.0 81 | CHANNELS 3 Zrotation Xrotation Yrotation 82 | End Site 83 | { 84 | OFFSET 0.0 0.0 5.0 85 | } 86 | } 87 | } 88 | } 89 | JOINT Z_Axis 90 | { 91 | OFFSET 0.0 0.0 10.0 92 | CHANNELS 3 Zrotation Xrotation Yrotation 93 | JOINT Z_Axis_Ornament 94 | { 95 | OFFSET 0.0 0.0 1.0 96 | CHANNELS 3 Zrotation Xrotation Yrotation 97 | JOINT Z_Ornament_A0 98 | { 99 | OFFSET -2.5 -2.5 0.0 100 | CHANNELS 3 Zrotation Xrotation Yrotation 101 | JOINT Z_Ornament_A1 102 | { 103 | OFFSET -2.5 -2.5 0.0 104 | CHANNELS 3 Zrotation Xrotation Yrotation 105 | JOINT Z_Ornament_A2 106 | { 107 | OFFSET 5.0 0.0 0.0 108 | CHANNELS 3 Zrotation Xrotation Yrotation 109 | End Site 110 | { 111 | OFFSET 5.0 0.0 0.0 112 | } 113 | } 114 | } 115 | } 116 | JOINT Z_Ornament_B0 117 | { 118 | OFFSET 2.5 2.5 0.0 119 | CHANNELS 3 Zrotation Xrotation Yrotation 120 | JOINT Z_Ornament_B1 121 | { 122 | OFFSET 2.5 2.5 0.0 123 | CHANNELS 3 Zrotation Xrotation Yrotation 124 | JOINT Z_Ornament_B2 125 | { 126 | OFFSET -5.0 0.0 0.0 127 | CHANNELS 3 Zrotation Xrotation Yrotation 128 | End Site 129 | { 130 | OFFSET -5.0 0.0 0.0 131 | } 132 | } 133 | } 134 | } 135 | } 136 | } 137 | } 138 | MOTION 139 | Frames: 1 140 | Frame Time: 0.040000 141 | 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 142 | -------------------------------------------------------------------------------- /dataset/lhand.bvh: -------------------------------------------------------------------------------- 1 | HIERARCHY 2 | ROOT lHand 3 | { 4 | OFFSET 0 0 0 5 | CHANNELS 6 Xposition 
Yposition Zposition Zrotation Yrotation Xrotation 6 | JOINT metacarpal1.l 7 | { 8 | OFFSET 4.0 -0.27 2.00 9 | CHANNELS 3 Zrotation Xrotation Yrotation 10 | JOINT finger2-1.l 11 | { 12 | OFFSET 4.5 0.27 0.0 13 | CHANNELS 3 Zrotation Xrotation Yrotation 14 | JOINT finger2-2.l 15 | { 16 | OFFSET 3.5 -0.32 0.0 17 | CHANNELS 3 Zrotation Xrotation Yrotation 18 | JOINT finger2-3.l 19 | { 20 | OFFSET 2.1 -0.29 0.0 21 | CHANNELS 3 Zrotation Xrotation Yrotation 22 | End Site 23 | { 24 | OFFSET 1.98 -0.68 0.0 25 | } 26 | } 27 | } 28 | } 29 | } 30 | JOINT metacarpal2.l 31 | { 32 | OFFSET 4.0 -0.27 0.0 33 | CHANNELS 3 Zrotation Xrotation Yrotation 34 | JOINT finger3-1.l 35 | { 36 | OFFSET 4.50 0.62 0.0 37 | CHANNELS 3 Zrotation Xrotation Yrotation 38 | JOINT finger3-2.l 39 | { 40 | OFFSET 3.97 -0.58 0.0 41 | CHANNELS 3 Zrotation Xrotation Yrotation 42 | JOINT finger3-3.l 43 | { 44 | OFFSET 2.5 -0.42 0.0 45 | CHANNELS 3 Zrotation Xrotation Yrotation 46 | End Site 47 | { 48 | OFFSET 1.964 -0.73 0.00 49 | } 50 | } 51 | } 52 | } 53 | } 54 | JOINT __metacarpal3.l 55 | { 56 | OFFSET 2.5 -0.27 -2.0 57 | CHANNELS 3 Zrotation Xrotation Yrotation 58 | JOINT metacarpal3.l 59 | { 60 | OFFSET 1.5 -0.16 0.0 61 | CHANNELS 3 Zrotation Xrotation Yrotation 62 | JOINT finger4-1.l 63 | { 64 | OFFSET 4.5 0.70 0.0 65 | CHANNELS 3 Zrotation Xrotation Yrotation 66 | JOINT finger4-2.l 67 | { 68 | OFFSET 3.67 -0.48 0.0 69 | CHANNELS 3 Zrotation Xrotation Yrotation 70 | JOINT finger4-3.l 71 | { 72 | OFFSET 2.27 -0.35 -0.0 73 | CHANNELS 3 Zrotation Xrotation Yrotation 74 | End Site 75 | { 76 | OFFSET 1.904 -0.62 0.0 77 | } 78 | } 79 | } 80 | } 81 | } 82 | } 83 | JOINT __metacarpal4.l 84 | { 85 | OFFSET 2.0 -0.27 -3.0 86 | CHANNELS 3 Zrotation Xrotation Yrotation 87 | JOINT metacarpal4.l 88 | { 89 | OFFSET 2.0 -0.16 -1.0 90 | CHANNELS 3 Zrotation Xrotation Yrotation 91 | JOINT finger5-1.l 92 | { 93 | OFFSET 4.0 0.17 0.0 94 | CHANNELS 3 Zrotation Xrotation Yrotation 95 | JOINT finger5-2.l 96 | { 97 | OFFSET 3.2 -0.17 0.0 98 | CHANNELS 3 Zrotation Xrotation Yrotation 99 | JOINT finger5-3.l 100 | { 101 | OFFSET 1.495 -0.10 0.0 102 | CHANNELS 3 Zrotation Xrotation Yrotation 103 | End Site 104 | { 105 | OFFSET 1.9 -0.10 0.0 106 | } 107 | } 108 | } 109 | } 110 | } 111 | } 112 | JOINT lthumbBase 113 | { 114 | OFFSET 2.0 -0.279 4.0 115 | CHANNELS 3 Zrotation Xrotation Yrotation 116 | JOINT lthumb 117 | { 118 | OFFSET 2.0 -0.142 1.0 119 | CHANNELS 3 Zrotation Xrotation Yrotation 120 | JOINT finger1-2.l 121 | { 122 | OFFSET 1.9598 -2.10 1.0 123 | CHANNELS 3 Zrotation Xrotation Yrotation 124 | JOINT finger1-3.l 125 | { 126 | OFFSET 2.76 -0.46 0.0 127 | CHANNELS 3 Zrotation Xrotation Yrotation 128 | End Site 129 | { 130 | OFFSET 2.595 -0.16 0.0 131 | } 132 | } 133 | } 134 | } 135 | } 136 | } 137 | MOTION 138 | Frames: 1 139 | Frame Time: 0.040000 140 | 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 141 | -------------------------------------------------------------------------------- /dataset/lhand.qbvh: -------------------------------------------------------------------------------- 1 | HIERARCHY 2 | ROOT lHand 3 | { 4 | OFFSET 0 0 0 5 | CHANNELS 7 Xposition Yposition Zposition Wrotation Xrotation Yrotation Zrotation 6 | JOINT metacarpal1.l 7 | { 8 | OFFSET 4.0 -0.27 2.00 9 | CHANNELS 3 Zrotation 
Xrotation Yrotation 10 | JOINT finger2-1.l 11 | { 12 | OFFSET 4.5 0.27 0.0 13 | CHANNELS 3 Zrotation Xrotation Yrotation 14 | JOINT finger2-2.l 15 | { 16 | OFFSET 3.5 -0.32 0.0 17 | CHANNELS 3 Zrotation Xrotation Yrotation 18 | JOINT finger2-3.l 19 | { 20 | OFFSET 2.1 -0.29 0.0 21 | CHANNELS 3 Zrotation Xrotation Yrotation 22 | End Site 23 | { 24 | OFFSET 1.98 -0.68 0.0 25 | } 26 | } 27 | } 28 | } 29 | } 30 | JOINT metacarpal2.l 31 | { 32 | OFFSET 4.0 -0.27 0.0 33 | CHANNELS 3 Zrotation Xrotation Yrotation 34 | JOINT finger3-1.l 35 | { 36 | OFFSET 4.50 0.62 0.0 37 | CHANNELS 3 Zrotation Xrotation Yrotation 38 | JOINT finger3-2.l 39 | { 40 | OFFSET 3.97 -0.58 0.0 41 | CHANNELS 3 Zrotation Xrotation Yrotation 42 | JOINT finger3-3.l 43 | { 44 | OFFSET 2.5 -0.42 0.0 45 | CHANNELS 3 Zrotation Xrotation Yrotation 46 | End Site 47 | { 48 | OFFSET 1.964 -0.73 0.00 49 | } 50 | } 51 | } 52 | } 53 | } 54 | JOINT __metacarpal3.l 55 | { 56 | OFFSET 2.5 -0.27 -2.0 57 | CHANNELS 3 Zrotation Xrotation Yrotation 58 | JOINT metacarpal3.l 59 | { 60 | OFFSET 1.5 -0.16 0.0 61 | CHANNELS 3 Zrotation Xrotation Yrotation 62 | JOINT finger4-1.l 63 | { 64 | OFFSET 4.5 0.70 0.0 65 | CHANNELS 3 Zrotation Xrotation Yrotation 66 | JOINT finger4-2.l 67 | { 68 | OFFSET 3.67 -0.48 0.0 69 | CHANNELS 3 Zrotation Xrotation Yrotation 70 | JOINT finger4-3.l 71 | { 72 | OFFSET 2.27 -0.35 -0.0 73 | CHANNELS 3 Zrotation Xrotation Yrotation 74 | End Site 75 | { 76 | OFFSET 1.904 -0.62 0.0 77 | } 78 | } 79 | } 80 | } 81 | } 82 | } 83 | JOINT __metacarpal4.l 84 | { 85 | OFFSET 2.0 -0.27 -3.0 86 | CHANNELS 3 Zrotation Xrotation Yrotation 87 | JOINT metacarpal4.l 88 | { 89 | OFFSET 2.0 -0.16 -1.0 90 | CHANNELS 3 Zrotation Xrotation Yrotation 91 | JOINT finger5-1.l 92 | { 93 | OFFSET 4.0 0.17 0.0 94 | CHANNELS 3 Zrotation Xrotation Yrotation 95 | JOINT finger5-2.l 96 | { 97 | OFFSET 3.2 -0.17 0.0 98 | CHANNELS 3 Zrotation Xrotation Yrotation 99 | JOINT finger5-3.l 100 | { 101 | OFFSET 1.495 -0.10 0.0 102 | CHANNELS 3 Zrotation Xrotation Yrotation 103 | End Site 104 | { 105 | OFFSET 1.9 -0.10 0.0 106 | } 107 | } 108 | } 109 | } 110 | } 111 | } 112 | JOINT lthumbBase 113 | { 114 | OFFSET 0.0 -0.279 2.0 115 | CHANNELS 3 Zrotation Xrotation Yrotation 116 | JOINT lthumb 117 | { 118 | OFFSET 2.0 -0.142 1.0 119 | CHANNELS 3 Zrotation Xrotation Yrotation 120 | JOINT finger1-2.l 121 | { 122 | OFFSET 1.9598 -2.10 1.0 123 | CHANNELS 3 Zrotation Xrotation Yrotation 124 | JOINT finger1-3.l 125 | { 126 | OFFSET 2.76 -0.46 0.0 127 | CHANNELS 3 Zrotation Xrotation Yrotation 128 | End Site 129 | { 130 | OFFSET 2.595 -0.16 0.0 131 | } 132 | } 133 | } 134 | } 135 | } 136 | } 137 | MOTION 138 | Frames: 1 139 | Frame Time: 0.040000 140 | 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 141 | -------------------------------------------------------------------------------- /dataset/rhand.bvh: -------------------------------------------------------------------------------- 1 | HIERARCHY 2 | ROOT rHand 3 | { 4 | OFFSET 0 0 0 5 | CHANNELS 6 Xposition Yposition Zposition Zrotation Yrotation Xrotation 6 | JOINT metacarpal1.r 7 | { 8 | OFFSET -4.0 -0.27 2.00 9 | CHANNELS 3 Zrotation Xrotation Yrotation 10 | JOINT finger2-1.r 11 | { 12 | OFFSET -4.5 0.27 0.0 13 | CHANNELS 3 Zrotation Xrotation Yrotation 14 | JOINT finger2-2.r 
15 | { 16 | OFFSET -3.5 -0.32 0.0 17 | CHANNELS 3 Zrotation Xrotation Yrotation 18 | JOINT finger2-3.r 19 | { 20 | OFFSET -2.1 -0.29 0.0 21 | CHANNELS 3 Zrotation Xrotation Yrotation 22 | End Site 23 | { 24 | OFFSET -1.98 -0.68 0.0 25 | } 26 | } 27 | } 28 | } 29 | } 30 | JOINT metacarpal2.r 31 | { 32 | OFFSET -4.0 -0.27 0.0 33 | CHANNELS 3 Zrotation Xrotation Yrotation 34 | JOINT finger3-1.r 35 | { 36 | OFFSET -4.50 0.62 0.0 37 | CHANNELS 3 Zrotation Xrotation Yrotation 38 | JOINT finger3-2.r 39 | { 40 | OFFSET -3.97 -0.58 0.0 41 | CHANNELS 3 Zrotation Xrotation Yrotation 42 | JOINT finger3-3.r 43 | { 44 | OFFSET -2.5 -0.42 0.0 45 | CHANNELS 3 Zrotation Xrotation Yrotation 46 | End Site 47 | { 48 | OFFSET -1.964 -0.73 0.00 49 | } 50 | } 51 | } 52 | } 53 | } 54 | JOINT __metacarpal3.r 55 | { 56 | OFFSET -2.5 -0.27 -2.0 57 | CHANNELS 3 Zrotation Xrotation Yrotation 58 | JOINT metacarpal3.r 59 | { 60 | OFFSET -1.5 -0.16 0.0 61 | CHANNELS 3 Zrotation Xrotation Yrotation 62 | JOINT finger4-1.r 63 | { 64 | OFFSET -4.5 0.70 0.0 65 | CHANNELS 3 Zrotation Xrotation Yrotation 66 | JOINT finger4-2.r 67 | { 68 | OFFSET -3.67 -0.48 0.0 69 | CHANNELS 3 Zrotation Xrotation Yrotation 70 | JOINT finger4-3.r 71 | { 72 | OFFSET -2.27 -0.35 -0.0 73 | CHANNELS 3 Zrotation Xrotation Yrotation 74 | End Site 75 | { 76 | OFFSET -1.904 -0.62 0.0 77 | } 78 | } 79 | } 80 | } 81 | } 82 | } 83 | JOINT __metacarpal4.r 84 | { 85 | OFFSET -2.0 -0.27 -3.0 86 | CHANNELS 3 Zrotation Xrotation Yrotation 87 | JOINT metacarpal4.r 88 | { 89 | OFFSET -2.0 -0.16 -1.0 90 | CHANNELS 3 Zrotation Xrotation Yrotation 91 | JOINT finger5-1.r 92 | { 93 | OFFSET -4.0 0.17 0.0 94 | CHANNELS 3 Zrotation Xrotation Yrotation 95 | JOINT finger5-2.r 96 | { 97 | OFFSET -3.2 -0.17 0.0 98 | CHANNELS 3 Zrotation Xrotation Yrotation 99 | JOINT finger5-3.r 100 | { 101 | OFFSET -1.495 -0.10 0.0 102 | CHANNELS 3 Zrotation Xrotation Yrotation 103 | End Site 104 | { 105 | OFFSET -1.9 -0.10 0.0 106 | } 107 | } 108 | } 109 | } 110 | } 111 | } 112 | JOINT rthumbBase 113 | { 114 | OFFSET -2.0 -0.279 4.0 115 | CHANNELS 3 Zrotation Xrotation Yrotation 116 | JOINT rthumb 117 | { 118 | OFFSET -2.0 -0.142 1.0 119 | CHANNELS 3 Zrotation Xrotation Yrotation 120 | JOINT finger1-2.r 121 | { 122 | OFFSET -1.9598 -2.10 1.0 123 | CHANNELS 3 Zrotation Xrotation Yrotation 124 | JOINT finger1-3.r 125 | { 126 | OFFSET -2.76 -0.46 0.0 127 | CHANNELS 3 Zrotation Xrotation Yrotation 128 | End Site 129 | { 130 | OFFSET -2.595 -0.16 0.0 131 | } 132 | } 133 | } 134 | } 135 | } 136 | } 137 | MOTION 138 | Frames: 1 139 | Frame Time: 0.040000 140 | 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 141 | -------------------------------------------------------------------------------- /dataset/rhand.qbvh: -------------------------------------------------------------------------------- 1 | HIERARCHY 2 | ROOT rHand 3 | { 4 | OFFSET 0 0 0 5 | CHANNELS 7 Xposition Yposition Zposition Wrotation Xrotation Yrotation Zrotation 6 | JOINT metacarpal1.r 7 | { 8 | OFFSET -4.0 -0.27 2.00 9 | CHANNELS 3 Zrotation Xrotation Yrotation 10 | JOINT finger2-1.r 11 | { 12 | OFFSET -4.5 0.27 0.0 13 | CHANNELS 3 Zrotation Xrotation Yrotation 14 | JOINT finger2-2.r 15 | { 16 | OFFSET -3.5 -0.32 0.0 17 | CHANNELS 3 Zrotation Xrotation Yrotation 18 | JOINT finger2-3.r 19 | { 20 
| OFFSET -2.1 -0.29 0.0 21 | CHANNELS 3 Zrotation Xrotation Yrotation 22 | End Site 23 | { 24 | OFFSET -1.98 -0.68 0.0 25 | } 26 | } 27 | } 28 | } 29 | } 30 | JOINT metacarpal2.r 31 | { 32 | OFFSET -4.0 -0.27 0.0 33 | CHANNELS 3 Zrotation Xrotation Yrotation 34 | JOINT finger3-1.r 35 | { 36 | OFFSET -4.50 0.62 0.0 37 | CHANNELS 3 Zrotation Xrotation Yrotation 38 | JOINT finger3-2.r 39 | { 40 | OFFSET -3.97 -0.58 0.0 41 | CHANNELS 3 Zrotation Xrotation Yrotation 42 | JOINT finger3-3.r 43 | { 44 | OFFSET -2.5 -0.42 0.0 45 | CHANNELS 3 Zrotation Xrotation Yrotation 46 | End Site 47 | { 48 | OFFSET -1.964 -0.73 0.00 49 | } 50 | } 51 | } 52 | } 53 | } 54 | JOINT __metacarpal3.r 55 | { 56 | OFFSET -2.5 -0.27 -2.0 57 | CHANNELS 3 Zrotation Xrotation Yrotation 58 | JOINT metacarpal3.r 59 | { 60 | OFFSET -1.5 -0.16 0.0 61 | CHANNELS 3 Zrotation Xrotation Yrotation 62 | JOINT finger4-1.r 63 | { 64 | OFFSET -4.5 0.70 0.0 65 | CHANNELS 3 Zrotation Xrotation Yrotation 66 | JOINT finger4-2.r 67 | { 68 | OFFSET -3.67 -0.48 0.0 69 | CHANNELS 3 Zrotation Xrotation Yrotation 70 | JOINT finger4-3.r 71 | { 72 | OFFSET -2.27 -0.35 -0.0 73 | CHANNELS 3 Zrotation Xrotation Yrotation 74 | End Site 75 | { 76 | OFFSET -1.904 -0.62 0.0 77 | } 78 | } 79 | } 80 | } 81 | } 82 | } 83 | JOINT __metacarpal4.r 84 | { 85 | OFFSET -2.0 -0.27 -3.0 86 | CHANNELS 3 Zrotation Xrotation Yrotation 87 | JOINT metacarpal4.r 88 | { 89 | OFFSET -2.0 -0.16 -1.0 90 | CHANNELS 3 Zrotation Xrotation Yrotation 91 | JOINT finger5-1.r 92 | { 93 | OFFSET -4.0 0.17 0.0 94 | CHANNELS 3 Zrotation Xrotation Yrotation 95 | JOINT finger5-2.r 96 | { 97 | OFFSET -3.2 -0.17 0.0 98 | CHANNELS 3 Zrotation Xrotation Yrotation 99 | JOINT finger5-3.r 100 | { 101 | OFFSET -1.495 -0.10 0.0 102 | CHANNELS 3 Zrotation Xrotation Yrotation 103 | End Site 104 | { 105 | OFFSET -1.9 -0.10 0.0 106 | } 107 | } 108 | } 109 | } 110 | } 111 | } 112 | JOINT rthumbBase 113 | { 114 | OFFSET 0.0 -0.279 2.0 115 | CHANNELS 3 Zrotation Xrotation Yrotation 116 | JOINT rthumb 117 | { 118 | OFFSET -2.0 -0.142 1.0 119 | CHANNELS 3 Zrotation Xrotation Yrotation 120 | JOINT finger1-2.r 121 | { 122 | OFFSET -1.9598 -2.10 1.0 123 | CHANNELS 3 Zrotation Xrotation Yrotation 124 | JOINT finger1-3.r 125 | { 126 | OFFSET -2.76 -0.46 0.0 127 | CHANNELS 3 Zrotation Xrotation Yrotation 128 | End Site 129 | { 130 | OFFSET -2.595 -0.16 0.0 131 | } 132 | } 133 | } 134 | } 135 | } 136 | } 137 | MOTION 138 | Frames: 1 139 | Frame Time: 0.040000 140 | 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 141 | -------------------------------------------------------------------------------- /dataset/test.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/dataset/test.jpg -------------------------------------------------------------------------------- /dependencies/InputParser/README: -------------------------------------------------------------------------------- 1 | This code actually has it's own repository ( https://github.com/AmmarkoV/InputParser ) and also included in the AmmarServer dependency ( https://github.com/AmmarkoV/AmmarServer/tree/master/src/InputParser ) 2 | However to simplify build process it is also 
included as a standalone copy here. 3 | -------------------------------------------------------------------------------- /dependencies/README.md: -------------------------------------------------------------------------------- 1 | This is the dependencies subdirectory. 2 | 3 | At a minimum this directory should contain libtensorflow and RGBDAcquisition, and maybe an opencv-3.2.0. 4 | -------------------------------------------------------------------------------- /doc/BMVC21YoutubeVideo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/BMVC21YoutubeVideo.png -------------------------------------------------------------------------------- /doc/BVHGUI2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/BVHGUI2.jpg -------------------------------------------------------------------------------- /doc/CSVClusterPlot.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/CSVClusterPlot.jpg -------------------------------------------------------------------------------- /doc/ICPR2020_posterYoutubeVideoLink.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/ICPR2020_posterYoutubeVideoLink.png -------------------------------------------------------------------------------- /doc/OpenPose.md: -------------------------------------------------------------------------------- 1 | Workflow for 2-D Video Conversion to a .bvh File on Windows and Linux 2 | 3 | For the highest speed and accuracy, install OpenPose on a Linux, Windows or Mac system, using the faster GPU version if a capable GPU is available. A graphics card with 8GB of VRAM is probably the minimum needed. As described later, a Linux system will be needed to run the MocapNET utilities that perform the conversion to a .bvh file. 4 | 5 | After installing OpenPose, navigate in a terminal to wherever the openpose directory is located. For example, in Windows PowerShell: 6 | 7 | cd D:\openpose-1.7.0-binaries-win64-gpu-python3.7-flir-3d_recommended\openpose 8 | 9 | Run OpenPose as shown here for Windows PowerShell: 10 | 11 | bin\OpenPoseDemo.exe --video D:\Test4.mp4 --hand --scale_number 8 --scale_gap 0.125 --net_resolution "320x176" --write_json output_jsons/ --model_pose BODY_25B -number_people_max 1 12 | 13 | The above command uses the slower, higher-accuracy scale flags and produces body bones plus additional hand bones. Add --face to also generate face bones if there is enough graphics memory. A rather low net resolution is needed to prevent CUDA out-of-video-memory errors; 320x176 works on an RTX 2070 with 8GB of VRAM. If you have more video memory you may be able to raise the resolution with --net_resolution -1x368 or higher. OpenPose can process a crowd, but the programs that convert its output to a .bvh file handle one person only. 
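For Linux users, the same run can be started from inside the openpose directory roughly as follows. This is a sketch: the binary path assumes the usual CMake build layout (the same layout used by scripts/dump_and_process_video.sh in this repository), and /path/to/video.mp4 is a placeholder for your own input:

./build/examples/openpose/openpose.bin --video /path/to/video.mp4 --hand --scale_number 8 --scale_gap 0.125 --net_resolution "320x176" --write_json output_jsons/ --model_pose BODY_25B -number_people_max 1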
The --model_pose BODY_25B flag specifies a high-resolution body model that can be added as follows: 14 | 15 | Download the Caffe model (100 MB) from: 16 | 17 | http://posefs1.perception.cs.cmu.edu/OpenPose/models/pose/1_25BBkg/body_25b/pose_iter_XXXXXX.caffemodel 18 | 19 | Then go to: 20 | 21 | https://github.com/CMU-Perceptual-Computing-Lab/openpose_train/blob/master/experimental_models/1_25BBkg/body_25b 22 | 23 | Right-click the pose_deploy.prototxt file and choose Save link as..., keeping the name pose_deploy.prototxt. Save it into an openpose/models/pose/body_25b folder you create, and put the Caffe model you downloaded in the same folder. You can now run OpenPose with this higher-accuracy model. 24 | 25 | The output of the OpenPose run will be in the folder openpose/output_jsons, containing .json files that specify the joints’ x and y positions for each frame. For the next operation, rename all the .json files using, for example, Bulk Rename Utility in Windows, and rename the resulting folder to something convenient such as BRUout (a scripted alternative for Linux is given at the end of this document). In Bulk Rename Utility change sections Name (2) Fixed, and Numbering (10) Mode, Insert at 13, Start 0 and Pad 5. The exact new names of the .json files should be: 26 | 27 | colorFrame_0_00000_keypoints.json, colorFrame_0_00001_keypoints.json, etc. 28 | 29 | 30 | Install MocapNET 31 | 32 | Run the MocapNET installer on an Ubuntu-like Linux system after installing the dependencies. MocapNET has utilities to convert the OpenPose .json files into a comma-separated values (CSV) file, which in turn will be converted into a .bvh file. If your computer has Windows only, you can try the command-line Windows Subsystem for Linux (WSL) in newer versions of Windows 10, though it won’t provide graphical output. Otherwise, install Ubuntu as a virtual machine in VirtualBox on Windows, or add Ubuntu as a dual-boot option on a Windows computer. 33 | 34 | After installing MocapNET, copy the BRUout folder created above into the MocapNET folder. Then, in a terminal, navigate to the MocapNET folder and run: 35 | 36 | ./convertOpenPoseJSONToCSV --from BRUout --size 640 480 37 | 38 | The program will show its progress while processing the data and will output a file stored at BRUout/2dJoints_v1.4.csv. This CSV file can then be further processed with the following command: 39 | 40 | ./MocapNET2CSV --from BRUout/2dJoints_v1.4.csv --size 640 480 41 | 42 | After completion the output will be an out.bvh file in the MocapNET folder. This file can be loaded into Blender, etc. using a BVH motion-capture import option, and re-targeted to any desired armature for animation. One good option is to use the MakeHuman program to generate a model with a matching CMU body, hands and face rig. The MakeHuman MHX2 option provides another way to retarget the .bvh to an armature. 43 | 44 | Note that presently the public version of MocapNET supports conversion of body bones only, but hand and face bone support is well underway. 
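On Linux (or under WSL), the renaming step above can be scripted instead of using Bulk Rename Utility. Below is a minimal bash sketch, assuming the OpenPose output names are zero-padded (the default) so that shell glob order matches frame order, and that no file already uses the target naming scheme; run it inside the folder holding the .json files and then rename the folder itself:

i=0
for f in *_keypoints.json; do
  mv -- "$f" "$(printf 'colorFrame_0_%05d_keypoints.json' "$i")"
  i=$((i+1))
done
cd .. && mv output_jsons BRUout

The folder can then be passed to convertOpenPoseJSONToCSV with --from BRUout exactly as described above.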
45 | 46 | - end - 47 | 48 | 49 | 50 | -------------------------------------------------------------------------------- /doc/benchmarknew.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/benchmarknew.png -------------------------------------------------------------------------------- /doc/benchmarkold.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/benchmarkold.png -------------------------------------------------------------------------------- /doc/benchmarkview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/benchmarkview.png -------------------------------------------------------------------------------- /doc/blender.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/blender.png -------------------------------------------------------------------------------- /doc/blenderscript.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/blenderscript.jpg -------------------------------------------------------------------------------- /doc/blenderytb.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/blenderytb.png -------------------------------------------------------------------------------- /doc/bonsappshackathon.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/bonsappshackathon.jpg -------------------------------------------------------------------------------- /doc/breakingnews.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/breakingnews.jpeg -------------------------------------------------------------------------------- /doc/bvh.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/bvh.png -------------------------------------------------------------------------------- /doc/createDoxygen.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #Switch to this directory 4 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 5 | cd "$DIR" 6 | 7 | 8 | export MOCAPNET_DOXYGEN_INPUT="$DIR/.." 9 | export MOCAPNET_DOXYGEN_OUTPUT="$DIR/../doc" 10 | 11 | echo "Doxygen Input file will be $MOCAPNET_DOXYGEN_INPUT" 12 | echo "Doxygen Output file will be $MOCAPNET_DOXYGEN_OUTPUT" 13 | 14 | 15 | cd .. 16 | doxygen doc/doxyfile 17 | cd doc/latex 18 | make 19 | cd .. 20 | cd .. 
21 | 22 | ln -s doc/latex/refman.pdf MocapNETManual.pdf 23 | 24 | 25 | cd "$DIR" 26 | 27 | 28 | exit 0 29 | -------------------------------------------------------------------------------- /doc/demogesture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/demogesture.png -------------------------------------------------------------------------------- /doc/demojson.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/demojson.png -------------------------------------------------------------------------------- /doc/demoogl.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/demoogl.png -------------------------------------------------------------------------------- /doc/demoperjoint.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/demoperjoint.png -------------------------------------------------------------------------------- /doc/demoview.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/demoview.jpg -------------------------------------------------------------------------------- /doc/figureA.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/figureA.png -------------------------------------------------------------------------------- /doc/figureB.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/figureB.png -------------------------------------------------------------------------------- /doc/figureC.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/figureC.png -------------------------------------------------------------------------------- /doc/iccv23.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/iccv23.jpg -------------------------------------------------------------------------------- /doc/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/icon.png -------------------------------------------------------------------------------- /doc/leedsDataset.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/leedsDataset.jpg 
-------------------------------------------------------------------------------- /doc/mediapipeConverter.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/mediapipeConverter.jpg -------------------------------------------------------------------------------- /doc/mnet2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/mnet2.png -------------------------------------------------------------------------------- /doc/mocapnetBannerImages.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/mocapnetBannerImages.png -------------------------------------------------------------------------------- /doc/phd_thesis_compressed_signed.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/phd_thesis_compressed_signed.pdf -------------------------------------------------------------------------------- /doc/ren2022.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/ren2022.jpg -------------------------------------------------------------------------------- /doc/show0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/show0.jpg -------------------------------------------------------------------------------- /doc/show0ogl.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/show0ogl.jpg -------------------------------------------------------------------------------- /doc/show1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/show1.jpg -------------------------------------------------------------------------------- /doc/show3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/show3.jpg -------------------------------------------------------------------------------- /doc/shuffle.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/shuffle.gif -------------------------------------------------------------------------------- /doc/thesisdefence.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/thesisdefence.jpg -------------------------------------------------------------------------------- /doc/transparentTab.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/transparentTab.png -------------------------------------------------------------------------------- /doc/youtube.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/youtube.png -------------------------------------------------------------------------------- /doc/youtubevideolink.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/youtubevideolink.jpg -------------------------------------------------------------------------------- /doc/youtubevideolink2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/youtubevideolink2.jpg -------------------------------------------------------------------------------- /doc/youtubevideolinkdemo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/doc/youtubevideolinkdemo.jpg -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM tensorflow/tensorflow:latest-gpu 2 | 3 | ARG user_id 4 | ARG root_psw="12345678" 5 | ARG user_psw="ok" 6 | ARG user_name=user 7 | 8 | # Installs the necessary pkgs. 
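# Note: root_psw and user_psw above are weak placeholder defaults; build_and_deploy.sh
# only passes --build-arg user_id=$UID, so if the image will be shared consider also
# overriding the passwords at build time, e.g.
#   docker build --build-arg user_id=$UID --build-arg user_psw=yourpass .
# (the command above is illustrative and not part of the original deployment script)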
9 | RUN \ 10 | echo "**** packages installation ****" \ 11 | && apt-key adv --fetch-keys http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64/3bf863cc.pub \ 12 | && apt-get update && apt-get install -y \ 13 | vim \ 14 | build-essential \ 15 | cmake \ 16 | libopencv-dev \ 17 | libjpeg-dev \ 18 | libpng-dev \ 19 | libglew-dev \ 20 | libpthread-stubs0-dev \ 21 | git \ 22 | virtualenv \ 23 | time \ 24 | sudo \ 25 | wget \ 26 | nano \ 27 | python3.8-venv \ 28 | && echo "**** python pip update ****" \ 29 | && /usr/bin/python3 -m pip install --upgrade pip \ 30 | && echo "**** aliases for l and ll commands creation ****" \ 31 | && echo -e 'ls --color=auto "$@"' > /usr/bin/l \ 32 | && echo -e 'ls -lah --color=auto "$@"' > /usr/bin/ll \ 33 | && chmod +x /usr/bin/ll /usr/bin/l \ 34 | && echo "**** history-search-backward by pressing F8 ****" \ 35 | && sed -i 's/# "\\e\[5~": history-search-backward/"\\e\[19~": history-search-backward/' /etc/inputrc \ 36 | && echo "**** root password creation ****" \ 37 | && echo "root:${root_psw}" | chpasswd \ 38 | && echo "**** user creation ****" \ 39 | && useradd -m -s /usr/bin/bash -u ${user_id} -G sudo ${user_name} \ 40 | && echo "${user_name}:${user_psw}" | chpasswd \ 41 | && chown -R ${user_name}:${user_name} /home/${user_name}/ \ 42 | && mkdir /home/${user_name}/workspace/ \ 43 | && chown -R user:user /home/${user_name}/workspace 44 | 45 | 46 | USER ${user_name} 47 | WORKDIR /home/${user_name}/ 48 | 49 | RUN cd /home/${user_name}/workspace && git clone https://github.com/FORTH-ModelBasedTracker/MocapNET && cd MocapNET && ./initialize.sh --collab 50 | 51 | -------------------------------------------------------------------------------- /docker/build_and_deploy.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # This script builds and runs a docker image for local use. 3 | 4 | #Although I dislike the use of docker for a myriad of reasons, due to needing it to deploy on a particular machine 5 | #I am adding a docker container builder for the repository to automate the process 6 | 7 | 8 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 9 | cd "$DIR" 10 | cd .. 11 | REPOSITORY=`pwd` 12 | 13 | cd "$DIR" 14 | 15 | NAME="mocapnet" 16 | dockerfile_pth="$DIR" 17 | mount_pth="$REPOSITORY" 18 | 19 | # update tensorflow image 20 | docker pull tensorflow/tensorflow:latest-gpu 21 | 22 | # build and run tensorflow 23 | docker build \ 24 | -t $NAME \ 25 | $dockerfile_pth \ 26 | --build-arg user_id=$UID 27 | 28 | docker run -d \ 29 | --gpus all \ 30 | --shm-size 8G \ 31 | -it \ 32 | --name $NAME-container \ 33 | -v $mount_pth:/home/user/workspace \ 34 | $NAME 35 | 36 | 37 | docker ps -a 38 | 39 | OUR_DOCKER_ID=`docker ps -a | grep mocapnet | cut -f1 -d' '` 40 | echo "Our docker ID is: $OUR_DOCKER_ID" 41 | echo "Attach it using: docker attach $OUR_DOCKER_ID" 42 | docker attach $OUR_DOCKER_ID 43 | 44 | exit 0 45 | -------------------------------------------------------------------------------- /revert.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #Reminder: to get a single file out of the repo use "git checkout -- path/to/file.c" 4 | 5 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 6 | cd "$DIR" 7 | 8 | #All OpenGL/BVH code relies on RGBDAcquisition code so we need to sync this as well 9 | 10 | 11 | if [ -f dependencies/RGBDAcquisition/README.md ]; then 12 | echo "RGBDAcquisition appears to exist, reverting it .." 
13 | cd dependencies/RGBDAcquisition 14 | git reset --hard HEAD 15 | git pull origin master 16 | cd "$DIR" 17 | else 18 | echo "Could not find RGBDAcquisition, please rerun the initialize.sh script .." 19 | fi 20 | 21 | 22 | 23 | if [ -f dependencies/AmmarServer/README.md ]; then 24 | echo "AmmarServer appears to exist, reverting it .." 25 | cd dependencies/AmmarServer 26 | git reset --hard HEAD 27 | git pull origin master 28 | cd "$DIR" 29 | fi 30 | 31 | 32 | #Now sync rest of code 33 | cd "$DIR" 34 | git reset --hard HEAD 35 | git pull origin master 36 | 37 | exit 0 38 | -------------------------------------------------------------------------------- /scripts/createTestDataset.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 4 | cd "$DIR" 5 | cd .. 6 | 7 | red=$(printf "\033[31m") 8 | green=$(printf "\033[32m") 9 | yellow=$(printf "\033[33m") 10 | blue=$(printf "\033[34m") 11 | magenta=$(printf "\033[35m") 12 | cyan=$(printf "\033[36m") 13 | white=$(printf "\033[37m") 14 | normal=$(printf "\033[m") 15 | 16 | SELECTBODY="--selectJoints 21 hip abdomen chest neck head rcollar rshoulder relbow rhand lcollar lshoulder lelbow lhand rhip rknee rfoot lhip lknee lfoot toe3-2.r toe3-2.l" 17 | SELECTRHAND="--selectJoints 11 rhand rthumb1 rthumb2 rindex1 rindex2 rmid1 rmid2 rring1 rring2 rpinky1 rpinky2" 18 | SELECTLHAND="--selectJoints 11 lhand lthumb1 lthumb2 lindex1 lindex2 lmid1 lmid2 lring1 lring2 lpinky1 lpinky2" 19 | 20 | datasetDir="dataset/MotionCapture" 21 | datasetSubDir="01 02 03 05 06 07 08 09 10 11 12 13 14 15 16 17 18 19" 22 | outputDir="dataset/" 23 | 24 | rm $outputDir/2d_body_test.csv 25 | rm $outputDir/3d_body_test.csv 26 | rm $outputDir/bvh_body_test.csv 27 | 28 | #create a small test file with one example 29 | #rm $outputDir/testSMALL.csv 30 | # ./GroundTruthDumper --from MotionCapture/01/01_01.bvh --offsetPositionRotation 0 750 2500 0 0 0 --maxFrames 1 --csv $outputDir testSMALL.csv 31 | 32 | #PRODUCESVG="--svg $outputDir/svg" 33 | PRODUCESVG=" " 34 | 35 | 36 | datasetsProcessed=0 37 | totaldatasetsToGo=`echo "$datasetSubDir" | awk '{print NF}'` 38 | 39 | rm $outputDir/test.csv 40 | for d in $datasetSubDir 41 | do 42 | if [ -d $datasetDir/$d ] 43 | then 44 | echo "$green Found $d directory $normal" 45 | datasetFile=`ls $datasetDir/$d | grep bvh` 46 | ((datasetsProcessed++)) 47 | #------------------------------------------------------- 48 | 49 | for f in $datasetFile 50 | do 51 | echo "$green Generating Test - $datasetsProcessed/$totaldatasetsToGo - $datasetDir/$d/$f file $normal" 52 | #|||||||||||||||||||||||||||||||||||||||||||||||||||||||| 53 | BVHFILE="$datasetDir/$d/$f" # --svg $outputDir 54 | #./GroundTruthDumper --from $BVHFILE --csv $outputDir test.csv# --bvh $outputDir/$f-random.bvh 55 | ./GroundTruthDumper --from $BVHFILE $SELECTBODY --offsetPositionRotation 0 750 2000 0 0 0 --occlusions --csv $outputDir body_test.csv 2d+bvh $PRODUCESVG #--to ~/Documents/Programming/RGBDAcquisition/opengl_acquisition_shared_library/opengl_depth_and_color_renderer/Scenes/testBVH.conf --bvh $outputDir/$f-random.bvh --svg $outputDir/svg 56 | #exit 0 57 | #./GroundTruthDumper --from $BVHFILE --setPositionRotation 0 400 0 0 0 0 --csv $outputDir test.csv# --bvh $outputDir/$f-random.bvh 58 | #|||||||||||||||||||||||||||||||||||||||||||||||||||||||| 59 | done 60 | 61 | #------------------------------------------------------- 62 | else 63 | echo "$red Could not find $d directory 
$normal" 64 | fi 65 | done 66 | 67 | exit 0 68 | -------------------------------------------------------------------------------- /scripts/debug.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 4 | cd "$DIR" 5 | cd .. 6 | 7 | 8 | #Valgrind can also be downloaded.. 9 | #https://www.valgrind.org/downloads/current.html 10 | 11 | #Simple dependency checker that will apt-get stuff if something is missing 12 | SYSTEM_DEPENDENCIES="valgrind" 13 | 14 | for REQUIRED_PKG in $SYSTEM_DEPENDENCIES 15 | do 16 | PKG_OK=$(dpkg-query -W --showformat='${Status}\n' $REQUIRED_PKG|grep "install ok installed") 17 | echo "Checking for $REQUIRED_PKG: $PKG_OK" 18 | if [ "" = "$PKG_OK" ]; then 19 | 20 | echo "No $REQUIRED_PKG. Setting up $REQUIRED_PKG." 21 | 22 | #If this is uncommented then only packages that are missing will get prompted.. 23 | #sudo apt-get --yes install $REQUIRED_PKG 24 | 25 | #if this is uncommented then if one package is missing then all missing packages are immediately installed.. 26 | sudo apt-get install $SYSTEM_DEPENDENCIES 27 | break 28 | fi 29 | done 30 | #------------------------------------------------------------------------------ 31 | 32 | 33 | 34 | 35 | valgrind --tool=memcheck --leak-check=yes --show-reachable=yes --track-origins=yes --num-callers=20 --track-fds=yes ./MocapNETLiveWebcamDemo --from shuffle $@ 2>error.txt 36 | 37 | exit 0 38 | -------------------------------------------------------------------------------- /scripts/dump_and_process_video.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | DATASET="" 5 | if (( $#<1 )) 6 | then 7 | echo "Please provide arguments first argument is dataset " 8 | exit 1 9 | else 10 | DATASET=$1-data 11 | fi 12 | # $1 holds the video file we want to process ( it should be something like path/to/videofile.mp4 ) 13 | #DATASET now holds the output directory we will create ( it should be something like path/to/videofile.mp4-data/ ) 14 | 15 | 16 | #Remember the directory where user started 17 | STARTDIR=`pwd` 18 | 19 | #Remember the directory where the script is ( and where MocapNET is :) ) 20 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 21 | 22 | #Please give the path to openpose here.. 
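#The two paths below are examples from the original author's machine; point OPENPOSE_DIR
#at your own OpenPose checkout and OPENPOSE_BINARY_DIR at the directory that holds the
#openpose.bin binary produced by its CMake build.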
23 | OPENPOSE_DIR="/home/ammar/Documents/3dParty/openpose/" 24 | OPENPOSE_BINARY_DIR="/home/ammar/Documents/3dParty/openpose/build/examples/openpose/" 25 | 26 | #Dataset dumping using ffmpeg 27 | #----------------------------------------------------------------------------- 28 | #----------------------------------------------------------------------------- 29 | #----------------------------------------------------------------------------- 30 | #Create the new dataset directory 31 | mkdir $DATASET 32 | 33 | #Dump the video files using our naming scheme 34 | ffmpeg -i $1 -r 30 -q:v 1 $DATASET/colorFrame_0_%05d.jpg 35 | 36 | #Make sure we start at frame 0 37 | cp $DATASET/colorFrame_0_00001.jpg $DATASET/colorFrame_0_00000.jpg 38 | 39 | #We now want to grab an absolute path to our dataset 40 | cd $DATASET 41 | FULL_PATH_TO_DATASET=`pwd` 42 | #----------------------------------------------------------------------------- 43 | #----------------------------------------------------------------------------- 44 | #----------------------------------------------------------------------------- 45 | 46 | 47 | 48 | #2D pose estimation using OpenPose 49 | #----------------------------------------------------------------------------- 50 | #----------------------------------------------------------------------------- 51 | #----------------------------------------------------------------------------- 52 | cd $OPENPOSE_DIR 53 | LD_LIBRARY_PATH=/usr/local/cuda-9.0/lib64 $OPENPOSE_BINARY_DIR/openpose.bin -number_people_max 1 --hand --face --write_json $FULL_PATH_TO_DATASET -image_dir $FULL_PATH_TO_DATASET $@ 54 | #----------------------------------------------------------------------------- 55 | #----------------------------------------------------------------------------- 56 | #----------------------------------------------------------------------------- 57 | 58 | 59 | 60 | #2D pose conversion to CSV using our tool 61 | #----------------------------------------------------------------------------- 62 | #----------------------------------------------------------------------------- 63 | #----------------------------------------------------------------------------- 64 | cd "$DIR" 65 | cd .. 66 | 67 | ./convertOpenPoseJSONToCSV --from $DATASET 68 | #----------------------------------------------------------------------------- 69 | #----------------------------------------------------------------------------- 70 | #----------------------------------------------------------------------------- 71 | 72 | 73 | 74 | #3D pose estimation using MocapNET2 75 | #----------------------------------------------------------------------------- 76 | #----------------------------------------------------------------------------- 77 | #----------------------------------------------------------------------------- 78 | cd "$DIR" 79 | cd .. 
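#Lift the 2D joint CSV produced by the previous stage to a 3D BVH motion file.
#MocapNET2CSV writes its result to out.bvh in the repository root; the cp below
#then stores a copy next to the input frames as predicted.bvh.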
80 | 81 | ./MocapNET2CSV --from $FULL_PATH_TO_DATASET/2dJoints_v1.4.csv --mt --show 3 --save 82 | cp out.bvh $FULL_PATH_TO_DATASET/predicted.bvh 83 | #----------------------------------------------------------------------------- 84 | #----------------------------------------------------------------------------- 85 | #----------------------------------------------------------------------------- 86 | 87 | 88 | 89 | 90 | 91 | cd $STARTDIR 92 | exit 0 93 | 94 | -------------------------------------------------------------------------------- /scripts/dump_video.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #This is a script that dumps a video file to JPG files. 4 | #It uses ffmpeg, so make sure it is installed ( sudo apt-get install ffmpeg ) 5 | #If you run ./dump_video.sh yourvideo.mp4 6 | #an output folder yourvideo.mp4-data will be created that holds all the frames of the video 7 | #You can then point OpenPose to this directory to convert them to JSON 2D detections 8 | 9 | STARTDIR=`pwd` 10 | #Switch to this directory 11 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 12 | cd "$DIR" 13 | 14 | DATASET="" 15 | 16 | if (( $#<1 )) 17 | then 18 | echo "Please provide arguments, the first argument should be the video file to dump" 19 | exit 1 20 | else 21 | DATASET=$1-data 22 | fi 23 | 24 | THEDATETAG=`date +"%y-%m-%d_%H-%M-%S"` 25 | 26 | mkdir $DATASET 27 | 28 | ffmpeg -i $1 -r 30 -q:v 1 $DATASET/colorFrame_0_%05d.jpg 29 | 30 | cp $DATASET/colorFrame_0_00001.jpg $DATASET/colorFrame_0_00000.jpg 31 | 32 | cd $DATASET 33 | cd .. 34 | 35 | cd $STARTDIR 36 | exit 0 37 | 38 | -------------------------------------------------------------------------------- /scripts/experimentWithDifferentIterationNumbers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | #Simple dependency checker that will apt-get stuff if something is missing 5 | # sudo apt-get install gnuplot 6 | SYSTEM_DEPENDENCIES="gnuplot" 7 | 8 | for REQUIRED_PKG in $SYSTEM_DEPENDENCIES 9 | do 10 | PKG_OK=$(dpkg-query -W --showformat='${Status}\n' $REQUIRED_PKG|grep "install ok installed") 11 | echo "Checking for $REQUIRED_PKG: $PKG_OK" 12 | if [ "" = "$PKG_OK" ]; then 13 | 14 | echo "No $REQUIRED_PKG. Setting up $REQUIRED_PKG." 15 | 16 | #If this is uncommented then only packages that are missing will get prompted.. 17 | #sudo apt-get --yes install $REQUIRED_PKG 18 | 19 | #if this is uncommented then if one package is missing then all missing packages are immediately installed.. 20 | sudo apt-get install $SYSTEM_DEPENDENCIES 21 | break 22 | fi 23 | done 24 | #------------------------------------------------------------------------------ 25 | 26 | 27 | 28 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 29 | cd "$DIR" 30 | 31 | cd ..
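#The GroundTruthDumper run below sweeps the iteration-count hyperparameter of the
#pose solver and writes one ( iterations , error in mm , frames per second ) tuple per
#configuration to tuneIterations.dat; the GNUPLOT_CMD further down then renders the
#sweep as a 3D surface ( the column meanings are inferred from the "using 1:3:2"
#directives and the axis labels of the gnuplot command below ).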
32 | 33 | 34 | ./GroundTruthDumper --from dataset/CMU_Sample_05_01.bvh --selectJoints 0 23 hip eye.r eye.l abdomen chest neck head rshoulder relbow rhand lshoulder lelbow lhand rhip rknee rfoot lhip lknee lfoot toe1-2.r toe5-3.r toe1-2.l toe5-3.l --tuneIterations 130 100 130 0.01 5 30 20 > tuneIterations.dat 35 | 36 | 37 | #set view 45,45; set view map; set yrange [9:90]; set zrange [30:90]; 38 | # set dgrid3d 25,25 qnorm 4; 39 | #splot \"tuneIterations.dat\" using 1:3:2 with points pointsize 3 pointtype 7; 40 | #HCD tuning iteration hyperparameter 41 | GNUPLOT_CMD="set terminal png size 1000,800 font \"Helvetica,34\"; set output \"tuneIterations.png\"; set isosample 160; set pm3d at b; set palette defined (65 \"black\", 70 \"red\", 80 \"yellow\", 90 \"yellow\", 100 \"white\"); set view 30,45; set hidden3d; set xrange [1:25]; set yrange [9:90]; set zrange [30:90]; set style fill solid; set xlabel \"HCD Iterations\" rotate parallel; set ylabel \"Frames per second\" rotate parallel; set zlabel \"Mean average error in mm\" rotate parallel; set ztics 30; set title \"Mean average error in mm\"; set ytics 30; set multiplot; splot \"tuneIterations.dat\" using 1:3:2 with lines palette lw 2 title \" \"; splot \"tuneIterations.dat\" using 1:3:2 with points palette pointsize 1 pointtype 7 title \" \"; " 42 | 43 | gnuplot -e "$GNUPLOT_CMD" 44 | 45 | echo "Result of experiment is now ready @ tuneIterations.png" 46 | timeout 10 gpicview tuneIterations.png 47 | 48 | exit 0 49 | -------------------------------------------------------------------------------- /scripts/getOpenCV.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 5 | cd "$DIR/../dependencies" 6 | 7 | DEPENDENCIES_PATH=`pwd` 8 | 9 | sudo apt-get install build-essential cmake git libgtk2.0-dev pkg-config ffmpeg libavcodec-dev libavformat-dev libswscale-dev libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff5-dev libdc1394-22-dev libeigen3-dev libtheora-dev libvorbis-dev libxvidcore-dev libx264-dev sphinx-common yasm libfaac-dev libopencore-amrnb-dev libopencore-amrwb-dev libopenexr-dev libgstreamer-plugins-base1.0-dev libavutil-dev libavfilter-dev libavresample-dev libpython3-all-dev python3-numpy python3-dev 10 | 11 | #cd "$1" 12 | echo "Will Install @ `pwd`" 13 | 14 | 15 | #Want a different version? 16 | #Check https://opencv.org/releases/ for versions and 17 | #change the OPENCV_VERSION in the next line.. 18 | OPENCV_VERSION="3.2.0" 19 | echo "Downloading" 20 | 21 | wget http://ammar.gr/programs/opencv-$OPENCV_VERSION.zip 22 | wget http://ammar.gr/programs/opencv_contrib-$OPENCV_VERSION.tar.gz 23 | 24 | #wget https://codeload.github.com/opencv/opencv/zip/$OPENCV_VERSION -O opencv-$OPENCV_VERSION.zip 25 | #wget https://codeload.github.com/opencv/opencv_contrib/zip/$OPENCV_VERSION -O opencv_contrib-$OPENCV_VERSION.zip 26 | 27 | echo "Extracting" 28 | 29 | #The opencv_contrib archive is a .tar.gz while the main opencv archive is a .zip, so extract each one accordingly 30 | tar xvzf opencv_contrib-$OPENCV_VERSION.tar.gz 31 | unzip opencv-$OPENCV_VERSION.zip 32 | 33 | echo "Building" 34 | 35 | cd opencv-$OPENCV_VERSION 36 | mkdir build 37 | cd build 38 | cmake -DOPENCV_ENABLE_NONFREE=ON -DOPENCV_EXTRA_MODULES_PATH=$DEPENDENCIES_PATH/opencv_contrib-$OPENCV_VERSION/modules .. 39 | make -j5 40 | 41 | echo "Do you want to install OpenCV to your system ? " 42 | echo 43 | echo -n " (Y/N)?"
44 | read answer 45 | if test "$answer" != "N" -a "$answer" != "n"; 46 | then 47 | sudo make install 48 | fi 49 | 50 | cd "$DIR" 51 | 52 | echo "Done" 53 | 54 | exit 0 55 | -------------------------------------------------------------------------------- /scripts/processDatasetWithOpenpose.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #This script should be run after you have dumped a video to a directory using the dump_video.sh utility 4 | #You should give a full path to this utility i.e. ./processDatasetWithOpenpose.sh "~/myDatasets/yourvideo.mp4-data/" 5 | #and also don't forget to change the PATH_TO_OPENPOSE to the directory that has your openpose.bin! 6 | PATH_TO_OPENPOSE="PLEASE/CHANGE/THIS/" 7 | 8 | 9 | STARTDIR=`pwd` 10 | #Switch to this directory 11 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 12 | 13 | 14 | DATASET="" 15 | 16 | if (( $#<1 )) 17 | then 18 | echo "Please provide arguments, the first argument should be the dataset directory" 19 | exit 1 20 | else 21 | DATASET=$1 22 | 23 | $PATH_TO_OPENPOSE/openpose.bin -number_people_max 1 --hand --face --write_json $DATASET -image_dir $DATASET $@ 24 | 25 | 26 | cd "$DIR" 27 | echo "Went back to $DIR" 28 | echo "Hopefully the path you have given is an absolute path.." 29 | ./convertBody25JSONToCSV -i $DATASET 30 | fi 31 | 32 | exit 0 33 | -------------------------------------------------------------------------------- /scripts/splitStereo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | FROMDATASET="$1" 4 | X=0 5 | 6 | # sudo apt-get install imagemagick 7 | 8 | if (( $# != 1 )); then 9 | echo "Please run giving the path of the stereo dataset to split" 10 | echo "$0 \"DatasetNameSource\" " 11 | exit 0 12 | fi 13 | 14 | 15 | TOTAL_FRAMES=100000 16 | 17 | cd $FROMDATASET 18 | 19 | echo "Splitting Dataset $FROMDATASET " 20 | echo "Please wait .. " 21 | for (( i=$X; i<=$TOTAL_FRAMES; i++ )) 22 | do 23 | XNUM=`printf %05u $i` 24 | 25 | if [ -f "colorFrame_0_$XNUM.jpg" ] 26 | then 27 | width=$(identify -format "%w" "colorFrame_0_$XNUM.jpg")> /dev/null 28 | height=$(identify -format "%h" "colorFrame_0_$XNUM.jpg")> /dev/null 29 | halfWidth=$((width / 2)) 30 | echo "$width x $height -> 2x $halfWidth x $height" 31 | 32 | 33 | #First crop second half ( before source file gets rewritten ) 34 | convert colorFrame_0_$XNUM.jpg -crop "$halfWidth"x$height+$halfWidth+0 colorFrame_1_$XNUM.jpg 35 | convert colorFrame_0_$XNUM.jpg -crop "$halfWidth"x$height+0+0 colorFrame_0_$XNUM.jpg 36 | else 37 | #No more frames to process, we are done..! 38 | echo "" 39 | echo "Done splitting $FROMDATASET" 40 | cd .. 41 | exit 0 42 | fi 43 | 44 | echo -n "." 45 | done 46 | 47 | echo "Passed TOTAL_FRAMES (!) this is a bug! :S" 48 | 49 | cd ..
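#For reference: ImageMagick -crop geometry is WIDTHxHEIGHT+XOFFSET+YOFFSET, so for a
#hypothetical 1280x480 side-by-side stereo frame the two convert calls in the loop
#above are equivalent to:
#  convert colorFrame_0_00000.jpg -crop 640x480+640+0 colorFrame_1_00000.jpg  #right half
#  convert colorFrame_0_00000.jpg -crop 640x480+0+0   colorFrame_0_00000.jpg  #left half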
50 | 51 | exit 0 52 | -------------------------------------------------------------------------------- /scripts/tensorflow2Build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #Script last updated 25-03-2022 4 | 5 | #Script that seems to be able to build bazel/tf2 on Ubuntu 20.04 6 | #I really deeply dislike the bazel build system which is bloated and obfuscated for no reason, just Google "NIH syndrome" 7 | #However Tensorflow2 is a great NN framework 8 | # See this video "How To Make Package Managers Cry" -> https://www.youtube.com/watch?v=NSemlYagjIU#t=19m0s 9 | 10 | echo "Check CUDA" 11 | ls /usr/local/cuda/extras/CUPTI/lib64 12 | ls /usr/local/cuda 13 | ls /usr/local/cuda/lib64/ | grep libcudnn.so 14 | nvcc -V 15 | 16 | 17 | VERSION="2.8" 18 | 19 | #Get the amount of RAM in kilobytes ( as reported by free ) 20 | RAM=`free | grep Mem | tr -s ' '| cut -f2 -d ' '` 21 | 22 | 23 | BazelPleaseSlowDown="--local_resources 2048,.5,1.0" 24 | 25 | if [ "$RAM" -gt "11286464" ]; then 26 | echo "More than 12GB"; 27 | BazelPleaseSlowDown=" " 28 | fi 29 | if [ "$RAM" -gt "15297500" ]; then 30 | echo "More than 16GB"; 31 | BazelPleaseSlowDown=" " 32 | fi 33 | if [ "$RAM" -gt "31861780" ]; then 34 | echo "More than 32GB"; 35 | BazelPleaseSlowDown=" " 36 | fi 37 | 38 | 39 | #Tensorflow is a great Neural network library that unfortunately is coupled to the terrible Bazel build system 40 | #This is a download and build script for Ubuntu 20.04, that should work building release 2.8 41 | 42 | sudo apt-get install python3-dev python3-pip python3-venv python3-tk 43 | 44 | pip3 install -U --user pip numpy wheel packaging 45 | pip3 install -U --user keras_preprocessing --no-deps 46 | 47 | cd ~/Documents 48 | mkdir 3dParty 49 | cd 3dParty 50 | 51 | #wget https://github.com/bazelbuild/bazel/releases/download/4.2.1/bazel-4.2.1-installer-linux-x86_64.sh 52 | #chmod +x bazel-4.2.1-installer-linux-x86_64.sh 53 | #./bazel-4.2.1-installer-linux-x86_64.sh --user 54 | 55 | #r2.8 , download bazel in a subshell so that we stay inside 3dParty, and make the downloaded binary available as "bazel" 56 | mkdir -p "$HOME/.bazel/bin" && ( cd "$HOME/.bazel/bin" && curl -fLO https://releases.bazel.build/4.2.1/release/bazel-4.2.1-linux-x86_64 && chmod +x bazel-4.2.1-linux-x86_64 && ln -sf "$HOME/.bazel/bin/bazel-4.2.1-linux-x86_64" "$HOME/.bazel/bin/bazel" ) 57 | 58 | #Create shared directory 59 | if [ -f ~/.bashrc ] 60 | then 61 | if cat ~/.bashrc | grep -q "BAZEL_CANCER" 62 | then 63 | echo "Bazel includes seem to be set-up.." 64 | else 65 | USER=`whoami` 66 | echo "#BAZEL_CANCER" >> ~/.bashrc 67 | echo "source $HOME/.bazel/bin/bazel-complete.bash" >> ~/.bashrc 68 | echo "export PATH=\"\$PATH:\$HOME/bin:\$HOME/.bazel/bin\"" >> ~/.bashrc 69 | source ~/.bashrc 70 | fi 71 | fi 72 | 73 | if [ ! -d tensorflow ] 74 | then 75 | git clone https://github.com/tensorflow/tensorflow.git 76 | fi 77 | 78 | cd tensorflow 79 | git pull 80 | git checkout r$VERSION 81 | 82 | 83 | #Make sure to check your target CPU and when asked use the correct -march= / -mtune= 84 | # for example for an old intel i7 -march=nehalem is used..
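#If you are unsure which value fits your CPU, you can ask gcc what -march=native
#resolves to on your machine and give that answer when the configure step asks:
#  gcc -march=native -Q --help=target | grep -- '-march='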
85 | #https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html 86 | 87 | 88 | echo "Answers to configure questions :" 89 | echo "/usr/bin/python3" 90 | echo "/usr/lib/python3/dist-packages" 91 | echo "cuda Y" 92 | echo "tensorrt Y" 93 | echo "CUDA 11.2" 94 | echo "CuDNN 8" 95 | echo "TensorRT 8" 96 | echo "/usr/local/cuda/,/usr/local/cuda/include/,/usr/local/cuda/bin/,/usr/local/cuda/lib64/,/usr/local/cuda/lib64/,/usr/local/tensorrt-8.2.3/,/usr/local/tensorrt-8.2.3/include/,/usr/local/tensorrt-8.2.3/lib/," 97 | echo "Compute capability 6.1 ( for GTX 1050 + cards )" 98 | 99 | #Attempt to inform the configure script on how to find the CUDA stuff.. 100 | export CUDNN_INSTALL_PATH=/usr/local/cuda/,/usr/local/cuda/include/,/usr/local/cuda/bin/,/usr/local/cuda/lib64/,/usr/local/cuda/lib64/,/usr/local/tensorrt-8.2.3/,/usr/local/tensorrt-8.2.3/include/,/usr/local/tensorrt-8.2.3/lib/ 101 | 102 | 103 | ./configure 104 | 105 | bazel clean --expunge 106 | 107 | 108 | 109 | #You should use CUDA 11.2 and cudnn-11.2-linux-x64-v8.1.1.33 and TensorRT 8.2.3 110 | 111 | bazel build --config=opt --config=cuda --config=mkl --config=monolithic $BazelPleaseSlowDown //tensorflow/tools/pip_package:build_pip_package 112 | ./bazel-bin/tensorflow/tools/pip_package/build_pip_package ~/Documents/3dParty/ 113 | #To install 114 | #pip3 install --user ~/Documents/3dParty/tensorflow-*.whl 115 | 116 | 117 | 118 | bazel build --config opt --config=cuda --config=monolithic //tensorflow/tools/lib_package:libtensorflow 119 | cp bazel-bin/tensorflow/tools/lib_package/libtensorflow.tar.gz ~/Documents/3dParty/libtensorflow-r$VERSION.tar.gz 120 | 121 | 122 | #Build tensorflow lite 123 | #https://www.tensorflow.org/lite/guide/build_cmake 124 | mkdir tflite_build 125 | cd tflite_build/ 126 | cmake ../tensorflow/lite 127 | cmake --build . -j 128 | 129 | 130 | echo "Please visit ~/Documents/3dParty/ to collect your tensorflow python3 wheel, and C Library.."
131 | echo "Will now use : python -c 'import tensorflow as tf;' to test your tensorflow" 132 | 133 | python -c 'import tensorflow as tf;' 134 | 135 | exit 0 136 | -------------------------------------------------------------------------------- /scripts/tensorflowBuild.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #Script source repository https://github.com/AmmarkoV/MyScripts/blob/master/Tensorflow/tensorflowBuild.sh 4 | #Script last updated 23-06-2020 5 | 6 | #Script that seems to be able to build bazel/tensorflow r1.15 on Ubuntu 18.04 7 | #I really deeply dislike the bazel build system which is bloated and obfuscated for no reason, just Google "NIH syndrome" 8 | #However Tensorflow is a great NN framework 9 | # See this video "How To Make Package Managers Cry" -> https://www.youtube.com/watch?v=NSemlYagjIU#t=19m0s 10 | 11 | 12 | #Get the amount of RAM in kilobytes ( as reported by free ) 13 | RAM=`free | grep Mem | tr -s ' '| cut -f2 -d ' '` 14 | 15 | 16 | BazelPleaseSlowDown="--local_resources 2048,.5,1.0" 17 | 18 | if [ "$RAM" -gt "11286464" ]; then 19 | echo "More than 12GB"; 20 | BazelPleaseSlowDown=" " 21 | fi 22 | if [ "$RAM" -gt "15297500" ]; then 23 | echo "More than 16GB"; 24 | BazelPleaseSlowDown=" " 25 | fi 26 | if [ "$RAM" -gt "31861780" ]; then 27 | echo "More than 32GB"; 28 | BazelPleaseSlowDown=" " 29 | fi 30 | 31 | 32 | #Tensorflow is a great Neural network library that unfortunately is coupled to the terrible Bazel build system 33 | #This is a download and build script for Ubuntu 18.04, that should work building release 1.15 34 | 35 | sudo apt-get install python3-dev python3-pip python3-venv python3-tk 36 | 37 | pip install -U --user pip six numpy wheel setuptools mock 'future>=0.17.1' 38 | pip install -U --user keras_applications --no-deps 39 | pip install -U --user keras_preprocessing --no-deps 40 | 41 | 42 | cd ~/Documents 43 | mkdir 3dParty 44 | cd 3dParty 45 | 46 | wget http://ammar.gr/mocapnet/bazel-0.24.1-installer-linux-x86_64-for-tensorflow-r1.15.sh 47 | chmod +x bazel-0.24.1-installer-linux-x86_64-for-tensorflow-r1.15.sh 48 | ./bazel-0.24.1-installer-linux-x86_64-for-tensorflow-r1.15.sh --user 49 | 50 | #Create shared directory 51 | if [ -f ~/.bashrc ] 52 | then 53 | if cat ~/.bashrc | grep -q "BAZEL_CANCER" 54 | then 55 | echo "Bazel includes seem to be set-up.." 56 | else 57 | USER=`whoami` 58 | echo "#BAZEL_CANCER" >> ~/.bashrc 59 | echo "source ~/.bazel/bin/bazel-complete.bash" >> ~/.bashrc 60 | echo "export PATH=\"\$PATH:\$HOME/bin\"" >> ~/.bashrc 61 | source ~/.bashrc 62 | fi 63 | fi 64 | 65 | if [ ! -d tensorflow ] 66 | then 67 | git clone https://github.com/tensorflow/tensorflow.git 68 | fi 69 | 70 | cd tensorflow 71 | git pull 72 | git checkout r1.15 73 | 74 | 75 | ./configure 76 | 77 | bazel clean --expunge 78 | 79 | #Flags for bazel if you have gcc<5.0 80 | #--cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0" 81 | #--noincompatible_do_not_split_linking_cmdline 82 | 83 | bazel build --config=opt --config=cuda --config=mkl --config=monolithic $BazelPleaseSlowDown //tensorflow/tools/pip_package:build_pip_package 84 | ./bazel-bin/tensorflow/tools/pip_package/build_pip_package ~/Documents/3dParty/ 85 | 86 | 87 | bazel build --config opt --config=cuda --config=monolithic //tensorflow/tools/lib_package:libtensorflow 88 | cp bazel-bin/tensorflow/tools/lib_package/libtensorflow.tar.gz ~/Documents/3dParty/libtensorflow-r1.15.tar.gz 89 | 90 | echo "Please visit ~/Documents/3dParty/ to collect your tensorflow python3 wheel, and C Library.."
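#Tip: the ./configure step above can also be driven non-interactively through
#environment variables instead of typed answers (a sketch; the exact variable set
#depends on the tensorflow release, so treat these as assumptions):
#  export PYTHON_BIN_PATH=/usr/bin/python3
#  export TF_NEED_CUDA=1
#  export TF_CUDA_COMPUTE_CAPABILITIES="6.1"
#  ./configure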
91 | 92 | 93 | exit 0 94 | -------------------------------------------------------------------------------- /src/GroundTruthGenerator/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | project( GroundTruthDumper ) 2 | cmake_minimum_required( VERSION 2.8.13 ) 3 | set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/../cmake/modules ${CMAKE_MODULE_PATH}) 4 | 5 | 6 | add_executable( 7 | GroundTruthDumper 8 | ${CMAKE_SOURCE_DIR}/dependencies/RGBDAcquisition/opengl_acquisition_shared_library/opengl_depth_and_color_renderer/src/Applications/BVHTester/main.c 9 | ${CMAKE_SOURCE_DIR}/dependencies/RGBDAcquisition/opengl_acquisition_shared_library/opengl_depth_and_color_renderer/src/Library/TrajectoryParser/InputParser_C.c 10 | ${BVH_SOURCE} 11 | ) 12 | 13 | target_link_libraries(GroundTruthDumper rt m pthread ) 14 | #add_dependencies(GroundTruthDumper OGLRendererSandbox) 15 | 16 | 17 | set_target_properties(GroundTruthDumper PROPERTIES 18 | ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 19 | LIBRARY_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 20 | RUNTIME_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 21 | ) 22 | -------------------------------------------------------------------------------- /src/GroundTruthGenerator/README.md: -------------------------------------------------------------------------------- 1 | To simplify package maintenance, the GroundTruthGenerator uses the code from the BVHTester of RGBDAcquisition. 2 | If you want to browse the code, e.g. to find out about the different command line options, this is the [main.c](https://github.com/AmmarkoV/RGBDAcquisition/blob/master/opengl_acquisition_shared_library/opengl_depth_and_color_renderer/src/Applications/BVHTester/main.c) 3 | 4 | All the implementation files that parse BVH files are part of the MotionCaptureLoader module of the RGBDAcquisition OGL Renderer Sandbox. You can [find the BVH code here](https://github.com/AmmarkoV/RGBDAcquisition/tree/master/opengl_acquisition_shared_library/opengl_depth_and_color_renderer/src/Library/MotionCaptureLoader) 5 | 6 | Sorry for having different things in different places, but this really minimizes the effort needed to release the code, and it also reduces the number of possible bugs that come from keeping copies of the same code in sync in different places. 7 | 
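A typical invocation, using flags that appear in scripts/experimentWithDifferentIterationNumbers.sh of this repository (see the main.c linked above for the full list of command line options):

./GroundTruthDumper --from dataset/CMU_Sample_05_01.bvh --tuneIterations 130 100 130 0.01 5 30 20 > tuneIterations.dat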
-------------------------------------------------------------------------------- /src/HelloWorld/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | project( helloWorld ) 2 | cmake_minimum_required(VERSION 3.5) 3 | 4 | 5 | 6 | #----------------------------------------------- 7 | # This is the HelloWorld test executable.. 8 | #----------------------------------------------- 9 | project( helloWorld ) 10 | add_executable(helloWorld main.cpp) 11 | target_link_libraries(helloWorld rt dl m ) 12 | set_target_properties(helloWorld PROPERTIES DEBUG_POSTFIX "D") 13 | set_target_properties(helloWorld PROPERTIES 14 | ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 15 | LIBRARY_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 16 | RUNTIME_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 17 | ) 18 | 19 | 20 | -------------------------------------------------------------------------------- /src/HelloWorld/main.cpp: -------------------------------------------------------------------------------- 1 | 2 | #include <iostream> 3 | 4 | int main() { 5 | std::cout << "\n\n\n\n\n\nHello World!\n"; 6 | std::cout << "Congrats, if you can read this you can compile C++ code using CMake..!\n\n\n\n\n"; 7 | return 0; 8 | } 9 | -------------------------------------------------------------------------------- /src/HelloWorld/testCMake.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 4 | cd "$DIR" 5 | 6 | mkdir build 7 | cd build 8 | cmake .. 9 | make 10 | 11 | cd "$DIR" 12 | ./helloWorld 13 | 14 | exit 0 15 | -------------------------------------------------------------------------------- /src/JointEstimator2D/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | project( JointEstimator2D ) 2 | cmake_minimum_required(VERSION 3.5) 3 | 4 | 5 | set_property(GLOBAL PROPERTY USE_FOLDERS ON) 6 | #set(CMAKE_CXX_STANDARD 11) 7 | 8 | include_directories(${TENSORFLOW_INCLUDE_ROOT}) 9 | 10 | add_library( 11 | JointEstimator2D SHARED 12 | jointEstimator2D.cpp 13 | cameraControl.cpp 14 | visualization.cpp 15 | ${CMAKE_SOURCE_DIR}/src/Tensorflow/tensorflow.cpp 16 | ${CMAKE_SOURCE_DIR}/src/Tensorflow/tf_utils.cpp 17 | ) 18 | 19 | target_link_libraries(JointEstimator2D rt dl m Tensorflow TensorflowFramework ) 20 | set_target_properties(JointEstimator2D PROPERTIES DEBUG_POSTFIX "D") 21 | 22 | 23 | set_target_properties(JointEstimator2D PROPERTIES 24 | ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 25 | LIBRARY_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 26 | RUNTIME_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 27 | ) 28 | 29 | 30 | 31 | 32 | find_package(OpenCV REQUIRED) 33 | INCLUDE_DIRECTORIES(${OpenCV_INCLUDE_DIRS}) 34 | 35 | add_executable(WebcamAnd2DJoints webcamAnd2DJoints.cpp) 36 | target_link_libraries(WebcamAnd2DJoints rt dl m ${OpenCV_LIBRARIES} JointEstimator2D Tensorflow TensorflowFramework) 37 | set_target_properties(WebcamAnd2DJoints PROPERTIES DEBUG_POSTFIX "D") 38 | 39 | 40 | set_target_properties(WebcamAnd2DJoints PROPERTIES 41 | ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 42 | LIBRARY_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 43 | RUNTIME_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 44 | ) 45 | -------------------------------------------------------------------------------- /src/JointEstimator2D/cameraControl.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | 4 | #include "opencv2/opencv.hpp" 5 | using namespace cv; 6 | 7 | 8 | int getBestCropWindow( 9 | int maximumCrop, 10 | unsigned int * x, 11 | unsigned int * y, 12 | unsigned int * width, 13 | unsigned int * height, 14 | struct boundingBox * bbox, 15 | unsigned int inputWidth2DJointDetector, 16 | unsigned int inputHeight2DJointDetector, 17 | unsigned int fullFrameWidth, 18 | unsigned int fullFrameHeight 19 | );
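//A call-shape sketch for getBestCropWindow: the argument semantics are assumed from
//the parameter names, bbox would hold the previous frame's detection and 368x368 is
//just a typical 2D joint estimator input resolution (all values are hypothetical):
//
//  struct boundingBox bbox={0};
//  unsigned int x=0, y=0, width=0, height=0;
//  getBestCropWindow(1,&x,&y,&width,&height,&bbox,368,368,1920,1080);
//  //x,y,width and height now describe the crop to extract before 2D joint estimation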
-------------------------------------------------------------------------------- /src/JointEstimator2D/visualization.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "opencv2/opencv.hpp" 4 | /** @file visualization.hpp 5 | * @brief Visualization utilities for the 2D joint estimator ( heatmaps, extracted skeletons and crops ) 6 | * @author Ammar Qammaz (AmmarkoV) 7 | */ 8 | #include <stdio.h> 9 | #include <vector> 10 | #include "jointEstimator2D.hpp" 11 | 12 | #include "../MocapNET2/MocapNETLib2/IO/commonSkeleton.hpp" 13 | 14 | using namespace cv; 15 | 16 | int visualizeHeatmaps(struct JointEstimator2D * jointEstimator,std::vector<std::vector<float> > heatmapNNOutput,unsigned int frameNumber); 17 | 18 | void dj_drawExtractedSkeletons( 19 | cv::Mat img, 20 | struct Skeletons2DDetected * sk, 21 | float factorX, 22 | float factorY 23 | ); 24 | 25 | 26 | 27 | int cropAndResizeCVMatToMatchSkeleton( 28 | struct JointEstimator2D * jest, 29 | cv::Mat & frame, 30 | struct Skeletons2DDetected * sk 31 | ); 32 | -------------------------------------------------------------------------------- /src/MocapNET2/BVHGUI2/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | project( BVHGUI2 ) 2 | cmake_minimum_required( VERSION 2.8.13 ) 3 | #cmake_minimum_required(VERSION 3.5) 4 | find_package(OpenCV REQUIRED) 5 | INCLUDE_DIRECTORIES(${OpenCV_INCLUDE_DIRS}) 6 | 7 | #set_property(GLOBAL PROPERTY USE_FOLDERS ON) 8 | set(CMAKE_CXX_STANDARD 11) 9 | include_directories(${TENSORFLOW_INCLUDE_ROOT}) 10 | 11 | 12 | add_executable(BVHGUI2 bvhGUI2.cpp 13 | ${BVH_SOURCE} ) 14 | 15 | target_link_libraries(BVHGUI2 rt dl m ${OpenCV_LIBRARIES} ${OPENGL_LIBS} Tensorflow TensorflowFramework MocapNETLib2 ) 16 | set_target_properties(BVHGUI2 PROPERTIES DEBUG_POSTFIX "D") 17 | 18 | 19 | set_target_properties(BVHGUI2 PROPERTIES 20 | ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 21 | LIBRARY_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 22 | RUNTIME_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 23 | ) 24 | 25 | -------------------------------------------------------------------------------- /src/MocapNET2/BVHTemplate/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | project( BVHTemplate ) 2 | cmake_minimum_required( VERSION 2.8.7 ) 3 | set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/../cmake/modules ${CMAKE_MODULE_PATH}) 4 | 5 | 6 | add_executable(BVHTemplate main.c ${BVH_SOURCE} ) 7 | 8 | target_link_libraries(BVHTemplate rt m pthread ) 9 | #add_dependencies(BVHTemplate OGLRendererSandbox) 10 | 11 | 12 | set_target_properties(BVHTemplate PROPERTIES 13 | ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 14 | LIBRARY_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 15 | RUNTIME_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 16 | ) 17 | -------------------------------------------------------------------------------- /src/MocapNET2/BVHTemplate/main.c: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FORTH-ModelBasedTracker/MocapNET/4aa3720855b99244d39aa77d637a97b563474113/src/MocapNET2/BVHTemplate/main.c -------------------------------------------------------------------------------- /src/MocapNET2/CSVClusterPlot/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | project( CSVClusterPlot ) 2 | cmake_minimum_required( VERSION 2.8.13 ) 3 | #cmake_minimum_required(VERSION 3.5) 4 | find_package(OpenCV REQUIRED) 5 | 
INCLUDE_DIRECTORIES(${OpenCV_INCLUDE_DIRS}) 6 | 7 | #set_property(GLOBAL PROPERTY USE_FOLDERS ON) 8 | set(CMAKE_CXX_STANDARD 11) 9 | include_directories(${TENSORFLOW_INCLUDE_ROOT}) 10 | 11 | 12 | add_executable( 13 | CSVClusterPlot 14 | csvClusterPlot.cpp 15 | perform2DClustering.cpp 16 | perform3DClustering.cpp 17 | ${BVH_SOURCE} 18 | ) 19 | 20 | target_link_libraries(CSVClusterPlot rt dl m ${OpenCV_LIBRARIES} ${OPENGL_LIBS} Tensorflow TensorflowFramework MocapNETLib2 ) 21 | #set_target_properties(CSVClusterPlot PROPERTIES DEBUG_POSTFIX "D") 22 | 23 | 24 | set_target_properties(CSVClusterPlot PROPERTIES 25 | ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 26 | LIBRARY_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 27 | RUNTIME_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 28 | ) 29 | 30 | -------------------------------------------------------------------------------- /src/MocapNET2/CSVClusterPlot/perform2DClustering.cpp: -------------------------------------------------------------------------------- 1 | #include "perform2DClustering.hpp" 2 | 3 | 4 | #include "../MocapNETLib2/IO/bvh.hpp" 5 | 6 | #include "../../../dependencies/RGBDAcquisition/opengl_acquisition_shared_library/opengl_depth_and_color_renderer/src/Library/MotionCaptureLoader/bvh_loader.h" 7 | #include "../../../dependencies/RGBDAcquisition/opengl_acquisition_shared_library/opengl_depth_and_color_renderer/src/Library/MotionCaptureLoader/edit/bvh_filter.h" 8 | #include "../../../dependencies/RGBDAcquisition/opengl_acquisition_shared_library/opengl_depth_and_color_renderer/src/Library/MotionCaptureLoader/edit/bvh_rename.h" 9 | #include "../../../dependencies/RGBDAcquisition/opengl_acquisition_shared_library/opengl_depth_and_color_renderer/src/Library/MotionCaptureLoader/edit/bvh_randomize.h" 10 | 11 | 12 | 13 | std::vector<std::vector<float> > collectPoses( 14 | struct applicationState * state, 15 | std::vector<float> &activeJoints, 16 | struct BVH_MotionCapture * bvhMotion, 17 | float distance, 18 | unsigned int width, 19 | unsigned int height 20 | ) 21 | { 22 | 23 | std::vector<std::vector<float> > bvh2DPoints; 24 | 25 | if (bvhMotion==0) 26 | { 27 | return bvh2DPoints; 28 | } 29 | 30 | 31 | std::vector<float> bvhConfiguration; 32 | 33 | bvhConfiguration.clear(); 34 | for (int i=0; i<bvhMotion->numberOfValuesPerFrame; i++) 35 | { 36 | bvhConfiguration.push_back(0.0); 37 | } 38 | bvhConfiguration[2]=(float) -150.0; 39 | 40 | 41 | 42 | if ( bvhConfiguration.size() < bvhMotion->numberOfValuesPerFrame) 43 | { 44 | return bvh2DPoints; 45 | } 46 | 47 | unsigned int mID=0; 48 | 49 | fprintf(stderr,"Collecting %u poses\n",bvhMotion->numberOfFrames); 50 | 51 | // state->maxAccumulatedSample= 0; 52 | 53 | for (int frameID=0; frameID<bvhMotion->numberOfFrames; frameID++) 54 | { 55 | //fprintf(stderr,".%u",frameID); 56 | 57 | for (int i=0; i<bvhMotion->numberOfValuesPerFrame; i++) 58 | { 59 | //fprintf(stderr,"%u ",i); 60 | int motionValueID = mID % (bvhMotion->numberOfValuesPerFrame * bvhMotion->numberOfFrames); 61 | bvhConfiguration[i]=bvhMotion->motionValues[motionValueID]; 62 | ++mID; 63 | } 64 | //fprintf(stderr,"!"); 65 | 66 | 67 | if (bvhConfiguration.size()>5) 68 | { 69 | bvhConfiguration[0]=0; 70 | bvhConfiguration[1]=0; 71 | bvhConfiguration[2]=distance; 72 | bvhConfiguration[3]=0; 73 | bvhConfiguration[4]=state->rotation; 74 | bvhConfiguration[5]=0; 75 | 76 | bvh2DPoints = convertBVHFrameTo2DPoints(bvhConfiguration); //,width, height 77 | 78 | 79 | 80 | if (bvh2DPoints.size()>0) 81 | { 82 | ++state->accumulatedSamples; 83 | for (int i=0; i<bvh2DPoints.size(); i++) 84 | { 85 | //Cast the projected sub-pixel 2D point to integer pixel coordinates 86 | unsigned int x = (unsigned int) bvh2DPoints[i][0]; 87 | unsigned int y = (unsigned int) bvh2DPoints[i][1]; 88 | 89 | if ( (bvh2DPoints[i][0]>0) && (bvh2DPoints[i][1]>0) && (bvh2DPoints[i][0]<width) && (bvh2DPoints[i][1]<height) ) 90 | { 91 | //The joint landed inside the image plane, accumulate one more hit for this pixel 92 | unsigned long newValue = state->accumulatedImage[y*width+x] + 1; 93 | 
94 | if (newValue<=state->accumulatedSamples) 95 | { 96 | state->accumulatedImage[y*width+x] = newValue; 97 | 98 | if (state->maxAccumulatedSample<newValue) 99 | { 100 | state->maxAccumulatedSample=newValue; 101 | //fprintf(stderr,"Pixel %u,%u has largest value %lu\n",x,y,newValue); 102 | } 103 | } 104 | } 105 | else 106 | { 107 | fprintf(stderr,"%0.2f,%0.2f wrongly cast to %u,%u",bvh2DPoints[i][0],bvh2DPoints[i][1],x,y); 108 | } 109 | 110 | } 111 | } 112 | else 113 | { 114 | fprintf(stderr,"Could not project BVH 2D points\n"); 115 | } 116 | } 117 | } //For every frame loop.. 118 | 119 | return bvh2DPoints; 120 | } -------------------------------------------------------------------------------- /src/MocapNET2/CSVClusterPlot/perform2DClustering.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include <stdio.h> 4 | #include <vector> 5 | 6 | struct applicationState 7 | { 8 | int rotation; 9 | int selectedJoint,previousSelection; 10 | int visualizationType,previousVisualizationType; 11 | int stop; 12 | int save; 13 | int redraw; 14 | //----------------- 15 | unsigned long * accumulatedImage; 16 | unsigned long accumulatedSamples; 17 | unsigned long maxAccumulatedSample; 18 | }; 19 | 20 | 21 | std::vector<std::vector<float> > collectPoses( 22 | struct applicationState * state, 23 | std::vector<float> &activeJoints, 24 | struct BVH_MotionCapture * bvhMotion, 25 | float distance, 26 | unsigned int width, 27 | unsigned int height 28 | ); -------------------------------------------------------------------------------- /src/MocapNET2/CSVClusterPlot/perform3DClustering.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include <stdio.h> 4 | #include <vector> 5 | 6 | 7 | struct voxelElement 8 | { 9 | char element; 10 | }; 11 | 12 | 13 | struct clusteringOf3DPoses 14 | { 15 | FILE *fp; 16 | struct voxelElement * space; 17 | unsigned long allocatedSpaceMemorySize; 18 | unsigned int width,height,depth; 19 | unsigned long accumulatedSamples; 20 | unsigned long maxAccumulatedSample; 21 | 22 | }; 23 | 24 | 25 | 26 | 27 | std::vector<std::vector<float> > collect3DPoses( 28 | struct clusteringOf3DPoses * state, 29 | std::vector<float> &activeJoints, 30 | struct BVH_MotionCapture * bvhMotion, 31 | float distance, 32 | unsigned int width, 33 | unsigned int height, 34 | unsigned int depth 35 | ); -------------------------------------------------------------------------------- /src/MocapNET2/Converters/H36M/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | project( convertH36GroundTruthToMocapNETInput ) 2 | cmake_minimum_required(VERSION 3.5) 3 | 4 | 5 | set_property(GLOBAL PROPERTY USE_FOLDERS ON) 6 | 7 | include_directories(${TENSORFLOW_INCLUDE_ROOT}) 8 | 9 | #----------------------------------------------- 10 | # This is the converter utilities..
11 | #----------------------------------------------- 12 | add_executable( 13 | convertH36GroundTruthToMocapNETInput 14 | convertH36GroundTruthToMocapNETInput.cpp 15 | ../../MocapNETLib2/tools.cpp 16 | ../../MocapNETLib2/IO/jsonRead.cpp 17 | ../../MocapNETLib2/IO/jsonMocapNETHelpers.cpp 18 | ../../../../dependencies/InputParser/InputParser_C.cpp 19 | ${BVH_SOURCE} 20 | ${OPENGL_SOURCE} 21 | ../../MocapNETLib2/IO/bvh.cpp 22 | ../../MocapNETLib2/IO/conversions.cpp 23 | ../../MocapNETLib2/IO/csvRead.cpp 24 | ../../MocapNETLib2/IO/csvWrite.cpp 25 | ) 26 | 27 | #----------------------------------------------- 28 | 29 | target_link_libraries(convertH36GroundTruthToMocapNETInput rt dl m pthread MocapNETLib2) 30 | set_target_properties(convertH36GroundTruthToMocapNETInput PROPERTIES DEBUG_POSTFIX "D") 31 | set_target_properties(convertH36GroundTruthToMocapNETInput PROPERTIES 32 | ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 33 | LIBRARY_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 34 | RUNTIME_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 35 | ) 36 | 37 | -------------------------------------------------------------------------------- /src/MocapNET2/Converters/Openpose/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | project( convertOpenPoseJSONToCSV ) 2 | cmake_minimum_required(VERSION 3.5) 3 | 4 | 5 | set_property(GLOBAL PROPERTY USE_FOLDERS ON) 6 | 7 | include_directories(${TENSORFLOW_INCLUDE_ROOT}) 8 | 9 | 10 | #----------------------------------------------- 11 | # This is the converter utilities.. 12 | #----------------------------------------------- 13 | add_executable( 14 | convertOpenPoseJSONToCSV 15 | convertOpenPoseJSONToCSV.cpp 16 | ../../MocapNETLib2/tools.cpp 17 | ../../MocapNETLib2/IO/jsonRead.cpp 18 | ../../MocapNETLib2/IO/jsonMocapNETHelpers.cpp 19 | ../../../../dependencies/InputParser/InputParser_C.cpp 20 | ${BVH_SOURCE} 21 | ${OPENGL_SOURCE} 22 | ../../MocapNETLib2/IO/bvh.cpp 23 | ../../MocapNETLib2/IO/csvWrite.cpp 24 | ) 25 | 26 | 27 | #----------------------------------------------- 28 | 29 | target_link_libraries(convertOpenPoseJSONToCSV rt dl m pthread MocapNETLib2) 30 | set_target_properties(convertOpenPoseJSONToCSV PROPERTIES DEBUG_POSTFIX "D") 31 | set_target_properties(convertOpenPoseJSONToCSV PROPERTIES 32 | ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 33 | LIBRARY_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 34 | RUNTIME_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 35 | ) 36 | 37 | 38 | -------------------------------------------------------------------------------- /src/MocapNET2/Converters/convertCSV3D/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | project( convertCSV3DToMocapNETInput ) 2 | cmake_minimum_required(VERSION 3.5) 3 | 4 | 5 | set_property(GLOBAL PROPERTY USE_FOLDERS ON) 6 | 7 | include_directories(${TENSORFLOW_INCLUDE_ROOT}) 8 | 9 | #----------------------------------------------- 10 | # This is the converter utilities.. 
11 | #----------------------------------------------- 12 | add_executable( 13 | convertCSV3DToMocapNETInput 14 | convertCSV3DToMocapNETInput.cpp 15 | ../../MocapNETLib2/tools.cpp 16 | ../../MocapNETLib2/IO/jsonRead.cpp 17 | ../../MocapNETLib2/IO/jsonMocapNETHelpers.cpp 18 | ../../../../dependencies/InputParser/InputParser_C.cpp 19 | ${BVH_SOURCE} 20 | ${OPENGL_SOURCE} 21 | ../../MocapNETLib2/IO/bvh.cpp 22 | ../../MocapNETLib2/IO/conversions.cpp 23 | ../../MocapNETLib2/IO/csvRead.cpp 24 | ../../MocapNETLib2/IO/csvWrite.cpp 25 | ) 26 | 27 | #----------------------------------------------- 28 | 29 | target_link_libraries(convertCSV3DToMocapNETInput rt dl m pthread MocapNETLib2) 30 | set_target_properties(convertCSV3DToMocapNETInput PROPERTIES DEBUG_POSTFIX "D") 31 | set_target_properties(convertCSV3DToMocapNETInput PROPERTIES 32 | ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 33 | LIBRARY_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 34 | RUNTIME_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 35 | ) 36 | 37 | -------------------------------------------------------------------------------- /src/MocapNET2/HandOnlyTest/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | project( HandOnlyTest ) 2 | cmake_minimum_required( VERSION 2.8.7 ) 3 | #cmake_minimum_required(VERSION 3.5) 4 | 5 | find_package(OpenCV REQUIRED) 6 | INCLUDE_DIRECTORIES(${OpenCV_INCLUDE_DIRS}) 7 | 8 | add_executable(HandOnlyTest handTest.cpp) 9 | target_link_libraries(HandOnlyTest rt dl m ${OpenCV_LIBRARIES} Tensorflow TensorflowFramework MocapNETLib2 ) 10 | set_target_properties(HandOnlyTest PROPERTIES DEBUG_POSTFIX "D") 11 | 12 | 13 | set_target_properties(HandOnlyTest PROPERTIES 14 | ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 15 | LIBRARY_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 16 | RUNTIME_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 17 | ) 18 | 19 | -------------------------------------------------------------------------------- /src/MocapNET2/MocapNET2LiveWebcamDemo/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | project( MocapNET2LiveWebcamDemo ) 2 | cmake_minimum_required( VERSION 2.8.13 ) 3 | #cmake_minimum_required(VERSION 3.5) 4 | find_package(OpenCV REQUIRED) 5 | INCLUDE_DIRECTORIES(${OpenCV_INCLUDE_DIRS}) 6 | 7 | #set_property(GLOBAL PROPERTY USE_FOLDERS ON) 8 | set(CMAKE_CXX_STANDARD 11) 9 | include_directories(${TENSORFLOW_INCLUDE_ROOT}) 10 | 11 | 12 | add_executable(MocapNET2LiveWebcamDemo livedemo.cpp ) 13 | 14 | target_link_libraries(MocapNET2LiveWebcamDemo rt dl m ${OpenCV_LIBRARIES} ${OPENGL_LIBS} JointEstimator2D Tensorflow TensorflowFramework MocapNETLib2 ${NETWORK_CLIENT_LIBRARIES} ${PNG_Libs} ${JPG_Libs} ) 15 | set_target_properties(MocapNET2LiveWebcamDemo PROPERTIES DEBUG_POSTFIX "D") 16 | 17 | 18 | set_target_properties(MocapNET2LiveWebcamDemo PROPERTIES 19 | ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 20 | LIBRARY_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 21 | RUNTIME_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 22 | ) 23 | 24 | -------------------------------------------------------------------------------- /src/MocapNET2/MocapNETFromCSV/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | project( MocapNET2CSV ) 2 | cmake_minimum_required( VERSION 2.8.13 ) 3 | #cmake_minimum_required(VERSION 3.5) 4 | 5 | find_package(OpenCV REQUIRED) 6 | INCLUDE_DIRECTORIES(${OpenCV_INCLUDE_DIRS}) 7 | 8 | add_executable(MocapNET2CSV mocapnet2CSV.cpp ) 9 | target_link_libraries(MocapNET2CSV 
rt dl m ${OpenCV_LIBRARIES} Tensorflow TensorflowFramework MocapNETLib2 ) 10 | set_target_properties(MocapNET2CSV PROPERTIES DEBUG_POSTFIX "D") 11 | 12 | 13 | set_target_properties(MocapNET2CSV PROPERTIES 14 | ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 15 | LIBRARY_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 16 | RUNTIME_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 17 | ) 18 | 19 | -------------------------------------------------------------------------------- /src/MocapNET2/MocapNETLib2/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | project( MocapNETLib2 ) 2 | cmake_minimum_required(VERSION 3.5) 3 | 4 | set_property(GLOBAL PROPERTY USE_FOLDERS ON) 5 | 6 | #Unfortunately needed for tf_utils.cpp 7 | set(CMAKE_CXX_STANDARD 11) 8 | 9 | include_directories(${TENSORFLOW_INCLUDE_ROOT}) 10 | 11 | 12 | #add_executable(MocapNETLib mocapnet.cpp ${CMAKE_SOURCE_DIR}/src/Tensorflow/tf_utils.cpp) 13 | 14 | add_library(MocapNETLib2 SHARED 15 | ${BVH_SOURCE} 16 | ${OPENGL_SOURCE} 17 | ../MocapNETLib2/config.h 18 | ../MocapNETLib2/core/core.cpp 19 | ../MocapNETLib2/core/core.hpp 20 | ../MocapNETLib2/core/singleThreaded.cpp 21 | ../MocapNETLib2/core/singleThreaded.hpp 22 | ../MocapNETLib2/core/multiThreaded.cpp 23 | ../MocapNETLib2/core/multiThreaded.hpp 24 | ../MocapNETLib2/mocapnet2.cpp 25 | ../MocapNETLib2/tools.cpp 26 | ../MocapNETLib2/remoteExecution.cpp 27 | ../MocapNETLib2/applicationLogic/parseCommandlineOptions.cpp 28 | ../MocapNETLib2/applicationLogic/poseRecognition.cpp 29 | ../MocapNETLib2/applicationLogic/gestureRecognition.cpp 30 | ../MocapNETLib2/applicationLogic/artifactRecognition.cpp 31 | ../MocapNETLib2/qualityControl/qualityControl.cpp 32 | ../MocapNETLib2/postProcessing/outputFiltering.hpp 33 | ../MocapNETLib2/NSDM/generated_body.hpp 34 | ../MocapNETLib2/NSDM/generated_upperbody.hpp 35 | ../MocapNETLib2/NSDM/generated_lowerbody.hpp 36 | ../MocapNETLib2/IO/bvh.cpp 37 | ../MocapNETLib2/IO/commonSkeleton.hpp 38 | ../MocapNETLib2/IO/skeletonAbstraction.cpp 39 | ../MocapNETLib2/IO/skeletonSerializedToBVHTransform.hpp 40 | ../MocapNETLib2/IO/csvRead.cpp 41 | ../MocapNETLib2/IO/csvWrite.cpp 42 | ../MocapNETLib2/IO/jsonRead.cpp 43 | ../MocapNETLib2/IO/jsonMocapNETHelpers.cpp 44 | ../MocapNETLib2/IO/conversions.cpp 45 | ../MocapNETLib2/visualization/rgb.cpp 46 | ../MocapNETLib2/visualization/allInOne.cpp 47 | ../MocapNETLib2/visualization/widgets.cpp 48 | ../MocapNETLib2/visualization/visualization.cpp 49 | ../MocapNETLib2/visualization/drawSkeleton.cpp 50 | ../MocapNETLib2/visualization/opengl.cpp 51 | ../MocapNETLib2/visualization/camera_ready.cpp 52 | ../MocapNETLib2/visualization/map.cpp 53 | ../MocapNETLib2/visualization/template.cpp 54 | ../MocapNETLib2/solutionParts/body.cpp 55 | ../MocapNETLib2/solutionParts/upperBody.cpp 56 | ../MocapNETLib2/solutionParts/lowerBody.cpp 57 | ${CMAKE_SOURCE_DIR}/dependencies/InputParser/InputParser_C.cpp 58 | #Tensorflow stuff.. 
59 | ${TENSORFLOW_SOURCE_FILES} 60 | ) 61 | 62 | 63 | target_link_libraries(MocapNETLib2 rt dl m pthread ${OpenCV_LIBRARIES} ${OPENGL_LIBS} Tensorflow TensorflowFramework ${NETWORK_CLIENT_LIBRARIES} ${PNG_Libs} ${JPG_Libs} ) 64 | set_target_properties(MocapNETLib2 PROPERTIES DEBUG_POSTFIX "D") 65 | 66 | 67 | set_target_properties(MocapNETLib2 PROPERTIES 68 | ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 69 | LIBRARY_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 70 | RUNTIME_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 71 | ) 72 | 73 | -------------------------------------------------------------------------------- /src/MocapNET2/MocapNETLib2/IO/bvhJointList: -------------------------------------------------------------------------------- 1 | abdomen 2 | chest 3 | neck 4 | neck1 5 | head 6 | __jaw 7 | jaw 8 | special04 9 | oris02 10 | oris01 11 | oris06.l 12 | oris07.l 13 | oris06.r 14 | oris07.r 15 | tongue00 16 | tongue01 17 | tongue02 18 | tongue03 19 | __tongue04 20 | tongue04 21 | tongue07.l 22 | tongue07.r 23 | tongue06.l 24 | tongue06.r 25 | tongue05.l 26 | tongue05.r 27 | __levator02.l 28 | levator02.l 29 | levator03.l 30 | levator04.l 31 | levator05.l 32 | __levator02.r 33 | levator02.r 34 | levator03.r 35 | levator04.r 36 | levator05.r 37 | __special01 38 | special01 39 | oris04.l 40 | oris03.l 41 | oris04.r 42 | oris03.r 43 | oris06 44 | oris05 45 | __special03 46 | special03 47 | __levator06.l 48 | levator06.l 49 | __levator06.r 50 | levator06.r 51 | special06.l 52 | special05.l 53 | eye.l 54 | orbicularis03.l 55 | orbicularis04.l 56 | special06.r 57 | special05.r 58 | eye.r 59 | orbicularis03.r 60 | orbicularis04.r 61 | __temporalis01.l 62 | temporalis01.l 63 | oculi02.l 64 | oculi01.l 65 | __temporalis01.r 66 | temporalis01.r 67 | oculi02.r 68 | oculi01.r 69 | __temporalis02.l 70 | temporalis02.l 71 | risorius02.l 72 | risorius03.l 73 | __temporalis02.r 74 | temporalis02.r 75 | risorius02.r 76 | risorius03.r 77 | rCollar 78 | rShldr 79 | rForeArm 80 | rHand 81 | metacarpal1.r 82 | finger2-1.r 83 | finger2-2.r 84 | finger2-3.r 85 | metacarpal2.r 86 | finger3-1.r 87 | finger3-2.r 88 | finger3-3.r 89 | __metacarpal3.r 90 | metacarpal3.r 91 | finger4-1.r 92 | finger4-2.r 93 | finger4-3.r 94 | __metacarpal4.r 95 | metacarpal4.r 96 | finger5-1.r 97 | finger5-2.r 98 | finger5-3.r 99 | __rthumb 100 | rthumb 101 | finger1-2.r 102 | finger1-3.r 103 | lCollar 104 | lShldr 105 | lForeArm 106 | lHand 107 | metacarpal1.l 108 | finger2-1.l 109 | finger2-2.l 110 | finger2-3.l 111 | metacarpal2.l 112 | finger3-1.l 113 | finger3-2.l 114 | finger3-3.l 115 | __metacarpal3.l 116 | metacarpal3.l 117 | finger4-1.l 118 | finger4-2.l 119 | finger4-3.l 120 | __metacarpal4.l 121 | metacarpal4.l 122 | finger5-1.l 123 | finger5-2.l 124 | finger5-3.l 125 | __lthumb 126 | lthumb 127 | finger1-2.l 128 | finger1-3.l 129 | rButtock 130 | rThigh 131 | rShin 132 | rFoot 133 | toe1-1.R 134 | toe1-2.R 135 | toe2-1.R 136 | toe2-2.R 137 | toe2-3.R 138 | toe3-1.R 139 | toe3-2.R 140 | toe3-3.R 141 | toe4-1.R 142 | toe4-2.R 143 | toe4-3.R 144 | toe5-1.R 145 | toe5-2.R 146 | toe5-3.R 147 | lButtock 148 | lThigh 149 | lShin 150 | lFoot 151 | toe1-1.L 152 | toe1-2.L 153 | toe2-1.L 154 | toe2-2.L 155 | toe2-3.L 156 | toe3-1.L 157 | toe3-2.L 158 | toe3-3.L 159 | toe4-1.L 160 | toe4-2.L 161 | toe4-3.L 162 | toe5-1.L 163 | toe5-2.L 164 | toe5-3.L 165 | -------------------------------------------------------------------------------- /src/MocapNET2/MocapNETLib2/IO/conversions.hpp: 
-------------------------------------------------------------------------------- 1 | #pragma once 2 | /** @file conversions.hpp 3 | * @brief Unfortunately due to the complexity of the problem and the different libraries used there cannot be a single skeleton representation. 4 | * This module handles conversions between std::vector<float> which is used by the Tensorflow/NeuralNetwork layer, skeletonSerialized which is used by MocapNET and Skeletons2DDetected which is used by the 2D estimator 5 | * @author Ammar Qammaz (AmmarkoV) 6 | */ 7 | 8 | 9 | #include <vector> 10 | #include "../mocapnet2.hpp" 11 | #include "../../../JointEstimator2D/jointEstimator2D.hpp" 12 | 13 | 14 | int appendVectorToFile(const char * filename, std::vector<float> vec); 15 | 16 | int initializeAssociationsForSubsetOfSkeleton( 17 | unsigned int * targetIndexIsInitializedFlag, 18 | unsigned int * targetIndexes, 19 | unsigned int targetLength, 20 | const char * * targetLabels, 21 | struct skeletonSerialized * input 22 | ); 23 | 24 | 25 | 26 | std::vector<float> deriveMocapNET2InputUsingAssociations( 27 | struct MocapNET2 * mnet, 28 | struct skeletonSerialized * input, 29 | unsigned int * targetIndexIsInitializedFlag, 30 | unsigned int * targetIndexes, 31 | unsigned int targetLength, 32 | const char * * targetLabels, 33 | int verbose 34 | ); 35 | 36 | 37 | 38 | int convertSkeletons2DDetectedToSkeletonsSerialized( 39 | struct skeletonSerialized * output, 40 | struct Skeletons2DDetected * input, 41 | unsigned int frameNumber, 42 | unsigned int width, 43 | unsigned int height 44 | ); 45 | 46 | 47 | int convertMocapNET2OutputToSkeletonSerialized( 48 | struct MocapNET2 * mnet , 49 | struct skeletonSerialized * output , 50 | std::vector<std::vector<float> > mocapNET2DPointsResult, 51 | unsigned int frameNumber, 52 | unsigned int width, 53 | unsigned int height 54 | ); 55 | 56 | 57 | 58 | int affineSkeletonRotation(struct skeletonSerialized * input,float degrees); 59 | 60 | float rotationRequiredToMakeSkeletonCloserToTrainingDataset(struct skeletonSerialized * input); 61 | 62 | int makeSkeletonUpright(struct skeletonSerialized * input); 63 | 
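//Sketch of the intended call order for the orientation helpers declared above (the
//sequencing is an assumption based purely on these declarations; "skel" stands for
//an already populated skeletonSerialized):
//
//  float degrees = rotationRequiredToMakeSkeletonCloserToTrainingDataset(&skel);
//  affineSkeletonRotation(&skel,degrees); //rotate all 2D joints by `degrees`
//  //...or let the convenience call perform both steps:
//  makeSkeletonUpright(&skel);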
-------------------------------------------------------------------------------- /src/MocapNET2/MocapNETLib2/IO/csvRead.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | /** @file csvRead.hpp 3 | * @brief To simplify dataset parsing the very simple CSV ( comma separated value ) format is used by MocapNET2 4 | * This module facilitates opening, parsing and performing some trivial processing of CSV input files.. 5 | * @author Ammar Qammaz (AmmarkoV) 6 | */ 7 | 8 | #include <stdio.h> 9 | 10 | 11 | #include "commonSkeleton.hpp" 12 | #define MAX_CSV_HEADER_FIELDS 1024 13 | 14 | 15 | /** 16 | * @brief Each CSV file needs a context to be parsed, this contains the file descriptor, the current line number, the number of fields in the header as well as the header fields 17 | */ 18 | struct CSVFileContext 19 | { 20 | FILE * fp; 21 | unsigned int lineNumber; 22 | unsigned int numberOfHeaderFields; 23 | struct headerField field[MAX_CSV_HEADER_FIELDS]; 24 | }; 25 | 26 | /** 27 | * @brief We may choose to parse a CSV file that only consists of floats, for this specific type there is an optimized CSV context 28 | */ 29 | struct CSVFloatFileLine 30 | { 31 | unsigned int lineNumber; 32 | unsigned int numberOfFields; 33 | float field[MAX_CSV_HEADER_FIELDS]; 34 | }; 35 | 36 | 37 | /** 38 | * @brief Count the body lines ( i.e. the lines excluding the header ) of a CSV file 39 | * @param CSV context 40 | * @param filename of CSV file 41 | * @retval 0=Failure/No lines, Otherwise the number of body lines is returned 42 | */ 43 | unsigned int getBodyLinesOfCSVFIle(struct CSVFileContext * csv,const char * filename); 44 | 45 | 46 | 47 | /** 48 | * @brief This is the initial call to open a CSV file, you need to have a preallocated CSVFileContext structure as well as a filename. Also don't forget to call closeCSVFile after you are done parsing it 49 | * @param CSV context 50 | * @param filename of CSV file 51 | * @retval 0=Failure/1=Success 52 | */ 53 | int openCSVFile(struct CSVFileContext * csv,const char * filename); 54 | 55 | /** 56 | * @brief This call should be executed once after opening a CSV file to parse its header 57 | * @param CSV context 58 | * @retval 0=Failure/1=Success 59 | */ 60 | int parseCSVHeader(struct CSVFileContext * csv); 61 | 62 | 63 | /** 64 | * @brief This is the final call to close a CSV file and release its file descriptor after you are done parsing it 65 | * @param CSV context 66 | * @retval 0=Failure/1=Success 67 | */ 68 | int closeCSVFile(struct CSVFileContext * csv); 69 | 70 | 71 | int parseNextCSVFloatLine(struct CSVFileContext * csv,struct CSVFloatFileLine * csvLine); 72 | 73 | 74 | int parseNextCSVCOCOSkeleton(struct CSVFileContext * csv, struct skeletonSerialized * skel); 75 | 76 | 77 | 78 | int uniformlyScaleSerializedSkeleton(struct skeletonSerialized * skel,float factor); 79 | 80 | 81 | int scaleSerializedSkeletonX(struct skeletonSerialized * skel,float factorX); 82 | int scaleSerializedSkeletonY(struct skeletonSerialized * skel,float factorY); 83 | 84 | int scaleSerializedSkeletonFromCenter(struct skeletonSerialized * skel,float factorX,float factorY); 85 | 86 | 87 | int perturbSerializedSkeletonUsingGaussianNoise(struct skeletonSerialized * skel,float gaussianNoiseInNormalizedPixelsX,float gaussianNoiseInNormalizedPixelsY); 88 | 89 | 90 | 91 | std::vector<std::vector<float> > get2DPointsFromSkeleton(struct skeletonSerialized * skel); 92 | 
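//A minimal read loop for the float-only CSV path declared above (assumptions:
//"body.csv" is a hypothetical input and parseNextCSVFloatLine returns 0 at EOF):
//
//  struct CSVFileContext csv={0};
//  struct CSVFloatFileLine line={0};
//  if (openCSVFile(&csv,"body.csv"))
//     {
//       parseCSVHeader(&csv);                     //should run once, right after opening
//       while (parseNextCSVFloatLine(&csv,&line)) //one CSVFloatFileLine per CSV row
//            { fprintf(stderr,"row %u has %u fields\n",line.lineNumber,line.numberOfFields); }
//       closeCSVFile(&csv);                       //release the file descriptor
//     }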
"output.bvh" 25 | * @param Pointer to Skeleton that we want to save 26 | * @param Width of Image where the skeleton was retreieved from 27 | * @param Height of Image where the skeleton was retreieved from 28 | * @retval 1=Success,0=Failure 29 | */ 30 | int writeCSVBodyFromSkeleton(const char * filename,struct skeletonStructure * skeleton,unsigned int width,unsigned int height); 31 | 32 | 33 | 34 | 35 | 36 | /** 37 | * @brief Write an array of C-Strings 38 | * @param Path to output file i.e. "output.bvh" 39 | * @param Pointer to array of C-Strings i.e. MocapNETInputUncompressedArrayNames 40 | * @param Number of elements of labels array, MOCAPNET_UNCOMPRESSED_JOINT_PARTS*3 in the case of MocapNETInputUncompressedArrayNames 41 | * @retval 1=Success,0=Failure 42 | */ 43 | int writeCSVHeaderFromVector(const char * filename,const char ** labels,unsigned int numberOfLabels); 44 | 45 | 46 | 47 | /** 48 | * @brief Write the body of a skeleton in the form of std::vectors to a CSV. Each time this call is repeated an extra line is appended 49 | * @param Path to output file i.e. "output.bvh" 50 | * @param Pointer to Skeleton that we want to save 51 | * @param Width of Image where the skeleton was retreieved from 52 | * @param Height of Image where the skeleton was retreieved from 53 | * @retval 1=Success,0=Failure 54 | */ 55 | int writeCSVBodyFromVector(const char * filename,std::vector inputValues); 56 | 57 | 58 | int writeCSVHeaderFromLabelsAndVectorOfVectors(const char * filename,const char ** labels,unsigned int numberOfLabels,std::vector > inputFrames); 59 | 60 | 61 | 62 | int writeOpenPoseCSVHeaderFromSkeleton(const char * filename,struct skeletonStructure * skeleton,unsigned int width,unsigned int height); 63 | 64 | 65 | int writeOpenPoseCSVBodyFromSkeleton(const char * filename,struct skeletonStructure * skeleton,unsigned int respectTrainingAspectRatio,unsigned int width,unsigned int height); 66 | -------------------------------------------------------------------------------- /src/MocapNET2/MocapNETLib2/IO/jsonMocapNETHelpers.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | /** @file jsonMocapNETHelpers.hpp 3 | * @brief This file contains helpers to facilitate conversion between different skeleton formats. 
-------------------------------------------------------------------------------- /src/MocapNET2/MocapNETLib2/IO/jsonMocapNETHelpers.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | /** @file jsonMocapNETHelpers.hpp 3 | * @brief This file contains helpers to facilitate conversion between different skeleton formats. 4 | * @author Ammar Qammaz (AmmarkoV) 5 | */ 6 | #include "jsonRead.hpp" 7 | #include <vector> 8 | 9 | 10 | void addSkeletonJointFromTwoJoints( 11 | struct skeletonStructure * sk, 12 | std::vector<float> &result, 13 | int jointIDA, 14 | int jointIDB 15 | ); 16 | 17 | 18 | void addSkeletonJoint( 19 | struct skeletonStructure * sk, 20 | std::vector<float> &result, 21 | int jointID 22 | ); 23 | 24 | void addRightFinger( 25 | struct skeletonStructure * sk, 26 | std::vector<float> &result, 27 | int fingerJointA, 28 | int fingerJointB, 29 | int fingerJointC 30 | ); 31 | 32 | 33 | 34 | void addLeftFinger( 35 | struct skeletonStructure * sk, 36 | std::vector<float> &result, 37 | int fingerJointA, 38 | int fingerJointB, 39 | int fingerJointC 40 | ); 41 | 42 | std::vector<float> flattenskeletonCOCOToVector(struct skeletonStructure * sk,unsigned int width ,unsigned int height); 43 | 44 | 45 | 46 | int convertBVHFrameToSkeletonCOCO(struct skeletonStructure * sk,std::vector<float> bvhFrame,unsigned int width ,unsigned int height); -------------------------------------------------------------------------------- /src/MocapNET2/MocapNETLib2/IO/jsonRead.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | /** @file jsonRead.hpp 3 | * @brief This is the code needed to parse an OpenPose JSON file to our struct skeletonStructure. This JSON parser barely works for the specific JSON output and so should be treated with caution. 4 | * It is not conformant to the JSON spec nor will it work for an arbitrary JSON file..! 5 | * @author Ammar Qammaz (AmmarkoV) 6 | */ 7 | 8 | #include <stdio.h> 9 | 10 | #include "commonSkeleton.hpp" 11 | 12 | /** 13 | * @brief Parse a JSON file and retrieve a skeleton 14 | * @param Path to JSON file 15 | * @param Pointer to a struct skeletonStructure that will hold the information loaded 16 | * @param Threshold to set a joint to active ( 0.4-0.5 is a good value ) 17 | * @retval 1=Success/0=Failure 18 | */ 19 | int parseJsonCOCOSkeleton(const char * filename , struct skeletonStructure * skel,float acceptableThreshold,unsigned int frameID); 20 | 
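//Usage sketch (the .json path is hypothetical and follows the naming scheme of the
//OpenPose dumps produced by the scripts of this repository; 0.5 follows the 0.4-0.5
//threshold suggested above):
//
//  struct skeletonStructure skel={0};
//  if (parseJsonCOCOSkeleton("colorFrame_0_00001_keypoints.json",&skel,0.5,1))
//     { /*skel now holds the 2D joints that passed the visibility threshold*/ }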
37 | return 1; 38 | } 39 | 40 | char A; //<- character buffer for input1 41 | char B; //<- character buffer for input2 42 | unsigned int i=0; 43 | while (i<len1) 44 | { 45 | A = toupper(input1[i]); 46 | B = toupper(input2[i]); 47 | if (A!=B) { return 1; } 48 | ++i; 49 | } 50 | 51 | //all characters matched ignoring case 52 | return 0; 53 | } 54 | 55 | 56 | int areFeetMissing(struct skeletonSerialized * skel) 57 | { 58 | unsigned int activeLegJoints=0; 59 | 60 | for (unsigned int i=0; i<skel->skeletonBodyElements; i++) 61 | { 62 | //If the specific label exists in our skeletonSerialized 63 | if (skel->skeletonHeader[i].str!=0) 64 | { 65 | //The strcmp was flipped, thanks to yangjituan for noticing this https://github.com/FORTH-ModelBasedTracker/MocapNET/issues/57 66 | //This is a pretty inefficient function, at some point I need to restructure the skeletonSerialized structure to an enum like 67 | //https://github.com/FORTH-ModelBasedTracker/MocapNET/blob/master/src/MocapNET2/MocapNETLib2/mocapnet2.hpp#L839 68 | //it would just be 4 floating point checks instead of this loop going through labels etc. 69 | 70 | //If we are at the correct skeletonSerialized label for the particular r/l foot or knee, check its value; if it is non-zero 71 | //we can count it as present and increase the number of active leg joints.. 72 | if ( (strcasecmp_sk(skel->skeletonHeader[i].str,"visible_rfoot")==0) && (skel->skeletonBody[i].value) ) { ++activeLegJoints; } else 73 | if ( (strcasecmp_sk(skel->skeletonHeader[i].str,"visible_rknee")==0) && (skel->skeletonBody[i].value) ) { ++activeLegJoints; } else 74 | if ( (strcasecmp_sk(skel->skeletonHeader[i].str,"visible_lfoot")==0) && (skel->skeletonBody[i].value) ) { ++activeLegJoints; } else 75 | if ( (strcasecmp_sk(skel->skeletonHeader[i].str,"visible_lknee")==0) && (skel->skeletonBody[i].value) ) { ++activeLegJoints; } 76 | } 77 | } 78 | 79 | if (activeLegJoints<3) 80 | { 81 | fprintf(stderr,YELLOW "Feet are missing, only %u leg joints detected..\n" NORMAL,activeLegJoints); 82 | return 1; 83 | } 84 | return 0; 85 | } 86 | 87 | 88 | 89 | 90 | int isLeftHardExtended(std::vector<float> result) 91 | { 92 | //TODO: 93 | return 0; 94 | } 95 | 96 | int isRightHardExtended(std::vector<float> result) 97 | { 98 | //TODO: 99 | return 0; 100 | } 101 | 102 | 103 | int getPointOrientation(std::vector<float> result,float *x, float *y,float *r) 104 | { 105 | //TODO: 106 | return 0; 107 | } 108 | -------------------------------------------------------------------------------- /src/MocapNET2/MocapNETLib2/IO/skeletonAbstraction.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | 4 | #include "commonSkeleton.hpp" 5 | #include <vector> 6 | 7 | 8 | 9 | int areFeetMissing(struct skeletonSerialized * skel); 10 | 11 | 12 | int isLeftHardExtended(std::vector<float> result); 13 | int isRightHardExtended(std::vector<float> result); 14 | 15 | 16 | int getPointOrientation(std::vector<float> result,float *x, float *y,float *r); -------------------------------------------------------------------------------- /src/MocapNET2/MocapNETLib2/applicationLogic/artifactRecognition.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | /** @file artifactRecognition.hpp 3 | * @brief MocapNET Artifact recognition is implemented here.
Artifacts are places in 3D space that can trigger specific events for applications that need human computer interaction 4 | * @author Ammar Qammaz (AmmarkoV) 5 | */ 6 | 7 | #include 8 | #include 9 | 10 | #define NUMBER_OF_ARTIFACTS 100 11 | 12 | struct artifactData 13 | { 14 | float x1; 15 | float y1; 16 | float z1; 17 | float x2; 18 | float y2; 19 | float z2; 20 | char is3D; 21 | char activatesOnOrientation; 22 | char activatesOnLocation; 23 | char activatesOnGestures; 24 | 25 | char hasAction; 26 | char active; 27 | char activatesOnLook; 28 | char activatesOnPosition; 29 | char activateOnGesture; 30 | char label[512]; 31 | char actionToExecuteOnActivation[512]; 32 | char actionToExecuteOnDeactivation[512]; 33 | }; 34 | 35 | 36 | 37 | /** 38 | * @brief recorded gestures that can be used 39 | */ 40 | struct sceneArtifacts 41 | { 42 | int numberOfArtifacts; 43 | struct artifactData artifact[NUMBER_OF_ARTIFACTS]; 44 | }; 45 | 46 | 47 | //int initializeArtifacts(struct sceneArtifacts * scene); 48 | 49 | int initializeArtifactsFromFile(struct sceneArtifacts * scene,const char * filename); 50 | 51 | 52 | int check3DArtifactCollision(struct artifactData * artifact,float x1, float y1, float z1,float x2, float y2, float z2); 53 | 54 | int checkArtifactCollision(struct artifactData * artifact,float x, float y, float r); 55 | 56 | int checkArtifactDirection(struct artifactData * artifact,float x1, float y1, float x2,float y2); 57 | 58 | int doIntersect(float p1X,float p1Y, float q1X, float q1Y,float p2X,float p2Y,float q2X, float q2Y) ; -------------------------------------------------------------------------------- /src/MocapNET2/MocapNETLib2/applicationLogic/gestureRecognition.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | /** @file gestureRecognition.hpp 3 | * @brief MocapNET Gesture recognition is implemented here 4 | * @author Ammar Qammaz (AmmarkoV) 5 | */ 6 | 7 | #include 8 | #include 9 | 10 | 11 | /** 12 | * @brief This is an array of names for the hardcoded gestures included in the dataset/gestures/ subdirectory 13 | * Please remember to update hardcodedPoseNumber, its value should be the number of arguments in this array 14 | * If you have arguments 0 - 10 you should set hardcodedGestureNumber to 11 15 | */ 16 | static const char * hardcodedGestureName[] = 17 | { 18 | "help.bvh", //0 19 | "push.bvh", //1 20 | "lefthandcircle.bvh", //2 21 | "righthandcircle.bvh", //3 22 | "waveleft.bvh", //4 23 | "doubleclap.bvh", //5 24 | "waveright.bvh", //6 25 | "leftkick.bvh", //8 26 | "rightkick.bvh", //9 27 | "tpose.bvh", //10 28 | "handsup.bvh", //7 29 | "", // 30 | "" //13 31 | //hardcodedGestureNumber should be kept in sync 32 | }; 33 | 34 | 35 | /** 36 | * @brief This needs to be kept in sync with hardcodedGestureName */ 37 | const unsigned int hardcodedGestureNumber=10; 38 | 39 | 40 | /** 41 | * @brief Gesture tuning controls for the brave.. 
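 * (Unrelated to the tuning knobs below: a hypothetical compile-time guard placed after the two declarations
 * above could catch the array/count drift their comments warn about, e.g.:
 * static_assert( sizeof(hardcodedGestureName)/sizeof(hardcodedGestureName[0]) >= hardcodedGestureNumber, "hardcodedGestureNumber out of sync with hardcodedGestureName" ); )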
42 | **/ 43 | const unsigned int GESTURE_ACTIVATION_COOLDOWN_IN_FRAMES=50; 44 | const float GESTURE_COMPLETION_PERCENT=80.0; 45 | const float GESTURE_ANGLE_SENSITIVITY=25.0; 46 | 47 | 48 | /** 49 | * @brief history of poses 50 | */ 51 | struct PoseHistory 52 | { 53 | unsigned int maxPoseHistory; 54 | std::vector<std::vector<float> > history; 55 | }; 56 | 57 | 58 | /** 59 | * @brief a recorded gesture that can be used 60 | */ 61 | struct RecordedGesture 62 | { 63 | unsigned int lastActivation; 64 | float percentageComplete; 65 | char loaded; 66 | char label[128]; 67 | std::vector<std::vector<float> > gesture; 68 | std::vector<float> usedJoints; 69 | void * gestureCallback; 70 | }; 71 | 72 | 73 | /** 74 | * @brief gesture detection context, to facilitate gestures 75 | */ 76 | struct GestureDatabase 77 | { 78 | unsigned int gestureChecksPerformed; 79 | unsigned int numberOfLoadedGestures; 80 | struct RecordedGesture gesture[hardcodedGestureNumber]; 81 | unsigned long previousGestureCheckTimestamp; 82 | }; 83 | 84 | 85 | int loadGestures(struct GestureDatabase * gestureDB); 86 | 87 | int addToMotionHistory(struct PoseHistory * poseHistoryStorage,std::vector<float> pose); 88 | 89 | 90 | int dumpMotionHistory(const char * filename,struct PoseHistory * poseHistoryStorage); 91 | 92 | 93 | int compareHistoryWithKnownGestures(struct GestureDatabase * gestureDB,struct PoseHistory * poseHistoryStorage,float percentageForDetection,float threshold); -------------------------------------------------------------------------------- /src/MocapNET2/MocapNETLib2/applicationLogic/parseCommandlineOptions.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | /** @file parseCommandlineOptions.hpp 3 | * @brief MocapNET applications handle a large number of parameters. In order to simplify development and avoid manually syncing all accepted parameters across all end-user applications, this is the 4 | * central module that parses commandline parameters and populates the MocapNET2Options structure that holds them 5 | * @author Ammar Qammaz (AmmarkoV) 6 | */ 7 | 8 | 9 | /** 10 | * @brief MocapNET has many options depending on datasets etc; instead of storing them in each application, 11 | * this is a central structure that makes them easier to parse.
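 * A minimal initialization sketch (a hedged example; the exact flags consumed live in the matching .cpp):
 * @code
 * struct MocapNET2Options options;
 * defaultMocapNET2Options(&options);                      //populate sane defaults first..
 * loadOptionsFromCommandlineOptions(&options,argc,argv);  //..then let the commandline override them
 * @endcode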
12 | */ 13 | struct MocapNET2Options 14 | { 15 | const char * webcamSource; 16 | const char * path; 17 | char * datasetPath; 18 | unsigned int inputIsSingleImage; 19 | 20 | unsigned int isJSONFile; 21 | unsigned int isCSVFile; 22 | unsigned int jointEstimatorUsed; 23 | 24 | unsigned int doUpperBody,doLowerBody,doFace,doHands; 25 | unsigned int forceFront,forceLeft,forceRight,forceBack; 26 | unsigned int useInverseKinematics; 27 | unsigned int visualizationType; 28 | 29 | unsigned int skipNeuralNetworkIfItIsNotNeeded; 30 | unsigned int maximumNeuralNetworkSkipFrames; 31 | 32 | float inputFramerate; 33 | 34 | float learningRate; 35 | float spring; 36 | unsigned int iterations; 37 | unsigned int epochs; 38 | 39 | float addNormalizedPixelGaussianNoiseX,addNormalizedPixelGaussianNoiseY; 40 | 41 | char * outputPath; 42 | 43 | unsigned int visualize,useOpenGLVisualization,save3DVisualization,save2DVisualization,saveVisualization,saveCSV3DFile,constrainPositionRotation; 44 | 45 | char CPUName[512]; 46 | char GPUName[512]; 47 | char message[512]; 48 | 49 | unsigned int delay; 50 | unsigned int prependTPose; 51 | unsigned int serialLength; 52 | const char * label; 53 | unsigned int bvhCenter; 54 | 55 | float quality; 56 | unsigned int mocapNETMode; 57 | int doGestureDetection; 58 | int doOutputFiltering; 59 | int doMultiThreadedIK; 60 | 61 | unsigned int useCPUOnlyForMocapNET; 62 | unsigned int useCPUOnlyFor2DEstimator; 63 | unsigned int brokenFrames; 64 | unsigned int numberOfMissingJoints; 65 | 66 | unsigned int visWidth,visHeight; 67 | unsigned int width,height; 68 | 69 | long loopStartTime,loopEndTime; 70 | float totalLoopFPS; 71 | float fpsAcquisition,fpsMocapNET,fps2DEstimator,fpsIK; 72 | unsigned int frameLimit; 73 | unsigned int frameSkip; 74 | 75 | float scale,scaleX,scaleY,fScaleX,fScaleY; 76 | 77 | 78 | float skeletonRotation; 79 | 80 | int dontBend; 81 | char forceOutputPositionRotation; 82 | float outputPosRot[6]; 83 | 84 | 85 | char mapFilePath[512]; 86 | 87 | int hasInit; 88 | }; 89 | 90 | 91 | 92 | void defaultMocapNET2Options(struct MocapNET2Options * options); 93 | 94 | int loadOptionsFromCommandlineOptions(struct MocapNET2Options * options,int argc, char *argv[]); 95 | 96 | int loadOptionsAfterBVHLoadFromCommandlineOptions(struct MocapNET2Options * options,int argc, char *argv[]); 97 | 98 | 99 | int takeCareOfScalingInputAndAddingNoiseAccordingToOptions(struct MocapNET2Options * options,struct skeletonSerialized * skeleton); 100 | -------------------------------------------------------------------------------- /src/MocapNET2/MocapNETLib2/applicationLogic/poseRecognition.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | /** @file poseRecognition.hpp 3 | * @brief MocapNET Pose recognition is implemented here 4 | * @author Ammar Qammaz (AmmarkoV) 5 | */ 6 | 7 | #include 8 | #include 9 | 10 | 11 | /** 12 | * @brief This is an array of names for the hardcoded poses included in the dataset/poses/ subdirectory 13 | * Please remember to update hardcodedPoseNumber, its value should be the number of arguments in this array 14 | * If you have arguments 0 - 10 you should set hardcodedPoseNumber to 11 15 | */ 16 | static const char * hardcodedPoseName[] = 17 | { 18 | "neutral.bvh", 19 | "tpose.bvh", //0 20 | "x.bvh", 21 | "handsup.bvh", 22 | "leftwave.bvh", 23 | "rightright.bvh", 24 | "leftleft.bvh", 25 | "push.bvh", 26 | "rightwave.bvh", 27 | "rightkick.bvh", 28 | "leftkick.bvh", 29 | "" 30 | //hardcodedPoseName should be kept in sync 
31 | }; 32 | 33 | 34 | /** 35 | * @brief This needs to be kept in sync with hardcodedPoseName */ 36 | const unsigned int hardcodedPoseNumber=9; 37 | 38 | 39 | /** 40 | * @brief a recorded pose that can be used 41 | */ 42 | struct RecordedPose 43 | { 44 | unsigned int lastActivation; 45 | float percentageComplete; 46 | char loaded; 47 | char label[128]; 48 | std::vector<float> pose; 49 | std::vector<float> usedJoints; 50 | void * poseCallback; 51 | }; 52 | 53 | 54 | /** 55 | * @brief pose detection context, to facilitate pose recognition 56 | */ 57 | struct PoseDatabase 58 | { 59 | unsigned int poseChecksPerformed; 60 | unsigned int numberOfLoadedPoses; 61 | struct RecordedPose pose[hardcodedPoseNumber]; 62 | unsigned long previousPoseCheckTimestamp; 63 | }; 64 | 65 | 66 | int loadPoses(struct PoseDatabase * poseDB); 67 | 68 | int areTwoBVHFramesCloseEnough(std::vector<float> vecA,std::vector<float> vecB,std::vector<float> active,float threshold); 69 | 70 | 71 | int isThisPoseFamiliar( 72 | struct PoseDatabase * poseDB, 73 | std::vector<float> currentPose, 74 | float percentageForDetection, 75 | float threshold 76 | ); -------------------------------------------------------------------------------- /src/MocapNET2/MocapNETLib2/config.h: -------------------------------------------------------------------------------- 1 | #ifndef MOCAPNET_CONFIGURATION_H_INCLUDED 2 | #define MOCAPNET_CONFIGURATION_H_INCLUDED 3 | 4 | #ifdef __cplusplus 5 | extern "C" 6 | { 7 | #endif 8 | 9 | 10 | //Neural network orientations centered around 0 11 | #define NN_ORIENTATIONS_TRAINED_AROUND_ZERO_AND_REQUIRE_TRICK 1 12 | 13 | //Also swap bvh rotations before IK step 14 | #define APPLY_BVH_FIX_TO_IK_INPUT 0 15 | 16 | //Test swapped 17 | #define SWAP_LEFT_RIGHT_ENSEMBLES 0 18 | 19 | //Limits synced to scripts/createRandomizedDataset.sh 20 | const float FRONT_MIN_ORIENTATION = -45.0; 21 | const float FRONT_MAX_ORIENTATION = 45.0; 22 | //-------------------------------- 23 | const float BACK_MIN_ORIENTATION = 135.0; 24 | const float BACK_MAX_ORIENTATION = 225.0; 25 | const float BACK_ALT_MIN_ORIENTATION = -225; 26 | const float BACK_ALT_MAX_ORIENTATION = -135; 27 | //-------------------------------- 28 | const float LEFT_MIN_ORIENTATION = -135.0; 29 | const float LEFT_MAX_ORIENTATION = -45.0; 30 | //-------------------------------- 31 | const float RIGHT_MIN_ORIENTATION = 45.0; 32 | const float RIGHT_MAX_ORIENTATION = 135.0; 33 | //-------------------------------- 34 | 35 | 36 | #ifdef __cplusplus 37 | } 38 | #endif 39 | 40 | #endif // MOCAPNET_CONFIGURATION_H_INCLUDED 41 | -------------------------------------------------------------------------------- /src/MocapNET2/MocapNETLib2/core/core.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | 4 | #include "../../MocapNETLib2/mocapnet2.hpp" 5 | 6 | 7 | /** 8 | * @brief Given an orientation angle this function can decide which orientation class it belongs to, corresponding to the MOCAPNET_Orientation enum 9 | * @param Angle in degrees 10 | * @retval MOCAPNET_Orientation enumeration value 11 | */ 12 | int getMocapNETOrientationFromAngle(float direction); 13 | 14 | int getMocapNETOrientationFromOutputVector(std::vector<float> direction); 15 | 16 | 17 | int localOrientationExtraction(struct MocapNET2SolutionPart * mnet,std::vector<float> mnetInput); 18 | 19 | 20 | 21 | /** 22 | * @brief An internal function that handles local execution of a part of the final solution 23 | * @param Pointer to a valid and populated MocapNET2SolutionPart instance 24 | * @param The input to this
MocapNET solution part 25 | * @param Orientation extracted from the localOrientationExtraction call 26 | * @param Some ensembles require an orientation change 27 | * @retval 1=Success,0=Failure 28 | */ 29 | std::vector localExecution( 30 | struct MocapNET2SolutionPart * mnet, 31 | std::vector mnetInput, 32 | int orientation, 33 | int targetHasOrientationTrick 34 | ); 35 | -------------------------------------------------------------------------------- /src/MocapNET2/MocapNETLib2/core/multiThreaded.cpp: -------------------------------------------------------------------------------- 1 | #include "multiThreaded.hpp" 2 | #include "singleThreaded.hpp" 3 | #include "core.hpp" 4 | 5 | void * mocapNETWorkerThread(void * arg) 6 | { 7 | //We are a thread so lets retrieve our variables.. 8 | struct threadContext * ptr = (struct threadContext *) arg; 9 | fprintf(stderr,"MNET Thread-%u: Started..!\n",ptr->threadID); 10 | struct mocapNETContext * contextArray = (struct mocapNETContext *) ptr->argumentToPass; 11 | struct mocapNETContext * ctx = &contextArray[ptr->threadID]; 12 | 13 | struct MocapNET2 * mnet = ctx->mnet; 14 | struct skeletonSerialized * input = ctx->input; 15 | int doLowerbody = ctx->doLowerbody; 16 | int doHands = ctx->doHands; 17 | int doFace = ctx->doFace; 18 | int doGestureDetection = ctx->doGestureDetection; 19 | unsigned int useInverseKinematics = ctx->useInverseKinematics; 20 | int doOutputFiltering = ctx->doOutputFiltering; 21 | 22 | std::vector result; 23 | 24 | threadpoolWorkerInitialWait(ptr); 25 | 26 | while (threadpoolWorkerLoopCondition(ptr)) 27 | { 28 | switch (ptr->threadID) 29 | { 30 | case 0: 31 | result = mocapnetUpperBody_evaluateInput(mnet,input); 32 | break; 33 | //---------------------------------------------------------------- 34 | case 1: 35 | if ( (doLowerbody) && (mnet->lowerBody.loadedModels>0) ) 36 | { 37 | result = mocapnetLowerBody_evaluateInput(mnet,input); 38 | } 39 | break; 40 | //---------------------------------------------------------------- 41 | case 2: 42 | if ( (doHands) && (mnet->leftHand.loadedModels>0) ) 43 | { 44 | //TODO add hands 45 | //result = mocapnetLeftHand_evaluateInput(mnet,input); 46 | } 47 | break; 48 | //---------------------------------------------------------------- 49 | case 3: 50 | if ( (doHands) && (mnet->rightHand.loadedModels>0) ) 51 | { 52 | //TODO add hands 53 | //result = mocapnetRightHand_evaluateInput(mnet,input); 54 | } 55 | break; 56 | }; 57 | 58 | //-------------------------------- 59 | threadpoolWorkerLoopEnd(ptr); 60 | } 61 | 62 | return 0; 63 | } 64 | 65 | 66 | 67 | std::vector multiThreadedMocapNET( 68 | struct MocapNET2 * mnet, 69 | struct skeletonSerialized * input, 70 | int doLowerbody, 71 | int doHands, 72 | int doFace, 73 | int doGestureDetection, 74 | unsigned int useInverseKinematics, 75 | int doOutputFiltering 76 | ) 77 | { 78 | #if USE_BVH 79 | if (mnet->options->doMultiThreadedIK) 80 | { 81 | struct mocapNETContext ctx[4]; 82 | 83 | for (int i=0; i<4; i++) 84 | { 85 | ctx[i].mnet=mnet; 86 | ctx[i].input=input; 87 | ctx[i].doLowerbody=doLowerbody; 88 | ctx[i].doHands=doHands; 89 | ctx[i].doFace=doFace; 90 | ctx[i].doGestureDetection=doGestureDetection; 91 | ctx[i].useInverseKinematics=useInverseKinematics; 92 | ctx[i].doOutputFiltering=doOutputFiltering; 93 | } 94 | 95 | int okToRunMTCode=0; 96 | if (!mnet->threadPool.initialized) 97 | { 98 | if ( 99 | threadpoolCreate( 100 | &mnet->threadPool, 101 | 4, 102 | (void*) mocapNETWorkerThread, 103 | ctx 104 | ) 105 | ) 106 | { 107 | fprintf(stderr,"MNET2: 
Survived threadpool creation \n"); 108 | nanoSleepT(1000*1000); 109 | okToRunMTCode=1; 110 | } 111 | } else 112 | { 113 | okToRunMTCode=1; 114 | } 115 | 116 | 117 | if (okToRunMTCode) 118 | { 119 | threadpoolMainThreadPrepareWorkForWorkers(&mnet->threadPool); 120 | mocapnetUpperBody_getOrientation(mnet,input); 121 | threadpoolMainThreadWaitForWorkersToFinish(&mnet->threadPool); 122 | 123 | std::vector result = gatherResults( 124 | mnet, 125 | mnet->body.result, 126 | mnet->upperBody.result, 127 | mnet->lowerBody.result, 128 | mnet->leftHand.result, 129 | mnet->rightHand.result, 130 | mnet->face.result 131 | ); 132 | return result; 133 | } 134 | } 135 | #endif 136 | 137 | //If we have reached this point it means that the multi-threaded code has failed..! 138 | //Fallback on single-threaded code 139 | return singleThreadedMocapNET( 140 | mnet, 141 | input, 142 | doLowerbody, 143 | doHands, 144 | doFace, 145 | doGestureDetection, 146 | useInverseKinematics, 147 | doOutputFiltering 148 | ); 149 | } 150 | -------------------------------------------------------------------------------- /src/MocapNET2/MocapNETLib2/core/multiThreaded.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "../../../Tensorflow/tf_utils.hpp" 4 | 5 | #include 6 | 7 | //MOCAPNET2 ------------------------------------ 8 | #include "../../MocapNETLib2/mocapnet2.hpp" 9 | #include "../../MocapNETLib2/tools.hpp" 10 | #include "../../MocapNETLib2/IO/bvh.hpp" 11 | #include "../../MocapNETLib2/IO/jsonRead.hpp" 12 | #include "../../MocapNETLib2/IO/conversions.hpp" 13 | 14 | #include "../../MocapNETLib2/remoteExecution.hpp" 15 | //---------------------------------------------- 16 | #include "../../MocapNETLib2/solutionParts/body.hpp" 17 | #include "../../MocapNETLib2/solutionParts/upperBody.hpp" 18 | #include "../../MocapNETLib2/solutionParts/lowerBody.hpp" 19 | 20 | #if USE_BVH 21 | #include "../../../../dependencies/RGBDAcquisition/tools/PThreadWorkerPool/pthreadWorkerPool.h" 22 | #endif 23 | 24 | 25 | struct mocapNETContext 26 | { 27 | struct MocapNET2 * mnet; 28 | struct skeletonSerialized * input; 29 | int doLowerbody; 30 | int doHands; 31 | int doFace; 32 | int doGestureDetection; 33 | unsigned int useInverseKinematics; 34 | int doOutputFiltering; 35 | int forceFront; 36 | }; 37 | 38 | std::vector multiThreadedMocapNET( 39 | struct MocapNET2 * mnet, 40 | struct skeletonSerialized * input, 41 | int doLowerbody, 42 | int doHands, 43 | int doFace, 44 | int doGestureDetection, 45 | unsigned int useInverseKinematics, 46 | int doOutputFiltering 47 | ); 48 | -------------------------------------------------------------------------------- /src/MocapNET2/MocapNETLib2/core/singleThreaded.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "../../../Tensorflow/tf_utils.hpp" 4 | 5 | #include 6 | 7 | //MOCAPNET2 ------------------------------------ 8 | #include "../../MocapNETLib2/mocapnet2.hpp" 9 | #include "../../MocapNETLib2/tools.hpp" 10 | #include "../../MocapNETLib2/IO/bvh.hpp" 11 | #include "../../MocapNETLib2/IO/jsonRead.hpp" 12 | #include "../../MocapNETLib2/IO/conversions.hpp" 13 | 14 | #include "../../MocapNETLib2/remoteExecution.hpp" 15 | //---------------------------------------------- 16 | #include "../../MocapNETLib2/solutionParts/body.hpp" 17 | #include "../../MocapNETLib2/solutionParts/upperBody.hpp" 18 | #include "../../MocapNETLib2/solutionParts/lowerBody.hpp" 19 | 20 | std::vector 
gatherResults( 21 | struct MocapNET2 * mnet, 22 | std::vector resultBody, 23 | std::vector resultUpperBody, 24 | std::vector resultLowerBody, 25 | std::vector resultLeftHand, 26 | std::vector resultRightHand, 27 | std::vector resultFace 28 | ); 29 | 30 | std::vector singleThreadedMocapNET( 31 | struct MocapNET2 * mnet, 32 | struct skeletonSerialized * input, 33 | int doLowerbody, 34 | int doHands, 35 | int doFace, 36 | int doGestureDetection, 37 | unsigned int useInverseKinematics, 38 | int doOutputFiltering 39 | ); 40 | -------------------------------------------------------------------------------- /src/MocapNET2/MocapNETLib2/postProcessing/outputFiltering.hpp: -------------------------------------------------------------------------------- 1 | #ifndef _BUTTERWORTH_FILTER_H_INCLUDED 2 | #define _BUTTERWORTH_FILTER_H_INCLUDED 3 | 4 | /** @file outputFiltering.hpp 5 | * @brief From Wikipedia : The Butterworth filter is a type of signal processing filter designed to have a frequency response as flat as possible in the passband. 6 | * It is also referred to as a maximally flat magnitude filter. 7 | * It was first described in 1930 by the British engineer and physicist Stephen Butterworth in his paper entitled "On the Theory of Filter Amplifiers" 8 | * https://en.wikipedia.org/wiki/Butterworth_filter 9 | * 10 | * The frequency response of the Butterworth filter is maximally flat (i.e. has no ripples) in the passband and rolls off towards zero in the stopband. 11 | * That's why it is used as a post-processing step if you don't disable it from the GUI. It should be noted that this is a relatively new addition to the codebase ( 30 -10-2019 ) 12 | * the original BMVC 2019 paper ( https://www.youtube.com/watch?v=fH5e-KMBvM0 ) did not have any post processing done..! 13 | * 14 | * However some sort of filtering had to be added after numerous comments regarding signal noise. And here it is, in a header-only vanilla C compatible version. 
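 *
 * For reference, the filter() call below implements the standard second-order (biquad) low-pass difference equation obtained through the bilinear transform, with ita = 1/tan(pi*fcutoff/fsampling) and q = sqrt(2):
 * out[n] = b0*in[n] + b1*in[n-1] + b2*in[n-2] + a1*out[n-1] + a2*out[n-2]
 * where b0 = 1/(1 + q*ita + ita^2), b1 = 2*b0, b2 = b0, a1 = 2*(ita^2 - 1)*b0 and a2 = -(1 - q*ita + ita^2)*b0, which are exactly the coefficients computed in initButterWorth().
 *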
15 | * Thanks to Stelios Piperakis ( https://github.com/mrsp ) for giving me the initial code implementation that this filter is based on 16 | * 17 | * @author Ammar Qammaz (AmmarkoV) 18 | */ 19 | 20 | 21 | #include 22 | 23 | /** 24 | * @brief The complete state of a Butterworth filter instance 25 | */ 26 | struct ButterWorth 27 | { 28 | //https://en.wikipedia.org/wiki/Butterworth_filter 29 | //https://github.com/mrsp/serow/blob/master/src/butterworthLPF.cpp 30 | float unfilteredValue; 31 | float filteredValue; 32 | //----------- 33 | char initialized; 34 | //----------- 35 | float a; 36 | float fx; 37 | float fs; 38 | float a1; 39 | float a2; 40 | float b0; 41 | float b1; 42 | float b2; 43 | float ff; 44 | float ita; 45 | float q; 46 | int i; 47 | float y_p; 48 | float y_pp; 49 | float x_p; 50 | float x_pp; 51 | }; 52 | 53 | /** 54 | * @brief Initialize a "sensor" using fsampling/fcutoff values 55 | * @param Butterworth filter instance 56 | * @param frequency of sampling 57 | * @param frequency of cutoff 58 | */ 59 | static void initButterWorth(struct ButterWorth * sensor,float fsampling,float fcutoff) 60 | { 61 | sensor->fs = fsampling; 62 | sensor->fx = fcutoff; 63 | 64 | sensor->i = 0; 65 | sensor->ff = (float) sensor->fx/sensor->fs; 66 | sensor->ita = (float) 1.0/tan((float) 3.14159265359 * sensor->ff); 67 | sensor->q = 1.41421356237; 68 | sensor->b0 = (float) 1.0 / (1.0 + sensor->q*sensor->ita + sensor->ita*sensor->ita); 69 | sensor->b1 = 2*sensor->b0; 70 | sensor->b2 = sensor->b0; 71 | sensor->a1 = 2.0 * (sensor->ita*sensor->ita - 1.0) * sensor->b0; 72 | sensor->a2 = -(1.0 - sensor->q*sensor->ita + sensor->ita*sensor->ita) * sensor->b0; 73 | sensor->a =(float) (2.0*3.14159265359*sensor->ff)/(2.0*3.14159265359*sensor->ff+1.0); 74 | } 75 | 76 | 77 | /** 78 | * @brief Filter a new incoming value and get the result 79 | * @param Butterworth filter instance 80 | * @param Unfiltered input value 81 | * @retval Filtered output value 82 | */ 83 | static float filter(struct ButterWorth * sensor,float unfilteredValue) 84 | { 85 | sensor->unfilteredValue = unfilteredValue; 86 | 87 | float y = sensor->unfilteredValue; 88 | float out; 89 | if ((sensor->i>2)&&(1)) 90 | { 91 | out = sensor->b0 * y + sensor->b1 * sensor->y_p + sensor->b2* sensor->y_pp + sensor->a1 * sensor->x_p + sensor->a2 * sensor->x_pp; 92 | } 93 | else 94 | { 95 | out = sensor->x_p + sensor->a * (y - sensor->x_p); 96 | sensor->i=sensor->i+1; 97 | } 98 | 99 | sensor->y_pp = sensor->y_p; 100 | sensor->y_p = y; 101 | sensor->x_pp = sensor->x_p; 102 | sensor->x_p = out; 103 | 104 | sensor->filteredValue = out; 105 | 106 | if (!sensor->initialized) 107 | { 108 | //Do a warmup.. 
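 //(the filter re-feeds the same sample to itself a few times below, so that the input/output
 // history converges to the incoming value instead of the zero-initialized state)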
109 | //Make sure we dont start from 0 110 | 111 | sensor->initialized=1; 112 | for (unsigned int i=0; i<5; i++) 113 | { 114 | filter(sensor,unfilteredValue); 115 | } 116 | 117 | return filter(sensor,unfilteredValue); 118 | } 119 | 120 | return out; 121 | } 122 | 123 | #endif -------------------------------------------------------------------------------- /src/MocapNET2/MocapNETLib2/qualityControl/qualityControl.cpp: -------------------------------------------------------------------------------- 1 | #include "qualityControl.hpp" 2 | #include 3 | 4 | //./GroundTruthDumper --from dataset/MotionCapture/01/01_02.bvh --selectJoints 1 13 hip eye.r eye.l abdomen chest neck head rshoulder relbow rhand lshoulder lelbow lhand --selectJoints 0 13 hip eye.r eye.l abdomen chest neck head rshoulder relbow rhand lshoulder lelbow lhand --selectJoints 1 14 hip abdomen chest neck rhip rknee rfoot lhip lknee lfoot toe1-2.r toe5-3.r toe1-2.l toe5-3.l --hide2DLocationOfJoints 0 6 abdomen chest toe1-2.r toe5-3.r toe1-2.l toe5-3.l --selectJoints 1 16 rhand finger5-1.r finger5-2.r finger5-3.r finger4-1.r finger4-2.r finger4-3.r finger3-1.r finger3-2.r finger3-3.r finger2-1.r finger2-2.r finger2-3.r rthumb finger1-2.r finger1-3.r --selectJoints 1 16 lhand finger5-1.l finger5-2.l finger5-3.l finger4-1.l finger4-2.l finger4-3.l finger3-1.l finger3-2.l finger3-3.l finger2-1.l finger2-2.l finger2-3.l lthumb finger1-2.l finger1-3.l --occlusions --setPositionRotation 0 0 2000 0 0 0 --csv ./ quality.csv 2d+bvh 5 | 6 | 7 | //./GroundTruthDumper --from dataset/MotionCapture/02/02_05.bvh --selectJoints 1 13 hip eye.r eye.l abdomen chest neck head rshoulder relbow rhand lshoulder lelbow lhand --selectJoints 0 13 hip eye.r eye.l abdomen chest neck head rshoulder relbow rhand lshoulder lelbow lhand --selectJoints 1 14 hip abdomen chest neck rhip rknee rfoot lhip lknee lfoot toe1-2.r toe5-3.r toe1-2.l toe5-3.l --hide2DLocationOfJoints 0 6 abdomen chest toe1-2.r toe5-3.r toe1-2.l toe5-3.l --selectJoints 1 16 rhand finger5-1.r finger5-2.r finger5-3.r finger4-1.r finger4-2.r finger4-3.r finger3-1.r finger3-2.r finger3-3.r finger2-1.r finger2-2.r finger2-3.r rthumb finger1-2.r finger1-3.r --selectJoints 1 16 lhand finger5-1.l finger5-2.l finger5-3.l finger4-1.l finger4-2.l finger4-3.l finger3-1.l finger3-2.l finger3-3.l finger2-1.l finger2-2.l finger2-3.l lthumb finger1-2.l finger1-3.l --occlusions --offsetPositionRotation 0 800 1500 0 0 0 --csv ./ quality.csv 2d+bvh 8 | -------------------------------------------------------------------------------- /src/MocapNET2/MocapNETLib2/qualityControl/qualityControl.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | -------------------------------------------------------------------------------- /src/MocapNET2/MocapNETLib2/remoteExecution.cpp: -------------------------------------------------------------------------------- 1 | #include "remoteExecution.hpp" 2 | #include "../../../dependencies/InputParser/InputParser_C.h" 3 | 4 | 5 | #if USE_NETWORKING 6 | #include "../../../dependencies/AmmarServer/src/AmmClient/AmmClient.h" 7 | #endif 8 | 9 | 10 | #include 11 | #include 12 | #include 13 | 14 | 15 | #define SEND_RECV_BUFFER_SIZE 16000 16 | 17 | char * strstrDoubleNewlineLocal(char * request,unsigned int requestLength,unsigned int * endOfLine) 18 | { 19 | if (request==0) { return request; } 20 | if (requestLength==0) { return request; } 21 | if (endOfLine==0) { return request; } 22 | 23 | char * ptrA=request; 24 | char * 
ptrB=request+1; 25 | 26 | char * ptrEnd = request + requestLength; 27 | 28 | //fprintf(stderr,"\strstrDoubleNewline for 13 10 13 10 on a buffer with %u bytes of data : ",requestLength); 29 | while (ptrB remoteExecution(struct MocapNET2 * mnet,const std::vector &mnetInput) 51 | { 52 | std::vector result; 53 | 54 | #if USE_NETWORKING 55 | fprintf(stderr,"remoteExecution :\n"); 56 | //----------------------------------------------------- 57 | char requestBuffer[SEND_RECV_BUFFER_SIZE+1]={0}; 58 | char part[128]={0}; 59 | 60 | snprintf(requestBuffer,SEND_RECV_BUFFER_SIZE,"control.html?skeleton="); 61 | for (int i=0; iremoteContext; 75 | 76 | char resultBuffer[SEND_RECV_BUFFER_SIZE+1]={0}; 77 | unsigned int filecontentSize=SEND_RECV_BUFFER_SIZE; 78 | 79 | if ( 80 | AmmClient_RecvFile( 81 | instance, 82 | requestBuffer, 83 | resultBuffer, 84 | &filecontentSize, 85 | 1,//keepAlive, 86 | 0// reallyFastImplementation 87 | ) 88 | ) 89 | { 90 | unsigned int startOfData=0; 91 | char * onlyResults = strstrDoubleNewlineLocal(resultBuffer,filecontentSize,&startOfData); 92 | if (onlyResults==0) { 93 | fprintf(stderr,"Couldnt find body.. \n"); 94 | onlyResults=resultBuffer; 95 | } 96 | 97 | //fprintf(stderr,"Got back:\n %s \n\n",resultBuffer); 98 | 99 | struct InputParserC * ipc = InputParser_Create(4096,1); 100 | InputParser_SetDelimeter(ipc,0,','); 101 | 102 | if (ipc!=0) 103 | { 104 | result.clear(); 105 | //fprintf(stderr,"Disregarding headers we have: %s \n\n",onlyResults); 106 | int numberOfArguments = InputParser_SeperateWords(ipc,onlyResults,1); 107 | //fprintf(stderr,"Skeleton Arguments : %u\n",numberOfArguments); 108 | for (int argument=0; argument 8 | #include 9 | 10 | #include "mocapnet2.hpp" 11 | 12 | std::vector remoteExecution(struct MocapNET2 * mnet,const std::vector &mnetInput); 13 | 14 | 15 | void * intializeRemoteExecution(const char * ip,unsigned int port,unsigned int socketTimeoutSeconds); 16 | int stopRemoteExecution(void * instance); -------------------------------------------------------------------------------- /src/MocapNET2/MocapNETLib2/solutionParts/body.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | /** @file body.hpp 3 | * @brief Code that handles regressing the whole body using the legacy ( MocapNET 1 ) engine that encoded the whole body at once.. 4 | * This is no longer used since the body has been split in upper and lower body to more effectively treat occlusions 5 | * however the code remains here as documentation and for backwards compatibility 6 | * @author Ammar Qammaz (AmmarkoV) 7 | */ 8 | 9 | 10 | #include "../mocapnet2.hpp" 11 | 12 | 13 | /** 14 | * @brief An array with string labels for what each element of an input should be after concatenating uncompressed and compressed input. 
15 | * Use ./GroundTruthDumper --from dataset/headerWithHeadAndOneMotion.bvh --printc 16 | * to extract this automatically 17 | */ 18 | /* 19 | enum MOCAPNET_BODY_Output_Joints 20 | { 21 | MOCAPNET_BODY_OUTPUT_HIP_XPOSITION=0, //0 22 | MOCAPNET_BODY_OUTPUT_HIP_YPOSITION, //1 23 | MOCAPNET_BODY_OUTPUT_HIP_ZPOSITION, //2 24 | MOCAPNET_BODY_OUTPUT_HIP_ZROTATION, //3 25 | MOCAPNET_BODY_OUTPUT_HIP_YROTATION, //4 26 | MOCAPNET_BODY_OUTPUT_HIP_XROTATION, //5 27 | MOCAPNET_BODY_OUTPUT_ABDOMEN_ZROTATION, //6 28 | MOCAPNET_BODY_OUTPUT_ABDOMEN_XROTATION, //7 29 | MOCAPNET_BODY_OUTPUT_ABDOMEN_YROTATION, //8 30 | MOCAPNET_BODY_OUTPUT_CHEST_ZROTATION, //9 31 | MOCAPNET_BODY_OUTPUT_CHEST_XROTATION, //10 32 | MOCAPNET_BODY_OUTPUT_CHEST_YROTATION, //11 33 | MOCAPNET_BODY_OUTPUT_NECK_ZROTATION, //12 34 | MOCAPNET_BODY_OUTPUT_NECK_XROTATION, //13 35 | MOCAPNET_BODY_OUTPUT_NECK_YROTATION, //14 36 | MOCAPNET_BODY_OUTPUT_HEAD_ZROTATION, //15 37 | MOCAPNET_BODY_OUTPUT_HEAD_XROTATION, //16 38 | MOCAPNET_BODY_OUTPUT_HEAD_YROTATION, //17 39 | MOCAPNET_BODY_OUTPUT_EYE_L_ZROTATION, //18 40 | MOCAPNET_BODY_OUTPUT_EYE_L_XROTATION, //19 41 | MOCAPNET_BODY_OUTPUT_EYE_L_YROTATION, //20 42 | MOCAPNET_BODY_OUTPUT_EYE_R_ZROTATION, //21 43 | MOCAPNET_BODY_OUTPUT_EYE_R_XROTATION, //22 44 | MOCAPNET_BODY_OUTPUT_EYE_R_YROTATION, //23 45 | MOCAPNET_BODY_OUTPUT_RSHOULDER_ZROTATION,//24 46 | MOCAPNET_BODY_OUTPUT_RSHOULDER_XROTATION,//25 47 | MOCAPNET_BODY_OUTPUT_RSHOULDER_YROTATION,//26 48 | MOCAPNET_BODY_OUTPUT_RELBOW_ZROTATION, //27 49 | MOCAPNET_BODY_OUTPUT_RELBOW_XROTATION, //28 50 | MOCAPNET_BODY_OUTPUT_RELBOW_YROTATION, //29 51 | MOCAPNET_BODY_OUTPUT_RHAND_ZROTATION, //30 52 | MOCAPNET_BODY_OUTPUT_RHAND_XROTATION, //31 53 | MOCAPNET_BODY_OUTPUT_RHAND_YROTATION, //32 54 | MOCAPNET_BODY_OUTPUT_LSHOULDER_ZROTATION,//33 55 | MOCAPNET_BODY_OUTPUT_LSHOULDER_XROTATION,//34 56 | MOCAPNET_BODY_OUTPUT_LSHOULDER_YROTATION,//35 57 | MOCAPNET_BODY_OUTPUT_LELBOW_ZROTATION,//36 58 | MOCAPNET_BODY_OUTPUT_LELBOW_XROTATION,//37 59 | MOCAPNET_BODY_OUTPUT_LELBOW_YROTATION,//38 60 | MOCAPNET_BODY_OUTPUT_LHAND_ZROTATION,//39 61 | MOCAPNET_BODY_OUTPUT_LHAND_XROTATION,//40 62 | MOCAPNET_BODY_OUTPUT_LHAND_YROTATION,//41 63 | MOCAPNET_BODY_OUTPUT_RHIP_ZROTATION,//42 64 | MOCAPNET_BODY_OUTPUT_RHIP_XROTATION,//43 65 | MOCAPNET_BODY_OUTPUT_RHIP_YROTATION,//44 66 | MOCAPNET_BODY_OUTPUT_RKNEE_ZROTATION,//45 67 | MOCAPNET_BODY_OUTPUT_RKNEE_XROTATION,//46 68 | MOCAPNET_BODY_OUTPUT_RKNEE_YROTATION,//47 69 | MOCAPNET_BODY_OUTPUT_RFOOT_ZROTATION,//48 70 | MOCAPNET_BODY_OUTPUT_RFOOT_XROTATION,//49 71 | MOCAPNET_BODY_OUTPUT_RFOOT_YROTATION,//50 72 | MOCAPNET_BODY_OUTPUT_TOE1_2_R_ZROTATION,//51 73 | MOCAPNET_BODY_OUTPUT_TOE1_2_R_XROTATION,//52 74 | MOCAPNET_BODY_OUTPUT_TOE1_2_R_YROTATION,//53 75 | MOCAPNET_BODY_OUTPUT_TOE5_3_R_ZROTATION,//54 76 | MOCAPNET_BODY_OUTPUT_TOE5_3_R_XROTATION,//55 77 | MOCAPNET_BODY_OUTPUT_TOE5_3_R_YROTATION,//56 78 | MOCAPNET_BODY_OUTPUT_LHIP_ZROTATION,//57 79 | MOCAPNET_BODY_OUTPUT_LHIP_XROTATION,//58 80 | MOCAPNET_BODY_OUTPUT_LHIP_YROTATION,//59 81 | MOCAPNET_BODY_OUTPUT_LKNEE_ZROTATION,//60 82 | MOCAPNET_BODY_OUTPUT_LKNEE_XROTATION,//61 83 | MOCAPNET_BODY_OUTPUT_LKNEE_YROTATION,//62 84 | MOCAPNET_BODY_OUTPUT_LFOOT_ZROTATION,//63 85 | MOCAPNET_BODY_OUTPUT_LFOOT_XROTATION,//64 86 | MOCAPNET_BODY_OUTPUT_LFOOT_YROTATION,//65 87 | MOCAPNET_BODY_OUTPUT_TOE1_2_L_ZROTATION,//66 88 | MOCAPNET_BODY_OUTPUT_TOE1_2_L_XROTATION,//67 89 | MOCAPNET_BODY_OUTPUT_TOE1_2_L_YROTATION,//68 90 | MOCAPNET_BODY_OUTPUT_TOE5_3_L_ZROTATION,//69 91 | 
MOCAPNET_BODY_OUTPUT_TOE5_3_L_XROTATION,//70 92 | MOCAPNET_BODY_OUTPUT_TOE5_3_L_YROTATION,//71 93 | //----------------------------- 94 | MOCAPNET_BODY_OUTPUT_NUMBER// = 72 95 | }; 96 | */ 97 | 98 | int mocapnetBody_initializeAssociations(struct MocapNET2 * mnet, struct skeletonSerialized * input); 99 | 100 | int mocapnetBody_initialize(struct MocapNET2 * mnet,const char * filename,float qualitySetting,unsigned int mode,unsigned int forceCPU); 101 | 102 | int mocapnetBody_unload(struct MocapNET2 * mnet); 103 | 104 | int mocapnetBody_fillResultVector(std::vector &finalResultVector,std::vector resultBody); 105 | 106 | std::vector mocapnetBody_evaluateInput(struct MocapNET2 * mnet,struct skeletonSerialized * input); -------------------------------------------------------------------------------- /src/MocapNET2/MocapNETLib2/solutionParts/lowerBody.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | /** @file lowerBody.hpp 3 | * @brief Code that handles getting 2D joints packed in their skeletonSerialized and vector formats and can run the tensorflow code retrieving a 3D BVH motion frame that estimates the human lower body 4 | * @author Ammar Qammaz (AmmarkoV) 5 | */ 6 | 7 | #include "../mocapnet2.hpp" 8 | 9 | /** 10 | * @brief Since the C/C++ code in this repository depends on a seperate python trainer, and the networks can differ depending on training parameters (and this is a research project) the series of 2D joints 11 | * can often change. It is also very expensive to do string matching on every frame, so before evaluating any input this call has to be executed in order to perform the correct array associations and from then on 12 | * we can pass 2D data without searching for labels on each frame. 
It needs to be called before any mocapnetLowerBody_evaluateInput call 13 | * @param MocapNET instance 14 | * @param a skeletonSerialized structure that holds our 2D input 15 | * @retval 1=Success/0=Failure 16 | */ 17 | int mocapnetLowerBody_initializeAssociations(struct MocapNET2 * mnet, struct skeletonSerialized * input); 18 | 19 | /** 20 | * @brief This call loads and initializes the required Tensorflow models for the specific configuration requested, it needs to be called before any mocapnetLowerBody_evaluateInput call 21 | * @param MocapNET instance 22 | * @param This parameter is currently omitted 23 | * @param MocapNET 1 supported multiple quality settings, however for MocapNET2 you should default to 1.0 24 | * @param MocapNET 1 used 3 ensembles, MocapNET 2 uses 5 ensembles, so mode should default to 5 25 | * @param The network can be executed on the GPU or the CPU, if you supply 1 you will force CPU execution, if not MocapNET will try to run it on your GPU ( if tensorflow finds it ) 26 | * @retval 1=Success/0=Failure 27 | */ 28 | int mocapnetLowerBody_initialize(struct MocapNET2 * mnet,const char * filename,float qualitySetting,unsigned int mode,unsigned int forceCPU); 29 | 30 | /** 31 | * @brief This call deallocates the tensorflow models 32 | * @param MocapNET instance 33 | * @retval 1=Success/0=Failure 34 | */ 35 | int mocapnetLowerBody_unload(struct MocapNET2 * mnet); 36 | 37 | /** 38 | * @brief This call inserts the 3D pose extracted from tensorflow into the final resulting BVH vector 39 | * @param The final BVH vector that we want to populate with lower body data 40 | * @param The lower body result 41 | * @retval 1=Success/0=Failure 42 | */ 43 | int mocapnetLowerBody_fillResultVector(std::vector &finalResultVector,std::vector resultBody); 44 | 45 | /** 46 | * @brief This call converts 2D input that is formatted on a skeletonSerialized structure to a 3D pose vector. You need to call mocapnetLowerBody_initializeAssociations and mocapnetLowerBody_initialize before calling this function 47 | * and if you want to convert the output result to the final result vector you need to use the mocapnetLowerBody_fillResultVector call. 48 | * @param MocapNET instance 49 | * @param a skeletonSerialized structure that holds our 2D input 50 | * correct ensemble for the perceived orientation. 51 | * @retval 3D pose output that needs to be processed through mocapnetLowerBody_fillResultVector to fill the final BVH buffer 52 | */ 53 | std::vector mocapnetLowerBody_evaluateInput(struct MocapNET2 * mnet,struct skeletonSerialized * input); 54 | -------------------------------------------------------------------------------- /src/MocapNET2/MocapNETLib2/solutionParts/upperBody.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | /** @file upperBody.hpp 3 | * @brief Code that handles getting 2D joints packed in their skeletonSerialized and vector formats and can run the tensorflow code retrieving a 3D BVH motion frame that estimates the human upper body 4 | * @author Ammar Qammaz (AmmarkoV) 5 | */ 6 | 7 | #include "../mocapnet2.hpp" 8 | 9 | /** 10 | * @brief Since the C/C++ code in this repository depends on a seperate python trainer, and the networks can differ depending on training parameters (and this is a research project) the series of 2D joints 11 | * can often change. 
It is also very expensive to do string matching on every frame, so before evaluating any input this call has to be executed in order to perform the correct array associations and from then on 12 | * we can pass 2D data without searching for labels on each frame. It needs to be called before any mocapnetUpperBody_evaluateInput call 13 | * @param MocapNET instance 14 | * @param a skeletonSerialized structure that holds our 2D input 15 | * @retval 1=Success/0=Failure 16 | */ 17 | int mocapnetUpperBody_initializeAssociations(struct MocapNET2 * mnet, struct skeletonSerialized * input); 18 | 19 | /** 20 | * @brief This call loads and initializes the required Tensorflow models for the specific configuration requested, it needs to be called before any mocapnetUpperBody_evaluateInput call 21 | * @param MocapNET instance 22 | * @param This parameter is currently omitted 23 | * @param MocapNET 1 supported multiple quality settings, however for MocapNET2 you should default to 1.0 24 | * @param MocapNET 1 used 3 ensembles, MocapNET 2 uses 5 ensembles, so mode should default to 5 25 | * @param The network can be executed on the GPU or the CPU, if you supply 1 you will force CPU execution, if not MocapNET will try to run it on your GPU ( if tensorflow finds it ) 26 | * @retval 1=Success/0=Failure 27 | */ 28 | int mocapnetUpperBody_initialize(struct MocapNET2 * mnet,const char * filename,float qualitySetting,unsigned int mode,unsigned int forceCPU); 29 | 30 | /** 31 | * @brief This call deallocates the tensorflow models 32 | * @param MocapNET instance 33 | * @retval 1=Success/0=Failure 34 | */ 35 | int mocapnetUpperBody_unload(struct MocapNET2 * mnet); 36 | 37 | /** 38 | * @brief This call inserts the 3D pose extracted from tensorflow into the final resulting BVH vector 39 | * @param The final BVH vector that we want to populate with upper body data 40 | * @param The upper body result 41 | * @retval 1=Success/0=Failure 42 | */ 43 | int mocapnetUpperBody_fillResultVector(std::vector &finalResultVector,std::vector resultBody); 44 | 45 | 46 | 47 | int mocapnetUpperBody_getOrientation(struct MocapNET2 * mnet,struct skeletonSerialized * input); 48 | 49 | /** 50 | * @brief This call converts 2D input that is formatted on a skeletonSerialized structure to a 3D pose vector. You need to call mocapnetUpperBody_initializeAssociations and mocapnetUpperBody_initialize before calling this function 51 | * and if you want to convert the output result to the final result vector you need to use the mocapnetUpperBody_fillResultVector call. 52 | * @param MocapNET instance 53 | * @param a skeletonSerialized structure that holds our 2D input 54 | * @param If this flag is set to 1 then the 2D input will be treated as front facing, otherwise the orientation classifier will decide on the orientation and call the 55 | * correct ensemble for the perceived orientation. 56 | * @retval 3D pose output that needs to be processed through mocapnetUpperBody_fillResultVector to fill the final BVH buffer 57 | */ 58 | std::vector mocapnetUpperBody_evaluateInput(struct MocapNET2 * mnet,struct skeletonSerialized * input); 59 | -------------------------------------------------------------------------------- /src/MocapNET2/MocapNETLib2/visualization/allInOne.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | /** @file allInOne.hpp 3 | * @brief Code that handles GUI output and visualization using OpenCV. 
4 | * If OpenCV is not be available, CMake will not declare the USE_OPENCV compilation flag and the whole code will not be included. 5 | * @author Ammar Qammaz (AmmarkoV) 6 | */ 7 | 8 | #include 9 | #include 10 | 11 | 12 | #if USE_OPENCV 13 | #include "opencv2/opencv.hpp" 14 | using namespace cv; 15 | 16 | int visualizeAllInOne( 17 | const char* windowName, 18 | unsigned int frameNumber, 19 | unsigned int saveVisualization, 20 | cv::Mat * alreadyLoadedImage, 21 | const char * path, 22 | const char * label, 23 | unsigned int serialLength, 24 | unsigned int width, 25 | unsigned int height, 26 | struct skeletonSerialized * skeleton, 27 | struct MocapNET2 * mnet, 28 | struct MocapNET2Options * options, 29 | std::vector > points2DOutputGUIRealView, 30 | std::vector > points2DOutputGUIForcedView, 31 | unsigned int numberOfMissingJoints 32 | ); 33 | 34 | 35 | #endif 36 | -------------------------------------------------------------------------------- /src/MocapNET2/MocapNETLib2/visualization/camera_ready.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | /** @file camera_ready.hpp 3 | * @brief Code that handles GUI output and visualization using OpenCV. 4 | * If OpenCV is not be available, CMake will not declare the USE_OPENCV compilation flag and the whole code will not be included. 5 | * @author Ammar Qammaz (AmmarkoV) 6 | */ 7 | 8 | #include 9 | #include 10 | 11 | 12 | #if USE_OPENCV 13 | #include "opencv2/opencv.hpp" 14 | using namespace cv; 15 | 16 | int visualizeCameraReady( 17 | const char* windowName, 18 | unsigned int frameNumber, 19 | unsigned int saveVisualization, 20 | cv::Mat * alreadyLoadedImage, 21 | const char * path, 22 | const char * label, 23 | unsigned int serialLength, 24 | unsigned int width, 25 | unsigned int height, 26 | struct skeletonSerialized * skeleton, 27 | struct MocapNET2 * mnet, 28 | std::vector > points2DOutputGUIRealView, 29 | std::vector > points2DOutputGUIForcedView, 30 | std::vector > points2DOutputGUIForcedViewSide, 31 | unsigned int numberOfMissingJoints 32 | ); 33 | #endif 34 | -------------------------------------------------------------------------------- /src/MocapNET2/MocapNETLib2/visualization/drawSkeleton.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | /** @file visualization.hpp 3 | * @brief Code that handles GUI output and visualization using OpenCV. 4 | * If OpenCV is not be available, CMake will not declare the USE_OPENCV compilation flag and the whole code will not be included. 
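 *
 * A hedged usage sketch (assuming a cv::Mat named canvas and 2D points already prepared by MocapNET):
 * @code
 * #if USE_OPENCV
 * drawSkeleton(canvas,points2DOutputGUIForcedView,0.0,0.0,1); //zero offsets, joint labels enabled
 * #endif
 * @endcode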
5 | * @author Ammar Qammaz (AmmarkoV) 6 | */ 7 | 8 | #include 9 | #include 10 | #include "../mocapnet2.hpp" 11 | 12 | #include "../applicationLogic/parseCommandlineOptions.hpp" 13 | 14 | #if USE_OPENCV 15 | #include "opencv2/opencv.hpp" 16 | using namespace cv; 17 | #endif 18 | 19 | 20 | #if USE_OPENCV 21 | #if USE_BVH 22 | 23 | int visualizeSkeletonSerialized( 24 | cv::Mat &outputMat, 25 | struct skeletonSerialized * skeleton, 26 | unsigned int drawLeftFingers, 27 | unsigned int drawRightFingers, 28 | unsigned int drawFace, 29 | unsigned int x,unsigned int y, 30 | unsigned int width,unsigned int height 31 | ); 32 | 33 | int drawSkeleton(cv::Mat &outputMat,std::vector > points2DOutputGUIForcedView,float offsetX,float offsetY,int labels); 34 | #endif 35 | #endif 36 | -------------------------------------------------------------------------------- /src/MocapNET2/MocapNETLib2/visualization/map.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | /** @file map.hpp 3 | * @brief Code that handles GUI output and visualization using OpenCV. 4 | * If OpenCV is not be available, CMake will not declare the USE_OPENCV compilation flag and the whole code will not be included. 5 | * @author Ammar Qammaz (AmmarkoV) 6 | */ 7 | 8 | #include 9 | #include 10 | 11 | 12 | #if USE_OPENCV 13 | #include "opencv2/opencv.hpp" 14 | using namespace cv; 15 | 16 | int visualizeMap( 17 | const char* windowName, 18 | unsigned int frameNumber, 19 | unsigned int width, 20 | unsigned int height, 21 | struct skeletonSerialized * skeleton, 22 | struct MocapNET2 * mnet, 23 | std::vector result, 24 | std::vector > points2DOutputGUIRealView, 25 | unsigned int numberOfMissingJoints, 26 | unsigned int gestureDetected, 27 | const char * gestureName, 28 | unsigned int gestureFrame 29 | ); 30 | #endif 31 | -------------------------------------------------------------------------------- /src/MocapNET2/MocapNETLib2/visualization/rgb.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | /** @file rgb.hpp 3 | * @brief Code that handles GUI output and visualization using OpenCV. 4 | * If OpenCV is not be available, CMake will not declare the USE_OPENCV compilation flag and the whole code will not be included. 5 | * @author Ammar Qammaz (AmmarkoV) 6 | */ 7 | 8 | #if USE_OPENCV 9 | #include "opencv2/opencv.hpp" 10 | using namespace cv; 11 | 12 | int visualizeCameraIntensities(const char* windowName, cv::Mat &imgOriginal,int forceColors); 13 | int visualizeCameraChannels(const char* windowName,cv::Mat &img,int channelNumber); 14 | int visualizeCameraEdges(const char* windowName,cv::Mat &img); 15 | int visualizeCameraFeatures(const char* windowName,cv::Mat &img); 16 | int visualizeFigure(const char* windowName,cv::Mat &img); 17 | #endif -------------------------------------------------------------------------------- /src/MocapNET2/MocapNETLib2/visualization/template.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | /** @file allInOne.hpp 3 | * @brief Code that handles GUI output and visualization using OpenCV. 4 | * If OpenCV is not be available, CMake will not declare the USE_OPENCV compilation flag and the whole code will not be included. 
5 | * @author Ammar Qammaz (AmmarkoV) 6 | */ 7 | 8 | #include 9 | #include 10 | 11 | 12 | #if USE_OPENCV 13 | #include "opencv2/opencv.hpp" 14 | using namespace cv; 15 | 16 | int visualizeTemplate( 17 | const char* windowName, 18 | unsigned int frameNumber, 19 | unsigned int saveVisualization, 20 | cv::Mat * alreadyLoadedImage, 21 | const char * path, 22 | const char * label, 23 | unsigned int serialLength, 24 | unsigned int width, 25 | unsigned int height, 26 | struct skeletonSerialized * skeleton, 27 | struct MocapNET2 * mnet, 28 | struct MocapNET2Options * options, 29 | std::vector > points2DOutputGUIRealView, 30 | std::vector > points2DOutputGUIForcedView, 31 | unsigned int numberOfMissingJoints 32 | ); 33 | 34 | 35 | #endif 36 | -------------------------------------------------------------------------------- /src/MocapNET2/MocapNETLib2/visualization/widgets.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | /** @file widgets.hpp 3 | * @brief Code that handles GUI output and visualization using OpenCV. 4 | * If OpenCV is not be available, CMake will not declare the USE_OPENCV compilation flag and the whole code will not be included. 5 | * @author Ammar Qammaz (AmmarkoV) 6 | */ 7 | 8 | #if USE_OPENCV 9 | #include "opencv2/opencv.hpp" 10 | using namespace cv; 11 | 12 | cv::Mat overlay(cv::Mat base,cv::Mat overlay); 13 | 14 | int drawScale(cv::Mat &outputMat,const char * description,float x,float y,float value,float minimum,float maximum); 15 | 16 | int plotFloatVector( 17 | cv::Mat & img, 18 | char cleanBackground, 19 | std::vector history, 20 | unsigned int x, 21 | unsigned int y, 22 | unsigned int width, 23 | unsigned int height 24 | ); 25 | 26 | 27 | int visualizeOrientation( 28 | cv::Mat &img, 29 | const char * label, 30 | float orientationDegrees, 31 | float frontClass, 32 | float backClass, 33 | float leftClass, 34 | float rightClass, 35 | unsigned int x, 36 | unsigned int y, 37 | unsigned int width, 38 | unsigned int height 39 | ); 40 | 41 | 42 | 43 | int visualizeNSDM( 44 | cv::Mat &img, 45 | const char * label, 46 | std::vector NSDM, 47 | unsigned int channelsPerNSDMElement, 48 | unsigned int x, 49 | unsigned int y, 50 | unsigned int width, 51 | unsigned int height 52 | ); 53 | 54 | 55 | 56 | int visualizeNSDMAsBar( 57 | cv::Mat &img, 58 | std::vector NSDM, 59 | unsigned int x, 60 | unsigned int y, 61 | unsigned int width, 62 | unsigned int height 63 | ); 64 | 65 | 66 | int drawFloorFromPrimitives( 67 | cv::Mat &img, 68 | float roll, 69 | float pitch, 70 | float yaw, 71 | unsigned int floorDimension 72 | ); 73 | #endif -------------------------------------------------------------------------------- /src/MocapNET2/drawCSV/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | project( drawCSV ) 2 | cmake_minimum_required( VERSION 2.8.13) 3 | #cmake_minimum_required(VERSION 3.5) 4 | find_package(OpenCV REQUIRED) 5 | INCLUDE_DIRECTORIES(${OpenCV_INCLUDE_DIRS}) 6 | 7 | #set_property(GLOBAL PROPERTY USE_FOLDERS ON) 8 | set(CMAKE_CXX_STANDARD 11) 9 | include_directories(${TENSORFLOW_INCLUDE_ROOT}) 10 | 11 | 12 | add_executable(drawCSV drawCSV.cpp ) 13 | 14 | target_link_libraries(drawCSV rt dl m ${OpenCV_LIBRARIES} ${OPENGL_LIBS} Tensorflow TensorflowFramework MocapNETLib2 ${NETWORK_CLIENT_LIBRARIES} ${PNG_Libs} ${JPG_Libs} ) 15 | set_target_properties(drawCSV PROPERTIES DEBUG_POSTFIX "D") 16 | 17 | 18 | set_target_properties(drawCSV PROPERTIES 19 | ARCHIVE_OUTPUT_DIRECTORY 
"${CMAKE_SOURCE_DIR}" 20 | LIBRARY_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 21 | RUNTIME_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 22 | ) 23 | 24 | -------------------------------------------------------------------------------- /src/MocapNET2/reshapeCSVFileToMakeClassification/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | project( ReshapeCSV ) 2 | cmake_minimum_required( VERSION 2.8.13 ) 3 | #cmake_minimum_required(VERSION 3.5) 4 | 5 | #find_package(OpenCV REQUIRED) 6 | #INCLUDE_DIRECTORIES(${OpenCV_INCLUDE_DIRS}) 7 | 8 | 9 | 10 | add_executable(ReshapeCSV reshapeCSV.cpp ) 11 | target_link_libraries(ReshapeCSV rt dl m Tensorflow TensorflowFramework MocapNETLib2 ) 12 | #set_target_properties(TestCSV PROPERTIES DEBUG_POSTFIX "D") 13 | 14 | 15 | set_target_properties(ReshapeCSV PROPERTIES 16 | ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 17 | LIBRARY_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 18 | RUNTIME_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 19 | ) 20 | 21 | -------------------------------------------------------------------------------- /src/MocapNET2/reshapeCSVFileToMakeClassification/reshapeCSV.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Utility to extract BVH files straight from OpenPose JSON output 3 | * Sample usage ./MocapNETCSV --from test.csv --visualize 4 | */ 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | #include "../MocapNETLib2/mocapnet2.hpp" 13 | #include "../MocapNETLib2/core/core.hpp" 14 | #include "../MocapNETLib2/IO/csvRead.hpp" 15 | 16 | 17 | int main(int argc, char *argv[]) 18 | { 19 | struct MocapNET2 mnet; 20 | struct CSVFileContext csvT={0}; 21 | unsigned long processedLines=0; 22 | 23 | fprintf(stderr,"Run : \n"); 24 | fprintf(stderr,"./createRandomizedDataset.sh\n"); 25 | fprintf(stderr,"to update the dataset/bvh_body_all.csv to the latest version..\n\n"); 26 | 27 | fprintf(stdout,"Front,Back,Left,Right\n"); 28 | 29 | 30 | char csvFileInput[1024]; 31 | snprintf(csvFileInput,1024,"dataset/bvh_body_all.csv"); 32 | 33 | if (argc==2) 34 | { 35 | snprintf(csvFileInput,1024,"%s",argv[1]); 36 | } else 37 | { 38 | fprintf(stderr,"Please provide an argument i.e. 
-------------------------------------------------------------------------------- /src/MocapNET2/testCSV/CMakeLists.txt: --------------------------------------------------------------------------------
1 | project( TestCSV )
2 | cmake_minimum_required( VERSION 2.8.13 )
3 | #cmake_minimum_required(VERSION 3.5)
4 | 
5 | find_package(OpenCV REQUIRED)
6 | INCLUDE_DIRECTORIES(${OpenCV_INCLUDE_DIRS})
7 | 
8 | 
9 | 
10 | add_executable(TestCSV testCSV.cpp )
11 | target_link_libraries(TestCSV rt dl m Tensorflow TensorflowFramework MocapNETLib2 )
12 | #set_target_properties(TestCSV PROPERTIES DEBUG_POSTFIX "D")
13 | 
14 | 
15 | set_target_properties(TestCSV PROPERTIES
16 |                       ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}"
17 |                       LIBRARY_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}"
18 |                       RUNTIME_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}"
19 |                       )
20 | 
-------------------------------------------------------------------------------- /src/MocapNET2/testCSV/testCSV.cpp: --------------------------------------------------------------------------------
1 | /*
2 |  * Utility to test that CSV files produced by convertOpenPoseJSONToCSV can be opened and parsed
3 |  * Sample usage : ./TestCSV
4 |  */
5 | 
6 | #include <stdio.h>
7 | #include <stdlib.h>
8 | #include <string.h>
9 | #include <unistd.h>
10 | #include <vector>
11 | 
12 | #include "../MocapNETLib2/mocapnet2.hpp"
13 | #include "../MocapNETLib2/IO/csvRead.hpp"
14 | 
15 | 
16 | int main(int argc, char *argv[])
17 | {
18 |     struct MocapNET2 mnet;
19 |     struct CSVFileContext csvT={0};
20 | 
21 |     fprintf(stderr,"Run : \n");
22 |     fprintf(stderr,"./convertOpenPoseJSONToCSV --from frames/GOPR3246.MP4-data/ -o newtest.csv\n");
23 |     fprintf(stderr,"to update the newtest.csv to the latest version..\n\n");
24 | 
25 | 
26 |     if (openCSVFile(&csvT,"newtest.csv"))
27 |         {
28 |             fprintf(stderr,"CSV file had %u lines\n",csvT.lineNumber);
29 |             //----------------------------------------------------
30 |             struct skeletonSerialized skeletonS= {0};
31 |             while ( parseNextCSVCOCOSkeleton(&csvT,&skeletonS) )
32 |                 //----------------------------------------------------
33 |                 {
34 |                     //This is only a smoke test : successfully parsing one skeleton is enough, so we exit inside the loop
35 |                     initializeMocapNET2InputAssociation(&mnet,&skeletonS,1,1,1);
36 |                     fprintf(stderr,"...\n");
37 | 
38 |                     fprintf(stderr,"Don't forget to run : \n");
39 |                     fprintf(stderr,"./convertOpenPoseJSONToCSV --from frames/GOPR3246.MP4-data/ -o newtest.csv\n");
40 |                     fprintf(stderr,"to update the newtest.csv to the latest version..\n\n");
41 | 
42 |                     exit(0);
43 |                 }
44 |             //----------------------------------------------------
45 |             closeCSVFile(&csvT);
46 |         }
47 | 
48 | 
49 |     fprintf(stderr,"Don't forget to run : \n");
50 |     fprintf(stderr,"./convertOpenPoseJSONToCSV --from frames/GOPR3246.MP4-data/ -o newtest.csv\n");
51 |     fprintf(stderr,"to update the newtest.csv to the latest version..\n\n");
52 | 
53 |     exit(0);
54 | }
55 | 
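TestCSV deliberately exits after the first successfully parsed skeleton; a sketch of a variant that walks the whole file, built only from the calls already shown above:

    struct skeletonSerialized skeletonS={0};
    unsigned long rows=0;
    while ( parseNextCSVCOCOSkeleton(&csvT,&skeletonS) ) //keep pulling COCO skeletons until the file is exhausted
          { ++rows; }
    fprintf(stderr,"Parsed %lu skeletons\n",rows);
    closeCSVFile(&csvT);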
fprintf(stderr,"Run : \n"); 22 | fprintf(stderr,"./convertOpenPoseJSONToCSV --from frames/GOPR3246.MP4-data/ -o newtest.csv\n"); 23 | fprintf(stderr,"to update the newtest.csv to the latest version..\n\n"); 24 | 25 | 26 | if (openCSVFile(&csvT,"newtest.csv")) 27 | { 28 | fprintf(stderr,"CSV file had %u lines\n",csvT.lineNumber); 29 | //---------------------------------------------------- 30 | struct skeletonSerialized skeletonS= {0}; 31 | while ( parseNextCSVCOCOSkeleton(&csvT,&skeletonS) ) 32 | //---------------------------------------------------- 33 | { 34 | initializeMocapNET2InputAssociation(&mnet,&skeletonS,1,1,1); 35 | fprintf(stderr,"...\n"); 36 | 37 | fprintf(stderr,"Don't forget, Run : \n"); 38 | fprintf(stderr,"./convertOpenPoseJSONToCSV --from frames/GOPR3246.MP4-data/ -o newtest.csv\n"); 39 | fprintf(stderr,"to update the newtest.csv to the latest version..\n\n"); 40 | 41 | exit(0); 42 | } 43 | //---------------------------------------------------- 44 | closeCSVFile(&csvT); 45 | } 46 | 47 | 48 | fprintf(stderr,"Don't forget, Run : \n"); 49 | fprintf(stderr,"./convertOpenPoseJSONToCSV --from frames/GOPR3246.MP4-data/ -o newtest.csv\n"); 50 | fprintf(stderr,"to update the newtest.csv to the latest version..\n\n"); 51 | 52 | exit(0); 53 | } 54 | -------------------------------------------------------------------------------- /src/NeuralNetworkAbstractionLayer/README.md: -------------------------------------------------------------------------------- 1 | Since Tensorflow is under very active development and undergoes frequent "changes" this is an abstraction layer so that all the tensorflow crazyness is handled transparently. 2 | In the end what we need is to just give a vector of numbers run the network and get back a vector of numbers. 3 | We don't care about all the internals of tensorflow 4 | -------------------------------------------------------------------------------- /src/NeuralNetworkAbstractionLayer/neuralNetworkAbstraction.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | 6 | #ifdef USE_TENSORFLOW1 7 | #include "../Tensorflow/tensorflow.hpp" 8 | #elif USE_TENSORFLOW2 9 | #include "../Tensorflow2/tensorflow2.h" 10 | #endif 11 | 12 | struct neuralNetworkModel 13 | { 14 | #ifdef USE_TENSORFLOW1 15 | struct TensorflowInstance model; 16 | #elif USE_TENSORFLOW2 17 | struct Tensorflow2Instance model; 18 | #else 19 | #warning "No tensorflow support signaled through a C define.." 
-------------------------------------------------------------------------------- /src/Tensorflow/createTensorflowConfigurationForC.py: --------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
3 | #config = tf.ConfigProto(gpu_options=gpu_options)
4 | 
5 | config = tf.ConfigProto(
6 |     device_count={'CPU' : 1, 'GPU' : 0},
7 |     allow_soft_placement=True,
8 |     log_device_placement=False
9 | )
10 | 
11 | serialized = config.SerializeToString()
12 | c = list(map(hex, serialized))
13 | 
14 | 
15 | #Emit a ready-to-paste C array instead of a raw python list
16 | print("uint8_t config[] = {")
17 | print(", ".join(c))
18 | print("};")
19 | print("TF_SetConfig(opts, (void*)config, ",len(serialized),", status);")
20 | #Sample output bytes : 0x32, 0x9, 0x9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe0, 0x3f
21 | 
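The emitted snippet is meant to be pasted into the C side and applied to the session options before a session is created; a sketch using the TensorFlow C API, where the config bytes are the sample ones from the comment above:

    TF_Status* status = TF_NewStatus();
    TF_SessionOptions* opts = TF_NewSessionOptions();
    uint8_t config[] = { 0x32, 0x9, 0x9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe0, 0x3f }; //serialized ConfigProto
    TF_SetConfig(opts, (void*)config, sizeof(config), status);                       //apply it to the session options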
-------------------------------------------------------------------------------- /src/Tensorflow/tensorflow.hpp: --------------------------------------------------------------------------------
1 | #pragma once
2 | /** @file tensorflow.hpp
3 |  * @brief The main wrapper around the tensorflow C API
4 |  *
5 |  *
6 |  * @author Ammar Qammaz (AmmarkoV)
7 |  * @bug The tensorflow C-API is volatile and subject to change
8 |  */
9 | 
10 | #include <tensorflow/c/c_api.h> // TensorFlow C API header
11 | #include <vector>
12 | 
13 | 
14 | /**
15 |  * @brief A structure that holds all of the relevant information for a tensorflow instance
16 |  *
17 |  * This is used to simplify context switching and reduce the complexity of the library
18 |  *
19 |  */
20 | struct TensorflowInstance
21 | {
22 |     char modelPath[1024];
23 |     char inputLayerName[512];
24 |     char outputLayerName[512];
25 | 
26 |     TF_Graph* graph;
27 |     TF_Session* session;
28 |     TF_Operation* operation;
29 |     TF_Tensor* inputTensor;
30 |     TF_Tensor* outputTensor;
31 |     TF_Output input_operation;
32 |     TF_Output output_operation;
33 | 
34 |     TF_Status* status;
35 |     TF_SessionOptions* options;
36 | };
37 | 
38 | /**
39 |  * @brief Get the number of Ticks in Microseconds, to be used as a performance counter
40 |  * @ingroup tensorflow
41 |  * @retval The number of microseconds since system boot
42 |  */
43 | unsigned long GetTickCountMicroseconds();
44 | 
45 | 
46 | /**
47 |  * @brief Get the number of Ticks in Milliseconds, to be used as a performance counter
48 |  * @ingroup tensorflow
49 |  * @retval The number of milliseconds since system boot
50 |  */
51 | unsigned long GetTickCountMilliseconds();
52 | 
53 | 
54 | /**
55 |  * @brief List nodes of the TF_Graph by printing them in stdout
56 |  * @ingroup tensorflow
57 |  * @param Label of printed output
58 |  * @param Pointer to TF_Graph struct
59 |  * @retval No return value
60 |  */
61 | void listNodes(const char * label , TF_Graph* graph);
62 | 
63 | 
64 | /**
65 |  * @brief Load a tensorflow instance from a .pb file
66 |  * @ingroup tensorflow
67 |  * @param Pointer to a struct TensorflowInstance that will hold the tensorflow instance on load.
68 |  * @param Path to .pb file
69 |  * @param Name of input tensor, i.e. input_1
70 |  * @param Name of output tensor, i.e. output_1
71 |  * @param Flag that forces execution on the CPU
72 |  * @retval 1 = Success loading the file , 0 = Failure
73 |  */
74 | int loadTensorflowInstance(
75 |     struct TensorflowInstance * net,
76 |     const char * filename,
77 |     const char * inputTensor,
78 |     const char * outputTensor,
79 |     unsigned int forceCPU
80 | );
81 | 
82 | /**
83 |  * @brief Evaluate an input vector through the neural network and return an output vector
84 |  * @ingroup tensorflow
85 |  * @param Pointer to a struct TensorflowInstance that holds a loaded tensorflow instance.
86 |  * @param Input vector of floats
87 |  * @retval Output vector of floats, Empty vector in case of failure
88 |  */
89 | std::vector<float> predictTensorflow(struct TensorflowInstance * net,std::vector<float> input);
90 | 
91 | 
92 | 
93 | /**
94 |  * @brief Evaluate an input image through a network that outputs a vector of heatmaps
95 |  * @ingroup tensorflow
96 |  * @param Pointer to a struct TensorflowInstance that holds a loaded tensorflow instance.
97 |  * @param Width of input image
98 |  * @param Height of input image
99 |  * @param Pixels of input image
100 |  * @retval Output vector of vectors of floats, That correspond to the heatmaps
101 |  * @bug TF_GraphGetTensorShape returns -1 for some networks.
102 |  */
103 | std::vector<std::vector<float> > predictTensorflowOnArrayOfHeatmaps(
104 |     struct TensorflowInstance * net,
105 |     unsigned int width ,
106 |     unsigned int height ,
107 |     float * data,
108 |     unsigned int heatmapWidth,
109 |     unsigned int heatmapHeight,
110 |     unsigned int numberOfOutputTensors
111 | );
112 | 
113 | 
114 | /**
115 |  * @brief Clean tensorflow instance from memory and deallocate it
116 |  * @ingroup tensorflow
117 |  * @param Pointer to a struct TensorflowInstance that holds a loaded tensorflow instance.
118 |  * @retval 1 = Success unloading the instance , 0 = Failure
119 |  */
120 | int unloadTensorflow(struct TensorflowInstance * net);
121 | 
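A typical call sequence for this wrapper; the .pb path and tensor names below are illustrative placeholders (the header itself suggests names like input_1 / output_1):

    struct TensorflowInstance net={0};
    if ( loadTensorflowInstance(&net,"model.pb","input_1","output_1",/*forceCPU=*/1) )
       {
         std::vector<float> in(128,0.0f);                     //input vector sized to match the network
         std::vector<float> out = predictTensorflow(&net,in); //an empty result signals failure
         unloadTensorflow(&net);
       }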
-------------------------------------------------------------------------------- /src/Tensorflow/tf_utils.hpp: --------------------------------------------------------------------------------
1 | // Licensed under the MIT License.
2 | // SPDX-License-Identifier: MIT
3 | // Copyright (c) 2018 - 2019 Daniil Goncharov.
4 | //
5 | // Permission is hereby granted, free of charge, to any person obtaining a copy
6 | // of this software and associated documentation files (the "Software"), to deal
7 | // in the Software without restriction, including without limitation the rights
8 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | // copies of the Software, and to permit persons to whom the Software is
10 | // furnished to do so, subject to the following conditions:
11 | //
12 | // The above copyright notice and this permission notice shall be included in all
13 | // copies or substantial portions of the Software.
14 | //
15 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | // SOFTWARE.
22 | 
23 | #pragma once
24 | 
25 | #if defined(_MSC_VER)
26 | #  if !defined(COMPILER_MSVC)
27 | #    define COMPILER_MSVC // Set MSVC visibility of exported symbols in the shared library.
28 | #  endif
29 | #  pragma warning(push)
30 | #  pragma warning(disable : 4190)
31 | #endif
32 | #include <tensorflow/c/c_api.h> // TensorFlow C API header
33 | #include <cstddef>
34 | #include <cstdint>
35 | #include <vector>
36 | 
37 | namespace tf_utils {
38 | 
39 | TF_Graph* LoadGraph(const char* graphPath);
40 | 
41 | void DeleteGraph(TF_Graph* graph);
42 | 
43 | TF_Session* CreateSession(TF_Graph* graph);
44 | 
45 | void DeleteSession(TF_Session* session);
46 | 
47 | TF_Code RunSession(TF_Session* session,
48 |                    const TF_Output* inputs, TF_Tensor* const* input_tensors, std::size_t ninputs,
49 |                    const TF_Output* outputs, TF_Tensor** output_tensors, std::size_t noutputs);
50 | 
51 | TF_Code RunSession(TF_Session* session,
52 |                    const std::vector<TF_Output>& inputs, const std::vector<TF_Tensor*>& input_tensors,
53 |                    const std::vector<TF_Output>& outputs, std::vector<TF_Tensor*>& output_tensors);
54 | 
55 | TF_Tensor* CreateTensor(TF_DataType data_type,
56 |                         const std::int64_t* dims, std::size_t num_dims,
57 |                         const void* data, std::size_t len);
58 | 
59 | template <typename T>
60 | TF_Tensor* CreateTensor(TF_DataType data_type, const std::vector<std::int64_t>& dims, const std::vector<T>& data) {
61 |   return CreateTensor(data_type,
62 |                       dims.data(), dims.size(),
63 |                       data.data(), data.size() * sizeof(T));
64 | }
65 | 
66 | TF_Tensor* CreateEmptyTensor(TF_DataType data_type, const std::int64_t* dims, std::size_t num_dims);
67 | 
68 | TF_Tensor* CreateEmptyTensor(TF_DataType data_type, const std::vector<std::int64_t>& dims);
69 | 
70 | void DeleteTensor(TF_Tensor* tensor);
71 | 
72 | void DeleteTensors(const std::vector<TF_Tensor*>& tensors);
73 | 
74 | void SetTensorsData(TF_Tensor* tensor, const void* data, std::size_t len);
75 | 
76 | void PrintTensorInfo(TF_Graph* graph, const char* layer_name,int printInputs,int printOutputs);
77 | 
78 | void PrintOp(TF_Graph* graph);
79 | 
80 | int listNodesMN(const char * label , TF_Graph* graph);
81 | 
82 | template <typename T>
83 | void SetTensorsData(TF_Tensor* tensor, const std::vector<T>& data) {
84 |   SetTensorsData(tensor, data.data(), data.size() * sizeof(T));
85 | }
86 | 
87 | template <typename T>
88 | std::vector<T> GetTensorsData(const TF_Tensor* tensor) {
89 |   auto data = static_cast<T*>(TF_TensorData(tensor));
90 |   if (data == nullptr) {
91 |     return {};
92 |   }
93 | 
94 |   return {data, data + (TF_TensorByteSize(tensor) / TF_DataTypeSize(TF_TensorType(tensor)))};
95 | }
96 | 
97 | template <typename T>
98 | std::vector<std::vector<T> > GetTensorsData(const std::vector<TF_Tensor*>& tensors) {
99 |   std::vector<std::vector<T> > data;
100 |   data.reserve(tensors.size());
101 |   for (const auto t : tensors) {
102 |     data.push_back(GetTensorsData<T>(t));
103 |   }
104 | 
105 |   return data;
106 | }
107 | 
108 | } // namespace tf_utils
109 | 
110 | #if defined(_MSC_VER)
111 | #  pragma warning(pop)
112 | #endif
113 | 
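A small round trip through the helpers above (the shape and payload are invented for the example):

    std::vector<std::int64_t> dims = {1, 3};
    std::vector<float> data = {1.0f, 2.0f, 3.0f};
    TF_Tensor* t = tf_utils::CreateTensor(TF_FLOAT, dims, data);  //templated overload derives the byte length
    std::vector<float> back = tf_utils::GetTensorsData<float>(t); //copy the payload back out of the tensor
    tf_utils::DeleteTensor(t);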
"${CMAKE_SOURCE_DIR}" 15 | ) 16 | 17 | -------------------------------------------------------------------------------- /src/Webcam/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | project( OpenCVTest ) 2 | #cmake_minimum_required( VERSION 2.8.7 ) 3 | cmake_minimum_required(VERSION 3.5) 4 | find_package(OpenCV REQUIRED) 5 | INCLUDE_DIRECTORIES(${OpenCV_INCLUDE_DIRS}) 6 | 7 | 8 | 9 | add_executable(OpenCVTest webcam.cpp) 10 | target_link_libraries(OpenCVTest rt dl m ${OpenCV_LIBRARIES} ) 11 | set_target_properties(OpenCVTest PROPERTIES DEBUG_POSTFIX "D") 12 | 13 | 14 | set_target_properties(OpenCVTest PROPERTIES 15 | ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 16 | LIBRARY_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 17 | RUNTIME_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" 18 | ) 19 | 20 | -------------------------------------------------------------------------------- /src/Webcam/webcam.cpp: -------------------------------------------------------------------------------- 1 | #include "opencv2/opencv.hpp" 2 | /** @file webcam.cpp 3 | * @brief This is a simple test file to make sure your camera or video files can be opened using OpenCV 4 | * @author Ammar Qammaz (AmmarkoV) 5 | */ 6 | #include 7 | 8 | 9 | using namespace cv; 10 | 11 | int main(int argc, char *argv[]) 12 | { 13 | const char * webcam = 0; 14 | for (int i=0; ii+1) 19 | { 20 | webcam = argv[i+1]; 21 | } 22 | } 23 | } 24 | 25 | 26 | VideoCapture cap(webcam); // open the default camera 27 | if (webcam==0) 28 | { 29 | std::cerr<<"Trying to open webcam\n"; 30 | cap.set(cv::CAP_PROP_FRAME_WIDTH,640); // In case of errors try CV_CAP_PROP_FRAME_WIDTH 31 | cap.set(cv::CAP_PROP_FRAME_HEIGHT,480); // In case of errors try CV_CAP_PROP_FRAME_HEIGHT 32 | } 33 | else 34 | { 35 | std::cerr<<"Trying to open "<> frame; 52 | if ( (frame.size().width>0) && (frame.size().height>0) ) 53 | { 54 | imshow("feed", frame); 55 | } 56 | else 57 | { 58 | std::cerr<<"Broken frame.. \n"; 59 | } 60 | waitKey(1); 61 | } 62 | // the camera will be deinitialized automatically in VideoCapture destructor 63 | return 0; 64 | } 65 | -------------------------------------------------------------------------------- /src/python/blender/downloadAndInstallBlender.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 4 | cd "$DIR" 5 | 6 | BLENDER="blender-3.4.1-linux-x64" 7 | 8 | if [[ -d "$BLENDER" ]] 9 | then 10 | echo "Blender seems to exist on your filesystem." 11 | else 12 | echo "Will get a copy of blender." 
13 | #https://www.blender.org/download/release/Blender3.4/blender-3.4.1-linux-x64.tar.xz/ 14 | wget https://ftp.halifax.rwth-aachen.de/blender/release/Blender3.4/$BLENDER.tar.xz 15 | tar -xf $BLENDER.tar.xz 16 | fi 17 | 18 | 19 | #This now happens from inside the blender_mocapnet.py script in the main function 20 | #-------------------------------------------------------------------------------- 21 | #git clone https://github.com/makehumancommunity/mpfb2 22 | #mkdir -p ~/.config/blender/3.4/scripts/addons/ 23 | #cd ~/.config/blender/3.4/scripts/addons/ 24 | #ln -s $DIR/mpfb2/src/mpfb 25 | #OR 26 | #wget http://download.tuxfamily.org/makehuman/plugins/mpfb2-latest.zip 27 | #wget http://files.makehumancommunity.org/asset_packs/makehuman_system_assets/makehuman_system_assets_cc0.zip 28 | #-------------------------------------------------------------------------------- 29 | 30 | 31 | xdg-open "https://www.youtube.com/watch?v=ooLRUS5j4AI"& 32 | cd "$DIR" 33 | 34 | $BLENDER/blender -y --python blender_mocapnet.py 35 | 36 | 37 | exit 0 38 | -------------------------------------------------------------------------------- /src/python/compareUtility/aT.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #Written by Ammar Qammaz a.k.a AmmarkoV - 2020 3 | # This bash script uses gnuplot and R so make sure to : 4 | # sudo apt-get install gnuplot r-base 5 | 6 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 7 | cd "$DIR" 8 | 9 | ORIG_DIR=`pwd` 10 | 11 | function getStatistics 12 | { 13 | #use R to generate statistics 14 | #sudo apt-get install r-base 15 | R -q -e "x <- read.csv('$1', header = F); summary(x); sd(x[ , 1])" > $2 16 | cat $1 | wc -l >> $2 17 | } 18 | 19 | 20 | IN="$1" 21 | OUTPUT_IMAGE="AllDistancesFrequency_$2.png" 22 | echo "plotAllJointsDistanceFrequency.sh $IN $OUTPUT_IMAGE" 23 | 24 | getStatistics $IN $IN-statsraw.txt 25 | cat $IN-statsraw.txt | grep -E 'Min|Qu|Median|Mean|Max' > $IN-stats.txt 26 | STATS=`cat $IN-stats.txt` 27 | MEAN=`cat $IN-stats.txt | grep "Mean" | cut -d':' -f2 ` 28 | MEDIAN=`cat $IN-stats.txt | grep "Median" | cut -d':' -f2 ` 29 | MAXIMUM=`cat $IN-stats.txt | grep "Max" | cut -d':' -f2 ` 30 | PLACETEXT=`cat $IN-stats.txt | grep "3rd" | cut -d':' -f2 ` 31 | 32 | LOW_LIMIT="50"; 33 | LIMIT="150"; 34 | NUMBER_OF_RECORDS=`wc -l $1 | cut -d' ' -f1` 35 | 36 | GNUPLOT_CMD="set terminal png; \ 37 | set output \"$OUTPUT_IMAGE\"; set yrange [0:1];\ 38 | set title \"Frequency precision diagram $IN \";\ 39 | set xlabel \"Distance Of Joints(mm)\"; \ 40 | set ylabel \"Frequency Of Value\"; \ 41 | set arrow from $MEAN, graph 0 to $MEAN, graph 1 nohead; \ 42 | set label \"Mean Value of $MEAN mm \" at $MEAN,0.40; \ 43 | set arrow from $MEDIAN, graph 0 to $MEDIAN, graph 1 nohead; \ 44 | set label \"Median Value of $MEDIAN mm \" at $MEDIAN,0.30; \ 45 | set label \"$STATS\" at $PLACETEXT,0.85; \ 46 | binwidth=3;\ 47 | bin(x,width)=width*floor(x/width);\ 48 | plot [0:] '$IN' using (bin(\$1,binwidth)):(1.0) smooth cnorm t 'smooth cumulative'" 49 | 50 | 51 | echo "WE WILL RUN " 52 | echo "gnuplot -e \"$GNUPLOT_CMD\"" 53 | echo " " 54 | echo " " 55 | 56 | 57 | gnuplot -e "$GNUPLOT_CMD" 58 | 59 | rm $IN-statsraw.txt 60 | rm $IN-stats.txt 61 | 62 | exit 0 63 | -------------------------------------------------------------------------------- /src/python/compareUtility/align2DPoints.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #Written by Ammar Qammaz a.k.a 
AmmarkoV - 2020
3 | 
4 | 
5 | import h5py
6 | import numpy as np
7 | import csv
8 | import os
9 | import sys
10 | 
11 | from scipy.spatial import procrustes
12 | from scipy.linalg import orthogonal_procrustes
13 | 
14 | 
15 | #Taken from https://github.com/una-dinosauria/3d-pose-baseline/blob/master/src/procrustes.py
16 | def compute_similarity_transform(X, Y, compute_optimal_scale=False):
17 |   """
18 |   A port of MATLAB's `procrustes` function to Numpy.
19 |   Adapted from http://stackoverflow.com/a/18927641/1884420
20 |   Args
21 |     X: array NxM of targets, with N number of points and M point dimensionality
22 |     Y: array NxM of inputs
23 |     compute_optimal_scale: whether we compute optimal scale or force it to be 1
24 |   Returns:
25 |     d: squared error after transformation
26 |     Z: transformed Y
27 |     T: computed rotation
28 |     b: scaling
29 |     c: translation
30 |   """
31 | 
32 |   muX = X.mean(0)
33 |   muY = Y.mean(0)
34 | 
35 |   X0 = X - muX
36 |   Y0 = Y - muY
37 | 
38 |   ssX = (X0**2.).sum()
39 |   ssY = (Y0**2.).sum()
40 | 
41 |   # centred Frobenius norm
42 |   normX = np.sqrt(ssX)
43 |   normY = np.sqrt(ssY)
44 | 
45 |   # scale to equal (unit) norm
46 |   X0 = X0 / normX
47 |   Y0 = Y0 / normY
48 | 
49 |   # optimum rotation matrix of Y
50 |   A = np.dot(X0.T, Y0)
51 |   U,s,Vt = np.linalg.svd(A,full_matrices=False)
52 |   V = Vt.T
53 |   T = np.dot(V, U.T)
54 | 
55 |   # Make sure we have a rotation
56 |   detT = np.linalg.det(T)
57 |   V[:,-1] *= np.sign( detT )
58 |   s[-1]   *= np.sign( detT )
59 |   T = np.dot(V, U.T)
60 | 
61 |   traceTA = s.sum()
62 | 
63 |   if compute_optimal_scale:  # Compute optimum scaling of Y.
64 |     b = traceTA * normX / normY
65 |     d = 1 - traceTA**2
66 |     Z = normX*traceTA*np.dot(Y0, T) + muX
67 |   else:  # If no scaling allowed
68 |     b = 1
69 |     d = 1 + ssY/ssX - 2 * traceTA * normY / normX
70 |     Z = normY*np.dot(Y0, T) + muX
71 | 
72 |   c = muX - b*np.dot(muY, T)
73 | 
74 |   return d, Z, T, b, c
75 | 
76 | 
77 | 
78 | 
79 | def pointListsReturnAvgDistance(A,B):
80 |   numberOfPoints=A.shape[0]
81 |   if (A.shape[0]!=B.shape[0]):
82 |      print("Error comparing point lists of different length")
83 |      return np.inf #no meaningful distance for mismatched lists
84 | 
85 |   distance=0.0
86 |   for i in range(0,numberOfPoints):
87 |      #---------
88 |      xA=A[i][0]
89 |      yA=A[i][1]
90 |      zA=A[i][2]
91 |      #---------
92 |      xB=B[i][0]
93 |      yB=B[i][1]
94 |      zB=B[i][2]
95 |      #---------
96 |      xAB=xA-xB
97 |      yAB=yA-yB
98 |      zAB=zA-zB
99 | 
100 |      #Pythagorean theorem for 3 dimensions
101 |      #distance = squareRoot( xAB^2 + yAB^2 + zAB^2 )
102 |      distance+=np.sqrt( (xAB*xAB) + (yAB*yAB) + (zAB*zAB) )
103 |   return distance/numberOfPoints
104 | 
105 | 
106 | 
107 | 
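A usage sketch for the two helpers above, with synthetic 17-joint 3D point clouds:

    import numpy as np
    X = np.random.rand(17,3)  #target joints
    Y = np.random.rand(17,3)  #input joints to be aligned on the targets
    d, Z, T, b, c = compute_similarity_transform(X, Y, compute_optimal_scale=True)
    print(pointListsReturnAvgDistance(X, Z)) #average per-joint distance after alignment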
file.write("Global_Median,") 31 | #file.write("Global_Mean,") 32 | file.write("Global_Average,") 33 | file.write("Global_Std,") 34 | file.write("Global_Var,") 35 | for jointID in range(0,numberOfJoints): 36 | file.write("%s_Median,"%JOINT_LABELS[jointID]) 37 | #file.write("%s_Mean,"%JOINT_LABELS[jointID]) 38 | file.write("%s_Average,"%JOINT_LABELS[jointID]) 39 | file.write("%s_Std,"%JOINT_LABELS[jointID]) 40 | file.write("%s_Var"%JOINT_LABELS[jointID]) 41 | if (jointID!=numberOfJoints-1): 42 | file.write(",") 43 | else: 44 | file.write("\n") 45 | else: 46 | file = open(outputFile,"a") 47 | #---------------------------- 48 | 49 | median=np.median(globalJointDistances) 50 | mean=np.mean(globalJointDistances) 51 | average=np.average(globalJointDistances) 52 | std=np.std(globalJointDistances) 53 | var=np.var(globalJointDistances) 54 | print("\nGlobal Median:",median," Average:",average," Std:",std,"Var:",var) #file.write(subject) 55 | file.write(subject) 56 | file.write(",") 57 | file.write(action) 58 | file.write(",") 59 | file.write(actionLabel) 60 | file.write(",") 61 | file.write(subaction) 62 | file.write(",") 63 | file.write(camera) 64 | file.write(",") 65 | file.write(str(addedPixelNoise)) 66 | file.write(",") 67 | file.write(str(median)) 68 | file.write(",") 69 | #file.write(str(mean)) 70 | #file.write(",") 71 | file.write(str(average)) 72 | file.write(",") 73 | file.write(str(std)) 74 | file.write(",") 75 | file.write(str(var)) 76 | file.write(",") 77 | 78 | for jointID in range(0,numberOfJoints): 79 | median=np.median(perJointDistance[jointID]) 80 | mean=np.mean(perJointDistance[jointID]) 81 | average=np.average(perJointDistance[jointID]) 82 | std=np.std(perJointDistance[jointID]) 83 | var=np.var(perJointDistance[jointID]) 84 | print("Joint ",JOINT_LABELS[jointID]," Median:",median," Mean:",mean," Average:",average," Std:",std,"Var:",var) 85 | file.write(str(median)) 86 | file.write(",") 87 | #file.write(str(mean)) 88 | #file.write(",") 89 | file.write(str(average)) 90 | file.write(",") 91 | file.write(str(std)) 92 | file.write(",") 93 | file.write(str(var)) 94 | if (jointID!=numberOfJoints-1): 95 | file.write(",") 96 | else: 97 | file.write("\n") 98 | 99 | file.close() 100 | #-------------------------------------------- 101 | 102 | 103 | 104 | #----------------------------------------------------------------------------------------------------------------------- 105 | def appendRAWResultsForGNUplot(outputFile,globalJointDistances): 106 | fileHandler = open(outputFile, "a") 107 | for measurement in globalJointDistances: 108 | fileHandler.write(str(measurement)) 109 | fileHandler.write("\n") 110 | fileHandler.close() 111 | -------------------------------------------------------------------------------- /src/python/ctypes/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #gcc -c -fPIC c.c -o c.o 4 | #gcc c.o -shared -o libcalci.so 5 | 6 | gcc -shared -o libcalci.so -fPIC c.c 7 | 8 | python3 p.py 9 | 10 | 11 | exit 0 12 | -------------------------------------------------------------------------------- /src/python/ctypes/c.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include "c.h" 4 | 5 | void connect() 6 | { 7 | printf("Connected to C extension...\n"); 8 | } 9 | 10 | //return random value in range of 0-50 11 | int randNum() 12 | { 13 | int nRand = rand() % 50; 14 | return nRand; 15 | } 16 | 17 | //add two number and return value 18 | int addNum(int a, 
int b) 19 | { 20 | int nAdd = a + b; 21 | return nAdd; 22 | } 23 | 24 | int printFloatList(float * l,int lSize) 25 | { 26 | for (int i=0; i