├── docs
│   ├── .nojekyll
│   └── static
│       ├── images
│       │   ├── viz_1.png
│       │   ├── viz_2.png
│       │   ├── carousel1.jpg
│       │   ├── carousel2.jpg
│       │   ├── carousel3.jpg
│       │   ├── carousel4.jpg
│       │   ├── OCB_pipeline_main_new_colors.jpg
│       │   └── fine_tuning_pipeline_synthpose.jpg
│       ├── pdfs
│       │   └── sample.pdf
│       ├── videos
│       │   ├── carousel1.mp4
│       │   ├── carousel2.mp4
│       │   ├── carousel3.mp4
│       │   └── banner_video.mp4
│       ├── js
│       │   ├── index.js
│       │   └── bulma-slider.min.js
│       └── css
│           ├── index.css
│           └── bulma-carousel.min.css
├── synthpose
│   ├── resource
│   │   ├── imgext.json
│   │   ├── gender.json
│   │   └── vertices_keypoints_corr.csv
│   ├── RICH
│   │   ├── downsample_rich.py
│   │   ├── utils.py
│   │   └── generate_dataset.py
│   ├── Infinity
│   │   ├── reconstruction.py
│   │   ├── video_scene.py
│   │   └── generate_dataset.py
│   ├── constants.py
│   └── BEDLAM
│       └── generate_dataset.py
├── benchmarking
│   ├── CameraIntrinsics   (one cameraIntrinsics.pickle per device model and capture profile; identical subtrees written in brace notation)
│   │   ├── iPad11,1/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad11,2/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad11,3/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad11,4/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad11,6/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad11,7/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad12,1/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad12,2/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad13,1/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad13,2/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad13,4/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad13,5/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad13,6/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad13,7/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad13,8/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad13,9/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad14,1/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad14,2/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad14,3/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad14,4/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad14,5/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad14,6/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad7,11/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad7,12/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad7,5/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad7,6/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad8,1/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad8,10/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad8,11/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad8,12/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad8,2/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad8,3/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad8,4/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad8,5/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad8,6/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad8,7/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad8,8/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad8,9/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPod9,1/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad13,10/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad13,11/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad13,16/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad13,17/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad13,18/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPad13,19/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPhone10,1/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPhone10,2/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPhone10,3/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPhone10,4/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPhone10,5/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPhone10,6/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPhone11,2/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPhone11,4/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPhone11,6/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPhone11,8/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPhone12,1/{Deployed,Deployed_4k}/cameraIntrinsics.pickle
│   │   ├── iPhone12,3/{Deployed,Deployed_4k}/cameraIntrinsics.pickle
│   │   ├── iPhone12,5/{Deployed,Deployed_4k}/cameraIntrinsics.pickle
│   │   ├── iPhone12,8/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPhone13,1/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPhone13,2/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPhone13,3/{Deployed,Deployed_4k,Deployed_720_60fps,Deployed_720_240fps}/cameraIntrinsics.pickle
│   │   ├── iPhone13,4/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPhone14,2/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPhone14,3/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPhone14,4/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPhone14,5/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPhone14,6/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPhone14,7/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPhone14,8/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPhone15,2/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPhone15,3/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   ├── iPhone8,1/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   │   └── iPhone8,4/{Deployed,Deployed_720_60fps}/cameraIntrinsics.pickle
│   ├── utils.py
│   ├── .gitignore
│   ├── defaultSessionMetadata.yaml
│   ├── resource
│   │   └── vertices_keypoints_corr_smpl.csv
│   ├── kinematics.py
│   ├── opensimPipeline
│   │   ├── Models
│   │   │   ├── RajagopalModified2016_markers_mmpose.xml
│   │   │   └── RajagopalModified2016_markers_openpose.xml
│   │   └── IK
│   │       ├── Setup_IK_mmpose.xml
│   │       ├── Setup_IK_openpose.xml
│   │       ├── Setup_IK_mmpose_anatomical.xml
│   │       └── Setup_IK_mmpose_final.xml
│   ├── calibration.py
│   ├── constants.py
│   └── pose.py
├── LICENSE
└── README.md

/docs/.nojekyll:
(empty file)

Binary assets are not inlined in this listing. Each of the docs/static images, videos, and sample.pdf, and every cameraIntrinsics.pickle under benchmarking/CameraIntrinsics, resolves to
https://raw.githubusercontent.com/StanfordMIMI/OpenCapBench/HEAD/<path>
where <path> is the file's repository path from the tree above.
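For example, a minimal Python sketch (not part of the repository) for fetching and inspecting one of the per-device intrinsics pickles, assuming only the URL pattern noted above; the pickle's internal layout is not documented in this listing, so it is simply loaded and printed:

import pickle
from urllib.request import urlopen

BASE = "https://raw.githubusercontent.com/StanfordMIMI/OpenCapBench/HEAD"
PATH = "benchmarking/CameraIntrinsics/iPhone13,3/Deployed/cameraIntrinsics.pickle"

# HTTPResponse is file-like, so pickle can read straight from the stream.
# Caveat: only unpickle data from sources you trust.
with urlopen(f"{BASE}/{PATH}") as resp:
    intrinsics = pickle.load(resp)

print(type(intrinsics))
print(intrinsics)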
/synthpose/resource/imgext.json:
{"Gym": "bmp", "BBQ": "bmp", "ParkingLot2": "bmp", "Pavallion": "png", "ParkingLot1": "bmp", "LectureHall": "png"}
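This file maps scene names to the image extension used for that scene's frames. For illustration, a short sketch of how the map might be used to build per-scene filenames; the frame_filename helper and its zero-padded naming scheme are hypothetical, not repo functions:

import json

# The mapping shipped in synthpose/resource/imgext.json (content shown above).
img_ext = json.loads(
    '{"Gym": "bmp", "BBQ": "bmp", "ParkingLot2": "bmp", '
    '"Pavallion": "png", "ParkingLot1": "bmp", "LectureHall": "png"}'
)

def frame_filename(scene: str, frame_idx: int) -> str:
    """Hypothetical naming scheme: zero-padded frame index + per-scene extension."""
    return f"{frame_idx:05d}.{img_ext[scene]}"

print(frame_filename("Gym", 42))        # -> 00042.bmp
print(frame_filename("Pavallion", 7))   # -> 00007.png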
https://raw.githubusercontent.com/StanfordMIMI/OpenCapBench/HEAD/benchmarking/CameraIntrinsics/iPhone13,1/Deployed_720_60fps/cameraIntrinsics.pickle -------------------------------------------------------------------------------- /benchmarking/CameraIntrinsics/iPhone13,2/Deployed_720_60fps/cameraIntrinsics.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/StanfordMIMI/OpenCapBench/HEAD/benchmarking/CameraIntrinsics/iPhone13,2/Deployed_720_60fps/cameraIntrinsics.pickle -------------------------------------------------------------------------------- /benchmarking/CameraIntrinsics/iPhone13,3/Deployed_720_60fps/cameraIntrinsics.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/StanfordMIMI/OpenCapBench/HEAD/benchmarking/CameraIntrinsics/iPhone13,3/Deployed_720_60fps/cameraIntrinsics.pickle -------------------------------------------------------------------------------- /benchmarking/CameraIntrinsics/iPhone13,4/Deployed_720_60fps/cameraIntrinsics.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/StanfordMIMI/OpenCapBench/HEAD/benchmarking/CameraIntrinsics/iPhone13,4/Deployed_720_60fps/cameraIntrinsics.pickle -------------------------------------------------------------------------------- /benchmarking/CameraIntrinsics/iPhone14,2/Deployed_720_60fps/cameraIntrinsics.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/StanfordMIMI/OpenCapBench/HEAD/benchmarking/CameraIntrinsics/iPhone14,2/Deployed_720_60fps/cameraIntrinsics.pickle -------------------------------------------------------------------------------- /benchmarking/CameraIntrinsics/iPhone14,3/Deployed_720_60fps/cameraIntrinsics.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/StanfordMIMI/OpenCapBench/HEAD/benchmarking/CameraIntrinsics/iPhone14,3/Deployed_720_60fps/cameraIntrinsics.pickle -------------------------------------------------------------------------------- /benchmarking/CameraIntrinsics/iPhone14,4/Deployed_720_60fps/cameraIntrinsics.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/StanfordMIMI/OpenCapBench/HEAD/benchmarking/CameraIntrinsics/iPhone14,4/Deployed_720_60fps/cameraIntrinsics.pickle -------------------------------------------------------------------------------- /benchmarking/CameraIntrinsics/iPhone14,5/Deployed_720_60fps/cameraIntrinsics.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/StanfordMIMI/OpenCapBench/HEAD/benchmarking/CameraIntrinsics/iPhone14,5/Deployed_720_60fps/cameraIntrinsics.pickle -------------------------------------------------------------------------------- /benchmarking/CameraIntrinsics/iPhone14,6/Deployed_720_60fps/cameraIntrinsics.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/StanfordMIMI/OpenCapBench/HEAD/benchmarking/CameraIntrinsics/iPhone14,6/Deployed_720_60fps/cameraIntrinsics.pickle -------------------------------------------------------------------------------- /benchmarking/CameraIntrinsics/iPhone14,7/Deployed_720_60fps/cameraIntrinsics.pickle: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/StanfordMIMI/OpenCapBench/HEAD/benchmarking/CameraIntrinsics/iPhone14,7/Deployed_720_60fps/cameraIntrinsics.pickle -------------------------------------------------------------------------------- /benchmarking/CameraIntrinsics/iPhone14,8/Deployed_720_60fps/cameraIntrinsics.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/StanfordMIMI/OpenCapBench/HEAD/benchmarking/CameraIntrinsics/iPhone14,8/Deployed_720_60fps/cameraIntrinsics.pickle -------------------------------------------------------------------------------- /benchmarking/CameraIntrinsics/iPhone15,2/Deployed_720_60fps/cameraIntrinsics.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/StanfordMIMI/OpenCapBench/HEAD/benchmarking/CameraIntrinsics/iPhone15,2/Deployed_720_60fps/cameraIntrinsics.pickle -------------------------------------------------------------------------------- /benchmarking/CameraIntrinsics/iPhone15,3/Deployed_720_60fps/cameraIntrinsics.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/StanfordMIMI/OpenCapBench/HEAD/benchmarking/CameraIntrinsics/iPhone15,3/Deployed_720_60fps/cameraIntrinsics.pickle -------------------------------------------------------------------------------- /benchmarking/CameraIntrinsics/iPhone8,1/Deployed_720_60fps/cameraIntrinsics.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/StanfordMIMI/OpenCapBench/HEAD/benchmarking/CameraIntrinsics/iPhone8,1/Deployed_720_60fps/cameraIntrinsics.pickle -------------------------------------------------------------------------------- /benchmarking/CameraIntrinsics/iPhone8,4/Deployed_720_60fps/cameraIntrinsics.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/StanfordMIMI/OpenCapBench/HEAD/benchmarking/CameraIntrinsics/iPhone8,4/Deployed_720_60fps/cameraIntrinsics.pickle -------------------------------------------------------------------------------- /benchmarking/CameraIntrinsics/iPhone13,3/Deployed_720_240fps/cameraIntrinsics.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/StanfordMIMI/OpenCapBench/HEAD/benchmarking/CameraIntrinsics/iPhone13,3/Deployed_720_240fps/cameraIntrinsics.pickle -------------------------------------------------------------------------------- /synthpose/resource/gender.json: -------------------------------------------------------------------------------- 1 | {"4": "male", "14": "male", "5": "male", "6": "male", "8": "male", "18": "female", "20": "male", "0": "male", "1": "male", "7": "male", "2": "male", "13": "female", "3": "male", "15": "male", "16": "female", "11": "male", "17": "female", "19": "female", "21": "female", "9": "female", "12": "female", "10": "female"} -------------------------------------------------------------------------------- /benchmarking/utils.py: -------------------------------------------------------------------------------- 1 | import yaml 2 | 3 | def importMetadata(filePath): 4 | myYamlFile = open(filePath) 5 | parsedYamlFile = yaml.load(myYamlFile, Loader=yaml.FullLoader) 6 | 7 | return parsedYamlFile 8 | 9 | def delete_multiple_element(list_object, indices): 
10 | indices = sorted(indices, reverse=True) 11 | for idx in indices: 12 | if idx < len(list_object): 13 | list_object.pop(idx) 14 | -------------------------------------------------------------------------------- /benchmarking/.gitignore: -------------------------------------------------------------------------------- 1 | *.mp4 2 | #*.mov 3 | *~ 4 | #*.json 5 | .ipynb_checkpoints 6 | __pycache__ 7 | *.log 8 | *WithKeypoints.avi 9 | *_rotated.avi 10 | .env 11 | *.pth 12 | *.ipynb 13 | 14 | # Data 15 | */Data/* 16 | Data/ 17 | Examples/Data/* 18 | 19 | # Logging 20 | wandb/ 21 | 22 | # Files for re-processing 23 | Examples/reprocessSessionServer.py 24 | 25 | *.ini 26 | *.stats 27 | 28 | tests/ 29 | slurm/ 30 | 31 | newsletter.py 32 | 33 | VirtualMarker 34 | CLIFF 35 | -------------------------------------------------------------------------------- /docs/static/js/index.js: -------------------------------------------------------------------------------- 1 | window.HELP_IMPROVE_VIDEOJS = false; 2 | 3 | 4 | $(document).ready(function() { 5 | // Check for click events on the navbar burger icon 6 | 7 | var options = { 8 | slidesToScroll: 1, 9 | slidesToShow: 1, 10 | loop: true, 11 | infinite: true, 12 | autoplay: true, 13 | autoplaySpeed: 5000, 14 | } 15 | 16 | // Initialize all div with carousel class 17 | var carousels = bulmaCarousel.attach('.carousel', options); 18 | 19 | bulmaSlider.attach(); 20 | 21 | }) 22 | -------------------------------------------------------------------------------- /benchmarking/defaultSessionMetadata.yaml: -------------------------------------------------------------------------------- 1 | calibrationSettings: 2 | overwriteDeployedIntrinsics: false 3 | saveSessionIntrinsics: false 4 | checkerBoard: 5 | black2BlackCornersHeight_n: 8 6 | black2BlackCornersWidth_n: 11 7 | placement: ground 8 | squareSideLength_mm: 60 9 | gender_mf: m 10 | height_m: 1.89 11 | iphoneModel: 12 | Cam0: iphone13,3 13 | Cam1: iphone13,3 14 | Cam2: iphone13,3 15 | Cam3: iphone13,3 16 | Cam4: iphone13,3 17 | markerAugmentationSettings: 18 | markerAugmenterModel: LSTM 19 | mass_kg: 83.2 20 | openSimModel: LaiUhlrich2022 21 | subjectID: defaultSubject 22 | -------------------------------------------------------------------------------- /benchmarking/resource/vertices_keypoints_corr_smpl.csv: -------------------------------------------------------------------------------- 1 | Name,Index 2 | sternum,3171 3 | rshoulder,4724 4 | lshoulder,1239 5 | r_lelbow,5135 6 | l_lelbow,1666 7 | r_melbow,5200 8 | l_melbow,1731 9 | r_lwrist,5696 10 | l_lwrist,2235 11 | r_mwrist,5567 12 | l_mwrist,2106 13 | r_ASIS,6573 14 | l_ASIS,3156 15 | r_PSIS,6521 16 | l_PSIS,3097 17 | r_knee,4496 18 | l_knee,1009 19 | r_mknee,4499 20 | l_mknee,1013 21 | r_ankle,6728 22 | l_ankle,3327 23 | r_mankle,6832 24 | l_mankle,3432 25 | r_5meta,6748 26 | l_5meta,3349 27 | r_toe,6741 28 | l_toe,3340 29 | r_big_toe,6634 30 | l_big_toe,3232 31 | l_calc,3387 32 | r_calc,6796 33 | C7,3012 34 | L2,3502 35 | T11,3017 36 | T6,3028 37 | -------------------------------------------------------------------------------- /synthpose/resource/vertices_keypoints_corr.csv: -------------------------------------------------------------------------------- 1 | Name,Index 2 | sternum,5621 3 | rshoulder,6629 4 | lshoulder,3878 5 | r_lelbow,7040 6 | l_lelbow,4302 7 | r_melbow,7105 8 | l_melbow,4369 9 | r_lwrist,7584 10 | l_lwrist,4848 11 | r_mwrist,7457 12 | l_mwrist,4721 13 | r_ASIS,8421 14 | l_ASIS,5727 15 | r_PSIS,8371 16 | l_PSIS,5677 17 | r_knee,6401 18 
| l_knee,3640 19 | r_mknee,6407 20 | l_mknee,3646 21 | r_ankle,8576 22 | l_ankle,5882 23 | r_mankle,8680 24 | l_mankle,8892 25 | r_5meta,8596 26 | l_5meta,5902 27 | r_toe,8589 28 | l_toe,5895 29 | r_big_toe,8482 30 | l_big_toe,5788 31 | l_calc,8846 32 | r_calc,8634 33 | r_bpinky,7978 34 | l_bpinky,4807 35 | r_tpinky,8004 36 | l_tpinky,5268 37 | r_bindex,7483 38 | l_bindex,4747 39 | r_tindex,7664 40 | l_tindex,4928 41 | r_tmiddle,7776 42 | l_tmiddle,5040 43 | r_tring,7887 44 | l_tring,5151 45 | r_bthumb,7420 46 | l_bthumb,4684 47 | r_tthumb,8078 48 | l_tthumb,5342 49 | C7,5484 50 | L2,5941 51 | T11,5489 52 | T6,5500 53 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 yonigozlan 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /benchmarking/kinematics.py: -------------------------------------------------------------------------------- 1 | import os 2 | import opensim 3 | 4 | 5 | def runIKTool( 6 | pathGenericSetupFile, 7 | pathScaledModel, 8 | pathTRCFile, 9 | pathOutputFolder, 10 | timeRange=[], 11 | IKFileName="not_specified", 12 | ): 13 | # Paths 14 | if IKFileName == "not_specified": 15 | _, IKFileName = os.path.split(pathTRCFile) 16 | IKFileName = IKFileName[:-4] 17 | pathOutputMotion = os.path.join(pathOutputFolder, IKFileName + ".mot") 18 | pathOutputSetup = os.path.join(pathOutputFolder, "Setup_IK_" + IKFileName + ".xml") 19 | 20 | # Setup IK tool. 
21 | opensim.Logger.setLevelString("error") 22 | IKTool = opensim.InverseKinematicsTool(pathGenericSetupFile) 23 | IKTool.setName(IKFileName) 24 | IKTool.set_model_file(pathScaledModel) 25 | IKTool.set_marker_file(pathTRCFile) 26 | if timeRange: 27 | IKTool.set_time_range(0, timeRange[0]) 28 | IKTool.set_time_range(1, timeRange[-1]) 29 | IKTool.setResultsDir(pathOutputFolder) 30 | IKTool.set_report_errors(True) 31 | IKTool.set_report_marker_locations(False) 32 | IKTool.set_output_motion_file(pathOutputMotion) 33 | IKTool.printToXML(pathOutputSetup) 34 | command = "opensim-cmd -o error" + " run-tool " + pathOutputSetup 35 | os.system(command) 36 | 37 | return pathOutputMotion 38 | -------------------------------------------------------------------------------- /docs/static/css/index.css: -------------------------------------------------------------------------------- 1 | body { 2 | font-family: 'Noto Sans', sans-serif; 3 | } 4 | 5 | 6 | .footer .icon-link { 7 | font-size: 25px; 8 | color: #000; 9 | } 10 | 11 | .link-block a { 12 | margin-top: 5px; 13 | margin-bottom: 5px; 14 | } 15 | 16 | .dnerf { 17 | font-variant: small-caps; 18 | } 19 | 20 | 21 | .teaser .hero-body { 22 | padding-top: 0; 23 | padding-bottom: 3rem; 24 | } 25 | 26 | .teaser { 27 | font-family: 'Google Sans', sans-serif; 28 | } 29 | 30 | 31 | .publication-title { 32 | } 33 | 34 | .publication-banner { 35 | max-height: parent; 36 | 37 | } 38 | 39 | .publication-banner video { 40 | position: relative; 41 | left: auto; 42 | top: auto; 43 | transform: none; 44 | object-fit: fit; 45 | } 46 | 47 | .publication-header .hero-body { 48 | } 49 | 50 | .publication-title { 51 | font-family: 'Google Sans', sans-serif; 52 | } 53 | 54 | .publication-authors { 55 | font-family: 'Google Sans', sans-serif; 56 | } 57 | 58 | .publication-venue { 59 | color: #555; 60 | width: fit-content; 61 | font-weight: bold; 62 | } 63 | 64 | .publication-awards { 65 | color: #ff3860; 66 | width: fit-content; 67 | font-weight: bolder; 68 | } 69 | 70 | .publication-authors { 71 | } 72 | 73 | .publication-authors a { 74 | color: hsl(204, 86%, 53%) !important; 75 | } 76 | 77 | .publication-authors a:hover { 78 | text-decoration: underline; 79 | } 80 | 81 | .author-block { 82 | display: inline-block; 83 | } 84 | 85 | .publication-banner img { 86 | } 87 | 88 | .publication-authors { 89 | /*color: #4286f4;*/ 90 | } 91 | 92 | .publication-video { 93 | position: relative; 94 | width: 100%; 95 | height: 0; 96 | padding-bottom: 56.25%; 97 | 98 | overflow: hidden; 99 | border-radius: 10px !important; 100 | } 101 | 102 | .publication-video iframe { 103 | position: absolute; 104 | top: 0; 105 | left: 0; 106 | width: 100%; 107 | height: 100%; 108 | } 109 | 110 | .publication-body img { 111 | } 112 | 113 | .results-carousel { 114 | overflow: hidden; 115 | } 116 | 117 | .results-carousel .item { 118 | margin: 5px; 119 | overflow: hidden; 120 | padding: 20px; 121 | font-size: 0; 122 | } 123 | 124 | .results-carousel video { 125 | margin: 0; 126 | } 127 | 128 | .slider-pagination .slider-page { 129 | background: #000000; 130 | } 131 | 132 | .eql-cntrb { 133 | font-size: smaller; 134 | } 135 | 136 | 137 | 138 | -------------------------------------------------------------------------------- /docs/static/css/bulma-carousel.min.css: -------------------------------------------------------------------------------- 1 | @-webkit-keyframes spinAround{from{-webkit-transform:rotate(0);transform:rotate(0)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes 
spinAround{from{-webkit-transform:rotate(0);transform:rotate(0)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.slider{position:relative;width:100%}.slider-container{display:flex;flex-wrap:nowrap;flex-direction:row;overflow:hidden;-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0);min-height:100%}.slider-container.is-vertical{flex-direction:column}.slider-container .slider-item{flex:none}.slider-container .slider-item .image.is-covered img{-o-object-fit:cover;object-fit:cover;-o-object-position:center center;object-position:center center;height:100%;width:100%}.slider-container .slider-item .video-container{height:0;padding-bottom:0;padding-top:56.25%;margin:0;position:relative}.slider-container .slider-item .video-container.is-1by1,.slider-container .slider-item .video-container.is-square{padding-top:100%}.slider-container .slider-item .video-container.is-4by3{padding-top:75%}.slider-container .slider-item .video-container.is-21by9{padding-top:42.857143%}.slider-container .slider-item .video-container embed,.slider-container .slider-item .video-container iframe,.slider-container .slider-item .video-container object{position:absolute;top:0;left:0;width:100%!important;height:100%!important}.slider-navigation-next,.slider-navigation-previous{display:flex;justify-content:center;align-items:center;position:absolute;width:42px;height:42px;background:#fff center center no-repeat;background-size:20px 20px;border:1px solid #fff;border-radius:25091983px;box-shadow:0 2px 5px #3232321a;top:50%;margin-top:-20px;left:0;cursor:pointer;transition:opacity .3s,-webkit-transform .3s;transition:transform .3s,opacity .3s;transition:transform .3s,opacity .3s,-webkit-transform .3s}.slider-navigation-next:hover,.slider-navigation-previous:hover{-webkit-transform:scale(1.2);transform:scale(1.2)}.slider-navigation-next.is-hidden,.slider-navigation-previous.is-hidden{display:none;opacity:0}.slider-navigation-next svg,.slider-navigation-previous svg{width:25%}.slider-navigation-next{left:auto;right:0;background:#fff center center no-repeat;background-size:20px 20px}.slider-pagination{display:none;justify-content:center;align-items:center;position:absolute;bottom:0;left:0;right:0;padding:.5rem 1rem;text-align:center}.slider-pagination .slider-page{background:#fff;width:10px;height:10px;border-radius:25091983px;display:inline-block;margin:0 3px;box-shadow:0 2px 5px #3232321a;transition:-webkit-transform .3s;transition:transform .3s;transition:transform .3s,-webkit-transform .3s;cursor:pointer}.slider-pagination .slider-page.is-active,.slider-pagination .slider-page:hover{-webkit-transform:scale(1.4);transform:scale(1.4)}@media screen and (min-width:800px){.slider-pagination{display:flex}}.hero.has-carousel{position:relative}.hero.has-carousel+.hero-body,.hero.has-carousel+.hero-footer,.hero.has-carousel+.hero-head{z-index:10;overflow:hidden}.hero.has-carousel .hero-carousel{position:absolute;top:0;left:0;bottom:0;right:0;height:auto;border:none;margin:auto;padding:0;z-index:0}.hero.has-carousel .hero-carousel .slider{width:100%;max-width:100%;overflow:hidden;height:100%!important;max-height:100%;z-index:0}.hero.has-carousel .hero-carousel .slider .has-background{max-height:100%}.hero.has-carousel .hero-carousel .slider .has-background .is-background{-o-object-fit:cover;object-fit:cover;-o-object-position:center center;object-position:center center;height:100%;width:100%}.hero.has-carousel .hero-body{margin:0 3rem;z-index:10} 
-------------------------------------------------------------------------------- /synthpose/RICH/downsample_rich.py: --------------------------------------------------------------------------------
1 | import json 2 | import os 3 | 4 | from PIL import Image 5 | from tqdm import tqdm 6 | 7 | 8 | def downsample_images( 9 | source_folder, annotation_file, write_images=False, write_annotations=True 10 | ): 11 | # Create a new folder in the source_folder to store the downsampled images and annotations 12 | dest_folder = os.path.join(source_folder, "downsampled") 13 | os.makedirs(dest_folder, exist_ok=True) 14 | orig_width = 4112 15 | orig_height = 3008 16 | new_width = 984 17 | new_height = 720 18 | ratio = new_width / orig_width 19 | 20 | if write_images: 21 | # Traverse through the source folder and find all the image files 22 | cnt_images = 0 23 | for root, dirs, files in os.walk( 24 | os.path.join(source_folder, "ps/project/multi-ioi/rich_release") 25 | ): 26 | for file in files: 27 | # if file.endswith('.bmp') or file.endswith('.png'): 28 | if file.endswith(".png"): 29 | # Open the image file and downsample it to 984x720 30 | img_path = os.path.join(root, file) 31 | img = Image.open(img_path) 32 | img = img.resize((new_width, new_height), Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is its equivalent 33 | 34 | # Save the downsampled image in the new folder with the same nested architecture 35 | dest_path = os.path.join( 36 | dest_folder, os.path.relpath(img_path, source_folder) 37 | ) 38 | os.makedirs(os.path.dirname(dest_path), exist_ok=True) 39 | dest_path = dest_path[:-3] + "jpg" 40 | img.save(dest_path, "JPEG") 41 | cnt_images += 1 42 | if cnt_images % 100 == 0: 43 | print(f"Saved {cnt_images} images") 44 | print(f"Saved {cnt_images} images in total") 45 | if write_annotations: 46 | # Downsample the corresponding annotations 47 | with open(annotation_file) as f: 48 | annotations = json.load(f) 49 | for annotation in annotations["images"]: 50 | annotation["width"] = new_width 51 | annotation["height"] = new_height 52 | new_img_path = os.path.join( 53 | dest_folder, 54 | os.path.relpath(annotation["img_path"], source_folder).removeprefix( 55 | "../../" 56 | ), 57 | ) 58 | new_img_path = new_img_path[:-3] + "jpg" 59 | annotation["img_path"] = new_img_path 60 | for annotation in annotations["annotations"]: 61 | annotation["bbox"] = [round(x * ratio) for x in annotation["bbox"]] 62 | for keypoint_name, keypoint in annotation["keypoints"].items(): 63 | keypoint["x"] = round(keypoint["x"] * ratio) 64 | keypoint["y"] = round(keypoint["y"] * ratio) 65 | for index, coord in enumerate(annotation["coco_keypoints"]): 66 | if index % 3 != 2: 67 | annotation["coco_keypoints"][index] = round(coord * ratio) 68 | # Save the modified annotations in a new JSON file with the same nested architecture 69 | dest_annotation_file = os.path.join( 70 | dest_folder, os.path.relpath(annotation_file, source_folder) 71 | ) 72 | os.makedirs(os.path.dirname(dest_annotation_file), exist_ok=True) 73 | with open(dest_annotation_file, "w") as f: 74 | json.dump(annotations, f, ensure_ascii=False, indent=4) 75 | 76 | 77 | if __name__ == "__main__": 78 | downsample_images( 79 | "path_to_rich_dataset", 80 | "rich_downsampled/annotations.json", 81 | write_images=False, 82 | write_annotations=True, 83 | ) 84 |
-------------------------------------------------------------------------------- /synthpose/Infinity/reconstruction.py: --------------------------------------------------------------------------------
1 | import numpy as np 2 | import pyrender 3 | import smplx 4 |
import torch 5 | import trimesh 6 | from constants import AUGMENTED_VERTICES_INDEX_DICT, JOINT_NAMES 7 | 8 | 9 | def get_axis_angle_from_ann(ann, start_index, end_index): 10 | quaternions = [ 11 | ann["quaternions"][joint_name] 12 | for joint_name in JOINT_NAMES[start_index:end_index] 13 | ] 14 | axis_angle = [] 15 | for quaternion in quaternions: 16 | norm = np.sqrt(quaternion[1] ** 2 + quaternion[2] ** 2 + quaternion[3] ** 2) 17 | angle = 2 * np.arctan2( 18 | norm, 19 | quaternion[0], 20 | ) 21 | if angle == 0: 22 | axis_angle.append([0, 0, 0]) 23 | else: 24 | axis_angle.append( 25 | [ 26 | angle * quaternion[1] / norm, 27 | angle * quaternion[2] / norm, 28 | angle * quaternion[3] / norm, 29 | ] 30 | ) 31 | 32 | return torch.tensor(axis_angle, dtype=torch.float32).reshape([1, -1]) 33 | 34 | 35 | def get_body_pose(ann): 36 | return get_axis_angle_from_ann(ann, 1, 22) 37 | 38 | 39 | def get_left_hand_pose(ann): 40 | return get_axis_angle_from_ann(ann, 25, 40) 41 | 42 | 43 | def get_right_hand_pose(ann): 44 | return get_axis_angle_from_ann(ann, 40, 55) 45 | 46 | 47 | def get_left_eye_pose(ann): 48 | return get_axis_angle_from_ann(ann, 23, 24) 49 | 50 | 51 | def get_right_eye_pose(ann): 52 | return get_axis_angle_from_ann(ann, 24, 25) 53 | 54 | 55 | def get_jaw_pose(ann): 56 | return get_axis_angle_from_ann(ann, 22, 23) 57 | 58 | 59 | def get_poses(ann): 60 | return { 61 | "body_pose": get_body_pose(ann), 62 | "left_hand_pose": get_left_hand_pose(ann), 63 | "right_hand_pose": get_right_hand_pose(ann), 64 | "leye_pose": get_left_eye_pose(ann), 65 | "reye_pose": get_right_eye_pose(ann), 66 | "jaw_pose": get_jaw_pose(ann), 67 | } 68 | 69 | 70 | def get_smplx_model( 71 | model_folder, 72 | gender, 73 | betas, 74 | poses, 75 | ): 76 | return smplx.create( 77 | model_folder, 78 | model_type="smplx", 79 | gender=gender, 80 | use_face_contour=False, 81 | num_betas=10, 82 | num_expression_coeffs=10, 83 | ext="npz", 84 | betas=betas, 85 | body_pose=poses["body_pose"], 86 | left_hand_pose=poses["left_hand_pose"], 87 | right_hand_pose=poses["right_hand_pose"], 88 | jaw_pose=poses["jaw_pose"], 89 | leye_pose=poses["leye_pose"], 90 | reye_pose=poses["reye_pose"], 91 | use_pca=False, 92 | flat_hand_mean=True, 93 | ) 94 | 95 | 96 | def get_vertices_and_joints(model, betas): 97 | output = model(betas=betas, expression=None, return_verts=True) 98 | vertices = output.vertices.detach().cpu().numpy().squeeze() 99 | joints = output.joints.detach().cpu().numpy().squeeze() 100 | 101 | return vertices, joints 102 | 103 | 104 | def get_augmented_vertices(vertices): 105 | return np.array( 106 | [vertices[vertex] for vertex in AUGMENTED_VERTICES_INDEX_DICT.values()] 107 | ) 108 | 109 | 110 | def show_mesh( 111 | scene, 112 | viewer, 113 | vertices, 114 | augmented_vertices, 115 | model, 116 | joints, 117 | plot_augmented_vertices=True, 118 | plot_joints=False, 119 | nodes=[], 120 | ): 121 | vertex_colors = np.ones([vertices.shape[0], 4]) * [0.3, 0.3, 0.3, 0.8] 122 | tri_mesh = trimesh.Trimesh(vertices, model.faces, vertex_colors=vertex_colors) 123 | 124 | mesh = pyrender.Mesh.from_trimesh(tri_mesh) 125 | viewer.render_lock.acquire() 126 | for node in nodes: 127 | scene.remove_node(node) 128 | 129 | nodes = [scene.add(mesh, "body")] 130 | 131 | if plot_joints: 132 | sm = trimesh.creation.uv_sphere(radius=0.005) 133 | sm.visual.vertex_colors = [0.9, 0.1, 0.1, 1.0] 134 | tfs = np.tile(np.eye(4), (len(joints), 1, 1)) 135 | tfs[:, :3, 3] = joints 136 | joints_pcl = pyrender.Mesh.from_trimesh(sm, poses=tfs) 137 | nodes += 
[scene.add(joints_pcl, name="joints")] 138 | 139 | if plot_augmented_vertices: 140 | sm = trimesh.creation.uv_sphere(radius=0.01) 141 | sm.visual.vertex_colors = [0.1, 0.1, 0.9, 1.0] 142 | tfs = np.tile(np.eye(4), (len(augmented_vertices), 1, 1)) 143 | tfs[:, :3, 3] = augmented_vertices 144 | joints_pcl = pyrender.Mesh.from_trimesh(sm, poses=tfs) 145 | nodes += [scene.add(joints_pcl, name="vertices")] 146 | 147 | viewer.render_lock.release() 148 | 149 | return nodes 150 |
-------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
1 | ## 🚀 SynthPose Update! 2 | 3 | SynthPose models are now available on Hugging Face Transformers 🤗, and you can test them in a few clicks in a dedicated Hugging Face Space! A minimal inference sketch is also included at the end of this README. 4 | - 🤗 Weights and model card: [synthpose-vitpose-base-hf](https://huggingface.co/yonigozlan/synthpose-vitpose-base-hf), [synthpose-vitpose-huge-hf](https://huggingface.co/yonigozlan/synthpose-vitpose-huge-hf) 5 | - 🤗 Space: [Synthpose-Markerless-MoCap-VitPose](https://huggingface.co/spaces/yonigozlan/Synthpose-Markerless-MoCap-VitPose) 6 | # OpenCapBench 7 | 8 | OpenCapBench is a benchmark designed to bridge the gap between pose estimation and biomechanics. It evaluates pose estimation models under physiological constraints using consistent kinematic metrics computed via [OpenSim](https://opensim.stanford.edu/). 9 | 10 | ![Pipeline Overview](docs/static/images/OCB_pipeline_main_new_colors.jpg) 11 | 12 | --- 13 | 14 | ## Features 15 | 16 | - Unified evaluation benchmark for biomechanics and pose estimation. 17 | - Integration with OpenSim for joint angle computations. 18 | - Fine-tuning models with **SynthPose**, enabling dense keypoint predictions for accurate kinematic analysis. 19 | - Tools to benchmark custom models on clinically relevant datasets. 20 | 21 | ## Installation 22 | - Clone this repository. 23 | - Install [mmpose](https://mmpose.readthedocs.io/en/latest/installation.html#) and the [opensim python package](https://opensimconfluence.atlassian.net/wiki/spaces/OpenSim/pages/53085346/Scripting+in+Python). 24 | - Download the [OpenCap data](https://simtk.org/projects/opencap) and place it in a "dataDir" of your choice. 25 | 26 | ## Usage 27 | Replace the example values with your own values and run the following. 28 | ```bash 29 | python benchmarking/benchmark.py \ 30 | --model_config_pose "mmpose_dir"/configs/body_2d_keypoint/topdown_heatmap/.../your_mmpose_model_config.py \ 31 | --model_ckpt_pose "your_mmpose_weights" \ 32 | --dataDir "your_dataDir" \ 33 | --dataName "hrnet48_final" 34 | ``` 35 | 36 | --- 37 | 38 | ## SynthPose: Fine-tuning Pose Models 39 | 40 | **SynthPose** fine-tunes pre-trained pose estimation models using synthetic datasets to predict arbitrarily dense sets of keypoints. 41 | 42 | ![SynthPose Pipeline](docs/static/images/fine_tuning_pipeline_synthpose.jpg) 43 | 44 | 45 | ## Installation 46 | 47 | - Clone this repository. 48 | - Download the synthetic data that you want to use to finetune your model (e.g., BEDLAM, VisionFit, etc.). 49 | - Download the [SMPL-X model](https://smpl-x.is.tue.mpg.de/) and place it in a folder named `models` at the root of this repository. 50 | - Install the pytorch, smplx, and pycocotools Python packages. 51 | --- 52 | 53 | ## Usage 54 | 55 | - Customize the SMPL/X vertices you want to finetune your model on in `synthpose/resource/vertices_keypoints_corr.csv`.
56 | - Use the "generate_dataset.py" scripts in `synthpose/"dataset_name"` to generate the 2D keypoint annotations corresponding to the vertices chosen in the previous step. 57 | - Finetune a pose estimation model on this generated dataset. For an example of how to do this, you can take a look at [my fork of mmpose](https://github.com/yonigozlan/mmpose) where I created an "Infinity" dataset where the keypoints correspond to anatomical markers. The mmpose documentation is a great resource for [learning how to finetune a 2D pose estimation model](https://mmpose.readthedocs.io/en/dev-1.x/user_guides/train_and_test.html#launch-training). 58 | 59 | --- 60 | 61 | ## Key Results 62 | 63 | Models finetuned with SynthPose to predict anatomical/MoCap markers demonstrate superior kinematics performance compared to models predicting COCO keypoints or SMPL meshes. 64 | Here are the results on OpenCapBench: 65 | 66 | ![Results Comparison](docs/static/images/OpenCapBench_cr.svg) 67 | 68 | --- 69 | 70 | ## Visual Results 71 | 72 | Examples of marker predictions from a SynthPose model predicting anatomical/MoCap markers on OpenCap dataset subjects: 73 | 74 | ![SynthPose Visualizations](docs/static/images/viz_1.png) 75 | ![SynthPose Visualizations](docs/static/images/viz_2.png) 76 | 77 | --- 78 | 79 | ## Citation 80 | 81 | If you find OpenCapBench useful in your research, please cite: 82 | 83 | ```bibtex 84 | @misc{gozlan2024opencapbenchbenchmarkbridgepose, 85 | title={OpenCapBench: A Benchmark to Bridge Pose Estimation and Biomechanics}, 86 | author={Yoni Gozlan and Antoine Falisse and Scott Uhlrich and Anthony Gatti and Michael Black and Akshay Chaudhari}, 87 | year={2024}, 88 | eprint={2406.09788}, 89 | archivePrefix={arXiv}, 90 | primaryClass={cs.CV}, 91 | url={https://arxiv.org/abs/2406.09788}, 92 | } 93 | ``` 94 | 95 | --- 96 | 97 | ## Links 98 | 99 | - [Paper (arXiv)](https://arxiv.org/abs/2406.09788) 100 | - [Supplementary Material](docs/static/pdfs/supplementary_material.pdf) 101 | - [GitHub Repository](https://github.com/StanfordMIMI/OpenCapBench) 102 | 103 | --- 104 | 105 | ## License 106 | 107 | This project is licensed under the [MIT License](LICENSE).
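
---

## Quick Inference Sketch (🤗 Transformers)

For convenience, here is a minimal sketch of running one of the SynthPose checkpoints mentioned at the top of this README through the Hugging Face Transformers VitPose API. It assumes a recent `transformers` release that ships `VitPoseForPoseEstimation`, and for simplicity it uses the whole image as the person bounding box; in a real pipeline you would first run a person detector and pass one box per person. The input filename is a placeholder.

```python
import torch
from PIL import Image
from transformers import AutoProcessor, VitPoseForPoseEstimation

# SynthPose checkpoint: VitPose fine-tuned to predict dense anatomical/MoCap keypoints.
checkpoint = "yonigozlan/synthpose-vitpose-base-hf"
processor = AutoProcessor.from_pretrained(checkpoint)
model = VitPoseForPoseEstimation.from_pretrained(checkpoint)

image = Image.open("person.jpg")  # placeholder input image
# VitPose is a top-down model: it expects one [x, y, w, h] box per person.
# Here we use the full image as a crude fallback box.
boxes = [[[0.0, 0.0, float(image.width), float(image.height)]]]

inputs = processor(image, boxes=boxes, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Convert the predicted heatmaps back to image-space keypoint coordinates and scores.
pose_results = processor.post_process_pose_estimation(outputs, boxes=boxes)
keypoints = pose_results[0][0]["keypoints"]  # (num_keypoints, 2)
scores = pose_results[0][0]["scores"]
print(keypoints.shape, scores.shape)
```

The resulting keypoints can then be written to a TRC file and fed to the OpenSim IK pipeline under `benchmarking/` for kinematic evaluation.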
108 | -------------------------------------------------------------------------------- /synthpose/RICH/utils.py: --------------------------------------------------------------------------------
1 | import torch 2 | import torch.nn as nn 3 | from smplx.lbs import transform_mat 4 | 5 | def extract_cam_param_xml(xml_path='', dtype=torch.float32): 6 | 7 | import xml.etree.ElementTree as ET 8 | tree = ET.parse(xml_path) 9 | 10 | extrinsics_mat = [float(s) for s in tree.find('./CameraMatrix/data').text.split()] 11 | intrinsics_mat = [float(s) for s in tree.find('./Intrinsics/data').text.split()] 12 | distortion_vec = [float(s) for s in tree.find('./Distortion/data').text.split()] 13 | 14 | focal_length_x = intrinsics_mat[0] 15 | focal_length_y = intrinsics_mat[4] 16 | center = torch.tensor([[intrinsics_mat[2], intrinsics_mat[5]]], dtype=dtype) 17 | 18 | rotation = torch.tensor([[extrinsics_mat[0], extrinsics_mat[1], extrinsics_mat[2]], 19 | [extrinsics_mat[4], extrinsics_mat[5], extrinsics_mat[6]], 20 | [extrinsics_mat[8], extrinsics_mat[9], extrinsics_mat[10]]], dtype=dtype) 21 | 22 | translation = torch.tensor([[extrinsics_mat[3], extrinsics_mat[7], extrinsics_mat[11]]], dtype=dtype) 23 | 24 | # t = -Rc --> c = -R^Tt 25 | cam_center = [ -extrinsics_mat[0]*extrinsics_mat[3] - extrinsics_mat[4]*extrinsics_mat[7] - extrinsics_mat[8]*extrinsics_mat[11], 26 | -extrinsics_mat[1]*extrinsics_mat[3] - extrinsics_mat[5]*extrinsics_mat[7] - extrinsics_mat[9]*extrinsics_mat[11], 27 | -extrinsics_mat[2]*extrinsics_mat[3] - extrinsics_mat[6]*extrinsics_mat[7] - extrinsics_mat[10]*extrinsics_mat[11]] 28 | 29 | cam_center = torch.tensor([cam_center], dtype=dtype) 30 | 31 | k1 = torch.tensor([distortion_vec[0]], dtype=dtype) 32 | k2 = torch.tensor([distortion_vec[1]], dtype=dtype) 33 | 34 | return focal_length_x, focal_length_y, center, rotation, translation, cam_center, k1, k2 35 | 36 | class CalibratedCamera(nn.Module): 37 | 38 | def __init__(self, calib_path='', rotation=None, translation=None, 39 | focal_length_x=None, focal_length_y=None, 40 | batch_size=1, 41 | center=None, dtype=torch.float32, **kwargs): 42 | super(CalibratedCamera, self).__init__() 43 | self.batch_size = batch_size 44 | self.dtype = dtype 45 | self.calib_path = calib_path 46 | # Make a buffer so that PyTorch does not complain when creating 47 | # the camera matrix 48 | self.register_buffer('zero', 49 | torch.zeros([batch_size], dtype=dtype)) 50 | 51 | import os.path as osp 52 | if not osp.exists(calib_path): 53 | raise FileNotFoundError("Couldn't find {}.".format(calib_path)) 54 | else: 55 | focal_length_x, focal_length_y, center, rotation, translation, cam_center, _, _ \ 56 | = extract_cam_param_xml(xml_path=calib_path, dtype=dtype) 57 | 58 | if focal_length_x is None or type(focal_length_x) == float: 59 | focal_length_x = torch.full( 60 | [batch_size], 61 | focal_length_x, 62 | dtype=dtype) 63 | 64 | if focal_length_y is None or type(focal_length_y) == float: 65 | focal_length_y = torch.full( 66 | [batch_size], 67 | focal_length_y, 68 | dtype=dtype) 69 | 70 | self.register_buffer('focal_length_x', focal_length_x) 71 | self.register_buffer('focal_length_y', focal_length_y) 72 | 73 | if center is None: 74 | center = torch.zeros([batch_size, 2], dtype=dtype) 75 | self.register_buffer('center', center) 76 | 77 | rotation = rotation.unsqueeze(dim=0).repeat(batch_size, 1, 1) 78 | rotation = nn.Parameter(rotation, requires_grad=False) 79 | 80 | self.register_parameter('rotation', rotation) 81 | 82 | if translation is None: 83 | translation
= torch.zeros([batch_size, 3], dtype=dtype) 84 | 85 | translation = translation.view(3, -1).repeat(batch_size, 1, 1).squeeze(dim=-1) 86 | translation = nn.Parameter(translation, requires_grad=False) 87 | self.register_parameter('translation', translation) 88 | 89 | cam_center = nn.Parameter(cam_center, requires_grad=False) 90 | self.register_parameter('cam_center', cam_center) 91 | 92 | def forward(self, points): 93 | device = points.device 94 | 95 | with torch.no_grad(): 96 | camera_mat = torch.zeros([self.batch_size, 2, 2], 97 | dtype=self.dtype, device=points.device) 98 | camera_mat[:, 0, 0] = self.focal_length_x 99 | camera_mat[:, 1, 1] = self.focal_length_y 100 | 101 | camera_transform = transform_mat(self.rotation, 102 | self.translation.unsqueeze(dim=-1)) 103 | homog_coord = torch.ones(list(points.shape)[:-1] + [1], 104 | dtype=points.dtype, 105 | device=device) 106 | # Convert the points to homogeneous coordinates 107 | points_h = torch.cat([points, homog_coord], dim=-1) 108 | 109 | projected_points = torch.einsum('bki,bji->bjk', 110 | [camera_transform, points_h]) 111 | 112 | img_points = torch.div(projected_points[:, :, :2], 113 | projected_points[:, :, 2].unsqueeze(dim=-1)) 114 | img_points = torch.einsum('bki,bji->bjk', [camera_mat, img_points]) \ 115 | + self.center.unsqueeze(dim=1) 116 | return img_points 117 | -------------------------------------------------------------------------------- /docs/static/js/bulma-slider.min.js: -------------------------------------------------------------------------------- 1 | !function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.bulmaSlider=e():t.bulmaSlider=e()}("undefined"!=typeof self?self:this,function(){return function(n){var r={};function i(t){if(r[t])return r[t].exports;var e=r[t]={i:t,l:!1,exports:{}};return n[t].call(e.exports,e,e.exports,i),e.l=!0,e.exports}return i.m=n,i.c=r,i.d=function(t,e,n){i.o(t,e)||Object.defineProperty(t,e,{configurable:!1,enumerable:!0,get:n})},i.n=function(t){var e=t&&t.__esModule?function(){return t.default}:function(){return t};return i.d(e,"a",e),e},i.o=function(t,e){return Object.prototype.hasOwnProperty.call(t,e)},i.p="",i(i.s=0)}([function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),n.d(e,"isString",function(){return l});var r=n(1),i=Object.assign||function(t){for(var e=1;e=l.length&&(s=!0)):s=!0),s&&(t.once&&(u[e]=null),t.callback(r))});-1!==u.indexOf(null);)u.splice(u.indexOf(null),1)}}]),e}();e.a=i}]).default}); -------------------------------------------------------------------------------- /benchmarking/opensimPipeline/Models/RajagopalModified2016_markers_mmpose.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | /bodyset/torso 8 | 9 | 0.0031549999999999998 0.3715 0 10 | 11 | 12 | 13 | /bodyset/torso 14 | 15 | 0.0031549999999999998 0.3715 0.17000000000000001 16 | 17 | 18 | 19 | /bodyset/torso 20 | 21 | 0.0031549999999999998 0.3715 -0.17000000000000001 22 | 23 | 24 | 25 | /bodyset/pelvis 26 | 27 | -0.020276000000000002 -0.078490000000000004 0.094759999999999997 28 | 29 | 30 | 31 | /bodyset/pelvis 32 | 33 | -0.020276000000000002 -0.078490000000000004 -0.094759999999999997 34 | 35 | 36 | 37 | /bodyset/pelvis 38 | 39 | -0.020276000000000002 -0.078490000000000004 0 40 | 41 | 42 | 43 | /bodyset/tibia_r 44 | 45 | -0.0080895399999999992 -0.0035347999999999998 -0.00148474 46 | 47 | 48 | 49 | 
/bodyset/tibia_l 50 | 51 | -0.0080895399999999992 -0.0035347999999999998 0.00148474 52 | 53 | 54 | 55 | /bodyset/tibia_r 56 | 57 | -0.014 -0.38 0.0074999999999999997 58 | 59 | 60 | 61 | /bodyset/tibia_l 62 | 63 | -0.014 -0.38 -0.0074999999999999997 64 | 65 | 66 | 67 | /bodyset/calcn_r 68 | 69 | 0.018205985755086355 0.01 -0.020246741086146904 70 | 71 | 72 | 73 | /bodyset/calcn_l 74 | 75 | 0.018205985755086355 0.01 0.020246741086146904 76 | 77 | 78 | 79 | /bodyset/humerus_r 80 | 81 | 0.013143999999999999 -0.286273 -0.0095949999999999994 82 | 83 | 84 | 85 | /bodyset/humerus_l 86 | 87 | 0.013143999999999999 -0.286273 0.0095949999999999994 88 | 89 | 90 | 91 | /bodyset/hand_r 92 | 93 | 0 0 0 94 | 95 | 96 | 97 | /bodyset/hand_l 98 | 99 | 0 0 0 100 | 101 | 102 | 103 | /bodyset/calcn_r 104 | 105 | 0.208195 0.00312648 0.0189188 106 | 107 | 108 | 109 | /bodyset/calcn_l 110 | 111 | 0.208195 0.00312648 -0.0189188 112 | 113 | 114 | 115 | 116 | 117 | -------------------------------------------------------------------------------- /benchmarking/calibration.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | import pickle 4 | 5 | 6 | def saveCameraParameters(filename, CameraParams): 7 | if not os.path.exists(os.path.dirname(filename)): 8 | os.makedirs(os.path.dirname(filename), exist_ok=True) 9 | 10 | open_file = open(filename, "wb") 11 | pickle.dump(CameraParams, open_file) 12 | open_file.close() 13 | 14 | return True 15 | 16 | 17 | def loadCameraParameters(filename): 18 | open_file = open(filename, "rb") 19 | cameraParams = pickle.load(open_file) 20 | 21 | open_file.close() 22 | return cameraParams 23 | 24 | 25 | def getCameraParameters(sessionMetadata, sessionDir): 26 | # Get checkerboard parameters from metadata. 27 | CheckerBoardParams = { 28 | "dimensions": ( 29 | sessionMetadata["checkerBoard"]["black2BlackCornersWidth_n"], 30 | sessionMetadata["checkerBoard"]["black2BlackCornersHeight_n"], 31 | ), 32 | "squareSize": sessionMetadata["checkerBoard"]["squareSideLength_mm"], 33 | } 34 | # Camera directories and models. 35 | cameraDirectories = {} 36 | cameraModels = {} 37 | for pathCam in glob.glob(os.path.join(sessionDir, "Videos", "Cam*")): 38 | if os.name == "nt": # windows 39 | camName = pathCam.split("\\")[-1] 40 | elif os.name == "posix": # ubuntu 41 | camName = pathCam.split("/")[-1] 42 | cameraDirectories[camName] = os.path.join(sessionDir, "Videos", pathCam) 43 | cameraModels[camName] = sessionMetadata["iphoneModel"][camName] 44 | 45 | # Get cameras' intrinsics and extrinsics. 46 | # Load parameters if saved, compute and save them if not. 47 | CamParamDict = {} 48 | loadedCamParams = {} 49 | for camName in cameraDirectories: 50 | camDir = cameraDirectories[camName] 51 | # Intrinsics ###################################################### 52 | # Intrinsics and extrinsics already exist for this session. 53 | if os.path.exists(os.path.join(camDir, "cameraIntrinsicsExtrinsics.pickle")): 54 | print("Load extrinsics for {} - already existing".format(camName)) 55 | CamParams = loadCameraParameters( 56 | os.path.join(camDir, "cameraIntrinsicsExtrinsics.pickle") 57 | ) 58 | loadedCamParams[camName] = True 59 | 60 | # Extrinsics do not exist for this session. 61 | # else: 62 | # print("Compute extrinsics for {} - not yet existing".format(camName)) 63 | # # Intrinsics ################################################## 64 | # # Intrinsics directories. 
65 | # intrinsicDir = os.path.join( 66 | # baseDir, "CameraIntrinsics", cameraModels[camName] 67 | # ) 68 | # permIntrinsicDir = os.path.join(intrinsicDir, intrinsicsFinalFolder) 69 | # # Intrinsics exist. 70 | # if os.path.exists(permIntrinsicDir): 71 | # CamParams = loadCameraParameters( 72 | # os.path.join(permIntrinsicDir, "cameraIntrinsics.pickle") 73 | # ) 74 | # # Intrinsics do not exist throw an error. Eventually the 75 | # # webapp will give you the opportunity to compute them. 76 | 77 | # else: 78 | # exception = "Intrinsics don't exist for your camera model. OpenCap supports all iOS devices released in 2018 or later: https://www.opencap.ai/get-started." 79 | # raise Exception(exception, exception) 80 | 81 | # # Extrinsics ################################################## 82 | # # Compute extrinsics from images popped out of this trial. 83 | # # Hopefully you get a clean shot of the checkerboard in at 84 | # # least one frame of each camera. 85 | # useSecondExtrinsicsSolution = ( 86 | # alternateExtrinsics is not None and camName in alternateExtrinsics 87 | # ) 88 | # pathVideoWithoutExtension = os.path.join( 89 | # camDir, "InputMedia", trialName, trial_id 90 | # ) 91 | # extension = getVideoExtension(pathVideoWithoutExtension) 92 | # extrinsicPath = os.path.join( 93 | # camDir, "InputMedia", trialName, trial_id + extension 94 | # ) 95 | 96 | # # Modify intrinsics if camera view is rotated 97 | # CamParams = rotateIntrinsics(CamParams, extrinsicPath) 98 | 99 | # # for 720p, imageUpsampleFactor=4 is best for small board 100 | # try: 101 | # CamParams = calcExtrinsicsFromVideo( 102 | # extrinsicPath, 103 | # CamParams, 104 | # CheckerBoardParams, 105 | # visualize=False, 106 | # imageUpsampleFactor=imageUpsampleFactor, 107 | # useSecondExtrinsicsSolution=useSecondExtrinsicsSolution, 108 | # ) 109 | # except Exception as e: 110 | # if len(e.args) == 2: # specific exception 111 | # raise Exception(e.args[0], e.args[1]) 112 | # elif len(e.args) == 1: # generic exception 113 | # exception = "Camera calibration failed. Verify your setup and try again. Visit https://www.opencap.ai/best-pratices to learn more about camera calibration and https://www.opencap.ai/troubleshooting for potential causes for a failed calibration." 114 | # raise Exception(exception, traceback.format_exc()) 115 | # loadedCamParams[camName] = False 116 | 117 | # Append camera parameters. 118 | if CamParams is not None: 119 | CamParamDict[camName] = CamParams.copy() 120 | else: 121 | CamParamDict[camName] = None 122 | 123 | # Save parameters if not existing yet. 124 | if not all([loadedCamParams[i] for i in loadedCamParams]): 125 | for camName in CamParamDict: 126 | saveCameraParameters( 127 | os.path.join( 128 | cameraDirectories[camName], "cameraIntrinsicsExtrinsics.pickle" 129 | ), 130 | CamParamDict[camName], 131 | ) 132 | 133 | return CamParamDict, cameraDirectories 134 | 135 | 136 | def getCamAnglesOffsets(sessionMetadata, CamParamDict): 137 | # Detect if checkerboard is upside down. 138 | upsideDownChecker = isCheckerboardUpsideDown(CamParamDict) 139 | # Get rotation angles from motion capture environment to OpenSim. 140 | # Space-fixed are lowercase, Body-fixed are uppercase. 
141 | checkerBoardMount = sessionMetadata["checkerBoard"]["placement"] 142 | if checkerBoardMount == "backWall" and not upsideDownChecker: 143 | rotationAngles = {"y": 90, "z": 180} 144 | elif checkerBoardMount == "backWall" and upsideDownChecker: 145 | rotationAngles = {"y": -90} 146 | elif checkerBoardMount == "backWall_largeCB": 147 | rotationAngles = {"y": -90} 148 | # TODO: uppercase? 149 | elif checkerBoardMount == "backWall_walking": 150 | rotationAngles = {"YZ": (-90, 180)} 151 | elif checkerBoardMount == "ground": 152 | rotationAngles = {"x": -90, "y": 90} 153 | elif checkerBoardMount == "ground_jumps": # for sub1 154 | rotationAngles = {"x": 90, "y": 180} 155 | elif checkerBoardMount == "ground_gaits": # for sub1 156 | rotationAngles = {"x": 90, "y": 90} 157 | else: 158 | raise Exception( 159 | "checkerBoard placement value in\ 160 | sessionMetadata.yaml is not currently supported" 161 | ) 162 | 163 | return rotationAngles 164 | 165 | 166 | def isCheckerboardUpsideDown(CameraParams): 167 | # With backwall orientation, R[1,1] will always be positive in correct orientation 168 | # and negative if upside down 169 | # Default if no camera params (which is a garbage case anyway); set before the loop so the detected value is not overwritten 170 | upsideDown = False 171 | for cam in list(CameraParams.keys()): 172 | if CameraParams[cam] is not None: 173 | upsideDown = CameraParams[cam]["rotation"][1, 1] < 0 174 | break 175 | 176 | return upsideDown 177 |
-------------------------------------------------------------------------------- /benchmarking/opensimPipeline/IK/Setup_IK_mmpose.xml: --------------------------------------------------------------------------------
1 | 2 | 3 | 4 | 5 | Unassigned 6 | 7 | Unassigned 8 | 9 | Inf 10 | 11 | 1.0000000000000001e-05 12 | 13 | 14 | 15 | 16 | 17 | true 18 | 19 | 5 20 | 21 | 22 | 23 | true 24 | 25 | 5 26 | 27 | 28 | 29 | true 30 | 31 | 5 32 | 33 | 34 | 35 | true 36 | 37 | 25 38 | 39 | 40 | 41 | true 42 | 43 | 25 44 | 45 | 46 | 47 | true 48 | 49 | 25 50 | 51 | 52 | 53 | true 54 | 55 | 30 56 | 57 | 58 | 59 | true 60 | 61 | 30 62 | 63 | 64 | 65 | true 66 | 67 | 30 68 | 69 | 70 | 71 | true 72 | 73 | 60 74 | 75 | 76 | 77 | true 78 | 79 | 30 80 | 81 | 82 | 83 | true 84 | 85 | 30 86 | 87 | 88 | 89 | true 90 | 91 | 30 92 | 93 | 94 | 95 | true 96 | 97 | 60 98 | 99 | 100 | 101 | true 102 | 103 | 5 104 | 105 | 106 | 107 | true 108 | 109 | 5 110 | 111 | 112 | 113 | true 114 | 115 | 5 116 | 117 | 118 | 119 | true 120 | 121 | 5 122 | 123 | 124 | 125 | 126 | 127 | Unassigned 128 | 129 | Unassigned 130 | 131 | -Inf Inf 132 | 133 | true 134 | 135 | Unassigned 136 | 137 | false 138 | 139 | 140 |
-------------------------------------------------------------------------------- /synthpose/Infinity/video_scene.py: --------------------------------------------------------------------------------
1 | import glob 2 | import os 3 | import os.path as osp 4 | import zipfile 5 | from typing import Optional 6 | 7 | import cv2 8 | import numpy as np 9 | from constants import JOINT_NAMES, K1, K2 10 | from pycocotools.coco import COCO 11 | from scipy.spatial.transform import Rotation 12 | 13 | 14 | class VideoScene: 15 | def __init__( 16 | self, 17 | path_to_example: Optional[str] = None, 18 | exercise: Optional[str] = None, 19 | index_video: Optional[int] = None, 20 | video_name="video", 21 | labels_name="labels", 22 | segmentation_name="segmentation", 23 | ): 24 | if path_to_example is None: 25 | self.json_path = ( 26 | f"synthetic_finetuning/data/api_example/{exercise}/{labels_name}.json" 27 | ) 28 | self.video_path = ( 29 |
f"synthetic_finetuning/data/api_example/{exercise}/{video_name}.mp4" 30 | ) 31 | iseg_paths = sorted( 32 | glob.glob( 33 | osp.join( 34 | f"synthetic_finetuning/data/api_example/{exercise}/{segmentation_name}", 35 | "*.iseg.*.png", 36 | ) 37 | ) 38 | ) 39 | 40 | nb_elements = len( 41 | glob.glob( 42 | osp.join( 43 | f"synthetic_finetuning/data/api_example/{exercise}/{segmentation_name}", 44 | "image.000000.iseg.*.png", 45 | ) 46 | ) 47 | ) 48 | 49 | else: 50 | self.json_path = osp.join(path_to_example, f"{labels_name}.json") 51 | self.video_path = osp.join(path_to_example, f"{video_name}.mp4") 52 | iseg_paths_glob = osp.join( 53 | path_to_example, f"{segmentation_name}/*.iseg.*.png" 54 | ) 55 | iseg_path = osp.join(path_to_example, segmentation_name) 56 | # check if path exists 57 | if not sorted(glob.glob(iseg_paths_glob)): 58 | # create directory 59 | os.makedirs(iseg_path) 60 | with zipfile.ZipFile(f"{iseg_path}.zip", "r") as zip_ref: 61 | zip_ref.extractall(iseg_path) 62 | 63 | iseg_paths = sorted(glob.glob(iseg_paths_glob)) 64 | 65 | iseg_elements_glob = osp.join( 66 | path_to_example, f"{segmentation_name}/image.000000.iseg.*.png" 67 | ) 68 | nb_elements = len(glob.glob(iseg_elements_glob)) 69 | if not iseg_paths: 70 | archive = zipfile.ZipFile(f"{path_to_example}.zip", "r") 71 | imgdata = archive.read("img_01.png") 72 | 73 | self.iseg_paths = [ 74 | path for path in iseg_paths if int(path.split(".")[-2]) % nb_elements == 0 75 | ] 76 | 77 | self.coco = COCO(self.json_path) 78 | self.infos = self.coco.dataset.get("info") 79 | self.scene_id = self.infos["scene_id"] 80 | if "camera_K_matrix" in self.infos: 81 | self.K = np.array(self.infos["camera_K_matrix"]) 82 | else: 83 | self.K = K2 if self.scene_id == "4578713" else K1 84 | if "camera_RT_matrix" in self.infos: 85 | self.current_extrinsic_matrix = np.array(self.infos["camera_RT_matrix"]) 86 | self.current_img = None 87 | self.current_ann = None 88 | self.current_seg = None 89 | self.current_vertices = None 90 | 91 | self.cap = cv2.VideoCapture(self.video_path) 92 | self.fps = int(self.cap.get(cv2.CAP_PROP_FPS)) 93 | self.nb_frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) 94 | self.dims = ( 95 | int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)), 96 | int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), 97 | ) 98 | self.mean_accuracy = [] 99 | 100 | def load_frame(self, index_frame: int): 101 | img_data = list(self.coco.imgs.values())[index_frame] 102 | img_id = img_data["id"] 103 | ann_ids = self.coco.getAnnIds(imgIds=img_id) 104 | anns = self.coco.loadAnns(ann_ids) 105 | self.cap.set(cv2.CAP_PROP_POS_FRAMES, index_frame) 106 | res, img = self.cap.read() 107 | if not res: 108 | img = None 109 | 110 | anns = [ann for ann in anns if "armature_keypoints" in ann] 111 | if len(anns) > 1: 112 | print("Multiple humans detected") 113 | ann = anns[0] 114 | 115 | self.current_img = img 116 | self.current_ann = ann 117 | self.current_seg = cv2.imread(self.iseg_paths[index_frame]) 118 | 119 | return img, ann, self.infos 120 | 121 | def compute_extrinsic_matrix(self, joints: np.ndarray): 122 | image_points = np.array( 123 | [ 124 | [ 125 | self.current_ann["armature_keypoints"][joint_name]["x"], 126 | self.current_ann["armature_keypoints"][joint_name]["y"], 127 | ] 128 | for joint_name in JOINT_NAMES[:55] 129 | ], 130 | dtype=np.float32, 131 | ) 132 | 133 | _, rvec, tvec = cv2.solvePnP(joints[:55], image_points, self.K, None) 134 | R, _ = cv2.Rodrigues(rvec) 135 | T = np.concatenate([R, tvec], axis=1) 136 | 137 | self.current_extrinsic_matrix = T 138 | 
139 |     def align_3d_vertices(self, joints: np.ndarray, augmented_vertices: np.ndarray):
140 |         image_points = np.array(
141 |             [
142 |                 [
143 |                     self.current_ann["armature_keypoints"][joint_name]["x_global"],
144 |                     self.current_ann["armature_keypoints"][joint_name]["y_global"],
145 |                     self.current_ann["armature_keypoints"][joint_name]["z_global"],
146 |                 ]
147 |                 for joint_name in JOINT_NAMES[:55]
148 |             ],
149 |             dtype=np.float32,
150 |         )
151 |         translation_image_points = np.mean(image_points, axis=0)
152 |         centered_image_points = image_points - translation_image_points
153 |         smplx_points = joints[:55]
154 |         translation_smplx_points = np.mean(smplx_points, axis=0)
155 |         centered_smplx_points = smplx_points - translation_smplx_points
156 | 
157 |         R, loss = Rotation.align_vectors(centered_image_points, centered_smplx_points)
158 |         # print("align vectors loss", loss)
159 |         R = R.as_matrix()
160 |         RT = np.concatenate([R, translation_image_points[:, np.newaxis]], axis=1)
161 | 
162 |         homogeneous_augmented_vertices = np.concatenate(
163 |             [
164 |                 augmented_vertices - translation_smplx_points,
165 |                 np.ones([augmented_vertices.shape[0], 1]),
166 |             ],
167 |             axis=1,
168 |         )
169 | 
170 |         return np.matmul(homogeneous_augmented_vertices, RT.T)
171 | 
172 |     def compute_2d_projection(
173 |         self, joints: np.ndarray, vertices: np.ndarray, method: str = "pnp"
174 |     ):
175 |         if method == "pnp":
176 |             self.compute_extrinsic_matrix(joints)
177 |         elif method == "align_3d":
178 |             vertices = self.align_3d_vertices(joints, vertices)
179 | 
180 |         vertices = np.concatenate([vertices, np.ones([vertices.shape[0], 1])], axis=1)
181 |         vertices = np.matmul(vertices, self.current_extrinsic_matrix.T)
182 |         vertices = np.matmul(vertices, self.K.T)
183 |         vertices = vertices[:, :2] / vertices[:, 2:]
184 | 
185 |         self.current_vertices = vertices
186 | 
187 |         return vertices
188 | 
189 |     def compute_reprojection_accuracy(self):
190 |         outside_vertices = 0
191 |         for vertex in self.current_vertices:
192 |             x, y = int(vertex[0]), int(vertex[1])
193 |             # The segmentation image is indexed [row, col] = [y, x], so x is
194 |             # bounded by the width (shape[1]) and y by the height (shape[0]).
195 |             if (
196 |                 0 <= x < self.current_seg.shape[1]
197 |                 and 0 <= y < self.current_seg.shape[0]
198 |                 and not self.current_seg[y, x].any()
199 |             ):
200 |                 outside_vertices += 1
201 | 
202 |         accuracy = 1 - outside_vertices / self.current_vertices.shape[0]
203 |         self.mean_accuracy.append(accuracy)
204 | 
205 |         return accuracy
206 | 
207 |     def get_mean_accuracy(self):
208 |         return np.mean(self.mean_accuracy)
-------------------------------------------------------------------------------- /benchmarking/opensimPipeline/Models/RajagopalModified2016_markers_openpose.xml: --------------------------------------------------------------------------------
[OpenSim MarkerSet for the modified Rajagopal 2016 model; the XML tags were lost in extraction and the marker names are unrecoverable. Surviving values: 20 markers with their parent frames (/bodyset/torso, /bodyset/pelvis, /bodyset/tibia_r and _l, /bodyset/calcn_r and _l, /bodyset/humerus_r and _l, /bodyset/hand_r and _l) and 3D location offsets.]
-------------------------------------------------------------------------------- /benchmarking/opensimPipeline/IK/Setup_IK_openpose.xml: --------------------------------------------------------------------------------
[OpenSim InverseKinematicsTool setup; the XML tags were lost in extraction and the marker-task names are unrecoverable. Surviving values: model/marker/coordinate files "Unassigned", constraint weight Inf, accuracy 1.0000000000000001e-05, time range -Inf Inf, report_errors true, report_marker_locations false, and 20 enabled IKMarkerTask entries with weights between 5 and 60.]
-------------------------------------------------------------------------------- /synthpose/Infinity/generate_dataset.py: --------------------------------------------------------------------------------
1 | import glob
2 | import json
3 | import os
4 | from copy import deepcopy
5 | 
6 | import cv2
7 | import numpy as np
8 | import reconstruction
9 | import torch
10 | from PIL import Image
11 | from tqdm import tqdm
12 | from video_scene import VideoScene
13 | 
14 | from ..constants import AUGMENTED_VERTICES_NAMES, COCO_VERTICES_NAME, MODEL_FOLDER
15 | 
16 | 
17 | class DatasetGenerator:
18 |     def __init__(
19 |         self,
20 |         data_folder: str,
21 |         samples_per_video: int = 8,
22 |         # sample_rate: int = 0.3,
23 |         method="align_3d",
24 |         output_path="infinity_dataset_combined",
25 |         shift=0,
26 |         split=0.8,
27 |         infinity_version="v0.1.0",
28 |     ):
29 |         self.data_folder = data_folder
30 |         self.samples_per_video = samples_per_video
31 |         self.method = method
32 |         self.output_path_train = os.path.join(output_path, "train")
33 |         self.output_path_test = os.path.join(output_path, "test")
34 |         self.shift = shift
35 |         self.split = split
36 |         if infinity_version == "v0.1.0":
37 |             self.video_name = "video"
38 |             self.labels_name = "labels"
39 |             self.segmentation_name = "segmentation"
40 |         else:
"video.rgb" 42 | self.labels_name = "video.rgb" 43 | self.segmentation_name = "video.rgb" 44 | self.video_paths = sorted( 45 | glob.glob(os.path.join(data_folder, f"*/{self.video_name}.mp4")) 46 | ) 47 | self.data_dict_train = { 48 | "infos": {}, 49 | "images": [], 50 | "annotations": [], 51 | "categories": [], 52 | } 53 | 54 | self.data_dict_train["categories"] = [ 55 | { 56 | "id": 0, 57 | "augmented_keypoints": AUGMENTED_VERTICES_NAMES, 58 | "coco_keypoints": COCO_VERTICES_NAME, 59 | } 60 | ] 61 | self.data_dict_test = deepcopy(self.data_dict_train) 62 | self.total_source_images = 0 63 | self.total_missing_bboxes = 0 64 | self.total_error_reconstruction = 0 65 | 66 | if not os.path.exists(os.path.join(self.output_path_train, "images")): 67 | os.makedirs(os.path.join(self.output_path_train, "images")) 68 | if not os.path.exists(os.path.join(self.output_path_test, "images")): 69 | os.makedirs(os.path.join(self.output_path_test, "images")) 70 | 71 | def save_image_annotation( 72 | self, annotation_dict, img, video_scene, index_frame, mode: str = "train" 73 | ): 74 | if mode == "train": 75 | self.data_dict_train["annotations"].append(annotation_dict) 76 | img_name = f"{len(self.data_dict_train['images'])}.png" 77 | img_path = os.path.join(self.output_path_train, "images", img_name) 78 | Image.fromarray(cv2.cvtColor(np.uint8(img), cv2.COLOR_BGR2RGB)).save( 79 | img_path 80 | ) 81 | image_dict = { 82 | "id": len(self.data_dict_train["images"]), 83 | "width": video_scene.dims[0], 84 | "height": video_scene.dims[1], 85 | "frame_number": index_frame, 86 | "img_path": img_path, 87 | } 88 | self.data_dict_train["images"].append(image_dict) 89 | elif mode == "test": 90 | self.data_dict_test["annotations"].append(annotation_dict) 91 | img_name = f"{len(self.data_dict_test['images'])}.png" 92 | img_path = os.path.join(self.output_path_test, "images", img_name) 93 | Image.fromarray(cv2.cvtColor(np.uint8(img), cv2.COLOR_BGR2RGB)).save( 94 | img_path 95 | ) 96 | image_dict = { 97 | "id": len(self.data_dict_test["images"]), 98 | "width": video_scene.dims[0], 99 | "height": video_scene.dims[1], 100 | "frame_number": index_frame, 101 | "img_path": img_path, 102 | } 103 | self.data_dict_test["images"].append(image_dict) 104 | 105 | def generate_dataset(self): 106 | train_size = int(len(self.video_paths) * self.split) 107 | mode = "train" 108 | for index_video, video_path in enumerate(tqdm(self.video_paths)): 109 | if index_video == train_size: 110 | mode = "test" 111 | path = os.path.dirname(video_path) 112 | video_scene = VideoScene( 113 | path_to_example=path, 114 | video_name=self.video_name, 115 | labels_name=self.labels_name, 116 | segmentation_name=self.segmentation_name, 117 | ) 118 | indices_to_sample = list( 119 | set( 120 | np.linspace( 121 | 0, 122 | video_scene.nb_frames - 1, 123 | self.samples_per_video, 124 | dtype=np.int32, 125 | ) 126 | ) 127 | ) 128 | indices_to_sample = [index.item() for index in indices_to_sample] 129 | for index_frame in indices_to_sample: 130 | ( 131 | img, 132 | ann, 133 | groundtruth_landmarks, 134 | coco_landmarks, 135 | success, 136 | ) = self.get_grountruth_landmarks(video_scene, index_frame) 137 | 138 | self.total_source_images += 1 139 | if not success: 140 | self.total_error_reconstruction += 1 141 | continue 142 | if "bbox" not in ann: 143 | self.total_missing_bboxes 144 | continue 145 | 146 | annotation_dict = self.generate_annotation_dict(ann, mode=mode) 147 | annotation_dict["keypoints"] = groundtruth_landmarks 148 | annotation_dict["coco_keypoints"] = 
coco_landmarks 149 | self.save_image_annotation( 150 | annotation_dict, img, video_scene, index_frame, mode=mode 151 | ) 152 | 153 | with open( 154 | os.path.join(self.output_path_train, "annotations.json"), 155 | "w", 156 | encoding="utf-8", 157 | ) as f: 158 | json.dump(self.data_dict_train, f, ensure_ascii=False, indent=4) 159 | with open( 160 | os.path.join(self.output_path_test, "annotations.json"), 161 | "w", 162 | encoding="utf-8", 163 | ) as f: 164 | json.dump(self.data_dict_test, f, ensure_ascii=False, indent=4) 165 | 166 | print("total source images: ", self.total_source_images) 167 | print("total missing bboxes: ", self.total_missing_bboxes) 168 | print("total error reconstruction: ", self.total_error_reconstruction) 169 | 170 | def generate_annotation_dict(self, ann: dict, mode: str = "train"): 171 | annotation_dict = {} 172 | if mode == "train": 173 | annotation_dict["image_id"] = len(self.data_dict_train["images"]) 174 | else: 175 | annotation_dict["image_id"] = len(self.data_dict_test["images"]) 176 | annotation_dict["id"] = annotation_dict["image_id"] 177 | annotation_dict["category_id"] = 0 178 | annotation_dict["bbox"] = ann["bbox"] 179 | annotation_dict["percent_in_fov"] = ann["percent_in_fov"] 180 | annotation_dict["percent_occlusion"] = ann["percent_occlusion"] 181 | annotation_dict["iscrowd"] = 0 182 | 183 | return annotation_dict 184 | 185 | def get_grountruth_landmarks(self, video_scene: VideoScene, index_frame): 186 | img, ann, infos = video_scene.load_frame(index_frame) 187 | coco_landmarks = ann["keypoints"] 188 | gender = infos["avatar_presenting_gender"] 189 | betas = torch.tensor(infos["avatar_betas"], dtype=torch.float32).unsqueeze(0) 190 | poses = reconstruction.get_poses(ann) 191 | smplx_model = reconstruction.get_smplx_model(MODEL_FOLDER, gender, betas, poses) 192 | vertices, joints = reconstruction.get_vertices_and_joints(smplx_model, betas) 193 | augmented_vertices = reconstruction.get_augmented_vertices(vertices) 194 | projected_vertices = video_scene.compute_2d_projection( 195 | joints, augmented_vertices, method=self.method 196 | ) 197 | if np.isnan(projected_vertices).any(): 198 | return img, ann, {}, {}, False 199 | groundtruth_landmarks = { 200 | name: {"x": point[0], "y": point[1]} 201 | for name, point in zip(AUGMENTED_VERTICES_NAMES, projected_vertices) 202 | } 203 | 204 | # check if each landmark is out of frame (visible) or not: 205 | for name, point in groundtruth_landmarks.items(): 206 | if ( 207 | point["x"] < 0 208 | or point["y"] < 0 209 | or point["x"] > video_scene.dims[0] 210 | or point["y"] > video_scene.dims[1] 211 | ): 212 | groundtruth_landmarks[name]["v"] = 0 213 | else: 214 | groundtruth_landmarks[name]["v"] = 1 215 | 216 | return img, ann, groundtruth_landmarks, coco_landmarks, True 217 | 218 | 219 | if __name__ == "__main__": 220 | dataset_generator = DatasetGenerator( 221 | data_folder="path_to_infinity_dataset", 222 | method="align_3d", 223 | output_path="infinity_reannotated", 224 | samples_per_video=15, 225 | ) 226 | dataset_generator.generate_dataset() 227 | -------------------------------------------------------------------------------- /synthpose/constants.py: -------------------------------------------------------------------------------- 1 | import csv 2 | import json 3 | 4 | import numpy as np 5 | 6 | IMAGES_ROOT_PATH = "../ps/project/multi-ioi/rich_release" 7 | 8 | SEQ_NAMES = [ 9 | "Gym_010_cooking1", 10 | "Gym_010_dips1", 11 | "Gym_010_dips2", 12 | "Gym_010_lunge1", 13 | "Gym_010_lunge2", 14 | 
"Gym_010_mountainclimber1", 15 | "Gym_010_mountainclimber2", 16 | "Gym_010_pushup1", 17 | "Gym_010_pushup2", 18 | "Gym_011_burpee2", 19 | "Gym_011_cooking1", 20 | "Gym_011_cooking2", 21 | "Gym_011_dips1", 22 | "Gym_011_dips2", 23 | "Gym_011_dips3", 24 | "Gym_011_dips4", 25 | "Gym_011_pushup1", 26 | "Gym_011_pushup2", 27 | "Gym_012_cooking2", 28 | "Gym_012_lunge1", 29 | "Gym_012_lunge2", 30 | "Gym_012_pushup2", 31 | "Gym_013_burpee4", 32 | "Gym_013_dips1", 33 | "Gym_013_dips2", 34 | "Gym_013_dips3", 35 | "Gym_013_lunge1", 36 | "Gym_013_lunge2", 37 | "Gym_013_pushup1", 38 | "Gym_013_pushup2", 39 | "LectureHall_009_021_reparingprojector1", 40 | "LectureHall_009_sidebalancerun1", 41 | "LectureHall_010_plankjack1", 42 | "LectureHall_010_sidebalancerun1", 43 | "LectureHall_019_wipingchairs1", 44 | "LectureHall_021_plankjack1", 45 | "LectureHall_021_sidebalancerun1", 46 | "ParkingLot2_009_burpeejump1", 47 | "ParkingLot2_009_burpeejump2", 48 | "ParkingLot2_009_impro1", 49 | "ParkingLot2_009_impro2", 50 | "ParkingLot2_009_impro5", 51 | "ParkingLot2_009_overfence1", 52 | "ParkingLot2_009_overfence2", 53 | "ParkingLot2_009_spray1", 54 | "ParkingLot2_017_burpeejump1", 55 | "ParkingLot2_017_burpeejump2", 56 | "ParkingLot2_017_eating1", 57 | "ParkingLot2_017_overfence1", 58 | "ParkingLot2_017_overfence2", 59 | "ParkingLot2_017_pushup1", 60 | "ParkingLot2_017_pushup2", 61 | ] 62 | 63 | SMPLX_MODEL_DIR = "../models/smplx" 64 | SET = "test" 65 | BODIES_PATH = "../" 66 | SCAN_CALIBRATION_PATH = "../scan_calibration" 67 | IMGEXT = json.load(open("../resource/imgext.json", "r")) 68 | GENDERS_PATH = "../resource/gender.json" 69 | 70 | 71 | def load_augmented_corr(): 72 | with open(AUGMENTED_VERTICES_FILE_PATH, "r", encoding="utf-8-sig") as data: 73 | augmented_vertices_index = list(csv.DictReader(data)) 74 | augmented_vertices_index_dict = { 75 | vertex["Name"]: int(vertex["Index"]) for vertex in augmented_vertices_index 76 | } 77 | 78 | return augmented_vertices_index_dict 79 | 80 | 81 | exercises = [ 82 | "V_UP", 83 | "SITUP", 84 | "BRIDGE", 85 | "BURPEE", 86 | "PUSHUP", 87 | "BIRD_DOG", 88 | "CRUNCHES", 89 | "SUPERMAN", 90 | "LEG_RAISE", 91 | "DOWNWARD_DOG", 92 | "UPPERCUT-LEFT", 93 | "CLAMSHELL-LEFT", 94 | "UPPERCUT-RIGHT", 95 | "CLAMSHELL-RIGHT", 96 | "LUNGE-CROSSBACK", 97 | "BEAR_CRAWL-HOLDS", 98 | "DONKEY_KICK-LEFT", 99 | "PUSHUP-EXPLOSIVE", 100 | "SQUAT-BODYWEIGHT", 101 | "DEADLIFT-DUMBBELL", 102 | "DONKEY_KICK-RIGHT", 103 | "PUSHUP-CLOSE_GRIP", 104 | "ARM_RAISE-DUMBBELL", 105 | "BICEP_CURL-BARBELL", 106 | "SQUAT-BACK-BARBELL", 107 | "SQUAT-GOBLET+SUMO-DUMBBELL", 108 | "PRESS-SINGLE_ARM-DUMBBELL-LEFT", 109 | "BICEP_CURL-ALTERNATING-DUMBBELL", 110 | "PRESS-SINGLE_ARM-DUMBBELL-RIGHT", 111 | "PUSH_PRESS-SINGLE_ARM-DUMBBELL-LEFT", 112 | "PUSH_PRESS-SINGLE_ARM-DUMBBELL-RIGHT", 113 | "SPLIT_SQUAT-SINGLE_ARM-DUMBBELL-LEFT", 114 | "SPLIT_SQUAT-SINGLE_ARM-DUMBBELL-RIGHT", 115 | "TRICEP_KICKBACK-BENT_OVER+SINGLE_ARM-DUMBBELL-LEFT", 116 | "TRICEP_KICKBACK-BENT_OVER+SINGLE_ARM-DUMBBELL-RIGHT", 117 | ] 118 | 119 | 120 | K1 = np.array([[311.11, 0.0, 112.0], [0.0, 311.11, 112.0], [0.0, 0.0, 1.0]]) 121 | K2 = np.array([[245.0, 0.0, 112.0], [0.0, 245.0, 112.0], [0.0, 0.0, 1.0]]) 122 | 123 | AUGMENTED_VERTICES_FILE_PATH = "../resource/vertices_keypoints_corr.csv" 124 | AUGMENTED_VERTICES_INDEX_DICT = load_augmented_corr() 125 | AUGMENTED_VERTICES_NAMES = list(AUGMENTED_VERTICES_INDEX_DICT.keys()) 126 | COCO_VERTICES_NAME = [ 127 | "nose", 128 | "left_eye", 129 | "right_eye", 130 | "left_ear", 131 | "right_ear", 132 | 
"left_shoulder", 133 | "right_shoulder", 134 | "left_elbow", 135 | "right_elbow", 136 | "left_wrist", 137 | "right_wrist", 138 | "left_hip", 139 | "right_hip", 140 | "left_knee", 141 | "right_knee", 142 | "left_ankle", 143 | "right_ankle", 144 | ] 145 | 146 | MODEL_FOLDER = "../models" 147 | 148 | 149 | JOINT_NAMES = [ 150 | "pelvis", 151 | "left_hip", 152 | "right_hip", 153 | "spine1", 154 | "left_knee", 155 | "right_knee", 156 | "spine2", 157 | "left_ankle", 158 | "right_ankle", 159 | "spine3", 160 | "left_foot", 161 | "right_foot", 162 | "neck", 163 | "left_collar", 164 | "right_collar", 165 | "head", 166 | "left_shoulder", 167 | "right_shoulder", 168 | "left_elbow", 169 | "right_elbow", 170 | "left_wrist", 171 | "right_wrist", 172 | "jaw", 173 | "left_eye_smplhf", 174 | "right_eye_smplhf", 175 | "left_index1", 176 | "left_index2", 177 | "left_index3", 178 | "left_middle1", 179 | "left_middle2", 180 | "left_middle3", 181 | "left_pinky1", 182 | "left_pinky2", 183 | "left_pinky3", 184 | "left_ring1", 185 | "left_ring2", 186 | "left_ring3", 187 | "left_thumb1", 188 | "left_thumb2", 189 | "left_thumb3", 190 | "right_index1", 191 | "right_index2", 192 | "right_index3", 193 | "right_middle1", 194 | "right_middle2", 195 | "right_middle3", 196 | "right_pinky1", 197 | "right_pinky2", 198 | "right_pinky3", 199 | "right_ring1", 200 | "right_ring2", 201 | "right_ring3", 202 | "right_thumb1", 203 | "right_thumb2", 204 | "right_thumb3", 205 | "nose", 206 | "right_eye", 207 | "left_eye", 208 | "right_ear", 209 | "left_ear", 210 | "left_big_toe", 211 | "left_small_toe", 212 | "left_heel", 213 | "right_big_toe", 214 | "right_small_toe", 215 | "right_heel", 216 | "left_thumb", 217 | "left_index", 218 | "left_middle", 219 | "left_ring", 220 | "left_pinky", 221 | "right_thumb", 222 | "right_index", 223 | "right_middle", 224 | "right_ring", 225 | "right_pinky", 226 | "right_eye_brow1", 227 | "right_eye_brow2", 228 | "right_eye_brow3", 229 | "right_eye_brow4", 230 | "right_eye_brow5", 231 | "left_eye_brow5", 232 | "left_eye_brow4", 233 | "left_eye_brow3", 234 | "left_eye_brow2", 235 | "left_eye_brow1", 236 | "nose1", 237 | "nose2", 238 | "nose3", 239 | "nose4", 240 | "right_nose_2", 241 | "right_nose_1", 242 | "nose_middle", 243 | "left_nose_1", 244 | "left_nose_2", 245 | "right_eye1", 246 | "right_eye2", 247 | "right_eye3", 248 | "right_eye4", 249 | "right_eye5", 250 | "right_eye6", 251 | "left_eye4", 252 | "left_eye3", 253 | "left_eye2", 254 | "left_eye1", 255 | "left_eye6", 256 | "left_eye5", 257 | "right_mouth_1", 258 | "right_mouth_2", 259 | "right_mouth_3", 260 | "mouth_top", 261 | "left_mouth_3", 262 | "left_mouth_2", 263 | "left_mouth_1", 264 | "left_mouth_5", # 59 in OpenPose output 265 | "left_mouth_4", # 58 in OpenPose output 266 | "mouth_bottom", 267 | "right_mouth_4", 268 | "right_mouth_5", 269 | "right_lip_1", 270 | "right_lip_2", 271 | "lip_top", 272 | "left_lip_2", 273 | "left_lip_1", 274 | "left_lip_3", 275 | "lip_bottom", 276 | "right_lip_3", 277 | # Face contour 278 | "right_contour_1", 279 | "right_contour_2", 280 | "right_contour_3", 281 | "right_contour_4", 282 | "right_contour_5", 283 | "right_contour_6", 284 | "right_contour_7", 285 | "right_contour_8", 286 | "contour_middle", 287 | "left_contour_8", 288 | "left_contour_7", 289 | "left_contour_6", 290 | "left_contour_5", 291 | "left_contour_4", 292 | "left_contour_3", 293 | "left_contour_2", 294 | "left_contour_1", 295 | ] 296 | 297 | JSON_CATEGORIES = [ 298 | { 299 | "id": 0, 300 | "augmented_keypoints": [ 301 | "sternum", 302 | 
"rshoulder", 303 | "lshoulder", 304 | "r_lelbow", 305 | "l_lelbow", 306 | "r_melbow", 307 | "l_melbow", 308 | "r_lwrist", 309 | "l_lwrist", 310 | "r_mwrist", 311 | "l_mwrist", 312 | "r_ASIS", 313 | "l_ASIS", 314 | "r_PSIS", 315 | "l_PSIS", 316 | "r_knee", 317 | "l_knee", 318 | "r_mknee", 319 | "l_mknee", 320 | "r_ankle", 321 | "l_ankle", 322 | "r_mankle", 323 | "l_mankle", 324 | "r_5meta", 325 | "l_5meta", 326 | "r_toe", 327 | "l_toe", 328 | "r_big_toe", 329 | "l_big_toe", 330 | "l_calc", 331 | "r_calc", 332 | "r_bpinky", 333 | "l_bpinky", 334 | "r_tpinky", 335 | "l_tpinky", 336 | "r_bindex", 337 | "l_bindex", 338 | "r_tindex", 339 | "l_tindex", 340 | "r_tmiddle", 341 | "l_tmiddle", 342 | "r_tring", 343 | "l_tring", 344 | "r_bthumb", 345 | "l_bthumb", 346 | "r_tthumb", 347 | "l_tthumb", 348 | "C7", 349 | "L2", 350 | "T11", 351 | "T6", 352 | ], 353 | "coco_keypoints": [ 354 | "nose", 355 | "left_eye", 356 | "right_eye", 357 | "left_ear", 358 | "right_ear", 359 | "left_shoulder", 360 | "right_shoulder", 361 | "left_elbow", 362 | "right_elbow", 363 | "left_wrist", 364 | "right_wrist", 365 | "left_hip", 366 | "right_hip", 367 | "left_knee", 368 | "right_knee", 369 | "left_ankle", 370 | "right_ankle", 371 | ], 372 | } 373 | ] 374 | -------------------------------------------------------------------------------- /benchmarking/constants.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | config_global = "sherlock" # "local" or "windows" or "sherlock" 4 | 5 | config_base_local = { 6 | "mmposeDirectory": "/home/yoni/OneDrive_yonigoz@stanford.edu/RA/Code/mmpose", 7 | "OutputBoxDirectory": "OutputBox", 8 | "model_config_person": "demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py", 9 | "model_ckpt_person": "https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth", 10 | # "model_config_person" : "demo/mmdetection_cfg/configs/convnext/cascade-mask-rcnn_convnext-t-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco.py", 11 | # "model_ckpt_person" :"https://download.openmmlab.com/mmdetection/v2.0/convnext/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220509_204200-8f07c40b.pth", 12 | # "model_config_pose" : "configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py", 13 | # "model_config_pose" : "configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-384x288.py", 14 | # "model_ckpt_pose" : "pretrain/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-384x288-39c3c381_20220916.pth", 15 | # "model_ckpt_pose" : "pretrain/coco/hrnet_w48_coco_wholebody_384x288_dark-f5726563_20200918.pth", 16 | "model_config_pose": "configs/body_2d_keypoint/topdown_heatmap/infinity/hrnet48/td-hm_hrnet-w48_dark-8xb32-210e_merge_bedlam_infinity_coco_3DPW_eval_rich-384x288_pretrained.py", 17 | "model_ckpt_pose": "/home/yoni/OneDrive_yonigoz@stanford.edu/RA/prod/OpenCapBench/data/models/epoch_30.pth", 18 | "dataDir": "/home/yoni/OneDrive_yonigoz@stanford.edu/RA/prod/OpenCapBench/data", 19 | "batch_size_det": 4, 20 | "batch_size_pose": 32, 21 | "useGTscaling": True, 22 | "marker_set": "Anatomical", # "Coco" or "Anatomical" 23 | "alt_model": None, # None or "VirtualMarker" or "CLIFF" 24 | } 25 | 26 | config_base_local["model_ckpt_pose_absolute"] = os.path.join( 27 | config_base_local["mmposeDirectory"], 
config_base_local["model_ckpt_pose"] 28 | ) 29 | 30 | config_base_windows = { 31 | "mmposeDirectory": "C:/Data/OpenCap/mmpose", 32 | "OutputBoxDirectory": "OutputBox", 33 | "model_config_person": "demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py", 34 | "model_ckpt_person": "https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth", 35 | # "model_config_person" : "demo/mmdetection_cfg/configs/convnext/cascade-mask-rcnn_convnext-t-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco.py", 36 | # "model_ckpt_person" :"https://download.openmmlab.com/mmdetection/v2.0/convnext/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220509_204200-8f07c40b.pth", 37 | # "model_config_pose" : "configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py", 38 | # "model_config_pose" : "configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-384x288.py", 39 | # "model_ckpt_pose" : "pretrain/coco/td-hm_hrnet-w48_dark-8xb32-210e_coco-384x288-39c3c381_20220916.pth", 40 | # "model_ckpt_pose" : "pretrain/coco/hrnet_w48_coco_wholebody_384x288_dark-f5726563_20200918.pth", 41 | "model_config_pose": "configs/body_2d_keypoint/topdown_heatmap/infinity/hrnet48/td-hm_hrnet-w48_dark-8xb32-210e_merge_bedlam_infinity_coco_3DPW_eval_rich-384x288_pretrained.py", 42 | "model_ckpt_pose": "C:/Data/OpenCapBench/data/models/epoch_30.pth", 43 | "dataDir": "C:/Data/OpenCapBench/data", 44 | "batch_size_det": 4, 45 | "batch_size_pose": 32, 46 | "useGTscaling": True, 47 | "marker_set": "Anatomical", # "Coco" or "Anatomical" 48 | "alt_model": None, # None or "VirtualMarker" or "CLIFF" 49 | } 50 | 51 | 52 | 53 | config_base_sherlock = { 54 | "mmposeDirectory": "/home/users/yonigoz/RA/mmpose", 55 | "OutputBoxDirectory": "OutputBox", 56 | "model_config_person": "demo/mmdetection_cfg/configs/convnext/cascade-mask-rcnn_convnext-t-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco.py", 57 | "model_ckpt_person": "https://download.openmmlab.com/mmdetection/v2.0/convnext/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220509_204200-8f07c40b.pth", 58 | # "model_config_person" : "demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py", 59 | # "model_ckpt_person" : "https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth", 60 | "model_config_pose": "configs/body_2d_keypoint/topdown_heatmap/infinity/td-hm_ViTPose-huge_8xb64-210e_merge_bedlam_infinity_eval_bedlam-256x192.py", 61 | "model_ckpt_pose": "/scratch/users/yonigoz/mmpose_data/work_dirs/merge_bedlam_infinity_eval_bedlam/ViT/huge/best_infinity_AP_epoch_10.pth", 62 | # "model_config_pose" : "configs/body_2d_keypoint/topdown_heatmap/infinity/td-hm_hrnet-w48_dark-8xb32-210e_merge_bedlam_infinity_coco_eval_bedlam-384x288_pretrained.py", 63 | # "model_ckpt_pose" : "/scratch/users/yonigoz/mmpose_data/work_dirs/merge_bedlam_infinity_coco_eval_bedlam/HRNet/w48_dark_pretrained/best_infinity_AP_epoch_18.pth", 64 | "dataDir": "/scratch/users/yonigoz/OpenCap_data", 65 | "batch_size_det": 16, 66 | "batch_size_pose": 2, 67 | "useGTscaling": True, 68 | "marker_set": "Anatomical", 69 | "alt_model": None, 70 | } 71 | config_base_sherlock["model_ckpt_pose_absolute"] = config_base_sherlock[ 72 | 
"model_ckpt_pose" 73 | ] 74 | 75 | config = {} 76 | if config_global == "local": 77 | config = config_base_local 78 | if config_global == "sherlock": 79 | config = config_base_sherlock 80 | if config_global == "windows": 81 | config = config_base_windows 82 | 83 | 84 | def getMMposeAnatomicalCocoMarkerNames(): 85 | marker_names = [ 86 | "nose", 87 | "left_eye", 88 | "right_eye", 89 | "left_ear", 90 | "right_ear", 91 | "left_shoulder", 92 | "right_shoulder", 93 | "left_elbow", 94 | "right_elbow", 95 | "left_wrist", 96 | "right_wrist", 97 | "left_hip", 98 | "right_hip", 99 | "left_knee", 100 | "right_knee", 101 | "left_ankle", 102 | "right_ankle", 103 | "sternum", 104 | "rshoulder", 105 | "lshoulder", 106 | "r_lelbow", 107 | "l_lelbow", 108 | "r_melbow", 109 | "l_melbow", 110 | "r_lwrist", 111 | "l_lwrist", 112 | "r_mwrist", 113 | "l_mwrist", 114 | "r_ASIS", 115 | "l_ASIS", 116 | "r_PSIS", 117 | "l_PSIS", 118 | "r_knee", 119 | "l_knee", 120 | "r_mknee", 121 | "l_mknee", 122 | "r_ankle", 123 | "l_ankle", 124 | "r_mankle", 125 | "l_mankle", 126 | "r_5meta", 127 | "l_5meta", 128 | "r_toe", 129 | "l_toe", 130 | "r_big_toe", 131 | "l_big_toe", 132 | "l_calc", 133 | "r_calc", 134 | "C7", 135 | "L2", 136 | "T11", 137 | "T6", 138 | ] 139 | 140 | return marker_names 141 | 142 | 143 | def getMMposeMarkerNames(): 144 | markerNames = [ 145 | "Nose", 146 | "LEye", 147 | "REye", 148 | "LEar", 149 | "REar", 150 | "LShoulder", 151 | "RShoulder", 152 | "LElbow", 153 | "RElbow", 154 | "LWrist", 155 | "RWrist", 156 | "LHip", 157 | "RHip", 158 | "LKnee", 159 | "RKnee", 160 | "LAnkle", 161 | "RAnkle", 162 | "LBigToe", 163 | "LSmallToe", 164 | "LHeel", 165 | "RBigToe", 166 | "RSmallToe", 167 | "RHeel", 168 | ] 169 | 170 | return markerNames 171 | 172 | 173 | def getOpenPoseMarkerNames(): 174 | markerNames = [ 175 | "Nose", 176 | "Neck", 177 | "RShoulder", 178 | "RElbow", 179 | "RWrist", 180 | "LShoulder", 181 | "LElbow", 182 | "LWrist", 183 | "midHip", 184 | "RHip", 185 | "RKnee", 186 | "RAnkle", 187 | "LHip", 188 | "LKnee", 189 | "LAnkle", 190 | "REye", 191 | "LEye", 192 | "REar", 193 | "LEar", 194 | "LBigToe", 195 | "LSmallToe", 196 | "LHeel", 197 | "RBigToe", 198 | "RSmallToe", 199 | "RHeel", 200 | ] 201 | 202 | return markerNames 203 | 204 | 205 | def getMMposeAnatomicalCocoMarkerPairs(): 206 | markerNames = { 207 | "left_eye": "right_eye", 208 | "left_ear": "right_ear", 209 | "left_shoulder": "right_shoulder", 210 | "left_elbow": "right_elbow", 211 | "left_wrist": "right_wrist", 212 | "left_hip": "right_hip", 213 | "left_knee": "right_knee", 214 | "left_ankle": "right_ankle", 215 | "rshoulder": "lshoulder", 216 | "lshoulder": "rshoulder", 217 | "r_lelbow": "l_lelbow", 218 | "l_lelbow": "r_lelbow", 219 | "r_melbow": "l_melbow", 220 | "l_melbow": "r_melbow", 221 | "r_lwrist": "l_lwrist", 222 | "l_lwrist": "r_lwrist", 223 | "r_mwrist": "l_mwrist", 224 | "l_mwrist": "r_mwrist", 225 | "r_ASIS": "l_ASIS", 226 | "l_ASIS": "r_ASIS", 227 | "r_PSIS": "l_PSIS", 228 | "l_PSIS": "r_PSIS", 229 | "r_knee": "l_knee", 230 | "l_knee": "r_knee", 231 | "r_mknee": "l_mknee", 232 | "l_mknee": "r_mknee", 233 | "r_ankle": "l_ankle", 234 | "l_ankle": "r_ankle", 235 | "r_mankle": "l_mankle", 236 | "l_mankle": "r_mankle", 237 | "r_5meta": "l_5meta", 238 | "l_5meta": "r_5meta", 239 | "r_toe": "l_toe", 240 | "l_toe": "r_toe", 241 | "r_big_toe": "l_big_toe", 242 | "l_big_toe": "r_big_toe", 243 | "l_calc": "r_calc", 244 | "r_calc": "l_calc", 245 | } 246 | 247 | return markerNames 248 | 
-------------------------------------------------------------------------------- /benchmarking/opensimPipeline/IK/Setup_IK_mmpose_anatomical.xml: --------------------------------------------------------------------------------
[OpenSim InverseKinematicsTool setup; the XML tags were lost in extraction and the marker-task names are unrecoverable. Surviving values: model/marker/coordinate files "Unassigned", constraint weight Inf, accuracy 1.0000000000000001e-05, time range -Inf Inf, report_errors true, report_marker_locations true, and 26 IKMarkerTask entries (24 enabled, 2 disabled) with weights between 0 and 25.]
-------------------------------------------------------------------------------- /synthpose/RICH/generate_dataset.py: --------------------------------------------------------------------------------
1 | import glob
2 | import json
3 | import os
4 | import pickle
5 | 
6 | import numpy as np
7 | import torch
8 | from smplx import SMPLX
9 | from tqdm import tqdm
10 | from utils import CalibratedCamera
11 | 
12 | from ..constants import (
13 |     AUGMENTED_VERTICES_INDEX_DICT,
14 |     AUGMENTED_VERTICES_NAMES,
15 |     BODIES_PATH,
16 |     COCO_VERTICES_NAME,
17 |     GENDERS_PATH,
18 |     IMAGES_ROOT_PATH,
19 |     IMGEXT,
20 |     SCAN_CALIBRATION_PATH,
21 |     SEQ_NAMES,
22 |     SET,
23 |     SMPLX_MODEL_DIR,
24 | )
25 | 
26 | 
27 | class DatasetGenerator:
28 |     def __init__(
29 |         self,
30 |         output_path: str = "infinity_dataset_combined",
31 |         sample_rate: int = 6,
32 |     ):
33 |         self.img_width = 4112
34 |         self.img_height = 3008
35 |         self.output_path = output_path
36 |         self.sample_rate = sample_rate
37 |         self.data_dict = {
38 |             "infos": {},
39 |             "images": [],
40 |             "annotations": [],
41 |             "categories": [],
42 |         }
43 | 
44 |         self.data_dict["categories"] = [
45 |             {
46 |                 "id": 0,
47 |                 "augmented_keypoints": AUGMENTED_VERTICES_NAMES,
48 |                 "coco_keypoints": COCO_VERTICES_NAME,
49 |             }
50 |         ]
51 |         self.total_source_images = 0
52 |         self.total_error_reconstruction = 0
53 | 
54 |         if not os.path.exists(self.output_path):
55 |             os.makedirs(self.output_path)
56 | 
57 |     def get_bbox(self, vertices):
58 |         x_img, y_img = vertices[:, 0], vertices[:, 1]
59 |         xmin = min(x_img)
60 |         ymin = min(y_img)
61 |         xmax = max(x_img)
62 |         ymax = max(y_img)
63 | 
64 |         x_center = (xmin + xmax) / 2.0
65 |         width = xmax - xmin
66 |         xmin = x_center - 0.5 * width  # * 1.2
67 |         xmax = x_center + 0.5 * width  # * 1.2
68 | 
69 |         y_center = (ymin + ymax) / 2.0
70 |         height = ymax - ymin
71 |         ymin = y_center - 0.5 * height  # * 1.2
72 |         ymax = y_center + 0.5 * height  # * 1.2
73 | 
74 |         xmin = max(0, xmin)
75 |         ymin = max(0, ymin)
76 |         xmax = min(self.img_width, xmax)
77 |         ymax =
min(self.img_height, ymax) 78 | 79 | bbox = np.array([xmin, ymin, xmax - xmin, ymax - ymin]).astype(int) 80 | 81 | return bbox 82 | 83 | def generate_annotation_dict(self): 84 | annotation_dict = {} 85 | annotation_dict["image_id"] = len(self.data_dict["images"]) 86 | annotation_dict["id"] = annotation_dict["image_id"] 87 | annotation_dict["category_id"] = 0 88 | annotation_dict["iscrowd"] = 0 89 | 90 | return annotation_dict 91 | 92 | def get_grountruth_landmarks( 93 | self, 94 | body_model, 95 | set: str, 96 | cam, 97 | seq_name: str, 98 | frame_id: int, 99 | sub_id, 100 | ): 101 | smplx_params_fn = os.path.join( 102 | BODIES_PATH, set, seq_name, f"{frame_id:05d}", f"{sub_id}.pkl" 103 | ) 104 | body_params = pickle.load(open(smplx_params_fn, "rb")) 105 | body_params = {k: torch.from_numpy(v) for k, v in body_params.items()} 106 | body_model.reset_params(**body_params) 107 | model_output = body_model( 108 | return_verts=True, body_pose=body_params["body_pose"], return_full_pose=True 109 | ) 110 | vertices = model_output.vertices.detach() 111 | 112 | ## project to image 113 | 114 | projected_vertices = cam(vertices).squeeze().detach().numpy() 115 | bbox = self.get_bbox(projected_vertices) 116 | projected_vertices_anatomical = projected_vertices[ 117 | list(AUGMENTED_VERTICES_INDEX_DICT.values()) 118 | ].tolist() 119 | 120 | coco_landmarks = [0] * 3 * 17 121 | 122 | if np.isnan(projected_vertices_anatomical).any(): 123 | return {}, {}, False 124 | groundtruth_landmarks = { 125 | name: {"x": point[0], "y": point[1]} 126 | for name, point in zip( 127 | AUGMENTED_VERTICES_NAMES, projected_vertices_anatomical 128 | ) 129 | } 130 | 131 | # check if each landmark is out of frame (visible) or not: 132 | for name, point in groundtruth_landmarks.items(): 133 | if ( 134 | point["x"] < 0 135 | or point["y"] < 0 136 | or point["x"] > self.img_width 137 | or point["y"] > self.img_height 138 | ): 139 | groundtruth_landmarks[name]["v"] = 0 140 | else: 141 | groundtruth_landmarks[name]["v"] = 1 142 | 143 | return groundtruth_landmarks, coco_landmarks, bbox, True 144 | 145 | def generate_dataset(self): 146 | iteration = 0 147 | it_file = 0 148 | nb_files = len(SEQ_NAMES) 149 | gender_mapping = json.load(open(GENDERS_PATH, "r")) 150 | 151 | for seq_name in SEQ_NAMES: 152 | print("extracting data from ", seq_name, "...") 153 | splits = seq_name.split("_") 154 | if len(splits) == 3: 155 | scene_name, sub_id, _ = splits 156 | sub_ids = [sub_id] 157 | else: 158 | scene_name = splits[0] 159 | sub_ids = splits[1:-1] 160 | # extension = imgext[scene_name] 161 | extension = IMGEXT[scene_name] 162 | for sub_id in sub_ids: 163 | print("sub_id: ", sub_id) 164 | gender = gender_mapping[f"{int(sub_id)}"] 165 | 166 | seq_path = os.path.join(IMAGES_ROOT_PATH, SET, seq_name) 167 | cams_paths = [ 168 | item 169 | for item in os.listdir(seq_path) 170 | if os.path.isdir(os.path.join(seq_path, item)) 171 | ] 172 | body_model = SMPLX( 173 | SMPLX_MODEL_DIR, 174 | gender=gender, 175 | num_pca_comps=12, 176 | flat_hand_mean=False, 177 | create_expression=True, 178 | create_jaw_pose=True, 179 | ) 180 | cam_iter = 0 181 | nb_cams = len(cams_paths) 182 | for cam_path in cams_paths: 183 | camera_id = int(cam_path.split("_")[-1]) 184 | print("camera_id: ", camera_id) 185 | calib_path = os.path.join( 186 | SCAN_CALIBRATION_PATH, 187 | scene_name, 188 | "calibration", 189 | f"{camera_id:03d}.xml", 190 | ) 191 | if not os.path.exists(calib_path): 192 | print( 193 | f"skipped camera {camera_id} for scene {scene_name} because no calibration 
file" 194 | ) 195 | continue 196 | 197 | cam = CalibratedCamera(calib_path=calib_path) 198 | images_paths = glob.glob( 199 | os.path.join(seq_path, cam_path) + f"/*.{extension}" 200 | ) 201 | nb_images = len(images_paths) // self.sample_rate 202 | for index_frame, image_path in enumerate(images_paths): 203 | frame_id = int(image_path.split("/")[-1].split("_")[0]) 204 | 205 | if not os.path.exists( 206 | os.path.join( 207 | BODIES_PATH, 208 | SET, 209 | seq_name, 210 | f"{frame_id:05d}", 211 | f"{sub_id}.pkl", 212 | ) 213 | ): 214 | continue 215 | if index_frame % self.sample_rate == 0: 216 | ( 217 | groundtruth_landmarks, 218 | coco_landmarks, 219 | bbox, 220 | success, 221 | ) = self.get_grountruth_landmarks( 222 | body_model, 223 | SET, 224 | cam, 225 | seq_name, 226 | frame_id, 227 | sub_id, 228 | ) 229 | 230 | self.total_source_images += 1 231 | if not success: 232 | self.total_error_reconstruction += 1 233 | continue 234 | 235 | annotation_dict = self.generate_annotation_dict() 236 | annotation_dict["bbox"] = bbox.tolist() 237 | annotation_dict["keypoints"] = groundtruth_landmarks 238 | annotation_dict["coco_keypoints"] = coco_landmarks 239 | 240 | self.data_dict["annotations"].append(annotation_dict) 241 | 242 | image_dict = { 243 | "id": len(self.data_dict["images"]), 244 | "width": self.img_width, 245 | "height": self.img_height, 246 | "frame_number": index_frame, 247 | "img_path": image_path, 248 | } 249 | self.data_dict["images"].append(image_dict) 250 | 251 | if iteration % 100 == 0: 252 | print( 253 | f"scene {it_file}/{nb_files}, cam {cam_iter}/{nb_cams}, {index_frame//self.sample_rate}/{nb_images}" 254 | ) 255 | iteration += 1 256 | cam_iter += 1 257 | it_file += 1 258 | 259 | with open( 260 | os.path.join(self.output_path, "annotations.json"), 261 | "w", 262 | encoding="utf-8", 263 | ) as f: 264 | json.dump(self.data_dict, f, ensure_ascii=False, indent=4) 265 | 266 | print("total source images: ", self.total_source_images) 267 | print("total error reconstruction: ", self.total_error_reconstruction) 268 | 269 | 270 | if __name__ == "__main__": 271 | dataset_generator = DatasetGenerator( 272 | output_path="rich_reannotated", 273 | sample_rate=6, 274 | ) 275 | dataset_generator.generate_dataset() 276 | -------------------------------------------------------------------------------- /benchmarking/opensimPipeline/IK/Setup_IK_mmpose_final.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Unassigned 6 | 7 | Unassigned 8 | 9 | Inf 10 | 11 | 1.0000000000000001e-05 12 | 13 | 14 | 15 | 16 | 17 | true 18 | 19 | 5 20 | 21 | 22 | 23 | true 24 | 25 | 5 26 | 27 | 28 | 29 | true 30 | 31 | 5 32 | 33 | 34 | 35 | true 36 | 37 | 10 38 | 39 | 40 | 41 | true 42 | 43 | 25 44 | 45 | 46 | 47 | true 48 | 49 | 25 50 | 51 | 52 | 53 | true 54 | 55 | 25 56 | 57 | 58 | 59 | true 60 | 61 | 25 62 | 63 | 64 | 65 | true 66 | 67 | 20 68 | 69 | 70 | 71 | true 72 | 73 | 0 74 | 75 | 76 | 77 | true 78 | 79 | 20 80 | 81 | 82 | 83 | true 84 | 85 | 0 86 | 87 | 88 | 89 | true 90 | 91 | 7 92 | 93 | 94 | 95 | true 96 | 97 | 7 98 | 99 | 100 | 101 | true 102 | 103 | 7 104 | 105 | 106 | 107 | true 108 | 109 | 20 110 | 111 | 112 | 113 | true 114 | 115 | 0 116 | 117 | 118 | 119 | true 120 | 121 | 20 122 | 123 | 124 | 125 | true 126 | 127 | 0 128 | 129 | 130 | 131 | true 132 | 133 | 7 134 | 135 | 136 | 137 | true 138 | 139 | 7 140 | 141 | 142 | 143 | true 144 | 145 | 7 146 | 147 | 148 | 149 | true 150 | 151 | 5 152 | 153 | 154 | 155 | true 156 | 157 | 5 158 | 159 | 160 | 
161 | true 162 | 163 | 5 164 | 165 | 166 | 167 | true 168 | 169 | 5 170 | 171 | 172 | 173 | true 174 | 175 | 5 176 | 177 | 178 | 179 | true 180 | 181 | 5 182 | 183 | 184 | 185 | true 186 | 187 | 5 188 | 189 | 190 | 191 | true 192 | 193 | 5 194 | 195 | 196 | 197 | 198 | 199 | Unassigned 200 | 201 | Unassigned 202 | 203 | -Inf Inf 204 | 205 | true 206 | 207 | Unassigned 208 | 209 | true 210 | 211 | 212 | -------------------------------------------------------------------------------- /benchmarking/pose.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pickle 3 | import shutil 4 | 5 | import numpy as np 6 | import scipy 7 | from constants import ( 8 | getMMposeAnatomicalCocoMarkerNames, 9 | getMMposeMarkerNames, 10 | getOpenPoseMarkerNames, 11 | ) 12 | 13 | 14 | def runPoseDetector( 15 | config_benchmark, 16 | CameraDirectories, 17 | trialRelativePath, 18 | pathPoseDetector, 19 | trialName, 20 | CamParamDict=None, 21 | generateVideo=True, 22 | cams2Use=["all"], 23 | poseDetector="OpenPose", 24 | bbox_thr=0.8, 25 | ): 26 | # Create list of cameras. 27 | if cams2Use[0] == "all": 28 | cameras2Use = list(CameraDirectories.keys()) 29 | else: 30 | cameras2Use = cams2Use 31 | 32 | CameraDirectories_selectedCams = {} 33 | CamParamList_selectedCams = [] 34 | for cam in cameras2Use: 35 | CameraDirectories_selectedCams[cam] = CameraDirectories[cam] 36 | CamParamList_selectedCams.append(CamParamDict[cam]) 37 | 38 | # Get/add video extension. 39 | cameraDirectory = CameraDirectories_selectedCams[cameras2Use[0]] 40 | trialRelativePath += ".avi" 41 | 42 | for camName in CameraDirectories_selectedCams: 43 | cameraDirectory = CameraDirectories_selectedCams[camName] 44 | print("Running {} for {}".format(poseDetector, camName)) 45 | runPoseVideo( 46 | config_benchmark, 47 | cameraDirectory, 48 | trialRelativePath, 49 | pathPoseDetector, 50 | trialName, 51 | generateVideo=generateVideo, 52 | bbox_thr=bbox_thr, 53 | ) 54 | 55 | return 56 | 57 | 58 | def runPoseVideo( 59 | config_benchmark, 60 | cameraDirectory, 61 | fileName, 62 | pathMMpose, 63 | trialName, 64 | generateVideo=True, 65 | bbox_thr=0.8, 66 | ): 67 | model_config_person = config_benchmark["model_config_person"] 68 | model_ckpt_person = config_benchmark["model_ckpt_person"] 69 | model_config_pose = config_benchmark["model_config_pose"] 70 | trialPrefix, _ = os.path.splitext(os.path.basename(fileName)) 71 | videoFullPath = os.path.normpath(os.path.join(cameraDirectory, fileName)) 72 | 73 | pathOutputVideo = os.path.join( 74 | cameraDirectory, "OutputMedia_mmpose_" + str(bbox_thr), trialName 75 | ) 76 | 77 | # mmposeBoxDir = os.path.join("OutputBox_mmpose", trialName) 78 | pathOutputBox = config_benchmark["OutputBoxDirectory"] 79 | pathOutputBox = os.path.join( 80 | config_benchmark["OutputBoxDirectory"].join( 81 | [ 82 | config_benchmark["dataName"].join(cameraDirectory.split(config_benchmark["dataName"])[:-1]), 83 | cameraDirectory.split(config_benchmark["dataName"])[-1] 84 | ] 85 | ), 86 | trialName, 87 | ) 88 | # pathOutputBox = os.path.join(cameraDirectory, mmposeBoxDir) 89 | 90 | mmposePklDir = os.path.join("OutputPkl_mmpose_" + str(bbox_thr), trialName) 91 | pathOutputPkl = os.path.join(cameraDirectory, mmposePklDir) 92 | 93 | os.makedirs(pathOutputVideo, exist_ok=True) 94 | os.makedirs(pathOutputPkl, exist_ok=True) 95 | 96 | # The video is rewritten, unrotated, and downsampled. 
There is no 97 | # need to do anything specific for the rotation, just rewriting the video 98 | # unrotates it. 99 | trialPath, _ = os.path.splitext(fileName) 100 | fileName = trialPath + "_rotated.avi" 101 | pathVideoRot = os.path.normpath(os.path.join(cameraDirectory, fileName)) 102 | cmd_fr = " " 103 | # if frameRate > 60.0: 104 | # cmd_fr = ' -r 60 ' 105 | # frameRate = 60.0 106 | CMD = "ffmpeg -loglevel error -y -i {}{}-q 0 {}".format( 107 | videoFullPath, cmd_fr, pathVideoRot 108 | ) 109 | 110 | videoFullPath = pathVideoRot 111 | trialPrefix = trialPrefix + "_rotated" 112 | 113 | if not os.path.exists(pathVideoRot): 114 | os.system(CMD) 115 | 116 | pklPath = os.path.join(pathOutputPkl, trialPrefix + ".pkl") 117 | ppPklPath = os.path.join(pathOutputPkl, trialPrefix + "_pp.pkl") 118 | # Run pose detector if this file doesn't exist in outputs 119 | if not os.path.exists(ppPklPath): 120 | if config_benchmark["alt_model"] == "VirtualMarker": 121 | from utilsMMpose import detection_inference 122 | from utilsPose import pose_inference_updated 123 | else: 124 | from utilsMMpose import detection_inference, pose_inference_updated 125 | 126 | # Run human detection. 127 | bboxPath = os.path.join(pathOutputBox, trialPrefix + ".pkl") 128 | print("bboxPath", bboxPath) 129 | if not os.path.exists(bboxPath): 130 | os.makedirs(pathOutputBox, exist_ok=True) 131 | full_model_config_person = os.path.join(pathMMpose, model_config_person) 132 | detection_inference( 133 | full_model_config_person, 134 | model_ckpt_person, 135 | videoFullPath, 136 | bboxPath, 137 | batch_size=config_benchmark["batch_size_det"], 138 | ) 139 | 140 | # Run pose detection. 141 | pathModelCkptPose = config_benchmark["model_ckpt_pose_absolute"] 142 | videoOutPath = os.path.join(pathOutputVideo, trialPrefix + "withKeypoints.mp4") 143 | full_model_config_pose = os.path.join(pathMMpose, model_config_pose) 144 | pose_inference_updated( 145 | full_model_config_pose, 146 | pathModelCkptPose, 147 | videoFullPath, 148 | bboxPath, 149 | pklPath, 150 | videoOutPath, 151 | batch_size=config_benchmark["batch_size_pose"], 152 | bbox_thr=bbox_thr, 153 | visualize=generateVideo, 154 | marker_set=config_benchmark["marker_set"], 155 | ) 156 | 157 | # Post-process data to have OpenPose-like file structure. 
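The post-processed .pkl written below is a list over frames; each frame is a list of per-person dicts holding a "person_id" and a flat "pose_keypoints_2d" list of (x, y, confidence) triplets, one per marker (see arrangeMMposeAnatomicalPkl further down). A minimal sketch of reading one back, assuming the Anatomical marker set:

import pickle
import numpy as np
from constants import getMMposeAnatomicalCocoMarkerNames

def loadPostprocessedKeypoints(ppPklPath):
    with open(ppPklPath, "rb") as f:
        frames = pickle.load(f)
    n_markers = len(getMMposeAnatomicalCocoMarkerNames())
    # One (n_markers, 3) array of x, y, confidence per person per frame.
    return [
        [np.reshape(p["pose_keypoints_2d"], (n_markers, 3)) for p in frame]
        for frame in frames
    ]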
158 | # arrangeMMposePkl(pklPath, ppPklPath) 159 | if config_benchmark["alt_model"] is None: 160 | if config_benchmark["marker_set"] == "Anatomical": 161 | arrangeMMposeAnatomicalPkl(pklPath, ppPklPath) 162 | elif config_benchmark["marker_set"] == "Coco": 163 | arrangeMMposePkl(pklPath, ppPklPath) 164 | else: 165 | # copy pklPath to ppPklPath 166 | shutil.copy(pklPath, ppPklPath) 167 | 168 | 169 | def arrangeMMposeAnatomicalPkl(poseInferencePklPath, outputPklPath): 170 | open_file = open(poseInferencePklPath, "rb") 171 | frames = pickle.load(open_file) 172 | open_file.close() 173 | 174 | markersMMposeAnatomical = getMMposeAnatomicalCocoMarkerNames() 175 | # markersMMposeAnatomical = getMMposeAnatomicalMarkerNames() 176 | nb_markers = len(markersMMposeAnatomical) 177 | data4pkl = [] 178 | for c_frame, frame in enumerate(frames): 179 | data4people = [] 180 | for c, person in enumerate(frame): 181 | coordinates = person["pred_instances"]["keypoints"][0, :, :] 182 | c_coord_out = np.zeros((nb_markers * 3,)) 183 | for c_m, marker in enumerate(markersMMposeAnatomical): 184 | c_coord = [coordinates[c_m][0], coordinates[c_m][1]] 185 | c_coord.append(person["pred_instances"]["keypoint_scores"][0][c_m]) 186 | idx_out = np.arange(c_m * 3, c_m * 3 + 3) 187 | c_coord_out[idx_out,] = c_coord 188 | c_dict = {} 189 | c_dict["person_id"] = [c] 190 | c_dict["pose_keypoints_2d"] = c_coord_out.tolist() 191 | data4people.append(c_dict) 192 | data4pkl.append(data4people) 193 | 194 | with open(outputPklPath, "wb") as f: 195 | pickle.dump(data4pkl, f) 196 | 197 | return 198 | 199 | 200 | def arrangeMMposePkl(poseInferencePklPath, outputPklPath): 201 | open_file = open(poseInferencePklPath, "rb") 202 | frames = pickle.load(open_file) 203 | open_file.close() 204 | 205 | markersMMpose = getMMposeMarkerNames() 206 | markersOpenPose = getOpenPoseMarkerNames() 207 | 208 | data4pkl = [] 209 | for c_frame, frame in enumerate(frames): 210 | data4people = [] 211 | for c, person in enumerate(frame): 212 | # coordinates = person["preds_with_flip"].tolist() 213 | coordinates = person["pred_instances"]["keypoints"][0, :, :] 214 | confidence = person["pred_instances"]["keypoint_scores"][0, :] 215 | # stack confidence with coordinates 216 | coordinates = np.column_stack((coordinates, confidence)) 217 | c_coord_out = np.zeros((25 * 3,)) 218 | for c_m, marker in enumerate(markersOpenPose): 219 | if marker == "midHip": 220 | leftHip = coordinates[markersMMpose.index("LHip")] 221 | rightHip = coordinates[markersMMpose.index("RHip")] 222 | c_coord = [] 223 | # Mid point between both hips 224 | c_coord.append((leftHip[0] + rightHip[0]) / 2) 225 | c_coord.append((leftHip[1] + rightHip[1]) / 2) 226 | # Lowest confidence 227 | c_coord.append(np.min([leftHip[2], rightHip[2]])) 228 | elif marker == "Neck": 229 | leftShoulder = coordinates[markersMMpose.index("LShoulder")] 230 | rightShoulder = coordinates[markersMMpose.index("RShoulder")] 231 | c_coord = [] 232 | # Mid point between both shoulders 233 | c_coord.append((leftShoulder[0] + rightShoulder[0]) / 2) 234 | c_coord.append((leftShoulder[1] + rightShoulder[1]) / 2) 235 | # Lowest confidence 236 | c_coord.append(np.min([leftShoulder[2], rightShoulder[2]])) 237 | else: 238 | c_coord = coordinates[markersMMpose.index(marker)] 239 | idx_out = np.arange(c_m * 3, c_m * 3 + 3) 240 | c_coord_out[idx_out,] = c_coord 241 | c_dict = {} 242 | c_dict["person_id"] = [c] 243 | c_dict["pose_keypoints_2d"] = c_coord_out.tolist() 244 | data4people.append(c_dict) 245 | data4pkl.append(data4people) 246 
| 247 | with open(outputPklPath, "wb") as f: 248 | pickle.dump(data4pkl, f) 249 | 250 | return 251 | 252 | 253 | def getUpsampledMarkers(keypoints3D, frameRate): 254 | keypoints3D_res = np.empty( 255 | (keypoints3D.shape[2], keypoints3D.shape[0] * keypoints3D.shape[1]) 256 | ) 257 | for iFrame in range(keypoints3D.shape[2]): 258 | keypoints3D_res[iFrame, :] = np.reshape( 259 | keypoints3D[:, :, iFrame], 260 | (1, keypoints3D.shape[0] * keypoints3D.shape[1]), 261 | "F", 262 | ) 263 | # Upsample to 100 Hz. 264 | newTime = np.arange( 265 | 0, np.round(len(keypoints3D_res) / frameRate + 1 / 100, 6), 1 / 100 266 | ) 267 | interpFxn = scipy.interpolate.interp1d( 268 | [i / frameRate for i in range(len(keypoints3D_res))], 269 | keypoints3D_res, 270 | axis=0, 271 | fill_value="extrapolate", 272 | ) 273 | keypoints3D_res_interp = interpFxn(newTime) 274 | keypoints3D_interp = np.empty( 275 | (keypoints3D.shape[0], keypoints3D.shape[1], len(newTime)) 276 | ) 277 | for iFrame in range(len(newTime)): 278 | keypoints3D_interp[:, :, iFrame] = np.reshape( 279 | keypoints3D_res_interp[iFrame, :], 280 | (keypoints3D.shape[0], keypoints3D.shape[1]), 281 | "F", 282 | ) 283 | 284 | return keypoints3D_interp 285 | -------------------------------------------------------------------------------- /synthpose/BEDLAM/generate_dataset.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import json 3 | import os 4 | 5 | import numpy as np 6 | import smplx 7 | import torch 8 | 9 | from ..constants import ( 10 | AUGMENTED_VERTICES_INDEX_DICT, 11 | AUGMENTED_VERTICES_NAMES, 12 | COCO_VERTICES_NAME, 13 | MODEL_FOLDER, 14 | ) 15 | 16 | ROTATION_MATRIX_2D_90_CLOCKWISE = np.array([[0, 1], [-1, 0]]) 17 | 18 | smplx_model_male = smplx.create( 19 | MODEL_FOLDER, 20 | model_type="smplx", 21 | gender="neutral", 22 | ext="npz", 23 | flat_hand_mean=True, 24 | num_betas=11, 25 | use_pca=False, 26 | ) 27 | smplx_model_female = smplx.create( 28 | MODEL_FOLDER, 29 | model_type="smplx", 30 | gender="female", 31 | ext="npz", 32 | num_betas=11, 33 | flat_hand_mean=True, 34 | use_pca=False, 35 | ) 36 | 37 | smplx_model_neutral = smplx.create( 38 | MODEL_FOLDER, 39 | model_type="smplx", 40 | gender="neutral", 41 | ext="npz", 42 | flat_hand_mean=True, 43 | num_betas=11, 44 | use_pca=False, 45 | ) 46 | 47 | 48 | def get_smplx_vertices(poses, betas, trans, gender): 49 | if gender == "male": 50 | model_out = smplx_model_male( 51 | betas=torch.tensor(betas).unsqueeze(0).float(), 52 | global_orient=torch.tensor(poses[:3]).unsqueeze(0).float(), 53 | body_pose=torch.tensor(poses[3:66]).unsqueeze(0).float(), 54 | left_hand_pose=torch.tensor(poses[75:120]).unsqueeze(0).float(), 55 | right_hand_pose=torch.tensor(poses[120:165]).unsqueeze(0).float(), 56 | jaw_pose=torch.tensor(poses[66:69]).unsqueeze(0).float(), 57 | leye_pose=torch.tensor(poses[69:72]).unsqueeze(0).float(), 58 | reye_pose=torch.tensor(poses[72:75]).unsqueeze(0).float(), 59 | transl=torch.tensor(trans).unsqueeze(0), 60 | ) 61 | # from psbody.mesh import Mesh 62 | elif gender == "female": 63 | model_out = smplx_model_female( 64 | betas=torch.tensor(betas).unsqueeze(0).float(), 65 | global_orient=torch.tensor(poses[:3]).unsqueeze(0).float(), 66 | body_pose=torch.tensor(poses[3:66]).unsqueeze(0).float(), 67 | left_hand_pose=torch.tensor(poses[75:120]).unsqueeze(0).float(), 68 | right_hand_pose=torch.tensor(poses[120:165]).unsqueeze(0).float(), 69 | jaw_pose=torch.tensor(poses[66:69]).unsqueeze(0).float(), 70 | 
--------------------------------------------------------------------------------
/synthpose/BEDLAM/generate_dataset.py:
--------------------------------------------------------------------------------
import glob
import json
import os

import numpy as np
import smplx
import torch

from ..constants import (
    AUGMENTED_VERTICES_INDEX_DICT,
    AUGMENTED_VERTICES_NAMES,
    COCO_VERTICES_NAME,
    MODEL_FOLDER,
)

ROTATION_MATRIX_2D_90_CLOCKWISE = np.array([[0, 1], [-1, 0]])

smplx_model_male = smplx.create(
    MODEL_FOLDER,
    model_type="smplx",
    gender="male",  # was "neutral", which contradicted the variable name
    ext="npz",
    flat_hand_mean=True,
    num_betas=11,
    use_pca=False,
)
smplx_model_female = smplx.create(
    MODEL_FOLDER,
    model_type="smplx",
    gender="female",
    ext="npz",
    num_betas=11,
    flat_hand_mean=True,
    use_pca=False,
)
smplx_model_neutral = smplx.create(
    MODEL_FOLDER,
    model_type="smplx",
    gender="neutral",
    ext="npz",
    flat_hand_mean=True,
    num_betas=11,
    use_pca=False,
)

SMPLX_MODELS = {
    "male": smplx_model_male,
    "female": smplx_model_female,
    "neutral": smplx_model_neutral,
}


def get_smplx_vertices(poses, betas, trans, gender):
    # The 165-value pose vector is laid out as: global orientation (0:3),
    # body (3:66), jaw (66:69), left eye (69:72), right eye (72:75),
    # left hand (75:120), right hand (120:165). The three branches of the
    # original function were identical except for the model, so they are
    # collapsed into a single lookup; an unknown gender now raises instead
    # of hitting an undefined variable.
    if gender not in SMPLX_MODELS:
        raise ValueError("gender must be 'male', 'female', or 'neutral'")
    model_out = SMPLX_MODELS[gender](
        betas=torch.tensor(betas).unsqueeze(0).float(),
        global_orient=torch.tensor(poses[:3]).unsqueeze(0).float(),
        body_pose=torch.tensor(poses[3:66]).unsqueeze(0).float(),
        left_hand_pose=torch.tensor(poses[75:120]).unsqueeze(0).float(),
        right_hand_pose=torch.tensor(poses[120:165]).unsqueeze(0).float(),
        jaw_pose=torch.tensor(poses[66:69]).unsqueeze(0).float(),
        leye_pose=torch.tensor(poses[69:72]).unsqueeze(0).float(),
        reye_pose=torch.tensor(poses[72:75]).unsqueeze(0).float(),
        transl=torch.tensor(trans).unsqueeze(0),
    )
    return model_out.vertices[0], model_out.joints[0]
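
# Minimal call sketch (the zero arrays below are stand-ins with the shapes the
# function expects; the names are illustrative):
#
#     poses = np.zeros(165)  # axis-angle parameters, laid out as documented above
#     betas = np.zeros(11)   # matches num_betas=11 in the models created above
#     trans = np.zeros(3)
#     verts, joints = get_smplx_vertices(poses, betas, trans, "neutral")
#     print(verts.shape)     # torch.Size([10475, 3]) -- the SMPL-X vertex count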


class DatasetGenerator:
    def __init__(
        self,
        annotation_files_path: str,
        output_path: str = "infinity_dataset_combined",
        sample_rate: int = 6,
    ):
        self.img_width = 1280
        self.img_height = 720
        self.annotation_files_path = annotation_files_path
        self.output_path = output_path
        self.sample_rate = sample_rate
        self.data_dict = {
            "infos": {},
            "images": [],
            "annotations": [],
            "categories": [],
        }

        self.data_dict["categories"] = [
            {
                "id": 0,
                "augmented_keypoints": AUGMENTED_VERTICES_NAMES,
                "coco_keypoints": COCO_VERTICES_NAME,
            }
        ]
        self.total_source_images = 0
        self.total_error_reconstruction = 0

        if not os.path.exists(self.output_path):
            os.makedirs(self.output_path)

    def get_bbox(self, vertices):
        x_img, y_img = vertices[:, 0], vertices[:, 1]
        xmin = min(x_img)
        ymin = min(y_img)
        xmax = max(x_img)
        ymax = max(y_img)

        # Recompute the box around its center (a 1.2x expansion factor was
        # applied here at some point and is currently disabled).
        x_center = (xmin + xmax) / 2.0
        width = xmax - xmin
        xmin = x_center - 0.5 * width
        xmax = x_center + 0.5 * width

        y_center = (ymin + ymax) / 2.0
        height = ymax - ymin
        ymin = y_center - 0.5 * height
        ymax = y_center + 0.5 * height

        # Clip to the image bounds.
        xmin = max(0, xmin)
        ymin = max(0, ymin)
        xmax = min(self.img_width, xmax)
        ymax = min(self.img_height, ymax)

        bbox = np.array([xmin, ymin, xmax - xmin, ymax - ymin]).astype(int)

        return bbox

    def generate_annotation_dict(self):
        annotation_dict = {}
        annotation_dict["image_id"] = len(self.data_dict["images"])
        annotation_dict["id"] = annotation_dict["image_id"]
        annotation_dict["category_id"] = 0
        annotation_dict["iscrowd"] = 0

        return annotation_dict

    def get_groundtruth_landmarks(
        self, annotations: dict, index_frame: int, rotate_flag: bool = False
    ):
        pose = annotations["pose_cam"][index_frame]
        beta = annotations["shape"][index_frame]
        body_trans_cam = annotations["trans_cam"][index_frame]
        verts_3d, joints_3d = get_smplx_vertices(pose, beta, body_trans_cam, "neutral")
        # Apply the camera extrinsic translation, then project with the intrinsics.
        cam_trans = annotations["cam_ext"][index_frame][:, 3][:3]
        verts_3d = verts_3d.detach().cpu().numpy() + cam_trans

        projected_vertices = np.matmul(
            annotations["cam_int"][index_frame], verts_3d.T
        ).T
        projected_vertices = projected_vertices[:, :2] / projected_vertices[:, 2:]

        if rotate_flag:
            # Closeup images are stored rotated: rotate the projected points
            # 90 degrees clockwise about the image center.
            projected_vertices[:] -= np.array([self.img_height / 2, self.img_width / 2])
            projected_vertices = np.matmul(
                ROTATION_MATRIX_2D_90_CLOCKWISE, projected_vertices.T
            ).T
            projected_vertices[:] += np.array([self.img_width / 2, self.img_height / 2])

        projected_vertices_anatomical = projected_vertices[
            list(AUGMENTED_VERTICES_INDEX_DICT.values())
        ]

        bbox = self.get_bbox(projected_vertices)

        # Placeholder: 17 COCO keypoints x (x, y, v), all zeros.
        coco_landmarks = [0] * 3 * 17

        if np.isnan(projected_vertices_anatomical).any():
            # Match the four-value return below so the caller can unpack it.
            return {}, {}, None, False

        groundtruth_landmarks = {
            name: {"x": point[0], "y": point[1]}
            for name, point in zip(
                AUGMENTED_VERTICES_NAMES, projected_vertices_anatomical
            )
        }

        # Mark each landmark as visible (v=1) if it falls inside the frame.
        for name, point in groundtruth_landmarks.items():
            if (
                point["x"] < 0
                or point["y"] < 0
                or point["x"] > self.img_width
                or point["y"] > self.img_height
            ):
                groundtruth_landmarks[name]["v"] = 0
            else:
                groundtruth_landmarks[name]["v"] = 1

        return groundtruth_landmarks, coco_landmarks, bbox, True

    def generate_dataset(self):
        annotations_files_paths = glob.glob(self.annotation_files_path + "/*.npz")
        iteration = 0
        it_file = 0
        nb_files = len(annotations_files_paths)
        print("creating dataset annotations...")
        print("using cuda: ", torch.cuda.is_available())
        for annotation_path in annotations_files_paths:
            # Closeup sequences are stored rotated by 90 degrees.
            rotate_flag = "closeup" in annotation_path
            annotations = np.load(annotation_path)
            nb_images = len(annotations["imgname"]) // self.sample_rate
            print("starting file: ", annotation_path)
            for index_frame, img_name in enumerate(annotations["imgname"]):
                if index_frame % self.sample_rate == 0:
                    (
                        groundtruth_landmarks,
                        coco_landmarks,
                        bbox,
                        success,
                    ) = self.get_groundtruth_landmarks(
                        annotations, index_frame, rotate_flag
                    )

                    self.total_source_images += 1
                    if not success:
                        self.total_error_reconstruction += 1
                        continue

                    annotation_dict = self.generate_annotation_dict()
                    annotation_dict["bbox"] = bbox.tolist()
                    annotation_dict["keypoints"] = groundtruth_landmarks
                    annotation_dict["coco_keypoints"] = coco_landmarks

                    self.data_dict["annotations"].append(annotation_dict)

                    image_dict = {
                        "id": len(self.data_dict["images"]),
                        "width": self.img_width,
                        "height": self.img_height,
                        "frame_number": index_frame,
                        "img_path": os.path.join(
                            annotation_path.split("/")[-1].split(".")[0],
                            "png",
                            img_name,
                        ),
                    }
                    self.data_dict["images"].append(image_dict)

                    if iteration % 100 == 0:
                        print(
                            f"scene {it_file}/{nb_files}, "
                            f"{index_frame // self.sample_rate}/{nb_images}"
                        )
                    iteration += 1

            it_file += 1

        with open(
            os.path.join(self.output_path, "annotations.json"),
            "w",
            encoding="utf-8",
        ) as f:
            json.dump(self.data_dict, f, ensure_ascii=False, indent=4)

        print("total source images: ", self.total_source_images)
        print("total error reconstruction: ", self.total_error_reconstruction)
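
# Shape of the resulting annotations.json (illustrative values; field names
# match the dicts built above):
#
#     {
#         "infos": {},
#         "images": [
#             {"id": 0, "width": 1280, "height": 720, "frame_number": 0,
#              "img_path": "<npz stem>/png/<img_name>"},
#             ...
#         ],
#         "annotations": [
#             {"image_id": 0, "id": 0, "category_id": 0, "iscrowd": 0,
#              "bbox": [x, y, w, h],
#              "keypoints": {"<marker name>": {"x": ..., "y": ..., "v": 1}, ...},
#              "coco_keypoints": [0, 0, 0, ...]},
#             ...
#         ],
#         "categories": [{"id": 0, "augmented_keypoints": [...],
#                         "coco_keypoints": [...]}]
#     }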


if __name__ == "__main__":
    dataset_generator = DatasetGenerator(
        annotation_files_path="../all_npz_12_validation",
        output_path="bedlam_reannotated",
        sample_rate=6,
    )
    dataset_generator.generate_dataset()
--------------------------------------------------------------------------------