├── .editorconfig ├── .github ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md └── workflows │ ├── add-assignees.yml │ ├── build.yml │ ├── check-inactive.yml │ ├── close-issues.yml │ ├── find-issues.yml │ ├── issue-reply.yml │ ├── issue-statistics.yml │ ├── issue-welcome.yml │ ├── lint.yml │ └── publish-to-pypi.yml ├── .gitignore ├── .pre-commit-config.yaml ├── .pylintrc ├── .readthedocs.yml ├── CITATION.cff ├── LICENSE ├── README.md ├── README_CN.md ├── configs ├── fourdag │ ├── README.md │ ├── campus_config │ │ └── eval_keypoints3d.py │ ├── fourdag_config │ │ ├── eval_keypoints3d_seq2.py │ │ ├── eval_keypoints3d_seq4.py │ │ └── eval_keypoints3d_seq5.py │ └── shelf_config │ │ └── eval_keypoints3d.py ├── humman_mocap │ ├── README.md │ ├── mview_sperson_smpl_estimator.py │ └── mview_sperson_smpl_estimator_trt.py ├── modules │ ├── core │ │ ├── estimation │ │ │ ├── mperson_smpl_estimator.py │ │ │ ├── mview_mperson_end2end_estimator.py │ │ │ ├── mview_mperson_topdown_estimator.py │ │ │ ├── mview_sperson_smpl_estimator.py │ │ │ └── mview_sperson_smplx_estimator.py │ │ └── evaluation │ │ │ ├── bottom_up_eval_shelf_unittest.py │ │ │ └── mview_mperson_eval_shelf_unittest.py │ ├── data │ │ ├── data_converter │ │ │ ├── campus_data_converter_testset_w_perception.py │ │ │ ├── campus_data_converter_unittest.py │ │ │ ├── humman_smc_data_converter_wo_perception.py │ │ │ ├── panoptic_data_converter_testset_w_perception.py │ │ │ ├── panoptic_data_converter_testset_wo_perception.py │ │ │ ├── panoptic_data_converter_trainset_w_perception.py │ │ │ ├── panoptic_data_converter_trainset_wo_perception.py │ │ │ ├── panoptic_data_converter_unittest.py │ │ │ ├── shelf_data_converter_trainset_w_perception.py │ │ │ └── shelf_data_converter_unittest.py │ │ ├── data_visualization │ │ │ └── shelf_data_visualization_testset.py │ │ └── dataset │ │ │ ├── mvp_shelf_testset.py │ │ │ ├── mvpose_shelf_testset.py │ │ │ ├── shelf_unittest.py │ │ │ └── shelf_unittest_bottom_up.py │ ├── human_perception │ │ ├── deploy │ │ │ ├── detection_tensorrt_dynamic-320x320-1344x1344.py │ │ │ └── pose-detection_tensorrt_dynamic-384x288.py │ │ ├── mediapipe_pose_estimator.py │ │ ├── mmdet_faster_rcnn_detector.py │ │ ├── mmdet_faster_rcnn_r50_fpn_coco.py │ │ ├── mmdet_htc_fpn_detector.py │ │ ├── mmdet_htc_x101_64x4d_fpn_16x1_20e_coco.py │ │ ├── mmdet_trt_faster_rcnn_detector.py │ │ ├── mmpose_hrnet_estimator.py │ │ ├── mmpose_hrnet_w48_coco_wholebody_384x288_dark_plus.py │ │ ├── mmpose_trt_hrnet_estimator.py │ │ ├── mmtrack_deepsort_faster-rcnn_fpn_4e_mot17-private-half.py │ │ └── mmtrack_faster_rcnn_detector.py │ ├── model │ │ ├── body_model │ │ │ ├── smpl.py │ │ │ └── smplx.py │ │ └── registrant │ │ │ ├── handlers │ │ │ ├── joint_prior.py │ │ │ ├── keypoints3d_limb_len.py │ │ │ ├── keypoints3d_mse.py │ │ │ ├── pose_prior.py │ │ │ ├── pose_reg.py │ │ │ ├── shape_prior.py │ │ │ └── smooth_joint.py │ │ │ ├── smplify.py │ │ │ ├── smplify_test.py │ │ │ ├── smplifyx.py │ │ │ ├── smplifyx_test.py │ │ │ └── smplifyxd_test.py │ ├── ops │ │ ├── bottom_up_association │ │ │ └── fourdag_associator.py │ │ ├── projection │ │ │ ├── aniposelib_projector.py │ │ │ └── opencv_projector.py │ │ ├── top_down_association │ │ │ ├── mvpose_associator.py │ │ │ └── mvpose_tracking_associator.py │ │ └── triangulation │ │ │ ├── aniposelib_triangulator.py │ │ │ ├── auto_threshold_selector.py │ │ │ ├── camera_error_selector.py │ │ │ ├── jacobi_triangulator.py │ │ │ ├── manual_threshold_selector.py │ │ │ ├── opencv_triangulator.py │ │ │ ├── reprojection_error_point_selector.py │ │ 
│ └── slow_camera_error_selector.py │ └── service │ │ └── smpl_stream_service.py ├── mvp │ ├── README.md │ ├── campus_config │ │ └── mvp_campus.py │ ├── panoptic_config │ │ ├── mvp_panoptic.py │ │ └── mvp_panoptic_3cam.py │ └── shelf_config │ │ ├── mvp_shelf.py │ │ └── mvp_shelf_50.py ├── mvpose │ ├── README.md │ ├── campus_config │ │ ├── campus_data_converter_testset.py │ │ └── eval_keypoints3d.py │ ├── mview_mperson_topdown_estimator.py │ ├── panoptic_config │ │ ├── eval_keypoints3d.py │ │ └── panoptic_data_converter_testset.py │ └── shelf_config │ │ ├── eval_keypoints3d.py │ │ └── shelf_data_converter_testset.py ├── mvpose_tracking │ ├── README.md │ ├── campus_config │ │ ├── campus_data_converter_testset.py │ │ └── eval_keypoints3d.py │ ├── mview_mperson_topdown_estimator.py │ ├── panoptic_config │ │ ├── eval_keypoints3d.py │ │ └── panoptic_data_converter_testset.py │ └── shelf_config │ │ ├── eval_keypoints3d.py │ │ └── shelf_data_converter_testset.py └── shape_aware_3d_pose_optim │ ├── README.md │ └── shape_aware_3d_pose_optimizer.py ├── dockerfiles ├── runtime_ubt18 │ ├── Dockerfile │ └── build_runtime_docker.sh └── service_ubt18 │ ├── Dockerfile │ └── build_runtime_docker.sh ├── docs ├── en │ ├── Makefile │ ├── api.rst │ ├── apis.md │ ├── benchmark.md │ ├── changelog.md │ ├── conf.py │ ├── data_structure │ │ ├── keypoints.md │ │ ├── limbs.md │ │ └── smpl_data.md │ ├── dataset_preparation.md │ ├── estimation │ │ ├── mview_mperson_end2end_estimator.md │ │ ├── mview_mperson_topdown_smpl_estimator.md │ │ └── mview_sperson_smpl_estimator.md │ ├── faq.md │ ├── getting_started.md │ ├── index.rst │ ├── installation.md │ ├── license.rst │ ├── make.bat │ ├── model │ │ └── smplify.md │ ├── ops │ │ └── triangulation.md │ ├── test.md │ ├── tools │ │ ├── eval_model.md │ │ ├── mmdeploy.md │ │ ├── mview_mperson_evaluation.md │ │ ├── mview_mperson_smplify3d.md │ │ ├── prepare_dataset.md │ │ ├── process_smc.md │ │ ├── run_mview_sperson_estimator.md │ │ ├── start_service.md │ │ ├── train_model.md │ │ └── visualize_dataset.md │ └── tutorials │ │ ├── config.md │ │ ├── introduction.md │ │ ├── new_dataset.md │ │ └── new_module.md └── zh_cn │ └── installation.md ├── requirements ├── build.txt ├── docs.txt ├── readthedocs.txt ├── runtime.txt ├── service.txt └── test.txt ├── resources ├── SMPLify_classes.png └── xrmocap-logo.png ├── scripts ├── download_install_deformable.sh ├── download_test_data.sh ├── download_weight.sh ├── eval_mvp.sh ├── run_docker.sh ├── slurm_eval_mvp.sh ├── slurm_train_mvp.sh ├── start_service_docker.sh └── train_mvp.sh ├── setup.cfg ├── setup.py ├── tests ├── core │ └── evaluation │ │ ├── test_fourdag_evaluation.py │ │ ├── test_metric_manager.py │ │ └── test_mvpose_evaluation.py ├── data_structure │ └── body_model │ │ ├── test_smpl_data.py │ │ ├── test_smplx_data.py │ │ └── test_smplxd_data.py ├── human_perception │ ├── test_bbox_detection.py │ └── test_top_down_pose_estimation.py ├── io │ └── test_camera_io.py ├── model │ ├── body_model │ │ ├── test_smpl.py │ │ └── test_smplx.py │ └── registrant │ │ ├── test_smplify.py │ │ ├── test_smplifyx.py │ │ └── test_smplifyxd.py ├── ops │ ├── test_bottom_up_association.py │ ├── test_camera_selector.py │ ├── test_projection.py │ ├── test_reprojection_error_point_selector.py │ ├── test_threshold_selector.py │ ├── test_top_down_association.py │ └── test_triangulation.py ├── test_data │ ├── test_data_converter.py │ └── test_dataset.py ├── transform │ ├── convention │ │ ├── test_bbox_convention.py │ │ └── test_keypoints_convention.py │ ├── image │ 
│ └── test_color.py │ ├── keypoints3d │ │ └── test_optim.py │ └── test_limbs_transform.py ├── utils │ └── test_time_utils.py └── visualization │ └── test_visualize_smpl.py ├── tools ├── clients │ └── smpl_verts_client.py ├── eval_model.py ├── misc │ └── publish_model.py ├── mview_mperson_end2end_estimator.py ├── mview_mperson_evaluation.py ├── mview_mperson_smplify3d.py ├── mview_mperson_topdown_estimator.py ├── prepare_dataset.py ├── process_smc.py ├── run_mview_sperson_estimator.py ├── start_service.py ├── train_model.py └── visualize_dataset.py └── xrmocap ├── __init__.py ├── client ├── __init__.py └── smpl_stream_client.py ├── core ├── __init__.py ├── estimation │ ├── __init__.py │ ├── base_estimator.py │ ├── builder.py │ ├── mperson_smpl_estimator.py │ ├── mview_mperson_end2end_estimator.py │ ├── mview_mperson_topdown_estimator.py │ └── mview_sperson_smpl_estimator.py ├── evaluation │ ├── __init__.py │ ├── base_evaluation.py │ ├── bottom_up_association_evaluation.py │ ├── builder.py │ ├── end2end_evaluation.py │ ├── metric_manager.py │ ├── metrics │ │ ├── __init__.py │ │ ├── base_metric.py │ │ ├── builder.py │ │ ├── mpjpe_metric.py │ │ ├── pa_mpjpe_metric.py │ │ ├── pck_metric.py │ │ ├── pcp_metric.py │ │ ├── precision_recall_metric.py │ │ └── prediction_matcher.py │ └── top_down_association_evaluation.py ├── hook │ ├── __init__.py │ └── smplify_hook │ │ ├── __init__.py │ │ ├── builder.py │ │ ├── smplify_base_hook.py │ │ └── smplify_verbose_hook.py └── train │ ├── __init__.py │ ├── builder.py │ └── trainer.py ├── data ├── __init__.py ├── data_converter │ ├── __init__.py │ ├── base_data_converter.py │ ├── builder.py │ ├── campus_data_converter.py │ ├── humman_smc_data_converter.py │ ├── panoptic_data_converter.py │ └── shelf_data_converter.py ├── data_visualization │ ├── __init__.py │ ├── base_data_visualization.py │ ├── builder.py │ └── mview_mperson_data_visualization.py ├── dataloader │ ├── __init__.py │ └── builder.py └── dataset │ ├── __init__.py │ ├── base_dataset.py │ ├── bottom_up_mview_mperson_dataset.py │ ├── builder.py │ ├── mview_mperson_dataset.py │ └── mvp_dataset.py ├── data_structure ├── __init__.py ├── body_model │ ├── __init__.py │ ├── smpl_data.py │ ├── smplx_data.py │ └── smplxd_data.py ├── keypoints.py ├── limbs.py └── smc_reader.py ├── human_perception ├── __init__.py ├── bbox_detection │ ├── __init__.py │ ├── mmdet_detector.py │ ├── mmdet_trt_detector.py │ └── mmtrack_detector.py ├── builder.py └── keypoints_estimation │ ├── __init__.py │ ├── mediapipe_estimator.py │ ├── mmpose_top_down_estimator.py │ └── mmpose_trt_top_down_estimator.py ├── io ├── __init__.py ├── camera.py └── image.py ├── model ├── __init__.py ├── architecture │ ├── __init__.py │ ├── affinity_estimator.py │ ├── base_architecture.py │ ├── builder.py │ └── multi_view_pose_transformer.py ├── body_model │ ├── __init__.py │ ├── builder.py │ ├── smpl.py │ └── smplx.py ├── loss │ ├── __init__.py │ ├── builder.py │ ├── kp_loss.py │ ├── mapping.py │ ├── mse_loss.py │ ├── prior_loss.py │ └── utils.py ├── mvp │ ├── __init__.py │ ├── builder.py │ ├── matcher.py │ ├── mvp_decoder.py │ ├── pose_resnet.py │ ├── position_encoding.py │ └── projattn.py └── registrant │ ├── __init__.py │ ├── builder.py │ ├── handler │ ├── __init__.py │ ├── base_handler.py │ ├── betas_prior_handler.py │ ├── body_pose_prior_handler.py │ ├── builder.py │ ├── keypoint3d_limb_length_handler.py │ ├── keypoint3d_mse_handler.py │ └── multiview_keypoint2d_mse_handler.py │ ├── optimizable_parameters.py │ ├── smplify.py │ ├── smplifyx.py │ 
└── smplifyxd.py ├── ops ├── __init__.py ├── bottom_up_association │ ├── __init__.py │ ├── builder.py │ ├── fourdag_associator.py │ └── graph_solver │ │ ├── __init__.py │ │ ├── builder.py │ │ ├── graph_associate.py │ │ └── graph_construct.py ├── projection │ ├── __init__.py │ ├── aniposelib_projector.py │ ├── builder.py │ └── pytorch_projector.py ├── top_down_association │ ├── __init__.py │ ├── body_tracking │ │ ├── __init__.py │ │ ├── builder.py │ │ ├── kalman_tracker.py │ │ └── kalman_tracking.py │ ├── builder.py │ ├── identity_tracking │ │ ├── __init__.py │ │ ├── base_tracking.py │ │ ├── builder.py │ │ ├── keypoints_distance_tracking.py │ │ └── perception2d_tracking.py │ ├── matching │ │ ├── __init__.py │ │ ├── base_matching.py │ │ ├── builder.py │ │ ├── match_solver.py │ │ ├── multi_way_matching.py │ │ └── pictorial │ │ │ ├── __init__.py │ │ │ └── pictorial.py │ └── mvpose_associator.py └── triangulation │ ├── __init__.py │ ├── aniposelib_triangulator.py │ ├── builder.py │ ├── jacobi_triangulator.py │ └── point_selection │ ├── __init__.py │ ├── auto_threshold_selector.py │ ├── base_selector.py │ ├── builder.py │ ├── camera_error_selector.py │ ├── hybrid_kps2d_selector.py │ ├── manual_threshold_selector.py │ ├── reprojection_error_point_selector.py │ └── slow_camera_error_selector.py ├── service ├── __init__.py ├── base_flask_service.py ├── builder.py └── smpl_stream_service.py ├── transform ├── __init__.py ├── bbox │ └── __init__.py ├── convention │ ├── __init__.py │ ├── bbox_convention.py │ ├── joints_convention │ │ ├── __init__.py │ │ └── standard_joint_angles.py │ └── keypoints_convention │ │ ├── __init__.py │ │ ├── fourdag_19.py │ │ ├── human_data.py │ │ ├── paf.py │ │ └── panoptic.py ├── image │ ├── __init__.py │ ├── base_image_transform.py │ ├── builder.py │ ├── color.py │ ├── convert.py │ ├── load.py │ └── shape.py ├── keypoints3d │ ├── __init__.py │ └── optim │ │ ├── __init__.py │ │ ├── aniposelib_optimizer.py │ │ ├── base_optimizer.py │ │ ├── builder.py │ │ ├── fourdag_base_optimizer.py │ │ ├── fourdag_optimization.py │ │ ├── median_smooth.py │ │ ├── nan_interpolation.py │ │ ├── prior_optimizer.py │ │ ├── rm_duplicate.py │ │ ├── smpl_shape_aware_optimizer.py │ │ └── trajectory_optimizer.py ├── point │ └── __init__.py └── rotation │ └── __init__.py ├── utils ├── __init__.py ├── camera_utils.py ├── date_utils.py ├── distribute_utils.py ├── eval_utils.py ├── ffmpeg_utils.py ├── fourdag_utils.py ├── geometry.py ├── mvp_utils.py ├── mvpose_utils.py ├── service_utils.py ├── time_utils.py └── triangulation_utils.py ├── version.py └── visualization ├── __init__.py ├── render ├── __init__.py └── mpr_renderer.py ├── visualize_keypoints2d.py ├── visualize_keypoints3d.py └── visualize_smpl.py /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | 4 | [*] 5 | end_of_line = lf 6 | -------------------------------------------------------------------------------- /.github/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to XRMoCap 2 | 3 | All kinds of contributions are welcome, including but not limited to the following. 4 | 5 | - Fixes (typo, bugs) 6 | - New features and components 7 | 8 | ## Workflow 9 | 10 | 1. Fork and pull the latest xrmocap 11 | 1. Checkout a new branch with a meaningful name (do not use master branch for PRs) 12 | 1. Commit your changes 13 | 1. 
Create a PR
14 |
15 | ```{note}
16 | - If you plan to add new features that involve large changes, it is encouraged to open an issue for discussion first.
17 | - If you are the author of some papers and would like to include your method in xrmocap, please contact us. We would much appreciate your contribution.
18 | ```
19 |
20 | ## Code style
21 |
22 | ### Python
23 |
24 | We adopt [PEP8](https://www.python.org/dev/peps/pep-0008/) as the preferred code style.
25 |
26 | We use the following tools for linting and formatting:
27 |
28 | - [flake8](http://flake8.pycqa.org/en/latest/): linter
29 | - [yapf](https://github.com/google/yapf): formatter
30 | - [isort](https://github.com/timothycrosley/isort): sort imports
31 |
32 | Style configurations of yapf and isort can be found in [setup.cfg](../setup.cfg).
33 |
34 | We use a [pre-commit hook](https://pre-commit.com/) that checks and formats code with `flake8`, `yapf` and `isort`, trims trailing whitespace,
35 | fixes end-of-file newlines, and sorts `requirements.txt` automatically on every commit.
36 | The config for the pre-commit hook is stored in [.pre-commit-config](../.pre-commit-config.yaml).
37 |
38 | After you clone the repository, you will need to install and initialize the pre-commit hook:
39 |
40 | ```
41 | pip install -U pre-commit
42 | ```
43 |
44 | Then, from the repository folder:
45 |
46 | ```
47 | pre-commit install
48 | ```
49 |
50 | If you face an issue when installing markdown lint, you may install Ruby for it by
51 | referring to [this repo](https://github.com/innerlee/setup), following its usage and running [`zzruby.sh`](https://github.com/innerlee/setup/blob/master/zzruby.sh),
52 |
53 | or by the following steps:
54 |
55 | ```shell
56 | # install rvm
57 | curl -L https://get.rvm.io | bash -s -- --autolibs=read-fail
58 | rvm autolibs disable
59 | # install ruby
60 | rvm install 2.7.1
61 | ```
62 |
63 | After this, the code linters and formatter will be enforced on every commit.
64 |
65 | > Before you create a PR, make sure that your code lints and is formatted by yapf.
66 |
67 | ### C++ and CUDA
68 |
69 | We follow the [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html).
70 |
--------------------------------------------------------------------------------
/.github/workflows/add-assignees.yml:
--------------------------------------------------------------------------------
1 | name: Add Assignees
2 |
3 | on:
4 |   issues:
5 |     types: [opened, edited]
6 |
7 | jobs:
8 |   add-assignees:
9 |     runs-on: ubuntu-latest
10 |     steps:
11 |       - name: Add assignees
12 |         uses: actions-cool/issues-helper@v3
13 |         with:
14 |           actions: 'add-assignees'
15 |           token: ${{ secrets.GITHUB_TOKEN }}
16 |           issue-number: ${{ github.event.issue.number }}
17 |           assignees: 'aichunling0418,LazyBusyYang,tonylu0728,yl-1993'
18 |           random-to: 1
--------------------------------------------------------------------------------
/.github/workflows/build.yml:
--------------------------------------------------------------------------------
1 | # This workflow will install Python dependencies, then run tests and lint
2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
3 |
4 | name: build
5 |
6 | on:
7 |   push:
8 |     branches:
9 |       - main
10 |     paths-ignore:
11 |       - 'README.md'
12 |       - 'README_CN.md'
13 |       - 'docs/**'
14 |
15 |   pull_request:
16 |     paths-ignore:
17 |       - 'README.md'
18 |       - 'README_CN.md'
19 |       - 'docs/**'
20 |
21 | concurrency:
22 |   group: ${{ github.workflow }}-${{ github.ref }}
23 |   cancel-in-progress: true
24 |
25 | jobs:
26 |   build_cuda114:
27 |     runs-on: ubuntu-20.04
28 |     defaults:
29 |       run:
30 |         shell: bash -l {0}
31 |     container:
32 |       image: openxrlab/xrmocap_runtime:ubuntu1804_x64_cuda116_py38_torch1121_mmcv161
33 |       env:
34 |         GITHUB_ACTIONS: true
35 |     steps:
36 |       - uses: actions/checkout@v2
37 |       - name: Show conda env
38 |         run: |
39 |           source /opt/miniconda/etc/profile.d/conda.sh && conda deactivate
40 |           conda info -e
41 |           conda activate openxrlab
42 |           conda list | grep torch
43 |           conda list | grep mmcv
44 |       - name: Prepare test data
45 |         run: |
46 |           pip install gdown
47 |           sh scripts/download_test_data.sh
48 |       - name: Prepare weight
49 |         run: |
50 |           sh scripts/download_weight.sh
51 |       - name: Build and install
52 |         run: |
53 |           rm -rf xrmocap.egg-info
54 |           source /opt/miniconda/etc/profile.d/conda.sh && conda activate openxrlab
55 |           pip install xrprimer
56 |           pip install .
57 | - name: Install pytest plugin 58 | run: | 59 | source /opt/miniconda/etc/profile.d/conda.sh && conda activate openxrlab 60 | pip install pytest-github-actions-annotate-failures 61 | - name: Run unittests 62 | run: | 63 | source /opt/miniconda/etc/profile.d/conda.sh && conda activate openxrlab 64 | coverage run --source xrmocap -m pytest tests/ 65 | - name: Generate coverage report 66 | run: | 67 | source /opt/miniconda/etc/profile.d/conda.sh && conda activate openxrlab 68 | coverage xml 69 | coverage report -m 70 | - name: Upload coverage to Codecov 71 | uses: codecov/codecov-action@v3 72 | with: 73 | files: ./coverage.xml 74 | flags: unittests 75 | env_vars: OS,PYTHON 76 | name: codecov-umbrella 77 | fail_ci_if_error: false 78 | -------------------------------------------------------------------------------- /.github/workflows/check-inactive.yml: -------------------------------------------------------------------------------- 1 | name: Check Inactive 2 | 3 | on: 4 | schedule: 5 | - cron: "0 0 1 * *" 6 | 7 | jobs: 8 | check-inactive: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - name: check-inactive 12 | uses: actions-cool/issues-helper@v2.2.1 13 | with: 14 | actions: 'check-inactive' 15 | token: ${{ secrets.GITHUB_TOKEN }} 16 | inactive-day: 30 17 | -------------------------------------------------------------------------------- /.github/workflows/close-issues.yml: -------------------------------------------------------------------------------- 1 | name: Check Issues 2 | 3 | on: 4 | schedule: 5 | - cron: "0 0 */7 * *" 6 | 7 | jobs: 8 | check-need-info: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - name: close-issues 12 | uses: actions-cool/issues-helper@v3 13 | with: 14 | actions: 'close-issues' 15 | token: ${{ secrets.GITHUB_TOKEN }} 16 | inactive-day: 60 17 | -------------------------------------------------------------------------------- /.github/workflows/find-issues.yml: -------------------------------------------------------------------------------- 1 | name: Find Issues 2 | 3 | on: 4 | schedule: 5 | - cron: "0 0 */2 * *" 6 | 7 | jobs: 8 | find-issues: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - name: Find issues 12 | uses: actions-cool/issues-helper@v3 13 | with: 14 | actions: 'find-issues' 15 | token: ${{ secrets.GITHUB_TOKEN }} 16 | issue-state: 'open' 17 | -------------------------------------------------------------------------------- /.github/workflows/issue-reply.yml: -------------------------------------------------------------------------------- 1 | name: Issue Reply 2 | 3 | on: 4 | issues: 5 | types: [labeled] 6 | 7 | jobs: 8 | reply-helper: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - name: help wanted 12 | if: github.event.label.name == 'help wanted' 13 | uses: actions-cool/issues-helper@v1.2 14 | with: 15 | actions: 'create-comment' 16 | token: ${{ secrets.GITHUB_TOKEN }} 17 | issue-number: ${{ github.event.issue.number }} 18 | body: | 19 | Hello @${{ github.event.issue.user.login }}, welcome to commit your issue here. We will check and reply to you as soon as possible. 
20 | 你好 @${{ github.event.issue.user.login }},非常欢迎提交你的问题和需求,我们会尽快确认并回复你。 21 | -------------------------------------------------------------------------------- /.github/workflows/issue-statistics.yml: -------------------------------------------------------------------------------- 1 | name: Issue Month Statistics 2 | 3 | on: 4 | schedule: 5 | - cron: "0 1 1 * *" 6 | 7 | jobs: 8 | month-statistics: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - name: month-statistics 12 | uses: actions-cool/issues-helper@v1.7 13 | with: 14 | actions: 'month-statistics' 15 | token: ${{ secrets.GITHUB_TOKEN }} 16 | count-lables: 'true' 17 | count-comments: 'true' 18 | -------------------------------------------------------------------------------- /.github/workflows/issue-welcome.yml: -------------------------------------------------------------------------------- 1 | name: Issue Welcome 2 | 3 | on: 4 | issues: 5 | types: [opened] 6 | 7 | jobs: 8 | issue-welcome: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - name: welcome 12 | uses: actions-cool/issues-helper@v2.2.1 13 | with: 14 | actions: 'welcome' 15 | token: ${{ secrets.GITHUB_TOKEN }} 16 | body: | 17 | Hi @${{ github.event.issue.user.login }}, welcome to commit your first issue! 18 | 你好 @${{ github.event.issue.user.login }},非常欢迎首次提交你的问题! 19 | issue-contents: 'eyes' 20 | -------------------------------------------------------------------------------- /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | name: lint 2 | 3 | on: [push, pull_request] 4 | 5 | concurrency: 6 | group: ${{ github.workflow }}-${{ github.ref }} 7 | cancel-in-progress: true 8 | 9 | jobs: 10 | lint: 11 | runs-on: ubuntu-20.04 12 | steps: 13 | - uses: actions/checkout@v2 14 | - name: Set up Python 3.8 15 | uses: actions/setup-python@v2 16 | with: 17 | python-version: 3.8 18 | - name: Install pre-commit hook 19 | run: | 20 | sudo apt-add-repository ppa:brightbox/ruby-ng -y 21 | sudo apt-get update 22 | sudo apt-get install -y ruby2.7 23 | pip install pre-commit 24 | pre-commit install 25 | - name: Linting 26 | run: pre-commit run --all-files 27 | - name: Check docstring coverage 28 | run: | 29 | pip install interrogate 30 | interrogate -vinmMI --ignore-init-method --ignore-module --ignore-nested-functions --ignore-regex "__repr__" -f 60 xrmocap/ 31 | -------------------------------------------------------------------------------- /.github/workflows/publish-to-pypi.yml: -------------------------------------------------------------------------------- 1 | name: deploy 2 | 3 | on: push 4 | 5 | jobs: 6 | build-n-publish: 7 | runs-on: ubuntu-latest 8 | if: startsWith(github.event.ref, 'refs/tags') 9 | steps: 10 | - uses: actions/checkout@v2 11 | - name: Set up Python 3.7 12 | uses: actions/setup-python@v1 13 | with: 14 | python-version: 3.7 15 | - name: Build XRMoCap 16 | run: | 17 | pip install wheel 18 | python setup.py sdist bdist_wheel 19 | - name: Publish distribution to PyPI 20 | run: | 21 | pip install twine 22 | twine upload dist/* -u __token__ -p ${{ secrets.PYPI_PASSWORD }} 23 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | **/*.pyc 6 | 7 | # C extensions 8 | *.so 9 | 10 | # Distribution / packaging 11 | .Python 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | 
result/ 20 | lib64/ 21 | parts/ 22 | sdist/ 23 | var/ 24 | wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | xrprimer/ 30 | 31 | # PyInstaller 32 | # Usually these files are written by a python script from a template 33 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 34 | *.manifest 35 | *.spec 36 | 37 | # Installer logs 38 | pip-log.txt 39 | pip-delete-this-directory.txt 40 | 41 | # Unit test / coverage reports 42 | htmlcov/ 43 | .tox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | 53 | # Translations 54 | *.mo 55 | *.pot 56 | 57 | # Django stuff: 58 | *.log 59 | local_settings.py 60 | db.sqlite3 61 | 62 | # Flask stuff: 63 | instance/ 64 | .webassets-cache 65 | 66 | # Scrapy stuff: 67 | .scrapy 68 | 69 | # Sphinx documentation 70 | docs/_build/ 71 | 72 | # PyBuilder 73 | target/ 74 | 75 | # Jupyter Notebook 76 | .ipynb_checkpoints 77 | 78 | # pyenv 79 | .python-version 80 | 81 | # celery beat schedule file 82 | celerybeat-schedule 83 | 84 | # SageMath parsed files 85 | *.sage.py 86 | 87 | # Environments 88 | .env 89 | .venv 90 | env/ 91 | venv/ 92 | ENV/ 93 | env.bak/ 94 | venv.bak/ 95 | 96 | # Spyder project settings 97 | .spyderproject 98 | .spyproject 99 | 100 | # Rope project settings 101 | .ropeproject 102 | 103 | # mkdocs documentation 104 | /site 105 | 106 | # mypy 107 | .mypy_cache/ 108 | 109 | # custom 110 | tests/data/ 111 | # data folder 112 | xrmocap_data/ 113 | output/ 114 | # or data link 115 | xrmocap_data 116 | weight 117 | xrmocap/model/deformable 118 | output/ 119 | xrprimer/ 120 | mmhuman3d/ 121 | 122 | # data for pytest moved to http server 123 | .vscode 124 | .idea 125 | *.pkl 126 | *.pkl.json 127 | *.log.json 128 | work_dirs/ 129 | logs/ 130 | 131 | # Pytorch 132 | *.pth 133 | *.pt 134 | 135 | 136 | # Visualization 137 | *.mp4 138 | *.png 139 | *.gif 140 | *.jpg 141 | *.obj 142 | *.ply 143 | 144 | # Resources as exception 145 | !resources/* 146 | 147 | # Loaded/Saved data files 148 | *.npz 149 | *.npy 150 | *.pickle 151 | 152 | # MacOS 153 | *DS_Store* 154 | # git 155 | *.orig 156 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | exclude: .*/tests/data/ 2 | repos: 3 | - repo: https://github.com/PyCQA/flake8 4 | rev: 5.0.4 5 | hooks: 6 | - id: flake8 7 | - repo: https://github.com/asottile/seed-isort-config.git 8 | rev: v2.2.0 9 | hooks: 10 | - id: seed-isort-config 11 | args: [--settings-path, ./] 12 | - repo: https://github.com/PyCQA/isort.git 13 | rev: 5.12.0 14 | hooks: 15 | - id: isort 16 | args: [--settings-file, ./setup.cfg] 17 | - repo: https://github.com/pre-commit/mirrors-yapf.git 18 | rev: v0.30.0 19 | hooks: 20 | - id: yapf 21 | - repo: https://github.com/pre-commit/pre-commit-hooks.git 22 | rev: v3.1.0 23 | hooks: 24 | - id: trailing-whitespace 25 | args: [--markdown-linebreak-ext=md] 26 | exclude: .*/tests/data/ 27 | - id: check-yaml 28 | - id: end-of-file-fixer 29 | - id: requirements-txt-fixer 30 | - id: double-quote-string-fixer 31 | - id: check-merge-conflict 32 | - id: fix-encoding-pragma 33 | args: ["--remove"] 34 | - id: mixed-line-ending 35 | args: ["--fix=lf"] 36 | - repo: https://github.com/codespell-project/codespell 37 | rev: v2.1.0 38 | hooks: 39 | - id: codespell 40 | args: ["--ignore-words-list", "mot"] 41 | - repo: 
https://github.com/myint/docformatter.git 42 | rev: v1.3.1 43 | hooks: 44 | - id: docformatter 45 | args: ["--in-place", "--wrap-descriptions", "79"] 46 | -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | build: 4 | os: ubuntu-22.04 5 | tools: 6 | python: "3.8" 7 | 8 | formats: 9 | - epub 10 | 11 | python: 12 | install: 13 | - requirements: requirements/docs.txt 14 | - requirements: requirements/readthedocs.txt 15 | -------------------------------------------------------------------------------- /CITATION.cff: -------------------------------------------------------------------------------- 1 | cff-version: 1.2.0 2 | message: "If you use this software, please cite it as below." 3 | authors: 4 | - name: "XRMoCap Contributors" 5 | title: "XRMoCap: OpenXRLab Multi-view Motion Capture Toolbox and Benchmark" 6 | date-released: 2022-09-01 7 | url: "https://github.com/openxrlab/xrmocap" 8 | license: Apache-2.0 9 | -------------------------------------------------------------------------------- /README_CN.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/README_CN.md -------------------------------------------------------------------------------- /configs/humman_mocap/README.md: -------------------------------------------------------------------------------- 1 | # HuMMan MoCap 2 | 3 | ## Introduction 4 | 5 | We provide the config files for HuMMan MoCap Toolchain: [HuMMan MoCap](https://caizhongang.github.io/projects/HuMMan/). 6 | 7 | ```BibTeX 8 | @article{cai2022humman, 9 | title={HuMMan: Multi-Modal 4D Human Dataset for Versatile Sensing and Modeling}, 10 | author={Cai, Zhongang and Ren, Daxuan and Zeng, Ailing and Lin, Zhengyu and Yu, Tao and Wang, Wenjia and Fan, Xiangyu and Gao, Yang and Yu, Yifan and Pan, Liang and others}, 11 | journal={arXiv preprint arXiv:2204.13686}, 12 | year={2022} 13 | } 14 | ``` 15 | -------------------------------------------------------------------------------- /configs/modules/data/data_converter/campus_data_converter_testset_w_perception.py: -------------------------------------------------------------------------------- 1 | type = 'CampusDataCovnerter' 2 | data_root = 'CampusSeq1' 3 | bbox_detector = dict( 4 | type='MMdetDetector', 5 | mmdet_kwargs=dict( 6 | checkpoint='weight/' + 7 | 'faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth', 8 | config='configs/modules/human_perception/' + 9 | 'mmdet_faster_rcnn_r50_fpn_coco.py', 10 | device='cuda')) 11 | kps2d_estimator = dict( 12 | type='MMposeTopDownEstimator', 13 | mmpose_kwargs=dict( 14 | checkpoint='weight/hrnet_w48_coco_wholebody' + 15 | '_384x288_dark-f5726563_20200918.pth', 16 | config='configs/modules/human_perception/mmpose_hrnet_w48_' + 17 | 'coco_wholebody_384x288_dark_plus.py', 18 | device='cuda')) 19 | scene_range = [[350, 470], [650, 750]] 20 | meta_path = 'CampusSeq1/xrmocap_meta_testset' 21 | visualize = True 22 | -------------------------------------------------------------------------------- /configs/modules/data/data_converter/campus_data_converter_unittest.py: -------------------------------------------------------------------------------- 1 | type = 'CampusDataCovnerter' 2 | data_root = 'CampusSeq1' 3 | bbox_detector = dict( 4 | type='MMdetDetector', 5 | mmdet_kwargs=dict( 6 | checkpoint='weight/' + 7 | 
'faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth', 8 | config='configs/modules/human_perception/' + 9 | 'mmdet_faster_rcnn_r50_fpn_coco.py', 10 | device='cuda')) 11 | kps2d_estimator = dict( 12 | type='MMposeTopDownEstimator', 13 | bbox_thr=0.8, 14 | mmpose_kwargs=dict( 15 | checkpoint='weight/hrnet_w48_coco_wholebody' + 16 | '_384x288_dark-f5726563_20200918.pth', 17 | config='configs/modules/human_perception/mmpose_hrnet_w48_' + 18 | 'coco_wholebody_384x288_dark_plus.py', 19 | device='cuda')) 20 | scene_range = [[350, 355]] 21 | meta_path = 'CampusSeq1/xrmocap_meta' 22 | visualize = True 23 | -------------------------------------------------------------------------------- /configs/modules/data/data_converter/humman_smc_data_converter_wo_perception.py: -------------------------------------------------------------------------------- 1 | type = 'HummanSMCDataCovnerter' 2 | data_root = 'xrmocap_data/humman_dataset' 3 | bbox_detector = None 4 | kps2d_estimator = None 5 | batch_size = 1000 6 | view_idxs = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] 7 | meta_path = 'xrmocap_data/humman_dataset/xrmocap_meta' 8 | visualize = False 9 | -------------------------------------------------------------------------------- /configs/modules/data/data_converter/panoptic_data_converter_testset_w_perception.py: -------------------------------------------------------------------------------- 1 | type = 'PanopticDataCovnerter' 2 | data_root = 'panoptic-toolbox' 3 | bbox_detector = dict( 4 | type='MMdetDetector', 5 | mmdet_kwargs=dict( 6 | checkpoint='weight/' + 7 | 'faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth', 8 | config='configs/modules/human_perception/' + 9 | 'mmdet_faster_rcnn_r50_fpn_coco.py', 10 | device='cuda')) 11 | kps2d_estimator = dict( 12 | type='MMposeTopDownEstimator', 13 | mmpose_kwargs=dict( 14 | checkpoint='weight/hrnet_w48_coco_wholebody' + 15 | '_384x288_dark-f5726563_20200918.pth', 16 | config='configs/modules/human_perception/mmpose_hrnet_w48_' + 17 | 'coco_wholebody_384x288_dark_plus.py', 18 | device='cuda')) 19 | 20 | batch_size = 1000 21 | scene_names = [ 22 | '160906_pizza1', '160422_haggling1', '160906_ian5', '160906_band4' 23 | ] 24 | view_idxs = [3, 6, 12, 13, 23] 25 | frame_period = 12 26 | scene_range = [[112, 6694], [245, 13825], [129, 3001], [161, 10001]] 27 | meta_path = 'panoptic-toolbox/xrmocap_meta_testset' 28 | visualize = True 29 | -------------------------------------------------------------------------------- /configs/modules/data/data_converter/panoptic_data_converter_testset_wo_perception.py: -------------------------------------------------------------------------------- 1 | type = 'PanopticDataCovnerter' 2 | data_root = 'panoptic-toolbox' 3 | bbox_detector = None 4 | kps2d_estimator = None 5 | 6 | batch_size = 1000 7 | scene_names = [ 8 | '160906_pizza1', '160422_haggling1', '160906_ian5', '160906_band4' 9 | ] 10 | view_idxs = [3, 6, 12, 13, 23] 11 | frame_period = 12 12 | scene_range = [[112, 6694], [245, 13825], [129, 3001], [161, 10001]] 13 | meta_path = 'panoptic-toolbox/xrmocap_meta_testset' 14 | visualize = True 15 | -------------------------------------------------------------------------------- /configs/modules/data/data_converter/panoptic_data_converter_trainset_w_perception.py: -------------------------------------------------------------------------------- 1 | type = 'PanopticDataCovnerter' 2 | data_root = 'panoptic-toolbox' 3 | bbox_detector = dict( 4 | type='MMdetDetector', 5 | mmdet_kwargs=dict( 6 | checkpoint='weight/' + 7 | 
'faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth', 8 | config='configs/modules/human_perception/' + 9 | 'mmdet_faster_rcnn_r50_fpn_coco.py', 10 | device='cuda')) 11 | kps2d_estimator = dict( 12 | type='MMposeTopDownEstimator', 13 | mmpose_kwargs=dict( 14 | checkpoint='weight/hrnet_w48_coco_wholebody' + 15 | '_384x288_dark-f5726563_20200918.pth', 16 | config='configs/modules/human_perception/mmpose_hrnet_w48_' + 17 | 'coco_wholebody_384x288_dark_plus.py', 18 | device='cuda')) 19 | 20 | batch_size = 1000 21 | scene_names = [ 22 | '160422_ultimatum1', '160224_haggling1', '160226_haggling1', 23 | '161202_haggling1', '160906_ian1', '160906_ian2', '160906_ian3', 24 | '160906_band1', '160906_band2' 25 | ] 26 | view_idxs = [3, 6, 12, 13, 23] 27 | frame_period = 12 28 | scene_range = [[173, 26967], [169, 8885], [129, 11594], [3390, 14240], 29 | [154, 3001], [156, 7501], [133, 7501], [168, 7501], [139, 7501]] 30 | meta_path = 'panoptic-toolbox/xrmocap_meta_trainset' 31 | visualize = True 32 | -------------------------------------------------------------------------------- /configs/modules/data/data_converter/panoptic_data_converter_trainset_wo_perception.py: -------------------------------------------------------------------------------- 1 | type = 'PanopticDataCovnerter' 2 | data_root = 'panoptic-toolbox' 3 | bbox_detector = None 4 | kps2d_estimator = None 5 | 6 | batch_size = 1000 7 | scene_names = [ 8 | '160422_ultimatum1', '160224_haggling1', '160226_haggling1', 9 | '161202_haggling1', '160906_ian1', '160906_ian2', '160906_ian3', 10 | '160906_band1', '160906_band2' 11 | ] 12 | view_idxs = [3, 6, 12, 13, 23] 13 | frame_period = 12 14 | scene_range = [[173, 26967], [169, 8885], [129, 11594], [3390, 14240], 15 | [154, 3001], [156, 7501], [133, 7501], [168, 7501], [139, 7501]] 16 | meta_path = 'panoptic-toolbox/xrmocap_meta_trainset' 17 | visualize = True 18 | -------------------------------------------------------------------------------- /configs/modules/data/data_converter/panoptic_data_converter_unittest.py: -------------------------------------------------------------------------------- 1 | type = 'PanopticDataCovnerter' 2 | data_root = 'panoptic-toolbox' 3 | bbox_detector = dict( 4 | type='MMdetDetector', 5 | mmdet_kwargs=dict( 6 | checkpoint='weight/' + 7 | 'faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth', 8 | config='configs/modules/human_perception/' + 9 | 'mmdet_faster_rcnn_r50_fpn_coco.py', 10 | device='cuda')) 11 | kps2d_estimator = dict( 12 | type='MMposeTopDownEstimator', 13 | mmpose_kwargs=dict( 14 | checkpoint='weight/hrnet_w48_coco_wholebody' + 15 | '_384x288_dark-f5726563_20200918.pth', 16 | config='configs/modules/human_perception/mmpose_hrnet_w48_' + 17 | 'coco_wholebody_384x288_dark_plus.py', 18 | device='cuda')) 19 | 20 | scene_names = 'all' 21 | scene_range = [[5, 15]] 22 | view_idxs = [3, 6, 12] 23 | meta_path = 'panoptic-toolbox/xrmocap_meta_testset' 24 | frame_period = 2 25 | visualize = True 26 | -------------------------------------------------------------------------------- /configs/modules/data/data_converter/shelf_data_converter_trainset_w_perception.py: -------------------------------------------------------------------------------- 1 | type = 'ShelfDataCovnerter' 2 | data_root = 'Shelf' 3 | bbox_detector = dict( 4 | type='MMdetDetector', 5 | mmdet_kwargs=dict( 6 | checkpoint='weight/' + 7 | 'faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth', 8 | config='configs/modules/human_perception/' + 9 | 'mmdet_faster_rcnn_r50_fpn_coco.py', 10 | device='cuda')) 11 | 
kps2d_estimator = dict( 12 | type='MMposeTopDownEstimator', 13 | mmpose_kwargs=dict( 14 | checkpoint='weight/hrnet_w48_coco_wholebody' + 15 | '_384x288_dark-f5726563_20200918.pth', 16 | config='configs/modules/human_perception/mmpose_hrnet_w48_' + 17 | 'coco_wholebody_384x288_dark_plus.py', 18 | device='cuda')) 19 | scene_range = [[0, 300], [600, 3200]] 20 | meta_path = 'Shelf/xrmocap_meta_testset' 21 | visualize = True 22 | -------------------------------------------------------------------------------- /configs/modules/data/data_converter/shelf_data_converter_unittest.py: -------------------------------------------------------------------------------- 1 | type = 'ShelfDataCovnerter' 2 | data_root = 'Shelf' 3 | bbox_detector = dict( 4 | type='MMdetDetector', 5 | mmdet_kwargs=dict( 6 | checkpoint='weight/' + 7 | 'faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth', 8 | config='configs/modules/human_perception/' + 9 | 'mmdet_faster_rcnn_r50_fpn_coco.py', 10 | device='cuda')) 11 | kps2d_estimator = dict( 12 | type='MMposeTopDownEstimator', 13 | bbox_thr=0.8, 14 | mmpose_kwargs=dict( 15 | checkpoint='weight/hrnet_w48_coco_wholebody' + 16 | '_384x288_dark-f5726563_20200918.pth', 17 | config='configs/modules/human_perception/mmpose_hrnet_w48_' + 18 | 'coco_wholebody_384x288_dark_plus.py', 19 | device='cuda')) 20 | scene_range = [[300, 305]] 21 | meta_path = 'Shelf/xrmocap_meta' 22 | visualize = True 23 | -------------------------------------------------------------------------------- /configs/modules/data/data_visualization/shelf_data_visualization_testset.py: -------------------------------------------------------------------------------- 1 | type = 'MviewMpersonDataVisualization' 2 | data_root = 'Shelf' 3 | output_dir = 'output' 4 | meta_path = 'xrmocap_meta_testset' 5 | pred_kps3d_paths = None 6 | bbox_thr = 0.96 7 | vis_percep2d = True 8 | kps2d_convention = None 9 | vis_gt_kps3d = True 10 | gt_kps3d_convention = None 11 | -------------------------------------------------------------------------------- /configs/modules/data/dataset/mvp_shelf_testset.py: -------------------------------------------------------------------------------- 1 | type = 'MviewMpersonDataset' 2 | data_root = 'Shelf' 3 | img_pipeline = [ 4 | dict(type='LoadImagePIL'), 5 | dict(type='Resize', size=224), 6 | dict(type='ToTensor'), 7 | dict(type='BGR2RGB') 8 | ] 9 | meta_path = 'xrmocap_meta_testset' 10 | test_mode = True 11 | shuffled = False 12 | bbox_convention = None 13 | kps2d_convention = None 14 | gt_kps3d_convention = 'campus' 15 | cam_world2cam = False 16 | -------------------------------------------------------------------------------- /configs/modules/data/dataset/mvpose_shelf_testset.py: -------------------------------------------------------------------------------- 1 | type = 'MviewMpersonDataset' 2 | data_root = 'Shelf' 3 | img_pipeline = [ 4 | dict(type='LoadImagePIL'), 5 | dict(type='Resize', size=224), 6 | dict(type='ToTensor'), 7 | dict(type='RGB2BGR'), 8 | ] 9 | meta_path = 'xrmocap_meta_testset' 10 | test_mode = True 11 | shuffled = False 12 | bbox_convention = 'xyxy' 13 | bbox_thr = 0.9 14 | kps2d_convention = 'coco' 15 | gt_kps3d_convention = 'coco' 16 | cam_world2cam = False 17 | -------------------------------------------------------------------------------- /configs/modules/data/dataset/shelf_unittest.py: -------------------------------------------------------------------------------- 1 | type = 'MviewMpersonDataset' 2 | data_root = 'tests/data/data/test_dataset/Shelf_unittest' 3 | img_pipeline = 
[ 4 | dict(type='LoadImagePIL'), 5 | dict(type='Resize', size=224), 6 | dict(type='ToTensor'), 7 | dict(type='RGB2BGR'), 8 | ] 9 | meta_path = 'tests/data/data/test_dataset/Shelf_unittest/' +\ 10 | 'xrmocap_meta_perception2d' 11 | test_mode = True 12 | shuffled = False 13 | bbox_convention = 'xyxy' 14 | bbox_thr = 0.6 15 | kps2d_convention = 'coco' 16 | gt_kps3d_convention = 'coco' 17 | cam_world2cam = False 18 | -------------------------------------------------------------------------------- /configs/modules/data/dataset/shelf_unittest_bottom_up.py: -------------------------------------------------------------------------------- 1 | type = 'BottomUpMviewMpersonDataset' 2 | data_root = 'tests/data/data/test_dataset/Shelf_unittest' 3 | img_pipeline = [ 4 | dict(type='LoadImagePIL'), 5 | dict(type='Resize', size=224), 6 | dict(type='ToTensor'), 7 | dict(type='BGR2RGB'), 8 | ] 9 | meta_path = 'tests/data/data/test_dataset/Shelf_unittest/' +\ 10 | 'xrmocap_meta_perception2d' 11 | test_mode = True 12 | shuffled = False 13 | kps2d_convention = 'fourdag_19' 14 | gt_kps3d_convention = 'campus' 15 | cam_world2cam = True 16 | -------------------------------------------------------------------------------- /configs/modules/human_perception/deploy/detection_tensorrt_dynamic-320x320-1344x1344.py: -------------------------------------------------------------------------------- 1 | onnx_config = dict( 2 | type='onnx', 3 | export_params=True, 4 | keep_initializers_as_inputs=False, 5 | opset_version=11, 6 | save_file='end2end.onnx', 7 | input_names=['input'], 8 | output_names=['dets', 'labels'], 9 | input_shape=None, 10 | optimize=True, 11 | dynamic_axes=dict( 12 | input=dict({ 13 | 0: 'batch', 14 | 2: 'height', 15 | 3: 'width' 16 | }), 17 | dets=dict({ 18 | 0: 'batch', 19 | 1: 'num_dets' 20 | }), 21 | labels=dict({ 22 | 0: 'batch', 23 | 1: 'num_dets' 24 | }))) 25 | codebase_config = dict( 26 | type='mmdet', 27 | task='ObjectDetection', 28 | model_type='end2end', 29 | post_processing=dict( 30 | score_threshold=0.05, 31 | confidence_threshold=0.005, 32 | iou_threshold=0.5, 33 | max_output_boxes_per_class=200, 34 | pre_top_k=5000, 35 | keep_top_k=100, 36 | background_label_id=-1)) 37 | backend_config = dict( 38 | type='tensorrt', 39 | common_config=dict(fp16_mode=False, max_workspace_size=1073741824), 40 | model_inputs=[ 41 | dict( 42 | input_shapes=dict( 43 | input=dict( 44 | min_shape=[1, 3, 320, 320], 45 | opt_shape=[1, 3, 800, 1344], 46 | max_shape=[10, 3, 1344, 1344]))) 47 | ]) 48 | -------------------------------------------------------------------------------- /configs/modules/human_perception/deploy/pose-detection_tensorrt_dynamic-384x288.py: -------------------------------------------------------------------------------- 1 | onnx_config = dict( 2 | type='onnx', 3 | export_params=True, 4 | keep_initializers_as_inputs=False, 5 | opset_version=11, 6 | save_file='end2end.onnx', 7 | input_names=['input'], 8 | output_names=['output'], 9 | input_shape=[288, 384], 10 | optimize=True, 11 | dynamic_axes=dict(input=dict({0: 'batch'}), output=dict({0: 'batch'}))) 12 | codebase_config = dict(type='mmpose', task='PoseDetection') 13 | backend_config = dict( 14 | type='tensorrt', 15 | common_config=dict(fp16_mode=False, max_workspace_size=1073741824), 16 | model_inputs=[ 17 | dict( 18 | input_shapes=dict( 19 | input=dict( 20 | min_shape=[1, 3, 384, 288], 21 | opt_shape=[2, 3, 384, 288], 22 | max_shape=[5, 3, 384, 288]))) 23 | ]) 24 | -------------------------------------------------------------------------------- 
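The two deploy configs above carry the ONNX export settings and the TensorRT profiles (`min_shape`/`opt_shape`/`max_shape`) from which MMDeploy builds the `end2end.engine` files referenced by the `*_trt_*` perception configs further below. The following is a minimal, hedged sketch of the export step, assuming the MMDeploy 0.x Python API; `sample.jpg` is a hypothetical tracing image, and the engine itself is built afterwards from the resulting ONNX file (e.g. with MMDeploy's `tools/onnx2tensorrt.py`).

```python
# Hedged sketch, assuming MMDeploy 0.x: export the wholebody HRNet model
# to ONNX using the dynamic-shape deploy config above.
from mmdeploy.apis import torch2onnx

torch2onnx(
    img='sample.jpg',  # hypothetical image used only for tracing
    work_dir='weight/mmpose_hrnet',
    save_file='end2end.onnx',
    deploy_cfg='configs/modules/human_perception/deploy/'
    'pose-detection_tensorrt_dynamic-384x288.py',
    model_cfg='configs/modules/human_perception/'
    'mmpose_hrnet_w48_coco_wholebody_384x288_dark_plus.py',
    model_checkpoint='weight/hrnet_w48_coco_wholebody'
    '_384x288_dark-f5726563_20200918.pth',
    device='cuda:0')
```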
/configs/modules/human_perception/mediapipe_pose_estimator.py: -------------------------------------------------------------------------------- 1 | type = 'MediapipeEstimator' 2 | mediapipe_kwargs = dict( 3 | static_image_mode=True, 4 | model_complexity=2, 5 | enable_segmentation=False, 6 | min_detection_confidence=0.5) 7 | bbox_thr = 0.95 8 | -------------------------------------------------------------------------------- /configs/modules/human_perception/mmdet_faster_rcnn_detector.py: -------------------------------------------------------------------------------- 1 | type = 'MMdetDetector' 2 | mmdet_kwargs = dict( 3 | checkpoint='weight/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth', 4 | config='configs/modules/human_perception/' + 5 | 'mmdet_faster_rcnn_r50_fpn_coco.py', 6 | device='cuda') 7 | batch_size = 10 8 | -------------------------------------------------------------------------------- /configs/modules/human_perception/mmdet_htc_fpn_detector.py: -------------------------------------------------------------------------------- 1 | type = 'MMdetDetector' 2 | mmdet_kwargs = dict( 3 | checkpoint='weight/htc_x101_64x4d_fpn_16x1_20e_coco_20200318-b181fd7a.pth', 4 | config='configs/modules/human_perception/' + 5 | 'mmdet_htc_x101_64x4d_fpn_16x1_20e_coco.py', 6 | device='cuda') 7 | batch_size = 2 8 | -------------------------------------------------------------------------------- /configs/modules/human_perception/mmdet_trt_faster_rcnn_detector.py: -------------------------------------------------------------------------------- 1 | type = 'MMdetTrtDetector' 2 | deploy_cfg = 'configs/modules/human_perception/deploy/' + \ 3 | 'detection_tensorrt_dynamic-320x320-1344x1344.py' 4 | model_cfg = 'configs/modules/human_perception/' + \ 5 | 'mmdet_faster_rcnn_r50_fpn_coco.py' 6 | backend_files = [ 7 | 'weight/mmdet_faster_rcnn/end2end.engine', 8 | ] 9 | device = 'cuda' 10 | batch_size = 1 11 | -------------------------------------------------------------------------------- /configs/modules/human_perception/mmpose_hrnet_estimator.py: -------------------------------------------------------------------------------- 1 | type = 'MMposeTopDownEstimator' 2 | mmpose_kwargs = dict( 3 | checkpoint='weight/hrnet_w48_coco_wholebody' + 4 | '_384x288_dark-f5726563_20200918.pth', 5 | config='configs/modules/human_perception/mmpose_hrnet_w48_' + 6 | 'coco_wholebody_384x288_dark_plus.py', 7 | device='cuda') 8 | bbox_thr = 0.95 9 | -------------------------------------------------------------------------------- /configs/modules/human_perception/mmpose_trt_hrnet_estimator.py: -------------------------------------------------------------------------------- 1 | type = 'MMposeTrtTopDownEstimator' 2 | deploy_cfg = 'configs/modules/human_perception/deploy/' + \ 3 | 'pose-detection_tensorrt_dynamic-384x288.py' 4 | model_cfg = 'configs/modules/human_perception/mmpose_hrnet_w48_' + \ 5 | 'coco_wholebody_384x288_dark_plus.py' 6 | backend_files = [ 7 | 'weight/mmpose_hrnet/end2end.engine', 8 | ] 9 | device = 'cuda' 10 | bbox_thr = 0.95 11 | -------------------------------------------------------------------------------- /configs/modules/human_perception/mmtrack_faster_rcnn_detector.py: -------------------------------------------------------------------------------- 1 | type = 'MMtrackDetector' 2 | mmtrack_kwargs = dict( 3 | config='configs/modules/human_perception/' + 4 | 'mmtrack_deepsort_faster-rcnn_fpn_4e_mot17-private-half.py', 5 | device='cuda') 6 | 
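Each perception config above is a plain Python file: `type` names a registered class and the remaining top-level variables become its constructor kwargs. Below is a minimal sketch of loading and building one of them, assuming mmcv 1.x and that `build_detector` in `xrmocap/human_perception/builder.py` wraps the corresponding registry (helper name assumed, not confirmed by this listing).

```python
from mmcv import Config

# Assumed registry helper from xrmocap/human_perception/builder.py.
from xrmocap.human_perception.builder import build_detector

# `type` selects the registered class; the other keys become kwargs.
cfg = Config.fromfile(
    'configs/modules/human_perception/mmdet_faster_rcnn_detector.py')
detector = build_detector(cfg)
```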
-------------------------------------------------------------------------------- /configs/modules/model/body_model/smpl.py: -------------------------------------------------------------------------------- 1 | type = 'SMPL' 2 | gender = 'neutral' 3 | num_betas = 10 4 | batch_size = 1 5 | keypoint_convention = 'smpl_45' 6 | model_path = 'xrmocap_data/body_models/smpl' 7 | logger = None 8 | -------------------------------------------------------------------------------- /configs/modules/model/body_model/smplx.py: -------------------------------------------------------------------------------- 1 | type = 'SMPLX' 2 | gender = 'neutral' 3 | num_betas = 10 4 | use_face_contour = True 5 | keypoint_convention = 'smplx' 6 | model_path = 'xrmocap_data/body_models/smplx' 7 | batch_size = 1 8 | use_pca = False 9 | logger = None 10 | -------------------------------------------------------------------------------- /configs/modules/model/registrant/handlers/joint_prior.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/configs/modules/model/registrant/handlers/joint_prior.py -------------------------------------------------------------------------------- /configs/modules/model/registrant/handlers/keypoints3d_limb_len.py: -------------------------------------------------------------------------------- 1 | handler_key = 'keypoints3d_limb_len' 2 | type = 'Keypoint3dLimbLenHandler' 3 | loss = dict( 4 | type='LimbLengthLoss', 5 | convention='smpl', 6 | loss_weight=1.0, 7 | reduction='mean') 8 | -------------------------------------------------------------------------------- /configs/modules/model/registrant/handlers/keypoints3d_mse.py: -------------------------------------------------------------------------------- 1 | handler_key = 'keypoints3d_mse' 2 | type = 'Keypoint3dMSEHandler' 3 | mse_loss = dict( 4 | type='KeypointMSELoss', loss_weight=10.0, reduction='sum', sigma=100) 5 | -------------------------------------------------------------------------------- /configs/modules/model/registrant/handlers/pose_prior.py: -------------------------------------------------------------------------------- 1 | handler_key = 'pose_prior' 2 | type = 'BodyPosePriorHandler' 3 | prior_loss = dict( 4 | type='MaxMixturePriorLoss', 5 | prior_folder='xrmocap_data/body_models', 6 | num_gaussians=8, 7 | loss_weight=4.78**2, 8 | reduction='sum') 9 | -------------------------------------------------------------------------------- /configs/modules/model/registrant/handlers/pose_reg.py: -------------------------------------------------------------------------------- 1 | handler_key = 'pose_reg' 2 | type = 'BodyPosePriorHandler' 3 | prior_loss = dict(type='PoseRegLoss', loss_weight=0.001, reduction='mean') 4 | -------------------------------------------------------------------------------- /configs/modules/model/registrant/handlers/shape_prior.py: -------------------------------------------------------------------------------- 1 | handler_key = 'joint_prior' 2 | type = 'BodyPosePriorHandler' 3 | prior_loss = dict( 4 | type='JointPriorLoss', 5 | loss_weight=20.0, 6 | reduction='sum', 7 | smooth_spine=True, 8 | smooth_spine_loss_weight=20, 9 | use_full_body=True) 10 | -------------------------------------------------------------------------------- /configs/modules/model/registrant/handlers/smooth_joint.py: -------------------------------------------------------------------------------- 1 | handler_key = 
'smooth_joint' 2 | type = 'BodyPosePriorHandler' 3 | prior_loss = dict( 4 | type='SmoothJointLoss', loss_weight=1.0, reduction='mean', loss_func='L2') 5 | -------------------------------------------------------------------------------- /configs/modules/ops/bottom_up_association/fourdag_associator.py: -------------------------------------------------------------------------------- 1 | type = 'FourDAGAssociator' 2 | kps_convention = 'fourdag_19' 3 | min_asgn_cnt = 5 4 | use_tracking_edges = True 5 | keypoints3d_optimizer = dict( 6 | type='FourDAGOptimizer', 7 | triangulator=dict(type='JacobiTriangulator', ), 8 | active_rate=0.1, 9 | min_track_cnt=5, 10 | bone_capacity=100, 11 | w_bone3d=1.0, 12 | w_square_shape=1e-2, 13 | shape_max_iter=5, 14 | w_kps3d=1.0, 15 | w_regular_pose=1e-3, 16 | pose_max_iter=20, 17 | w_kps2d=1e-5, 18 | w_temporal_trans=1e-1, 19 | w_temporal_pose=1e-2, 20 | min_triangulate_cnt=15, 21 | init_active=0.9, 22 | triangulate_thresh=0.05, 23 | ) 24 | graph_construct = dict( 25 | type='GraphConstruct', 26 | kps_convention='fourdag_19', 27 | max_epi_dist=0.15, 28 | max_temp_dist=0.2, 29 | normalize_edges=True, 30 | ) 31 | graph_associate = dict( 32 | type='GraphAssociate', 33 | kps_convention='fourdag_19', 34 | w_epi=2, 35 | w_temp=2, 36 | w_view=2, 37 | w_paf=1, 38 | w_hier=0.5, 39 | c_view_cnt=1.5, 40 | min_check_cnt=1, 41 | ) 42 | -------------------------------------------------------------------------------- /configs/modules/ops/projection/aniposelib_projector.py: -------------------------------------------------------------------------------- 1 | type = 'AniposelibProjector' 2 | camera_parameters = [] 3 | logger = None 4 | -------------------------------------------------------------------------------- /configs/modules/ops/projection/opencv_projector.py: -------------------------------------------------------------------------------- 1 | type = 'OpencvProjector' 2 | camera_parameters = [] 3 | -------------------------------------------------------------------------------- /configs/modules/ops/top_down_association/mvpose_associator.py: -------------------------------------------------------------------------------- 1 | __pred_kps3d_convention__ = 'coco' 2 | __bbox_thr__ = 0.9 3 | 4 | type = 'MvposeAssociator' 5 | triangulator = dict( 6 | type='AniposelibTriangulator', 7 | camera_parameters=[], 8 | ) 9 | affinity_estimator = dict(type='AppearanceAffinityEstimator', init_cfg=None) 10 | point_selector = dict( 11 | type='HybridKps2dSelector', 12 | triangulator=dict( 13 | type='AniposelibTriangulator', 14 | camera_parameters=[], 15 | ), 16 | verbose=False, 17 | ignore_kps_name=['left_eye', 'right_eye', 'left_ear', 'right_ear'], 18 | convention=__pred_kps3d_convention__) 19 | multi_way_matching = dict( 20 | type='MultiWayMatching', 21 | use_dual_stochastic_SVT=True, 22 | lambda_SVT=50, 23 | alpha_SVT=0.5, 24 | n_cam_min=3, 25 | ) 26 | kalman_tracking = None 27 | identity_tracking = dict( 28 | type='KeypointsDistanceTracking', 29 | tracking_distance=0.7, 30 | tracking_kps3d_convention=__pred_kps3d_convention__, 31 | tracking_kps3d_name=[ 32 | 'left_shoulder', 'right_shoulder', 'left_hip_extra', 'right_hip_extra' 33 | ]) 34 | checkpoint_path = './weight/mvpose/' + \ 35 | 'resnet50_reid_camstyle-98d61e41_20220921.pth' 36 | bbox_thr = __bbox_thr__ 37 | device = 'cuda' 38 | -------------------------------------------------------------------------------- /configs/modules/ops/top_down_association/mvpose_tracking_associator.py: 
-------------------------------------------------------------------------------- 1 | __pred_kps3d_convention__ = 'coco' 2 | __bbox_thr__ = 0.9 3 | 4 | type = 'MvposeAssociator' 5 | triangulator = dict( 6 | type='AniposelibTriangulator', 7 | camera_parameters=[], 8 | ) 9 | affinity_estimator = dict(type='AppearanceAffinityEstimator', init_cfg=None) 10 | point_selector = dict( 11 | type='HybridKps2dSelector', 12 | triangulator=dict( 13 | type='AniposelibTriangulator', 14 | camera_parameters=[], 15 | ), 16 | verbose=False, 17 | ignore_kps_name=['left_eye', 'right_eye', 'left_ear', 'right_ear'], 18 | convention=__pred_kps3d_convention__) 19 | multi_way_matching = dict( 20 | type='MultiWayMatching', 21 | use_dual_stochastic_SVT=True, 22 | lambda_SVT=50, 23 | alpha_SVT=0.5, 24 | n_cam_min=3, 25 | ) 26 | kalman_tracking = dict( 27 | type='KalmanTracking', 28 | n_cam_min=3, 29 | ) 30 | identity_tracking = dict( 31 | type='KeypointsDistanceTracking', 32 | tracking_distance=0.7, 33 | tracking_kps3d_convention=__pred_kps3d_convention__, 34 | tracking_kps3d_name=[ 35 | 'left_shoulder', 'right_shoulder', 'left_hip_extra', 'right_hip_extra' 36 | ]) 37 | checkpoint_path = './weight/mvpose/' + \ 38 | 'resnet50_reid_camstyle-98d61e41_20220921.pth' 39 | best_distance = 600 40 | interval = 5 41 | bbox_thr = __bbox_thr__ 42 | device = 'cuda' 43 | -------------------------------------------------------------------------------- /configs/modules/ops/triangulation/aniposelib_triangulator.py: -------------------------------------------------------------------------------- 1 | type = 'AniposelibTriangulator' 2 | camera_parameters = [] 3 | logger = None 4 | -------------------------------------------------------------------------------- /configs/modules/ops/triangulation/auto_threshold_selector.py: -------------------------------------------------------------------------------- 1 | type = 'AutoThresholdSelector' 2 | start = 0.95 3 | stride = -0.025 4 | verbose = True 5 | logger = None 6 | -------------------------------------------------------------------------------- /configs/modules/ops/triangulation/camera_error_selector.py: -------------------------------------------------------------------------------- 1 | type = 'CameraErrorSelector' 2 | target_camera_number = 5 3 | triangulator = dict( 4 | type='AniposelibTriangulator', camera_parameters=[], logger=None) 5 | verbose = True 6 | logger = None 7 | -------------------------------------------------------------------------------- /configs/modules/ops/triangulation/jacobi_triangulator.py: -------------------------------------------------------------------------------- 1 | type = 'JacobiTriangulator' 2 | -------------------------------------------------------------------------------- /configs/modules/ops/triangulation/manual_threshold_selector.py: -------------------------------------------------------------------------------- 1 | type = 'ManualThresholdSelector' 2 | threshold = 0.5 3 | verbose = True 4 | logger = None 5 | -------------------------------------------------------------------------------- /configs/modules/ops/triangulation/opencv_triangulator.py: -------------------------------------------------------------------------------- 1 | type = 'OpencvTriangulator' 2 | camera_parameters = [] 3 | multiview_reduction = 'median' 4 | -------------------------------------------------------------------------------- /configs/modules/ops/triangulation/reprojection_error_point_selector.py: -------------------------------------------------------------------------------- 
1 | type = 'ReprojectionErrorPointSelector' 2 | target_camera_number = 5 3 | triangulator = dict( 4 | type='AniposelibTriangulator', camera_parameters=[], logger=None) 5 | verbose = True 6 | logger = None 7 | -------------------------------------------------------------------------------- /configs/modules/ops/triangulation/slow_camera_error_selector.py: -------------------------------------------------------------------------------- 1 | type = 'SlowCameraErrorSelector' 2 | target_camera_number = 5 3 | triangulator = dict( 4 | type='AniposelibTriangulator', camera_parameters=[], logger=None) 5 | verbose = True 6 | logger = None 7 | -------------------------------------------------------------------------------- /configs/modules/service/smpl_stream_service.py: -------------------------------------------------------------------------------- 1 | type = 'SMPLStreamService' 2 | name = 'smpl_stream_service' 3 | work_dir = f'temp/{name}' 4 | body_model_dir = 'xrmocap_data/body_models' 5 | device = 'cuda:0' 6 | enable_bytes = True 7 | enable_cors = True 8 | port = 29091 9 | max_http_buffer_size = 128 * 1024 * 1024 10 | -------------------------------------------------------------------------------- /configs/mvpose/campus_config/campus_data_converter_testset.py: -------------------------------------------------------------------------------- 1 | type = 'CampusDataCovnerter' 2 | data_root = 'CampusSeq1' 3 | bbox_detector = dict( 4 | type='MMtrackDetector', 5 | mmtrack_kwargs=dict( 6 | config='configs/modules/human_perception/' + 7 | 'mmtrack_deepsort_faster-rcnn_fpn_4e_mot17-private-half.py', 8 | device='cuda')) 9 | kps2d_estimator = dict( 10 | type='MMposeTopDownEstimator', 11 | mmpose_kwargs=dict( 12 | checkpoint='weight/hrnet_w48_coco_wholebody' + 13 | '_384x288_dark-f5726563_20200918.pth', 14 | config='configs/modules/human_perception/mmpose_hrnet_w48_' + 15 | 'coco_wholebody_384x288_dark_plus.py', 16 | device='cuda')) 17 | scene_range = [[350, 470], [650, 750]] 18 | meta_path = 'CampusSeq1/xrmocap_meta_testset' 19 | visualize = True 20 | -------------------------------------------------------------------------------- /configs/mvpose/panoptic_config/panoptic_data_converter_testset.py: -------------------------------------------------------------------------------- 1 | type = 'PanopticDataCovnerter' 2 | data_root = 'panoptic-toolbox' 3 | bbox_detector = dict( 4 | type='MMtrackDetector', 5 | mmtrack_kwargs=dict( 6 | config='configs/modules/human_perception/' + 7 | 'mmtrack_deepsort_faster-rcnn_fpn_4e_mot17-private-half.py', 8 | device='cuda')) 9 | kps2d_estimator = dict( 10 | type='MMposeTopDownEstimator', 11 | mmpose_kwargs=dict( 12 | checkpoint='weight/hrnet_w48_coco_wholebody' + 13 | '_384x288_dark-f5726563_20200918.pth', 14 | config='configs/modules/human_perception/mmpose_hrnet_w48_' + 15 | 'coco_wholebody_384x288_dark_plus.py', 16 | device='cuda')) 17 | 18 | batch_size = 1000 19 | scene_names = [ 20 | '160906_pizza1', '160422_haggling1', '160906_ian5', '160906_band4' 21 | ] 22 | view_idxs = [3, 6, 12, 13, 23] 23 | frame_period = 1 24 | scene_range = 'all' 25 | meta_path = 'panoptic-toolbox/xrmocap_meta_testset' 26 | visualize = True 27 | -------------------------------------------------------------------------------- /configs/mvpose/shelf_config/shelf_data_converter_testset.py: -------------------------------------------------------------------------------- 1 | type = 'ShelfDataCovnerter' 2 | data_root = 'Shelf' 3 | bbox_detector = dict( 4 | type='MMtrackDetector', 5 | mmtrack_kwargs=dict( 6 
| config='configs/modules/human_perception/' + 7 | 'mmtrack_deepsort_faster-rcnn_fpn_4e_mot17-private-half.py', 8 | device='cuda')) 9 | kps2d_estimator = dict( 10 | type='MMposeTopDownEstimator', 11 | mmpose_kwargs=dict( 12 | checkpoint='weight/hrnet_w48_coco_wholebody' + 13 | '_384x288_dark-f5726563_20200918.pth', 14 | config='configs/modules/human_perception/mmpose_hrnet_w48_' + 15 | 'coco_wholebody_384x288_dark_plus.py', 16 | device='cuda')) 17 | scene_range = [[300, 600]] 18 | meta_path = 'Shelf/xrmocap_meta_testset' 19 | visualize = True 20 | -------------------------------------------------------------------------------- /configs/mvpose_tracking/campus_config/campus_data_converter_testset.py: -------------------------------------------------------------------------------- 1 | type = 'CampusDataCovnerter' 2 | data_root = 'CampusSeq1' 3 | bbox_detector = dict( 4 | type='MMtrackDetector', 5 | mmtrack_kwargs=dict( 6 | config='configs/modules/human_perception/' + 7 | 'mmtrack_deepsort_faster-rcnn_fpn_4e_mot17-private-half.py', 8 | device='cuda')) 9 | kps2d_estimator = dict( 10 | type='MMposeTopDownEstimator', 11 | mmpose_kwargs=dict( 12 | checkpoint='weight/hrnet_w48_coco_wholebody' + 13 | '_384x288_dark-f5726563_20200918.pth', 14 | config='configs/modules/human_perception/mmpose_hrnet_w48_' + 15 | 'coco_wholebody_384x288_dark_plus.py', 16 | device='cuda')) 17 | scene_range = [[350, 470], [650, 750]] 18 | meta_path = 'CampusSeq1/xrmocap_meta_testset' 19 | visualize = True 20 | -------------------------------------------------------------------------------- /configs/mvpose_tracking/panoptic_config/panoptic_data_converter_testset.py: -------------------------------------------------------------------------------- 1 | type = 'PanopticDataCovnerter' 2 | data_root = 'panoptic-toolbox' 3 | bbox_detector = dict( 4 | type='MMtrackDetector', 5 | mmtrack_kwargs=dict( 6 | config='configs/modules/human_perception/' + 7 | 'mmtrack_deepsort_faster-rcnn_fpn_4e_mot17-private-half.py', 8 | device='cuda')) 9 | kps2d_estimator = dict( 10 | type='MMposeTopDownEstimator', 11 | mmpose_kwargs=dict( 12 | checkpoint='weight/hrnet_w48_coco_wholebody' + 13 | '_384x288_dark-f5726563_20200918.pth', 14 | config='configs/modules/human_perception/mmpose_hrnet_w48_' + 15 | 'coco_wholebody_384x288_dark_plus.py', 16 | device='cuda')) 17 | 18 | batch_size = 1000 19 | scene_names = [ 20 | '160906_pizza1', '160422_haggling1', '160906_ian5', '160906_band4' 21 | ] 22 | view_idxs = [3, 6, 12, 13, 23] 23 | frame_period = 1 24 | scene_range = 'all' 25 | meta_path = 'panoptic-toolbox/xrmocap_meta_testset' 26 | visualize = True 27 | -------------------------------------------------------------------------------- /configs/mvpose_tracking/shelf_config/shelf_data_converter_testset.py: -------------------------------------------------------------------------------- 1 | type = 'ShelfDataCovnerter' 2 | data_root = 'Shelf' 3 | bbox_detector = dict( 4 | type='MMtrackDetector', 5 | mmtrack_kwargs=dict( 6 | config='configs/modules/human_perception/' + 7 | 'mmtrack_deepsort_faster-rcnn_fpn_4e_mot17-private-half.py', 8 | device='cuda')) 9 | kps2d_estimator = dict( 10 | type='MMposeTopDownEstimator', 11 | mmpose_kwargs=dict( 12 | checkpoint='weight/hrnet_w48_coco_wholebody' + 13 | '_384x288_dark-f5726563_20200918.pth', 14 | config='configs/modules/human_perception/mmpose_hrnet_w48_' + 15 | 'coco_wholebody_384x288_dark_plus.py', 16 | device='cuda')) 17 | scene_range = [[300, 600]] 18 | meta_path = 'Shelf/xrmocap_meta_testset' 19 | visualize = 
True 20 | -------------------------------------------------------------------------------- /configs/shape_aware_3d_pose_optim/README.md: -------------------------------------------------------------------------------- 1 | # Shape-aware 3D Pose Optimization 2 | 3 | - [Introduction](#introduction) 4 | - [Results](#results) 5 | 6 | ## Introduction 7 | 8 | We provide the config files for Shape-aware 3D Pose Optimization, which is the second stage in [Shape-aware Multi-Person Pose Estimation from Multi-View Images](https://ait.ethz.ch/projects/2021/multi-human-pose/). 9 | 10 | ```BibTeX 11 | @inproceedings{dong2021shape, 12 | title={Shape-aware Multi-Person Pose Estimation from Multi-view Images}, 13 | author={Dong, Zijian and Song, Jie and Chen, Xu and Guo, Chen and Hilliges, Otmar}, 14 | booktitle={International Conference on Computer Vision (ICCV)}, 15 | year={2021} 16 | } 17 | ``` 18 | 19 | ## Results 20 | 21 | We use the keypoints3d generated by MVPose or MVPose tracking to fit SMPL models, then iteratively optimize the keypoints3d using 2D perception data and the fitted SMPL, and report the Percentage of Correct Parts (PCP) on the Shelf dataset. It should be noted that the optimization result depends on the accuracy of the SMPL fitting. 22 | 23 | You can find more details in the [config](shape_aware_3d_pose_optimizer.py), where `kps3d_optimizers` holds the shape-aware 3D pose optimizer and you can set the number of iterations. For more details, see the docs for `SMPLShapeAwareOptimizer` and the docstring in [code](../../xrmocap/transform/keypoints3d/optim/smpl_shape_aware_optimizer.py). 24 | 25 | 26 | | | Actor 0 | Actor 1 | Actor 2 | Average | 27 | |:------:|:-------:|:--------:|:--------:|:--------:| 28 | | keypoints3d w/o optimizer| 98.10 | 93.51 | 97.89 | 96.50 | 29 | | keypoints3d w/ optimizer| 97.89 | 95.14 | 97.70 | 96.91 | 30 | -------------------------------------------------------------------------------- /dockerfiles/runtime_ubt18/build_runtime_docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | CUDA_VER=11.6 3 | PY_VER=3.8 4 | MMCV_VER=1.6.1 5 | TORCH_VER=1.12.1 6 | TORCHV_VER=0.13.1 7 | CUDA_VER_DIGIT=${CUDA_VER//./} 8 | PY_VER_DIGIT=${PY_VER//./} 9 | MMCV_VER_DIGIT=${MMCV_VER//./} 10 | TORCH_VER_DIGIT=${TORCH_VER//./} 11 | FINAL_TAG="openxrlab/xrmocap_runtime:ubuntu1804_x64_cuda${CUDA_VER_DIGIT}_py${PY_VER_DIGIT}_torch${TORCH_VER_DIGIT}_mmcv${MMCV_VER_DIGIT}" 12 | echo "tag to build: $FINAL_TAG" 13 | BUILD_ARGS="--build-arg CUDA_VER=${CUDA_VER} --build-arg PY_VER=${PY_VER} --build-arg MMCV_VER=${MMCV_VER} --build-arg TORCH_VER=${TORCH_VER} --build-arg TORCHV_VER=${TORCHV_VER}" 14 | # build according to Dockerfile 15 | TAG=${FINAL_TAG}_not_compatible 16 | docker build -t $TAG -f dockerfiles/runtime_ubt18/Dockerfile $BUILD_ARGS --progress=plain . 17 | # Install mpr and mmcv-full with GPU 18 | CONTAINER_ID=$(docker run -it --gpus all -d $TAG) 19 | docker exec -ti $CONTAINER_ID sh -c " 20 | .
/opt/miniconda/etc/profile.d/conda.sh && \ 21 | conda activate openxrlab && \ 22 | pip install mmcv-full==1.6.1 -f https://download.openmmlab.com/mmcv/dist/cu${CUDA_VER_DIGIT}/torch${TORCH_VER}/index.html && \ 23 | pip install git+https://github.com/rmbashirov/minimal_pytorch_rasterizer.git && \ 24 | pip cache purge 25 | " 26 | docker commit $CONTAINER_ID $FINAL_TAG 27 | docker rm -f $CONTAINER_ID 28 | docker rmi $TAG 29 | echo "Successfully tagged $FINAL_TAG" 30 | -------------------------------------------------------------------------------- /dockerfiles/service_ubt18/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG INPUT_TAG 2 | FROM $INPUT_TAG 3 | 4 | # Install service requirements 5 | RUN . /opt/miniconda/etc/profile.d/conda.sh && \ 6 | conda activate openxrlab && \ 7 | pip install -r https://raw.githubusercontent.com/openxrlab/xrmocap/main/requirements/service.txt && \ 8 | pip cache purge 9 | -------------------------------------------------------------------------------- /dockerfiles/service_ubt18/build_runtime_docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | CUDA_VER=11.6 3 | PY_VER=3.8 4 | MMCV_VER=1.6.1 5 | TORCH_VER=1.12.1 6 | TORCHV_VER=0.13.1 7 | CUDA_VER_DIGIT=${CUDA_VER//./} 8 | PY_VER_DIGIT=${PY_VER//./} 9 | MMCV_VER_DIGIT=${MMCV_VER//./} 10 | TORCH_VER_DIGIT=${TORCH_VER//./} 11 | INPUT_TAG="openxrlab/xrmocap_runtime:ubuntu1804_x64_cuda${CUDA_VER_DIGIT}_py${PY_VER_DIGIT}_torch${TORCH_VER_DIGIT}_mmcv${MMCV_VER_DIGIT}" 12 | FINAL_TAG="${INPUT_TAG}_service" 13 | echo "tag to build: $FINAL_TAG" 14 | BUILD_ARGS="--build-arg CUDA_VER=${CUDA_VER} --build-arg PY_VER=${PY_VER} --build-arg MMCV_VER=${MMCV_VER} --build-arg TORCH_VER=${TORCH_VER} --build-arg TORCHV_VER=${TORCHV_VER} --build-arg INPUT_TAG=${INPUT_TAG}" 15 | # build according to Dockerfile 16 | docker build -t $FINAL_TAG -f dockerfiles/service_ubt18/Dockerfile $BUILD_ARGS --progress=plain . 17 | echo "Successfully tagged $FINAL_TAG" 18 | -------------------------------------------------------------------------------- /docs/en/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/en/api.rst: -------------------------------------------------------------------------------- 1 | xrmocap.core 2 | ------------ 3 | 4 | estimation 5 | ^^^^^^^^^^ 6 | .. automodule:: xrmocap.core.estimation 7 | :members: 8 | 9 | evaluation 10 | ^^^^^^^^^^ 11 | .. automodule:: xrmocap.core.evaluation 12 | :members: 13 | 14 | smplify hook 15 | ^^^^^^^^^^^^ 16 | .. automodule:: xrmocap.core.hook.smplify_hook 17 | :members: 18 | 19 | train 20 | ^^^^^ 21 | ..
automodule:: xrmocap.core.train 22 | :members: 23 | 24 | visualization 25 | ^^^^^^^^^^^^^ 26 | .. automodule:: xrmocap.core.visualization 27 | :members: 28 | 29 | xrmocap.data 30 | ------------ 31 | 32 | data_converter 33 | ^^^^^^^^^^^^^^ 34 | .. automodule:: xrmocap.data.data_converter 35 | :members: 36 | 37 | dataloader 38 | ^^^^^^^^^^ 39 | .. automodule:: xrmocap.data.dataloader 40 | :members: 41 | 42 | dataset 43 | ^^^^^^^ 44 | .. automodule:: xrmocap.data.dataset 45 | :members: 46 | 47 | data_visualization 48 | ^^^^^^^^^^^^^^^^^^ 49 | .. automodule:: xrmocap.data.data_visualization 50 | :members: 51 | 52 | 53 | xrmocap.data_structure 54 | ---------------------- 55 | 56 | .. automodule:: xrmocap.data_structure 57 | :members: 58 | 59 | 60 | xrmocap.human_perception 61 | ------------------------ 62 | 63 | bbox_detection 64 | ^^^^^^^^^^^^^^ 65 | .. automodule:: xrmocap.human_perception.bbox_detection 66 | :members: 67 | 68 | keypoints_estimation 69 | ^^^^^^^^^^^^^^^^^^^^ 70 | .. automodule:: xrmocap.human_perception.keypoints_estimation 71 | :members: 72 | 73 | 74 | xrmocap.io 75 | ---------- 76 | 77 | .. automodule:: xrmocap.io 78 | :members: 79 | 80 | 81 | xrmocap.model 82 | ------------- 83 | 84 | architecture 85 | ^^^^^^^^^^^^ 86 | .. automodule:: xrmocap.model.architecture 87 | :members: 88 | 89 | 90 | xrmocap.ops 91 | ----------- 92 | 93 | projection 94 | ^^^^^^^^^^ 95 | .. automodule:: xrmocap.ops.projection 96 | :members: 97 | 98 | 99 | xrmocap.transform 100 | ----------------- 101 | 102 | keypoints3d.optim 103 | ^^^^^^^^^^^^^^^^^ 104 | .. automodule:: xrmocap.transform.keypoints3d.optim 105 | :members: 106 | 107 | 108 | xrmocap.utils 109 | ------------- 110 | 111 | .. automodule:: xrmocap.utils 112 | :members: 113 | -------------------------------------------------------------------------------- /docs/en/apis.md: -------------------------------------------------------------------------------- 1 | # APIs 2 | 3 | - [Multi-view single-person SMPL Estimator](#multi-view-single-person-smpl-estimator) 4 | - [Multi-view multi-person SMPL Estimator](#multi-view-multi-person-smpl-estimator) 5 | 6 | ## Multi-view single-person SMPL Estimator 7 | 8 | `MultiViewSinglePersonSMPLEstimator` is an API class for the multi-view single-person scenario. It takes multi-view videos and multi-view camera parameters as input, and estimates SMPL parameters along with other important information. See the [estimator doc](./estimation/mview_sperson_smpl_estimator.md) for details. 9 | 10 | 11 | 12 | ## Multi-view multi-person SMPL Estimator 13 | 14 | `MultiPersonSMPLEstimator` is an API class for the multi-view multi-person scenario. It takes multi-person keypoints3d and multi-view camera parameters as input, and estimates SMPL parameters along with other important information. See the [estimator doc](./tools/mview_mperson_smplify3d.md) for details. 15 | -------------------------------------------------------------------------------- /docs/en/benchmark.md: -------------------------------------------------------------------------------- 1 | # Benchmark and Model Zoo 2 | 3 | For optimization-based methods, we provide configuration files and log files. 4 | For learning-based methods, we provide configuration files, log files and pretrained models. 5 | Moreover, all supported methods are evaluated on three common benchmarks: Campus, Shelf, and CMU Panoptic. 6 | 7 | ## Baselines 8 | 9 | ### MVPose (Single frame) 10 | 11 | Please refer to [MVPose](../../configs/mvpose/README.md) for details.
12 | 13 | ### MVPose (Temporal tracking and filtering) 14 | 15 | Please refer to [MVPose with tracking](../../configs/mvpose_tracking/README.md) for details. 16 | 17 | ### Shape-aware 3D Pose Optimization 18 | 19 | Please refer to [Shape-aware 3D Pose Optimization](../../configs/shape_aware_3d_pose_optim/README.md) for details. 20 | 21 | ### MvP 22 | 23 | Please refer to [MvP benchmarks](../../configs/mvp/README.md) for details. 24 | 25 | ### 4D Association Graph 26 | 27 | Please refer to [FourDAG benchmarks](../../configs/fourdag/README.md) for details. 28 | -------------------------------------------------------------------------------- /docs/en/data_structure/limbs.md: -------------------------------------------------------------------------------- 1 | # Limbs 2 | 3 | - [Overview](#overview) 4 | - [Attribute definition](#attribute-definition) 5 | - [Create an instance](#create-an-instance) 6 | 7 | ### Overview 8 | 9 | Limbs is a class for person limbs data, recording connection vectors between keypoints. It accepts either `numpy.ndarray` or `torch.Tensor`, converting them into `numpy.ndarray` with dtype `numpy.int32`. 10 | 11 | ### Attribute definition 12 | 13 | - connections: An ndarray for connections, in shape [n_conn, 2]. `connections[:, 0]` are start point indices and `connections[:, 1]` are end point indices. `connections[n, :]` is `[start_index, end_index]` of the No.n connection. 14 | - connection_names: A list of strings, could be None. If not None, the length of `connection_names` equals the length of `connections`. 15 | - parts: A nested list, could be None. If not None, `len(parts)` is the number of parts, and `len(parts[0])` is the number of connections in the first part. `parts[i][j]` is an index of a connection. 16 | - part_names: A list of strings, could be None. If not None, the length of `part_names` equals the length of `parts`. 17 | - points: An ndarray for points, could be None. If not None, it is in shape [n_point, point_dim]. We could use the indices recorded in `connections` to fetch a point. 18 | - logger: Logger for logging. If None, the root logger will be selected. 19 | 20 | ### Create an instance 21 | 22 | a. Create an instance with raw data and `__init__()`. 23 | 24 | ```python 25 | import numpy as np 26 | from xrprimer.data_structure import Limbs 27 | 28 | # only the connections arg is necessary for Limbs 29 | connections = np.asarray( 30 | [[0, 1], [0, 2], [1, 3]] 31 | ) 32 | limbs = Limbs(connections=connections) 33 | 34 | # split connections into parts 35 | parts = [[0, ], [1, 2]] 36 | part_names = ['head', 'right_arm'] 37 | limbs = Limbs(connections=connections, parts=parts, part_names=part_names) 38 | ``` 39 | 40 | b. Get limbs from a well-defined Keypoints instance. The connections will be searched from a sub-set of `human_data` limbs. 41 | 42 | ```python 43 | from xrprimer.transform.limbs import get_limbs_from_keypoints 44 | 45 | # Get limbs according to keypoints' mask and convention.
46 | limbs = get_limbs_from_keypoints(keypoints=keypoints2d) 47 | # connections, parts and part_names have been set 48 | 49 | # torch type is also accepted 50 | keypoints2d_torch = keypoints2d.to_tensor() 51 | limbs = get_limbs_from_keypoints(keypoints=keypoints2d_torch) 52 | 53 | # If both frame_idx and person_idx have been set, 54 | # limbs are searched from a certain frame 55 | # limbs.points have also been set 56 | limbs = get_limbs_from_keypoints(keypoints=keypoints2d, frame_idx=0, person_idx=0) 57 | ``` 58 | -------------------------------------------------------------------------------- /docs/en/data_structure/smpl_data.md: -------------------------------------------------------------------------------- 1 | # SMPLData 2 | 3 | - [Overview](#overview) 4 | - [Key/Value definition](#keyvalue-definition) 5 | - [Attribute definition](#attribute-definition) 6 | - [Create an instance](#create-an-instance) 7 | - [Convert into body_model input](#convert-into-body_model-input) 8 | - [File IO](#file-io) 9 | 10 | ### Overview 11 | 12 | SMPLData, SMPLXData and SMPLXDData are classes for SMPL(X/XD) parameters, based on the python dict class. They accept either `numpy.ndarray` or `torch.Tensor`, converting them into `numpy.ndarray`. 13 | 14 | ### Key/Value definition 15 | 16 | - gender: A string that marks the gender of the body_model: female, male or neutral. 17 | 18 | - fullpose: An ndarray of the full pose, including `global_orient`, `body_pose`, and other pose parameters if they exist. 19 | 20 | It's in shape [batch_size, fullpose_dim, 3], where `fullpose_dim` differs between SMPLData and SMPLXData. 21 | 22 | - transl: An ndarray of translation, in shape [batch_size, 3]. 23 | 24 | - betas: An ndarray of body shape parameters, in shape [batch_size, betas_dim], where `betas_dim` is defined by the input; it is 10 by default. 25 | 26 | ### Attribute definition 27 | 28 | - logger: Logger for logging. If None, the root logger will be selected. 29 | 30 | ### Create an instance 31 | 32 | a. Store the output of SMPLify. 33 | 34 | ```python 35 | smpl_data = SMPLData() 36 | smpl_data.from_param_dict(registrant_output) 37 | ``` 38 | 39 | b. Create an instance with ndarray or Tensor. 40 | 41 | ```python 42 | smpl_data = SMPLData( 43 | gender='neutral', 44 | fullpose=fullpose, 45 | transl=transl, 46 | betas=betas) 47 | ``` 48 | 49 | c. Create an instance from a dict. 50 | 51 | ```python 52 | smpl_dict = dict(smpl_data) 53 | another_smpl_data = SMPLData.from_dict(smpl_dict) 54 | ``` 55 | 56 | ### Convert into body_model input 57 | 58 | ```python 59 | smpl_data.to_tensor_dict(device='cuda:0') 60 | ``` 61 | 62 | ### File IO 63 | 64 | a. Dump an instance to an npz file. 65 | 66 | ```python 67 | dump_path = './output/smpl_data.npz' 68 | smpl_data.dump(dump_path) 69 | ``` 70 | 71 | b. Load an instance from file. 72 | 73 | ```python 74 | load_path = './output/smpl_data.npz' 75 | smpl_data = SMPLData.fromfile(load_path) 76 | # We could also create an instance and then load. 77 | smpl_data = SMPLData() 78 | smpl_data.load(load_path) 79 | ``` 80 | -------------------------------------------------------------------------------- /docs/en/faq.md: -------------------------------------------------------------------------------- 1 | # Frequently Asked Questions 2 | 3 | We list some common troubles faced by many users and their corresponding solutions here. Feel free to enrich the list if you find any frequent issues and have ways to help others to solve them. If the contents here do not cover your issue, do not hesitate to create an issue!
4 | 5 | ## Installation 6 | 7 | - 'ImportError: libpng16.so.16: cannot open shared object file: No such file or directory' 8 | 9 | Please refer to [xrprimer faq](https://github.com/openxrlab/xrprimer/blob/main/docs/en/faq.md). 10 | 11 | - 'ImportError: liblapack.so.3: cannot open shared object file: No such file or directory' 12 | 13 | Please refer to [xrprimer faq](https://github.com/openxrlab/xrprimer/blob/main/docs/en/faq.md). 14 | 15 | - 'ModuleNotFoundError: No module named mmhuman3d.core.conventions.joints_mapping' 16 | 17 | Package `joints_mapping` actually exists in [github](https://github.com/open-mmlab/mmhuman3d/tree/main/mmhuman3d/core/conventions/joints_mapping), but it is not installed by pip due to the absence of `joints_mapping/__init__.py`. Installing mmhuman3d from source will solve it: 18 | 19 | ```bash 20 | cd PATH_FOR_MMHUMAN3D 21 | git clone https://github.com/open-mmlab/mmhuman3d.git 22 | pip install -e ./mmhuman3d 23 | ``` 24 | 25 | - 'BrokenPipeError: ../../lib/python3.8/site-packages/xrprimer/utils/ffmpeg_utils.py:189: BrokenPipeError' 26 | 27 | You've installed a wrong version of ffmpeg. Try to install it with the following command, and do not specify any channel: 28 | 29 | ```bash 30 | conda install ffmpeg 31 | ``` 32 | 33 | - 'ImportError:numpy.core.multiarray failed to import' 34 | 35 | Some versions of numpy are not compatible with mmpose, and the same goes for scipy. Installing tested versions will help: 36 | 37 | ```bash 38 | pip install numpy==1.23.5 scipy==1.10.0 39 | ``` 40 | 41 | - 'RuntimeError: nms_impl: implementation for device cuda:0 not found.' 42 | 43 | You have a newer mmcv-full and an older mmdet. Installing an older mmcv-full may help. Remember to modify the torch and cuda versions according to your environment: 44 | 45 | ```bash 46 | pip install mmcv-full==1.6.1 -f https://download.openmmlab.com/mmcv/dist/cu113/torch1.12.0/index.html 47 | ``` 48 | -------------------------------------------------------------------------------- /docs/en/index.rst: -------------------------------------------------------------------------------- 1 | Welcome to XRMoCap's documentation! 2 | ======================================= 3 | 4 | .. toctree:: 5 | :maxdepth: 2 6 | :caption: Getting started 7 | 8 | installation.md 9 | dataset_preparation.md 10 | getting_started.md 11 | 12 | .. toctree:: 13 | :maxdepth: 2 14 | :caption: Benchmark 15 | 16 | benchmark.md 17 | 18 | .. toctree:: 19 | :maxdepth: 2 20 | :caption: Unit tests 21 | 22 | test.md 23 | 24 | .. toctree:: 25 | :maxdepth: 2 26 | :caption: Data structures 27 | 28 | data_structure/keypoints.md 29 | data_structure/limbs.md 30 | data_structure/smpl_data.md 31 | 32 | .. toctree:: 33 | :maxdepth: 2 34 | :caption: Operations 35 | 36 | ops/triangulation.md 37 | 38 | .. toctree:: 39 | :maxdepth: 2 40 | :caption: Model 41 | 42 | model/smplify.md 43 | 44 | .. toctree:: 45 | :maxdepth: 2 46 | :caption: Estimation 47 | 48 | estimation/mview_sperson_smpl_estimator.md 49 | estimation/mview_mperson_topdown_smpl_estimator.md 50 | 51 | .. toctree:: 52 | :maxdepth: 2 53 | :caption: Tools 54 | 55 | tools/eval_model.md 56 | tools/mview_mperson_evaluation.md 57 | tools/mview_mperson_smplify3d.md 58 | tools/prepare_dataset.md 59 | tools/process_smc.md 60 | tools/train_model.md 61 | tools/visualize_dataset.md 62 | 63 | .. toctree:: 64 | :maxdepth: 2 65 | :caption: Tutorials 66 | 67 | tutorials/introduction.md 68 | tutorials/config.md 69 | tutorials/new_dataset.md 70 | tutorials/new_module.md 71 | 72 | ..
toctree:: 73 | :maxdepth: 2 74 | :caption: Notes 75 | 76 | faq.md 77 | changelog.md 78 | 79 | .. toctree:: 80 | :maxdepth: 2 81 | :caption: License 82 | 83 | license.rst 84 | 85 | .. toctree:: 86 | :maxdepth: 1 87 | :caption: APIs 88 | 89 | apis.md 90 | 91 | .. toctree:: 92 | :maxdepth: 1 93 | :caption: API Reference 94 | 95 | api.rst 96 | 97 | Indices and tables 98 | ================== 99 | 100 | * :ref:`genindex` 101 | * :ref:`search` 102 | -------------------------------------------------------------------------------- /docs/en/license.rst: -------------------------------------------------------------------------------- 1 | LICENSE 2 | ------- 3 | 4 | .. include:: ../../LICENSE 5 | -------------------------------------------------------------------------------- /docs/en/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.http://sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /docs/en/ops/triangulation.md: -------------------------------------------------------------------------------- 1 | # Triangulation 2 | 3 | - Triangulation 4 | - [Prepare camera parameters](https://github.com/openxrlab/xrprimer/blob/main/docs/en/ops/triangulator.md#prepare-camera-parameters) 5 | - [Build a triangulator](https://github.com/openxrlab/xrprimer/blob/main/docs/en/ops/triangulator.md#build-a-triangulator) 6 | - [Triangulate points from 2D to 3D](#triangulate-points-from-2d-to-3d) 7 | - [Get reprojection error](https://github.com/openxrlab/xrprimer/blob/main/docs/en/ops/triangulator.md#get-reprojection-error) 8 | - [Camera selection](https://github.com/openxrlab/xrprimer/blob/main/docs/en/ops/triangulator.md#camera-selection) 9 | 10 | ### Overview 11 | 12 | Triangulators in XRMoCap are sub-classes of the XRPrimer triangulator. For basic usage of triangulators, please refer to the [xrprimer doc](https://github.com/openxrlab/xrprimer/blob/main/docs/en/ops/triangulator.md#triangulate-points-from-2d-to-3d). 13 | 14 | ## Triangulate points from 2D to 3D 15 | 16 | In XRMoCap, we allow triangulators defined in `xrmocap/ops/triangulation` to take input data of arbitrary shape. The first dim shall be the view dim, and the last dim shall be `2+n`, where n >= 0.
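For instance, here is a minimal sketch of triangulating one frame of multi-view keypoints2d; it assumes `cam_params` is an already-loaded list of camera parameters, one per view, and feeds placeholder zero points instead of real detections:

```python
import numpy as np

from xrmocap.ops.triangulation.builder import build_triangulator

# cam_params: an already-loaded list of camera parameters, one per view
# (an assumption of this sketch; see the camera preparation doc above)
triangulator = build_triangulator(
    dict(type='AniposelibTriangulator', camera_parameters=cam_params))
# one frame of 2D keypoints from every view, shape [n_view, n_kps, 2]
points2d = np.zeros((len(cam_params), 17, 2))
points3d = triangulator.triangulate(points=points2d)
assert points3d.shape == (17, 3)
```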
Here are shapes of some useful examples below: 17 | 18 | | points.shape | ret_points3d.shape | 19 | | ------------------------------------- | ----------------------------- | 20 | | [n_view, n_kps, 2] | [n_kps, 3] | 21 | | [n_view, n_frame, n_kps, 2] | [n_frame, n_kps, 3] | 22 | | [n_view, n_person, n_frame, n_kps, 2] | [n_frame, n_person, n_kps, 3] | 23 | -------------------------------------------------------------------------------- /docs/en/test.md: -------------------------------------------------------------------------------- 1 | # Running Tests 2 | 3 | - [Data Preparation](#data-preparation) 4 | - [Environment Preparation](#environment-preparation) 5 | - [Running tests through pytest](#running-tests-through-pytest) 6 | 7 | ## Data Preparation 8 | 9 | Download data from the file server, and extract files to `tests/data`. 10 | 11 | ``` 12 | sh scripts/download_test_data.sh 13 | ``` 14 | 15 | Download weights from the Internet, and extract files to `weight`. 16 | 17 | ``` 18 | sh scripts/download_weight.sh 19 | ``` 20 | 21 | ## Environment Preparation 22 | 23 | Install the packages required for testing. 24 | 25 | ``` 26 | pip install -r requirements/test.txt 27 | ``` 28 | 29 | ## Running tests through pytest 30 | 31 | Run all the tests under `tests/`. This is a good way to validate whether `XRMoCap` has been correctly installed: 32 | 33 | ``` 34 | pytest tests/ 35 | ``` 36 | 37 | Or generate a coverage report when testing: 38 | 39 | ``` 40 | coverage run --source xrmocap -m pytest tests/ 41 | coverage xml 42 | coverage report -m 43 | ``` 44 | 45 | Or start a CPU-only test on a GPU machine: 46 | 47 | ``` 48 | export CUDA_VISIBLE_DEVICES=-1 49 | pytest tests/ 50 | ``` 51 | -------------------------------------------------------------------------------- /docs/en/tools/mmdeploy.md: -------------------------------------------------------------------------------- 1 | # Tool mmdeploy 2 | 3 | - [Overview](#overview) 4 | - [Installation](#installation) 5 | - [Clone](#clone) 6 | - [Run](#run) 7 | 8 | ### Overview 9 | This tool converts human perception PyTorch modules into TensorRT engines with mmdeploy. 10 | 11 | ### Installation 12 | Please refer to the [official repository](https://github.com/open-mmlab/mmdeploy/blob/master/docs/en/get_started.md) for installation. We also provide a [from-scratch setup script](../installation.md#a-from-scratch-setup-script) in XRMoCap. 13 | 14 | 15 | ### Clone 16 | ``` 17 | git clone https://github.com/open-mmlab/mmdeploy.git /path/of/mmdeploy 18 | ``` 19 | 20 | ### Run 21 | Remember to change `/path/of/mmdeploy` to your local clone.
22 | ``` 23 | # mmdet 24 | mkdir -p weight/mmdet_faster_rcnn/ 25 | python /path/of/mmdeploy/tools/deploy.py \ 26 | ./configs/modules/human_perception/deploy/detection_tensorrt_dynamic-320x320-1344x1344.py \ 27 | ./configs/modules/human_perception/mmdet_faster_rcnn_r50_fpn_coco.py \ 28 | ./weight/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ 29 | ./tests/data/human_perception/test_bbox_detection/multi_person.png \ 30 | --work-dir ./weight/mmdet_faster_rcnn/ \ 31 | --device cuda:0 \ 32 | --dump-info 33 | 34 | # mmpose 35 | mkdir -p weight/mmpose_hrnet/ 36 | python /path/of/mmdeploy/tools/deploy.py \ 37 | ./configs/modules/human_perception/deploy/pose-detection_tensorrt_dynamic-384x288.py \ 38 | ./configs/modules/human_perception/mmpose_hrnet_w48_coco_wholebody_384x288_dark_plus.py \ 39 | ./weight/hrnet_w48_coco_wholebody_384x288_dark-f5726563_20200918.pth \ 40 | ./tests/data/human_perception/test_bbox_detection/multi_person.png \ 41 | --work-dir ./weight/mmpose_hrnet/ \ 42 | --device cuda:0 \ 43 | --dump-info 44 | ``` 45 | -------------------------------------------------------------------------------- /docs/en/tools/mview_mperson_evaluation.md: -------------------------------------------------------------------------------- 1 | # Multi-view Multi-person Evaluation 2 | 3 | - [Overview](#overview) 4 | - [Argument](#argument) 5 | - [Example](#example) 6 | 7 | ## Overview 8 | 9 | This tool takes calibrated camera parameters, RGB sequences, 2d perception data and 3d ground-truth from `MviewMpersonDataset` as input, generates multi-view multi-person keypoints3d and evaluates them on the Campus/Shelf/CMU-Panoptic datasets. 10 | 11 | ## Argument 12 | 13 | - **enable_log_file** 14 | By default, enable_log_file is False and the tool will only print log to console. Adding `--enable_log_file` makes it True and a log file named `{smc_file_name}_{time_str}.txt` will be written. 15 | 16 | - **evaluation_config**: 17 | `evaluation_config` is the path to a `TopDownAssociationEvaluation` config file. For more details, see docs for `TopDownAssociationEvaluation` and the docstring in [code](../../../xrmocap/core/evaluation/top_down_association_evaluation.py). 18 | 19 | Also, you can find our prepared config files at `configs/mvpose/*/eval_keypoints3d.py` or `configs/mvpose_tracking/*/eval_keypoints3d.py`. 20 | 21 | ## Example 22 | 23 | Evaluate on the Shelf dataset and run the tool without tracking. 24 | 25 | ```bash 26 | python tools/mview_mperson_evaluation.py \ 27 | --enable_log_file \ 28 | --evaluation_config configs/mvpose/shelf_config/eval_keypoints3d.py 29 | ``` 30 | 31 | Evaluate on the Shelf dataset and run the tool with tracking. 32 | 33 | ```bash 34 | python tools/mview_mperson_evaluation.py \ 35 | --enable_log_file \ 36 | --evaluation_config configs/mvpose_tracking/shelf_config/eval_keypoints3d.py 37 | ``` 38 | -------------------------------------------------------------------------------- /docs/en/tools/process_smc.md: -------------------------------------------------------------------------------- 1 | # Tool process_smc 2 | 3 | - [Overview](#overview) 4 | - [Argument: estimator_config](#argument-estimator_config) 5 | - [Argument: output_dir](#argument-output_dir) 6 | - [Argument: disable_log_file](#argument-disable_log_file) 7 | - [Argument: visualize](#argument-visualize) 8 | - [Example](#example) 9 | 10 | ### Overview 11 | 12 | This tool takes calibrated camera parameters and RGB frames from a SenseMoCap file as input, and generates multi-view keypoints2d, keypoints3d and SMPLData.
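For reference, below is a minimal sketch of the tool's first step, reading calibrated color camera parameters from an SMC file; the file path is only an example, and the tool's internal pipeline may differ in detail:

```python
from xrmocap.data_structure.smc_reader import SMCReader
from xrmocap.io.camera import get_color_camera_parameter_from_smc

smc_reader = SMCReader('xrmocap_data/humman/raw_smc/p000455_a000986.smc')
# fetch one camera parameter per kinect color camera
cam_params = [
    get_color_camera_parameter_from_smc(
        smc_reader=smc_reader, camera_type='kinect', camera_id=idx)
    for idx in range(smc_reader.num_kinects)
]
```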
13 | 14 | ### Argument: estimator_config 15 | 16 | `estimator_config` is the path to a `MultiViewSinglePersonSMPLEstimator` config file. For more details, see docs for `MultiViewSinglePersonSMPLEstimator` and the docstring in [code](../../../xrmocap/core/estimation/mview_sperson_smpl_estimator.py). 17 | 18 | Also, you can find our prepared config files at `configs/humman_mocap/mview_sperson_smpl_estimator.py`. 19 | 20 | ### Argument: output_dir 21 | 22 | `output_dir` is the path to the directory saving all possible output files, including multi-view keypoints2d, keypoints3d and SMPLData, logs and visualization videos. 23 | 24 | ### Argument: disable_log_file 25 | 26 | By default, disable_log_file is False and a log file named `{smc_file_name}_{time_str}.txt` will be written. Adding `--disable_log_file` makes it True and the tool will only print log to console. 27 | 28 | ### Argument: frame_file 29 | 30 | By default, frame_file is `'none'`: frames in the SMC will not be saved to the file system; the tool fetches frames of one view 31 | at a time, loads them into memory and runs human perception. If the SMC is too long or you have limited RAM, 32 | `frame_file='temp'` will help you save the frames as image files in output_dir, and remove the temporary files at the 33 | end of this tool. If you need the frames for further usage, set `frame_file='keep'` and the files won't be removed. 34 | 35 | ### Argument: visualize 36 | 37 | By default, visualize is False. Adding `--visualize` makes it True and the tool will visualize keypoints3d with an orbit camera, overlay projected keypoints3d on some views, and overlay SMPL meshes on one view. 38 | 39 | ### Example 40 | 41 | Run the tool with visualization. 42 | 43 | ```bash 44 | python tools/process_smc.py \ 45 | --estimator_config configs/humman_mocap/mview_sperson_smpl_estimator.py \ 46 | --smc_path xrmocap_data/humman/raw_smc/p000455_a000986.smc \ 47 | --output_dir xrmocap_data/humman/p000455_a000986_output \ 48 | --visualize 49 | ``` 50 | -------------------------------------------------------------------------------- /docs/en/tools/start_service.md: -------------------------------------------------------------------------------- 1 | # Tool start_service 2 | 3 | - [Overview](#overview) 4 | - [Argument: config_path](#argument-config_path) 5 | - [Argument: disable_log_file](#argument-disable_log_file) 6 | - [Example](#example) 7 | 8 | ### Overview 9 | 10 | This tool starts a server in the current console according to the configuration file, and sets up a logger. The logger displays information of no less than `INFO` level in the console, and writes information of no less than `DEBUG` level to the log file under the `logs/` directory. 11 | 12 | For services that use the `work_dir` parameter, please make sure that the target path can be created correctly. Generally speaking, running `mkdir temp` in advance can ensure that the default configuration file in the repository can be successfully used. 13 | 14 | ### Argument: config_path 15 | 16 | `config_path` is the path to a configuration file for the server. Please ensure that all parameters required by `SomeService.__init__()` are specified in the configuration file. An example is provided below. For more details, see the docstring in [code](../../../xrmocap/service/base_flask_service.py).
17 | 18 | ```python 19 | type = 'SMPLStreamService' 20 | name = 'smpl_stream_service' 21 | work_dir = f'temp/{name}' 22 | body_model_dir = 'xrmocap_data/body_models' 23 | device = 'cuda:0' 24 | enable_cors = True 25 | port = 29091 26 | ``` 27 | 28 | Also, you can find our prepared config files in `configs/modules/service/smpl_stream_service.py`. 29 | 30 | ### Argument: disable_log_file 31 | 32 | By default, `disable_log_file` is False and two log files will be written under `logs/{service_name}_{time_str}/`. Adding `--disable_log_file` makes it True and the tool will only print log to console. 33 | 34 | ### Example 35 | 36 | Run the tool with explicit paths. 37 | 38 | ```bash 39 | python tools/start_service.py --config_path configs/modules/service/smpl_stream_service.py 40 | ``` 41 | -------------------------------------------------------------------------------- /docs/en/tutorials/config.md: -------------------------------------------------------------------------------- 1 | # Learn about Configs 2 | 3 | We incorporate modular and inheritance design into our config system, which makes it convenient to conduct various experiments. 4 | 5 | ## Modify config through script arguments 6 | 7 | Take MVPose and MVPose tracking as examples. 8 | 9 | If you want to use the tracker, create a dictionary containing `type='KalmanTracking'` and the other arguments needed by `__init__()`, then build it to get a Kalman tracking module; otherwise, simply set `kalman_tracking_config=None`. 10 | 11 | Example: 12 | ``` 13 | kalman_tracking_config=dict(type='KalmanTracking', n_cam_min=2, logger=logger) 14 | 15 | if isinstance(kalman_tracking_config, dict): 16 | kalman_tracking = build_kalman_tracking(kalman_tracking_config) 17 | else: 18 | kalman_tracking = kalman_tracking_config 19 | ``` 20 | 21 | Using trackers 22 | 23 | The tracker is only needed for multi-person scenarios. For a single person it can also be used, but it may slow down inference. 24 | -------------------------------------------------------------------------------- /docs/en/tutorials/new_module.md: -------------------------------------------------------------------------------- 1 | # Add new module 2 | 3 | If you want to add a new module, write a class and register it in the builder. Here we take a triangulator as an example. 4 | 5 | ### Develop PytorchTriangulator class 6 | 7 | 1. Inherit from base class 8 | 9 | Inherit from `BaseTriangulator` and assign correct values for class attributes. 10 | 11 | ```python 12 | class PytorchTriangulator(BaseTriangulator): 13 | CAMERA_CONVENTION = 'opencv' 14 | CAMERA_WORLD2CAM = True 15 | ``` 16 | 17 | Complete `__init__` and do not forget to add the arguments of the super-class. 18 | 19 | ```python 20 | def __init__(self, 21 | camera_parameters: List[FisheyeCameraParameter], 22 | logger: Union[None, str, logging.Logger] = None) -> None: 23 | self.logger = get_logger(logger) 24 | super().__init__(camera_parameters=camera_parameters, logger=logger) 25 | 26 | ``` 27 | 28 | 2.
Complete the necessary methods defined by the base class 29 | 30 | ```python 31 | def triangulate( 32 | self, 33 | points: Union[torch.Tensor, list, tuple], 34 | points_mask: Union[torch.Tensor, list, tuple] = None) -> np.ndarray: 35 | 36 | def get_reprojection_error( 37 | self, 38 | points2d: torch.Tensor, 39 | points3d: torch.Tensor, 40 | points_mask: torch.Tensor = None, 41 | reduction: Literal['mean', 'sum', 'none'] = 'none' 42 | ) -> Union[torch.Tensor, float]: 43 | 44 | def get_projector(self) -> PytorchProjector: 45 | 46 | ``` 47 | 48 | 3. Add special methods of this class (optional) 49 | 50 | ```python 51 | def get_device( 52 | self) -> torch.device: 53 | 54 | ``` 55 | 56 | 4. Register the class in the builder 57 | 58 | Insert the following lines into `xrmocap/ops/triangulation/builder.py`. 59 | 60 | ```python 61 | from .pytorch_triangulator import PytorchTriangulator 62 | 63 | TRIANGULATORS.register_module( 64 | name='PytorchTriangulator', module=PytorchTriangulator) 65 | 66 | ``` 67 | 68 | ### Build and use 69 | 70 | Test whether the new module is OK to build. 71 | 72 | ```python 73 | from xrmocap.ops.triangulation.builder import build_triangulator 74 | 75 | triangulator = build_triangulator(dict(type='PytorchTriangulator')) 76 | 77 | ``` 78 | -------------------------------------------------------------------------------- /docs/zh_cn/installation.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/docs/zh_cn/installation.md -------------------------------------------------------------------------------- /requirements/build.txt: -------------------------------------------------------------------------------- 1 | filterpy 2 | numpy<1.24.0 3 | # # opencv-python for GL equipped env 4 | # opencv-python 5 | # # opencv-python-headless for headless env 6 | # opencv-python-headless 7 | pre-commit 8 | prettytable 9 | scipy 10 | tqdm 11 | -------------------------------------------------------------------------------- /requirements/docs.txt: -------------------------------------------------------------------------------- 1 | docutils==0.16.0 2 | myst-parser 3 | sphinx==4.0.2 4 | sphinx-copybutton 5 | sphinx_markdown_tables 6 | sphinx_rtd_theme==0.5.2 7 | -------------------------------------------------------------------------------- /requirements/readthedocs.txt: -------------------------------------------------------------------------------- 1 | mmcv 2 | mmhuman3d @ git+https://github.com/open-mmlab/mmhuman3d.git 3 | torch 4 | torchvision 5 | xrprimer 6 | -------------------------------------------------------------------------------- /requirements/runtime.txt: -------------------------------------------------------------------------------- 1 | aniposelib @ git+https://github.com/liruilong940607/aniposelib.git 2 | filterpy 3 | h5py 4 | mediapipe 5 | mmdet @ git+https://github.com/open-mmlab/mmdetection.git@v2.27.0 6 | mmhuman3d @ git+https://github.com/open-mmlab/mmhuman3d.git 7 | mmpose @ git+https://github.com/open-mmlab/mmpose.git@v0.29.0 8 | # required by smplx 9 | numpy<1.24.0 10 | pre-commit 11 | prettytable 12 | scipy 13 | smplx 14 | tqdm 15 | -------------------------------------------------------------------------------- /requirements/service.txt: -------------------------------------------------------------------------------- 1 | flask 2 | Flask-Caching 3 | flask-socketio 4 | flask_api 5 | flask_cors 6 | simple-websocket 7 |
-------------------------------------------------------------------------------- /requirements/test.txt: -------------------------------------------------------------------------------- 1 | aniposelib @ git+https://github.com/liruilong940607/aniposelib.git 2 | coverage 3 | filterpy 4 | mediapipe 5 | mmdet @ git+https://github.com/open-mmlab/mmdetection.git@v2.27.0 6 | mmhuman3d @ git+https://github.com/open-mmlab/mmhuman3d.git 7 | mmpose @ git+https://github.com/open-mmlab/mmpose.git@v0.29.0 8 | # required by smplx 9 | numpy<1.24.0 10 | pre-commit 11 | prettytable 12 | pytest 13 | scipy 14 | smplx 15 | tqdm 16 | -------------------------------------------------------------------------------- /resources/SMPLify_classes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/resources/SMPLify_classes.png -------------------------------------------------------------------------------- /resources/xrmocap-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/resources/xrmocap-logo.png -------------------------------------------------------------------------------- /scripts/download_install_deformable.sh: -------------------------------------------------------------------------------- 1 | mkdir xrmocap_download 2 | cd xrmocap_download 3 | wget -q --no-check-certificate 'https://docs.google.com/uc?export=download&id=1t92uAuJWyoKI0uuiMq_VkBzC6HJ75Bz_' -O Deformable.tar 4 | tar -xvf Deformable.tar 5 | cd Deformable 6 | sh make.sh 7 | cd ../.. 8 | rm -rf xrmocap_download 9 | -------------------------------------------------------------------------------- /scripts/download_test_data.sh: -------------------------------------------------------------------------------- 1 | mkdir xrmocap_download 2 | cd xrmocap_download 3 | gdown https://docs.google.com/uc?id=1Mt2oq5Ghf4SY5cqn1ak5fQ5UGC3p8DDz 4 | tar -zxvf tests.tar.gz 5 | gdown https://docs.google.com/uc?id=1VxL2q1bcT9WxJqWdmf54a1IhLrpRUV5j 6 | tar -zxvf xrmocap_data.tar.gz 7 | cd .. 8 | cp -r xrmocap_download/tests ./ 9 | cp -r xrmocap_download/xrmocap_data ./ 10 | rm -rf xrmocap_download 11 | -------------------------------------------------------------------------------- /scripts/download_weight.sh: -------------------------------------------------------------------------------- 1 | mkdir -p weight/mvpose 2 | cd weight 3 | wget https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth 4 | wget https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288_dark-f5726563_20200918.pth 5 | # limb_info.json 6 | gdown https://docs.google.com/uc?id=1FKzJVXno88xQj7MDyroFzm7ueofjsMH6 7 | cd mvpose 8 | # resnet50_reid_camstyle 9 | gdown https://docs.google.com/uc?id=1HScJmiJ-18ioLXmUK_sBrPFZakZWwc4e 10 | cd ../.. 
11 | -------------------------------------------------------------------------------- /scripts/eval_mvp.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -x 4 | 5 | # CFG_FILE="configs/mvp/shelf_config/mvp_shelf.py" 6 | # CFG_FILE="configs/mvp/campus_config/mvp_campus.py" 7 | # CFG_FILE="configs/mvp/panoptic_config/mvp_panoptic.py" 8 | # CFG_FILE="configs/mvp/panoptic_config/mvp_panoptic_3cam.py" 9 | 10 | # Trained with xrmocap from scratch 11 | # MODEL_PATH="weight/xrmocap_mvp_shelf.pth.tar" 12 | # MODEL_PATH="weight/xrmocap_mvp_campus.pth.tar" 13 | # MODEL_PATH="weight/xrmocap_mvp_panoptic_5view.pth.tar" 14 | # MODEL_PATH="weight/xrmocap_mvp_panoptic_3view_3_12_23.pth.tar" 15 | 16 | 17 | GPUS_PER_NODE=$1 18 | CFG_FILE=$2 19 | MODEL_PATH=$3 20 | 21 | 22 | python -m torch.distributed.launch \ 23 | --nproc_per_node=${GPUS_PER_NODE} \ 24 | --master_port 65530 \ 25 | --use_env tools/eval_model.py \ 26 | --cfg ${CFG_FILE} \ 27 | --model_path ${MODEL_PATH} 28 | -------------------------------------------------------------------------------- /scripts/run_docker.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | TAG="openxrlab/xrmocap_runtime:ubuntu1804_x64_cuda116_py38_torch1121_mmcv161" 3 | # modify data mount below 4 | VOLUMES="-v $PWD:/workspace/xrmocap -v /data:/workspace/xrmocap/data" 5 | WORKDIR="-w /workspace/xrmocap" 6 | docker run --runtime=nvidia -it --rm --shm-size=24g $VOLUMES $WORKDIR $TAG $@ 7 | -------------------------------------------------------------------------------- /scripts/slurm_eval_mvp.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -x 4 | 5 | # CFG_FILE="configs/mvp/shelf_config/mvp_shelf.py" 6 | # CFG_FILE="configs/mvp/campus_config/mvp_campus.py" 7 | # CFG_FILE="configs/mvp/panoptic_config/mvp_panoptic.py" 8 | # CFG_FILE="configs/mvp/panoptic_config/mvp_panoptic_3cam.py" 9 | 10 | # Trained with xrmocap from scratch 11 | # MODEL_PATH="weight/xrmocap_mvp_shelf.pth.tar" 12 | # MODEL_PATH="weight/xrmocap_mvp_campus.pth.tar" 13 | # MODEL_PATH="weight/xrmocap_mvp_panoptic_5view.pth.tar" 14 | # MODEL_PATH="weight/xrmocap_mvp_panoptic_3view_3_12_23.pth.tar" 15 | 16 | 17 | PARTITION=$1 18 | JOB_NAME=mvp_eval 19 | GPUS_PER_NODE=$2 20 | CFG_FILE=$3 21 | MODEL_PATH=$4 22 | CPUS_PER_TASK=1 23 | 24 | 25 | srun -p ${PARTITION} \ 26 | --job-name=${JOB_NAME} \ 27 | --gres=gpu:${GPUS_PER_NODE} \ 28 | --ntasks-per-node=1 \ 29 | --cpus-per-task=${CPUS_PER_TASK} \ 30 | --kill-on-bad-exit=1 \ 31 | python -m torch.distributed.launch \ 32 | --nproc_per_node=${GPUS_PER_NODE} \ 33 | --master_port 44145 \ 34 | --use_env tools/eval_model.py \ 35 | --cfg ${CFG_FILE} \ 36 | --model_path ${MODEL_PATH} 37 | -------------------------------------------------------------------------------- /scripts/slurm_train_mvp.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -x 4 | 5 | # CFG_FILE="configs/mvp/campus_config/mvp_campus.py" 6 | # CFG_FILE="configs/mvp/shelf_config/mvp_shelf.py" 7 | # CFG_FILE="configs/mvp/panoptic_config/mvp_panoptic.py" 8 | # CFG_FILE="configs/mvp/panoptic_config/mvp_panoptic_3cam.py" 9 | 10 | PARTITION=$1 11 | JOB_NAME=mvp_train 12 | GPUS_PER_NODE=$2 13 | CFG_FILE=$3 14 | CPUS_PER_TASK=1 15 | 16 | srun -p ${PARTITION} \ 17 | --job-name=${JOB_NAME} \ 18 | --gres=gpu:${GPUS_PER_NODE} \ 19 | --ntasks-per-node=1 
\ 20 | --cpus-per-task=${CPUS_PER_TASK} \ 21 | --kill-on-bad-exit=1 \ 22 | python -m torch.distributed.launch \ 23 | --nproc_per_node=${GPUS_PER_NODE} \ 24 | --master_port 44145 \ 25 | --use_env tools/train_model.py \ 26 | --cfg ${CFG_FILE} \ 27 | -------------------------------------------------------------------------------- /scripts/start_service_docker.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | TAG=openxrlab/xrmocap_runtime:ubuntu1804_x64_cuda116_py38_torch1121_mmcv161_service 3 | CONFIG_PATH=$1 4 | PORT=$(grep 'port =' ${CONFIG_PATH} | cut -d "=" -f 2 | tr -d ' ') 5 | echo "Starting service on port $PORT" 6 | PORTS="-p $PORT:$PORT" 7 | WORKSPACE_VOLUMES="-v $PWD:/workspace/xrmocap" 8 | WORKDIR="-w /workspace/xrmocap" 9 | MEMORY="--memory=20g" 10 | docker run --runtime=nvidia -it --rm --entrypoint=/bin/bash $PORTS $WORKSPACE_VOLUMES $WORKDIR $MEMORY $TAG -c " 11 | source /opt/miniconda/etc/profile.d/conda.sh 12 | conda activate openxrlab 13 | pip install . 14 | python tools/start_service.py --config_path $CONFIG_PATH 15 | " 16 | -------------------------------------------------------------------------------- /scripts/train_mvp.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -x 4 | 5 | # CFG_FILE="configs/mvp/campus_config/mvp_campus.py" 6 | # CFG_FILE="configs/mvp/shelf_config/mvp_shelf.py" 7 | # CFG_FILE="configs/mvp/panoptic_config/mvp_panoptic.py" 8 | # CFG_FILE="configs/mvp/panoptic_config/mvp_panoptic_3cam.py" 9 | 10 | GPUS_PER_NODE=$1 11 | CFG_FILE=$2 12 | 13 | python -m torch.distributed.launch \ 14 | --nproc_per_node=${GPUS_PER_NODE} \ 15 | --master_port 65530 \ 16 | --use_env tools/train_model.py \ 17 | --cfg ${CFG_FILE} \ 18 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [aliases] 2 | test=pytest 3 | 4 | [yapf] 5 | based_on_style = pep8 6 | blank_line_before_nested_class_or_def = true 7 | split_before_expression_after_opening_paren = true 8 | 9 | [isort] 10 | line_length = 79 11 | multi_line_output = 5 12 | include_trailing_comma = true 13 | known_standard_library = pkg_resources,setuptools 14 | known_first_party = xrmocap 15 | known_third_party =PIL,cv2,dateutil,filterpy,flask,flask_socketio,matplotlib,mmcv,mmhuman3d,numpy,prettytable,pytest,pytorch3d,scipy,smplx,socketio,sphinx_rtd_theme,torch,torchvision,tqdm,xrprimer 16 | no_lines_before = STDLIB,LOCALFOLDER 17 | default_section = THIRDPARTY 18 | -------------------------------------------------------------------------------- /tests/core/evaluation/test_fourdag_evaluation.py: -------------------------------------------------------------------------------- 1 | import mmcv 2 | import os 3 | import os.path as osp 4 | import pytest 5 | import shutil 6 | from xrprimer.data_structure import Keypoints 7 | 8 | from xrmocap.core.evaluation.builder import build_evaluation 9 | 10 | output_dir = 'tests/data/output/core/test_fourdag_evaluation' 11 | 12 | 13 | @pytest.fixture(scope='module', autouse=True) 14 | def fixture(): 15 | if os.path.exists(output_dir): 16 | shutil.rmtree(output_dir) 17 | os.makedirs(output_dir, exist_ok=False) 18 | 19 | 20 | def test_fourdag_evaluation(): 21 | evaluation_config = dict( 22 | mmcv.Config.fromfile('configs/modules/core/evaluation/' + 23 | 'bottom_up_eval_shelf_unittest.py')) 24 | evaluation_config['output_dir'] = output_dir 25 
| evaluation_config['dataset_visualization']['output_dir'] = output_dir 26 | evaluation_config['dataset_visualization']['pred_kps3d_paths'] = osp.join( 27 | output_dir, 'scene0_pred_keypoints3d.npz') 28 | os.makedirs(output_dir, exist_ok=True) 29 | evaluation = build_evaluation(evaluation_config) 30 | evaluation.run(overwrite=True) 31 | pred_keypoints3d = Keypoints.fromfile( 32 | osp.join(output_dir, 'scene0_pred_keypoints3d.npz')) 33 | pred_kps3d = pred_keypoints3d.get_keypoints() 34 | assert pred_kps3d.shape == (5, 2, 19, 4) 35 | assert pred_keypoints3d.get_mask().shape == (5, 2, 19) 36 | -------------------------------------------------------------------------------- /tests/core/evaluation/test_metric_manager.py: -------------------------------------------------------------------------------- 1 | # yapf: disable 2 | import os 3 | import pytest 4 | import shutil 5 | from xrprimer.data_structure import Keypoints 6 | from xrprimer.transform.convention.keypoints_convention import ( 7 | convert_keypoints, 8 | ) 9 | 10 | from xrmocap.core.evaluation.metric_manager import MetricManager 11 | 12 | # yapf: enable 13 | 14 | INPUT_DIR = 'tests/data/core/evaluation/test_metric_manager' 15 | OUTPUT_DIR = 'tests/data/output/core/evaluation/test_metric_manager' 16 | METRIC_LIST = [ 17 | dict( 18 | type='PredictionMatcher', 19 | name='matching', 20 | ), 21 | dict( 22 | type='PCKMetric', 23 | name='pck_50', 24 | threshold=[50], 25 | ), 26 | dict( 27 | type='MPJPEMetric', 28 | name='mpjpe', 29 | align_kps_name='left_ankle', 30 | ), 31 | dict( 32 | type='PAMPJPEMetric', 33 | name='pa_mpjpe', 34 | align_kps_name='left_ankle', 35 | ), 36 | ] 37 | 38 | 39 | @pytest.fixture(scope='module', autouse=True) 40 | def fixture(): 41 | if os.path.exists(OUTPUT_DIR): 42 | shutil.rmtree(OUTPUT_DIR) 43 | os.makedirs(OUTPUT_DIR, exist_ok=False) 44 | 45 | 46 | def test_construct(): 47 | # test None pick_dict 48 | manager = MetricManager( 49 | metric_list=METRIC_LIST, 50 | pick_dict=None, 51 | ) 52 | assert len(manager.pick_dict) == len(METRIC_LIST) 53 | # test one all 54 | manager = MetricManager( 55 | metric_list=METRIC_LIST, 56 | pick_dict=dict(pck_50='all'), 57 | ) 58 | assert len(manager.pick_dict) == 1 59 | # test pick name and names 60 | manager = MetricManager( 61 | metric_list=METRIC_LIST, 62 | pick_dict=dict( 63 | pck_50='pck_value', 64 | mpjpe=[ 65 | 'mpjpe_value', 66 | ], 67 | ), 68 | ) 69 | assert len(manager.pick_dict) == 2 70 | 71 | 72 | def test_call(): 73 | manager = MetricManager( 74 | metric_list=METRIC_LIST, 75 | pick_dict=dict( 76 | matching='all', 77 | pck_50='pck@50', 78 | mpjpe='mpjpe_mean', 79 | pa_mpjpe='pa_mpjpe_mean'), 80 | ) 81 | gt_path = os.path.join(INPUT_DIR, 'gt_keypoints3d.npz') 82 | gt_keypoints3d = Keypoints.fromfile(gt_path) 83 | gt_keypoints3d = convert_keypoints( 84 | gt_keypoints3d, dst='coco', approximate=True) 85 | pred_path = os.path.join(INPUT_DIR, 'pred_keypoints3d.npz') 86 | pred_keypoints3d = Keypoints.fromfile(pred_path) 87 | pred_keypoints3d = convert_keypoints( 88 | pred_keypoints3d, dst='coco', approximate=True) 89 | result_dict, _ = manager( 90 | pred_keypoints3d=pred_keypoints3d, 91 | gt_keypoints3d=gt_keypoints3d, 92 | ) 93 | assert len(result_dict) == len(METRIC_LIST) 94 | -------------------------------------------------------------------------------- /tests/core/evaluation/test_mvpose_evaluation.py: -------------------------------------------------------------------------------- 1 | import mmcv 2 | import os 3 | import os.path as osp 4 | import pytest 5 | import 
shutil 6 | import torch 7 | from xrprimer.data_structure import Keypoints 8 | 9 | from xrmocap.core.evaluation.builder import build_evaluation 10 | 11 | output_dir = 'tests/data/output/core/test_mvpose_evaluation' 12 | device = 'cuda' if torch.cuda.is_available() else 'cpu' 13 | 14 | 15 | @pytest.fixture(scope='module', autouse=True) 16 | def fixture(): 17 | if os.path.exists(output_dir): 18 | shutil.rmtree(output_dir) 19 | os.makedirs(output_dir, exist_ok=False) 20 | 21 | 22 | def test_mvpose_evaluation(): 23 | evaluation_config = dict( 24 | mmcv.Config.fromfile('configs/modules/core/evaluation/' + 25 | 'mview_mperson_eval_shelf_unittest.py')) 26 | evaluation_config['associator']['device'] = device 27 | evaluation_config['output_dir'] = output_dir 28 | evaluation_config['dataset_visualization']['output_dir'] = output_dir 29 | evaluation_config['dataset_visualization']['pred_kps3d_paths'] = osp.join( 30 | output_dir, 'scene0_pred_keypoints3d.npz') 31 | os.makedirs(output_dir, exist_ok=True) 32 | evaluation = build_evaluation(evaluation_config) 33 | evaluation.run(overwrite=True) 34 | pred_keypoints3d = Keypoints.fromfile( 35 | osp.join(output_dir, 'scene0_pred_keypoints3d.npz')) 36 | pred_kps3d = pred_keypoints3d.get_keypoints() 37 | assert pred_kps3d.shape == (5, 2, 17, 4) 38 | assert pred_keypoints3d.get_mask().shape == (5, 2, 17) 39 | -------------------------------------------------------------------------------- /tests/io/test_camera_io.py: -------------------------------------------------------------------------------- 1 | from xrmocap.data_structure.smc_reader import SMCReader 2 | from xrmocap.io.camera import get_color_camera_parameter_from_smc 3 | 4 | 5 | def test_load_from_smc(): 6 | smc_reader = SMCReader('tests/data/p000103_a000011_tiny.smc') 7 | kinect_number = smc_reader.num_kinects 8 | # get the color camera parameter of the first kinect from smc 9 | for kinect_index in range(kinect_number): 10 | cam_param = get_color_camera_parameter_from_smc( 11 | smc_reader=smc_reader, 12 | camera_type='kinect', 13 | camera_id=kinect_index) 14 | break 15 | assert len(cam_param.get_intrinsic(k_dim=4)) == 4 16 | -------------------------------------------------------------------------------- /tests/model/body_model/test_smpl.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | import pytest 4 | import shutil 5 | import torch 6 | 7 | from xrmocap.data_structure.body_model.smpl_data import SMPLData 8 | from xrmocap.model.body_model.builder import build_body_model 9 | 10 | body_model_load_dir = 'xrmocap_data/body_models/smpl' 11 | extra_joints_regressor_path = 'xrmocap_data/body_models/J_regressor_extra.npy' 12 | input_dir = 'tests/data/model/body_model/test_smpl' 13 | output_dir = 'tests/data/output/model/body_model/test_smpl' 14 | 15 | 16 | @pytest.fixture(scope='module', autouse=True) 17 | def fixture(): 18 | if os.path.exists(output_dir): 19 | shutil.rmtree(output_dir) 20 | os.makedirs(output_dir, exist_ok=False) 21 | 22 | 23 | def test_smpl(): 24 | random_body_pose = torch.rand((1, 69)) 25 | # test SMPL without extra_joints_regressor 26 | smpl_45 = build_body_model( 27 | dict( 28 | type='SMPL', 29 | keypoint_convention='smpl_45', 30 | model_path=body_model_load_dir)) 31 | smpl_45_output = smpl_45(body_pose=random_body_pose) 32 | assert isinstance(smpl_45_output['joints'], torch.Tensor) 33 | assert smpl_45_output['joints'].shape[1] == 45 34 | smpl_data = SMPLData() 35 | smpl_data.from_param_dict(smpl_45_output) 36 | assert 'fullpose' in smpl_data 37 | assert
isinstance(smpl_data['fullpose'], np.ndarray) 38 | npz_path = os.path.join(output_dir, 'dumped_smpl_data.npz') 39 | smpl_data.dump(npz_path) 40 | # test SMPL with extra_joints_regressor 41 | smpl_54 = build_body_model( 42 | dict( 43 | type='SMPL', 44 | keypoint_convention='smpl_54', 45 | model_path=body_model_load_dir, 46 | extra_joints_regressor=extra_joints_regressor_path)) 47 | smpl_54_output = smpl_54(body_pose=random_body_pose) 48 | assert isinstance(smpl_54_output['joints'], torch.Tensor) 49 | assert smpl_54_output['joints'].shape[1] == 54 50 | -------------------------------------------------------------------------------- /tests/model/body_model/test_smplx.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | import pytest 4 | import shutil 5 | import torch 6 | 7 | from xrmocap.data_structure.body_model import SMPLXData 8 | from xrmocap.model.body_model.builder import build_body_model 9 | 10 | body_model_load_dir = 'xrmocap_data/body_models/smplx' 11 | input_dir = 'tests/data/model/body_model/test_smplx' 12 | output_dir = 'tests/data/output/model/body_model/test_smplx' 13 | 14 | 15 | @pytest.fixture(scope='module', autouse=True) 16 | def fixture(): 17 | if os.path.exists(output_dir): 18 | shutil.rmtree(output_dir) 19 | os.makedirs(output_dir, exist_ok=False) 20 | 21 | 22 | def test_smplx(): 23 | random_body_pose = torch.rand((1, 63)) 24 | left_hand_pose = torch.rand((1, 45)) 25 | right_hand_pose = torch.rand((1, 45)) 26 | jaw_pose = torch.rand((1, 3)) 27 | leye_pose = torch.rand((1, 3)) 28 | reye_pose = torch.rand((1, 3)) 29 | # test SMPLX without extra_joints_regressor 30 | smplx = build_body_model( 31 | dict( 32 | type='SMPLX', 33 | gender='neutral', 34 | num_betas=10, 35 | use_face_contour=True, 36 | keypoint_convention='smplx', 37 | model_path=body_model_load_dir, 38 | batch_size=1, 39 | use_pca=False, 40 | logger=None)) 41 | smplx_output = smplx( 42 | body_pose=random_body_pose, 43 | left_hand_pose=left_hand_pose, 44 | right_hand_pose=right_hand_pose, 45 | jaw_pose=jaw_pose, 46 | leye_pose=leye_pose, 47 | reye_pose=reye_pose) 48 | assert isinstance(smplx_output['joints'], torch.Tensor) 49 | assert smplx_output['joints'].shape[1] == 144 50 | smplx_data = SMPLXData() 51 | smplx_data.from_param_dict(smplx_output) 52 | assert 'fullpose' in smplx_data 53 | assert isinstance(smplx_data['fullpose'], np.ndarray) 54 | npz_path = os.path.join(output_dir, 'dumped_smplx_data.npz') 55 | smplx_data.dump(npz_path) 56 | -------------------------------------------------------------------------------- /tests/ops/test_bottom_up_association.py: -------------------------------------------------------------------------------- 1 | # yapf: disable 2 | import mmcv 3 | import os 4 | import pytest 5 | import shutil 6 | 7 | from xrmocap.ops.bottom_up_association.builder import ( 8 | build_bottom_up_associator, 9 | ) 10 | 11 | # yapf: enable 12 | 13 | output_dir = 'tests/data/output/ops/test_bottom_up_association' 14 | 15 | 16 | @pytest.fixture(scope='module', autouse=True) 17 | def fixture(): 18 | if os.path.exists(output_dir): 19 | shutil.rmtree(output_dir) 20 | os.makedirs(output_dir, exist_ok=False) 21 | 22 | 23 | def test_build_bottom_up_associator(): 24 | associator_cfg = dict( 25 | mmcv.Config.fromfile('configs/modules/ops/' + 26 | 'bottom_up_association/' + 27 | 'fourdag_associator.py')) 28 | associator = build_bottom_up_associator(associator_cfg) 29 | assert associator is not None 30 | 
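The association tests above build their operators from config files through mmcv's Registry, the same build-from-config pattern used by every build_* helper in this repository: the dict's 'type' key selects a registered class, and the remaining keys become constructor arguments. Below is a minimal, self-contained sketch of that mechanism; ToyAssociator is a hypothetical class for illustration only, not part of xrmocap.

from mmcv.utils import Registry

TOY_ASSOCIATORS = Registry('toy_associator')


@TOY_ASSOCIATORS.register_module(name='ToyAssociator')
class ToyAssociator:

    def __init__(self, n_views: int = 4) -> None:
        # every key of the config dict except 'type' is forwarded here
        self.n_views = n_views


cfg = dict(type='ToyAssociator', n_views=5)
associator = TOY_ASSOCIATORS.build(cfg)  # same as ToyAssociator(n_views=5)
assert associator.n_views == 5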
-------------------------------------------------------------------------------- /tests/ops/test_top_down_association.py: -------------------------------------------------------------------------------- 1 | import mmcv 2 | import os 3 | import pytest 4 | import shutil 5 | import torch 6 | 7 | from xrmocap.ops.top_down_association.builder import build_top_down_associator 8 | 9 | output_dir = 'tests/data/output/ops/test_top_down_association' 10 | 11 | 12 | @pytest.fixture(scope='module', autouse=True) 13 | def fixture(): 14 | if os.path.exists(output_dir): 15 | shutil.rmtree(output_dir) 16 | os.makedirs(output_dir, exist_ok=False) 17 | 18 | 19 | @pytest.mark.skipif( 20 | not torch.cuda.is_available(), reason='No GPU device has been found.') 21 | def test_build_mvpose_associator(): 22 | associator_cfg = dict( 23 | mmcv.Config.fromfile('configs/modules/ops/' + 'top_down_association/' + 24 | 'mvpose_tracking_associator.py')) 25 | os.makedirs(output_dir, exist_ok=True) 26 | associator = build_top_down_associator(associator_cfg) 27 | assert associator is not None 28 | 29 | 30 | @pytest.mark.skipif( 31 | not torch.cuda.is_available(), reason='No GPU device has been found.') 32 | def test_run_mvpose_associator(): 33 | associator_cfg = dict( 34 | mmcv.Config.fromfile('configs/modules/ops/' + 'top_down_association/' + 35 | 'mvpose_associator.py')) 36 | os.makedirs(output_dir, exist_ok=True) 37 | associator = build_top_down_associator(associator_cfg) 38 | assert associator is not None 39 | 40 | 41 | @pytest.mark.skipif( 42 | not torch.cuda.is_available(), reason='No GPU device has been found.') 43 | def test_run_mvpose_tracking_associator(): 44 | associator_cfg = dict( 45 | mmcv.Config.fromfile('configs/modules/ops/' + 'top_down_association/' + 46 | 'mvpose_tracking_associator.py')) 47 | os.makedirs(output_dir, exist_ok=True) 48 | associator = build_top_down_associator(associator_cfg) 49 | assert associator is not None 50 | -------------------------------------------------------------------------------- /tests/transform/convention/test_bbox_convention.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | 4 | from xrmocap.transform.convention.bbox_convention import convert_bbox 5 | 6 | 7 | def test_convert_bbox_numpy(): 8 | # test dim 9 | single_bbox = np.asarray((1, 2, 3, 4)) 10 | output_bbox = convert_bbox(single_bbox, src='xyxy', dst='xywh') 11 | assert isinstance(output_bbox, np.ndarray) 12 | assert output_bbox[2] == 2 and output_bbox[3] == 2 13 | single_xywh_bbox = output_bbox 14 | single_xyxy_bbox = single_bbox 15 | batch_bbox = np.expand_dims(single_xywh_bbox, axis=0).repeat(2, axis=0) 16 | output_bbox = convert_bbox(batch_bbox, src='xywh', dst='xyxy') 17 | assert isinstance(output_bbox, np.ndarray) 18 | assert output_bbox[1, 2] == single_xyxy_bbox[2] and\ 19 | output_bbox[1, 3] == single_xyxy_bbox[3] 20 | # test score 21 | scores = np.zeros((2, 1)) 22 | scores[1, 0] = 0.5 23 | batch_bbox = np.concatenate((batch_bbox, scores), axis=1) 24 | output_bbox = convert_bbox(batch_bbox, src='xywh', dst='xyxy') 25 | assert output_bbox[0, 4] == 0 and output_bbox[1, 4] == 0.5 26 | # test src == dst 27 | output_bbox = convert_bbox(batch_bbox, src='xywh', dst='xywh') 28 | assert (output_bbox == batch_bbox).all() 29 | 30 | 31 | def test_convert_bbox_torch(): 32 | # test dim 33 | single_bbox = torch.tensor((1, 2, 3, 4)) 34 | output_bbox = convert_bbox(single_bbox, src='xyxy', dst='xywh') 35 | assert isinstance(output_bbox, torch.Tensor)
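    # Convention note: 'xyxy' stores corners (x1, y1, x2, y2) while 'xywh'
    # stores (x, y, w, h), so w = x2 - x1 and h = y2 - y1. That is why
    # (1, 2, 3, 4) in xyxy becomes (1, 2, 2, 2) in xywh in the asserts
    # above and below, for both the numpy and the torch implementations.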
36 | assert output_bbox[2] == 2 and output_bbox[3] == 2 37 | single_xywh_bbox = output_bbox 38 | single_xyxy_bbox = single_bbox 39 | batch_bbox = single_xywh_bbox.unsqueeze(0).repeat(2, 1) 40 | output_bbox = convert_bbox(batch_bbox, src='xywh', dst='xyxy') 41 | assert isinstance(output_bbox, torch.Tensor) 42 | assert output_bbox[1, 2] == single_xyxy_bbox[2] and\ 43 | output_bbox[1, 3] == single_xyxy_bbox[3] 44 | # test score 45 | scores = torch.zeros((2, 1)) 46 | scores[1, 0] = 0.5 47 | batch_bbox = torch.cat((batch_bbox, scores), dim=1) 48 | output_bbox = convert_bbox(batch_bbox, src='xywh', dst='xyxy') 49 | assert output_bbox[0, 4] == 0 and output_bbox[1, 4] == 0.5 50 | # test src == dst 51 | output_bbox = convert_bbox(batch_bbox, src='xywh', dst='xywh') 52 | assert (output_bbox == batch_bbox).all() 53 | -------------------------------------------------------------------------------- /tests/transform/convention/test_keypoints_convention.py: -------------------------------------------------------------------------------- 1 | # yapf: disable 2 | import numpy as np 3 | import torch 4 | from xrprimer.data_structure import Keypoints 5 | from xrprimer.transform.convention.keypoints_convention import ( 6 | convert_keypoints, get_keypoint_num, get_mapping_dict, 7 | ) 8 | 9 | # yapf: enable 10 | 11 | 12 | def test_get_mapping_dict(): 13 | map_dict = get_mapping_dict(src='human_data', dst='coco') 14 | assert len(map_dict) == get_keypoint_num(convention='coco') 15 | for k, v in map_dict.items(): 16 | assert k >= 0 and k < 190 17 | assert v >= 0 and v < get_keypoint_num(convention='coco') 18 | 19 | 20 | def test_convert_keypoints(): 21 | # test convert np 22 | kps_np = np.zeros(shape=(2, 3, 25, 3)) 23 | mask_np = np.ones(shape=(2, 3, 25)) 24 | convention = 'openpose_25' 25 | keypoints = Keypoints(kps=kps_np, mask=mask_np, convention=convention) 26 | assert isinstance(keypoints.get_keypoints(), np.ndarray) 27 | hd_keypoints = convert_keypoints(keypoints=keypoints, dst='human_data') 28 | assert isinstance(hd_keypoints.get_keypoints(), np.ndarray) 29 | single_mask = hd_keypoints.get_mask()[0, 0] 30 | assert single_mask.sum() == mask_np.shape[-1] 31 | # test convert torch 32 | keypoints = keypoints.to_tensor() 33 | assert isinstance(keypoints.get_keypoints(), torch.Tensor) 34 | hd_keypoints = convert_keypoints(keypoints=keypoints, dst='human_data') 35 | assert isinstance(hd_keypoints.get_keypoints(), torch.Tensor) 36 | single_mask = hd_keypoints.get_mask()[0, 0] 37 | assert single_mask.sum() == mask_np.shape[-1] 38 | -------------------------------------------------------------------------------- /tests/transform/image/test_color.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | 4 | from xrmocap.transform.image.color import bgr2rgb, rgb2bgr 5 | 6 | 7 | def test_bgr2rgb(): 8 | # test numpy 9 | # one img 10 | rgb_image = np.zeros(shape=(3, 1920, 1080)) 11 | rgb_image[2, ...] = 2 12 | assert rgb_image[2, 0, 0] == 2 13 | bgr_image = rgb2bgr(rgb_image, color_dim=0) 14 | assert bgr_image[0, 0, 0] == 2 15 | assert bgr_image[2, 0, 0] == 0 16 | rgb_image = bgr2rgb(bgr_image, color_dim=0) 17 | assert rgb_image[2, 0, 0] == 2 18 | assert rgb_image[0, 0, 0] == 0 19 | # pytorch batch like 20 | rgb_image = np.zeros(shape=(2, 3, 1920, 1080)) 21 | rgb_image[:, 2, ...]
= 2 22 | assert rgb_image[0, 2, 0, 0] == 2 23 | bgr_image = rgb2bgr(rgb_image, color_dim=1) 24 | assert bgr_image[0, 0, 0, 0] == 2 25 | assert bgr_image[0, 2, 0, 0] == 0 26 | # opencv video like 27 | rgb_image = np.zeros(shape=(2, 1920, 1080, 3)) 28 | rgb_image[..., 2] = 2 29 | assert rgb_image[0, 0, 0, 2] == 2 30 | bgr_image = rgb2bgr(rgb_image, color_dim=-1) 31 | assert bgr_image[0, 0, 0, 0] == 2 32 | assert bgr_image[0, 0, 0, 2] == 0 33 | # test torch 34 | # one img 35 | rgb_image = torch.zeros(size=(3, 1920, 1080)) 36 | rgb_image[2, ...] = 2 37 | assert rgb_image[2, 0, 0] == 2 38 | bgr_image = rgb2bgr(rgb_image, color_dim=0) 39 | assert bgr_image[0, 0, 0] == 2 40 | assert bgr_image[2, 0, 0] == 0 41 | # pytorch batch like 42 | rgb_image = torch.zeros(size=(2, 3, 1920, 1080)) 43 | rgb_image[:, 2, ...] = 2 44 | assert rgb_image[0, 2, 0, 0] == 2 45 | bgr_image = rgb2bgr(rgb_image, color_dim=1) 46 | assert bgr_image[0, 0, 0, 0] == 2 47 | assert bgr_image[0, 2, 0, 0] == 0 48 | # opencv video like 49 | rgb_image = torch.zeros(size=(2, 1920, 1080, 3)) 50 | rgb_image[..., 2] = 2 51 | assert rgb_image[0, 0, 0, 2] == 2 52 | bgr_image = rgb2bgr(rgb_image, color_dim=-1) 53 | assert bgr_image[0, 0, 0, 0] == 2 54 | assert bgr_image[0, 0, 0, 2] == 0 55 | # test in-place 56 | rgb_image = np.zeros(shape=(3, 1920, 1080)) 57 | rgb_image[2, ...] = 2 58 | rgb2bgr(rgb_image, color_dim=0, inplace=True) 59 | assert rgb_image[0, 0, 0] == 2 60 | rgb_image = torch.zeros(size=(3, 1920, 1080)) 61 | rgb_image[2, ...] = 2 62 | rgb2bgr(rgb_image, color_dim=0, inplace=True) 63 | assert rgb_image[0, 0, 0] == 2 64 | -------------------------------------------------------------------------------- /tests/transform/keypoints3d/test_optim.py: -------------------------------------------------------------------------------- 1 | # yapf: disable 2 | import numpy as np 3 | from xrprimer.data_structure import Keypoints 4 | 5 | from xrmocap.transform.keypoints3d.optim.builder import ( 6 | build_keypoints3d_optimizer, 7 | ) 8 | 9 | # yapf: enable 10 | 11 | 12 | def test_nan_interpolation(): 13 | # 3 frames, 2 people, 5 kps 14 | kps_arr = np.zeros(shape=[3, 2, 5, 4]) 15 | mask = np.ones_like(kps_arr[..., 0]) 16 | kps_arr[1, 0, :, :] = np.nan 17 | kps_arr[2, 0, :, :] = 20 18 | keypoints3d = Keypoints( 19 | dtype='numpy', kps=kps_arr, mask=mask, convention='non_exist_conv') 20 | kps_arr_backup = kps_arr.copy() 21 | cfg = dict(type='NanInterpolation') 22 | optim = build_keypoints3d_optimizer(cfg) 23 | # test numpy keypoints3d 24 | optimed_keypoints3d = optim.optimize_keypoints3d(keypoints3d) 25 | # assert input not changed 26 | assert np.isnan(keypoints3d.get_keypoints()[1, 0, 0, 0]) 27 | assert np.all(keypoints3d.get_keypoints()[2, ...]
== kps_arr_backup[2, 28 | ...]) 29 | # the second person should be the same 30 | assert np.all(optimed_keypoints3d.get_keypoints()[:, 1, :, :] == 31 | keypoints3d.get_keypoints()[:, 1, :, :]) 32 | # the first person has been interpolated 33 | assert optimed_keypoints3d.get_keypoints()[1, 0, 0, 0] == 10 34 | assert not np.any(np.isnan(optimed_keypoints3d.get_keypoints())) 35 | # test torch keypoints3d 36 | optimed_keypoints3d = optim.optimize_keypoints3d(keypoints3d.to_tensor()) 37 | # the second person should be the same 38 | assert np.all(optimed_keypoints3d.get_keypoints()[:, 1, :, :] == 39 | keypoints3d.get_keypoints()[:, 1, :, :]) 40 | # the first person has been interpolated 41 | assert optimed_keypoints3d.get_keypoints()[1, 0, 0, 0] == 10 42 | assert not np.any(np.isnan(optimed_keypoints3d.get_keypoints())) 43 | -------------------------------------------------------------------------------- /tests/transform/test_limbs_transform.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | import os 4 | import pytest 5 | import shutil 6 | from xrprimer.data_structure import Keypoints 7 | from xrprimer.transform.limbs import get_limbs_from_keypoints 8 | 9 | input_dir = 'tests/data/transform/test_limbs' 10 | output_dir = 'tests/data/output/transform/test_limbs' 11 | 12 | 13 | @pytest.fixture(scope='module', autouse=True) 14 | def fixture(): 15 | if os.path.exists(output_dir): 16 | shutil.rmtree(output_dir) 17 | os.makedirs(output_dir, exist_ok=False) 18 | 19 | 20 | def test_get_limbs_from_keypoints(): 21 | # test get from numpy 22 | keypoints3d = Keypoints.fromfile( 23 | 'tests/data/ops/test_projection/keypoints3d.npz') 24 | limbs = get_limbs_from_keypoints(keypoints=keypoints3d) 25 | assert len(limbs) > 0 26 | assert limbs.get_points() is None 27 | # test get from torch 28 | keypoints3d_torch = keypoints3d.to_tensor() 29 | limbs = get_limbs_from_keypoints(keypoints=keypoints3d_torch) 30 | assert len(limbs) > 0 31 | assert len(limbs.get_parts()) > 0 32 | assert limbs.get_points() is None 33 | # test get with points 34 | limbs = get_limbs_from_keypoints( 35 | keypoints=keypoints3d, frame_idx=0, person_idx=0) 36 | assert limbs.get_points() is not None 37 | conn = limbs.get_connections() 38 | canvas = np.ones(shape=(1080, 1920, 3), dtype=np.uint8) 39 | points = limbs.get_points() 40 | for start_pt_idx, end_pt_idx in conn: 41 | cv2.line( 42 | img=canvas, 43 | pt1=points[start_pt_idx, :2].astype(np.int32), 44 | pt2=points[end_pt_idx, :2].astype(np.int32), 45 | color=(255, 0, 0), 46 | thickness=2) 47 | cv2.imwrite( 48 | filename=os.path.join(output_dir, 'limbs_from_keypoints.jpg'), 49 | img=canvas) 50 | # test get connection names 51 | limbs = get_limbs_from_keypoints( 52 | keypoints=keypoints3d, frame_idx=0, person_idx=0, fill_limb_names=True) 53 | conn_dict = limbs.get_connections_by_names() 54 | assert len(conn_dict) > 0 55 | -------------------------------------------------------------------------------- /tools/clients/smpl_verts_client.py: -------------------------------------------------------------------------------- 1 | # yapf: disable 2 | import argparse 3 | import logging 4 | import numpy as np 5 | import os 6 | import sys 7 | import time 8 | from tqdm import tqdm 9 | 10 | from xrmocap.client.smpl_stream_client import SMPLStreamClient 11 | 12 | # yapf: enable 13 | 14 | 15 | def main(args) -> int: 16 | name = os.path.basename(__file__).split('.')[0] 17 | logger = logging.getLogger(name) 18 | if args.verbose: 19 | 
logger.setLevel(logging.INFO) 20 | else: 21 | logger.setLevel(logging.WARNING) 22 | if args.smpl_data_path is None: 23 | logger.error('Please specify smpl_data_path.') 24 | raise ValueError 25 | client = SMPLStreamClient( 26 | server_ip=args.server_ip, server_port=args.server_port, logger=logger) 27 | n_frames = client.upload_smpl_data(args.smpl_data_path) 28 | logger.info(f'Motion of {n_frames} frames uploaded.') 29 | faces = client.get_faces() 30 | faces_np = np.array(faces) 31 | logger.info(f'Get faces: {faces_np.shape}') 32 | start_time = time.time() 33 | for frame_idx in tqdm(range(n_frames)): 34 | verts = client.forward(frame_idx) 35 | if frame_idx == 0: 36 | verts_np = np.array(verts) 37 | logger.info(f'Get verts for first frame: {verts_np.shape}') 38 | loop_time = time.time() - start_time 39 | fps = n_frames / loop_time 40 | logger.info(f'Get verts for all frames, average fps: {fps:.2f}') 41 | client.close() 42 | return 0 43 | 44 | 45 | def setup_parser(): 46 | parser = argparse.ArgumentParser( 47 | description='Send a smpl data file to ' + 48 | 'SMPLStreamServer and receive faces and verts.') 49 | parser.add_argument( 50 | '--smpl_data_path', 51 | help='Path to a SMPL(X)Data file.', 52 | type=str, 53 | ) 54 | parser.add_argument( 55 | '--server_ip', 56 | help='IP address of the server.', 57 | type=str, 58 | default='127.0.0.1') 59 | parser.add_argument( 60 | '--server_port', 61 | help='Port number of the server.', 62 | type=int, 63 | default=29091) 64 | parser.add_argument( 65 | '--verbose', 66 | action='store_true', 67 | help='If True, INFO level log will be shown.', 68 | default=False) 69 | args = parser.parse_args() 70 | return args 71 | 72 | 73 | if __name__ == '__main__': 74 | args = setup_parser() 75 | ret_val = main(args) 76 | sys.exit(ret_val) 77 | -------------------------------------------------------------------------------- /tools/eval_model.py: -------------------------------------------------------------------------------- 1 | # yapf:disable 2 | import argparse 3 | import mmcv 4 | from xrprimer.utils.log_utils import setup_logger 5 | 6 | from xrmocap.core.train.builder import build_trainer 7 | from xrmocap.utils.distribute_utils import ( 8 | init_distributed_mode, is_main_process, 9 | ) 10 | from xrmocap.utils.mvp_utils import get_directory 11 | 12 | # yapf:enable 13 | 14 | 15 | def parse_args(): 16 | parser = argparse.ArgumentParser(description='Evaluate keypoints network') 17 | parser.add_argument( 18 | '--cfg', 19 | help='experiment configure file name', 20 | required=True, 21 | type=str) 22 | parser.add_argument( 23 | '--device', 24 | default='cuda', 25 | help='device to use for training / testing') 26 | parser.add_argument('--seed', default=42, type=int) 27 | # distributed training parameters 28 | parser.add_argument( 29 | '--world_size', 30 | default=1, 31 | type=int, 32 | help='number of distributed processes') 33 | parser.add_argument( 34 | '--dist_url', 35 | default='env://', 36 | help='url used to set up distributed training') 37 | parser.add_argument('--weight_decay', default=1e-4, type=float) 38 | parser.add_argument( 39 | '--model_path', 40 | default=None, 41 | type=str, 42 | help='pass model path for evaluation') 43 | 44 | args, rest = parser.parse_known_args() 45 | 46 | config = mmcv.Config.fromfile(args.cfg) 47 | 48 | return args, config 49 | 50 | 51 | def main(): 52 | args, config = parse_args() 53 | 54 | log_file, final_output_dir = get_directory( 55 | state='eval', 56 | output_dir=config.output_dir, 57 | cfg_name=args.cfg, 58 | 
dataset=config.dataset, 59 | model=config.model, 60 | resnet_layer=config.backbone_layers) 61 | 62 | logger = setup_logger(logger_name='mvp_eval', logger_path=log_file) 63 | 64 | distributed, gpu_idx = \ 65 | init_distributed_mode(args.world_size, 66 | args.dist_url, logger) 67 | 68 | config_dict = dict( 69 | type='MVPTrainer', 70 | logger=logger, 71 | device=args.device, 72 | seed=args.seed, 73 | distributed=distributed, 74 | model_path=args.model_path, 75 | gpu_idx=gpu_idx, 76 | final_output_dir=final_output_dir, 77 | ) 78 | config_dict.update(config.trainer_setup) 79 | 80 | if is_main_process(): 81 | logger.info(args) 82 | logger.info(config_dict) 83 | 84 | trainer = build_trainer(config_dict) 85 | 86 | trainer.eval() 87 | 88 | 89 | if __name__ == '__main__': 90 | main() 91 | -------------------------------------------------------------------------------- /tools/misc/publish_model.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import subprocess 3 | import torch 4 | from datetime import date 5 | 6 | 7 | def parse_args(): 8 | parser = argparse.ArgumentParser( 9 | description='Process a checkpoint to be published') 10 | parser.add_argument('in_file', help='input checkpoint filename') 11 | parser.add_argument('out_file', help='output checkpoint filename') 12 | args = parser.parse_args() 13 | return args 14 | 15 | 16 | def process_checkpoint(in_file, out_file): 17 | checkpoint = torch.load(in_file, map_location='cpu') 18 | # remove optimizer for smaller file size 19 | if 'optimizer' in checkpoint: 20 | del checkpoint['optimizer'] 21 | # if it is necessary to remove some sensitive data in checkpoint['meta'], 22 | # add the code here. 23 | torch.save(checkpoint, out_file) 24 | sha = subprocess.check_output(['sha256sum', out_file]).decode() 25 | if out_file.endswith('.pth'): 26 | out_file_name = out_file[:-4] 27 | else: 28 | out_file_name = out_file 29 | 30 | date_now = date.today().strftime('%Y%m%d') 31 | final_file = out_file_name + f'-{sha[:8]}_{date_now}.pth' 32 | subprocess.Popen(['mv', out_file, final_file]) 33 | 34 | 35 | def main(): 36 | args = parse_args() 37 | process_checkpoint(args.in_file, args.out_file) 38 | 39 | 40 | if __name__ == '__main__': 41 | main() 42 | -------------------------------------------------------------------------------- /tools/mview_mperson_evaluation.py: -------------------------------------------------------------------------------- 1 | # yapf: disable 2 | import argparse 3 | import datetime 4 | import mmcv 5 | import os 6 | from xrprimer.utils.log_utils import setup_logger 7 | 8 | from xrmocap.core.evaluation.builder import build_evaluation 9 | 10 | # yapf: enable 11 | 12 | 13 | def main(args): 14 | os.makedirs('logs', exist_ok=True) 15 | if args.enable_log_file: 16 | time_str = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S') 17 | log_path = os.path.join('logs', f'evaluation_log_{time_str}.txt') 18 | logger = setup_logger(logger_name=__name__, logger_path=log_path) 19 | else: 20 | logger = setup_logger(logger_name=__name__) 21 | evaluation_config = dict(mmcv.Config.fromfile(args.evaluation_config)) 22 | os.makedirs(evaluation_config['output_dir'], exist_ok=True) 23 | evaluation_config['logger'] = logger 24 | evaluation = build_evaluation(evaluation_config) 25 | evaluation.run(overwrite=True) 26 | 27 | 28 | def setup_parser(): 29 | parser = argparse.ArgumentParser( 30 | description='Evaluate Top-down keypoints3d estimator.') 31 | parser.add_argument( 32 | '--enable_log_file', 33 | 
action='store_true', 34 | help='If checked, log will be written as file.', 35 | default=False) 36 | parser.add_argument( 37 | '--evaluation_config', 38 | default='configs/mvpose_tracking/campus_config/eval_keypoints3d.py') 39 | args = parser.parse_args() 40 | return args 41 | 42 | 43 | if __name__ == '__main__': 44 | args = setup_parser() 45 | main(args) 46 | -------------------------------------------------------------------------------- /tools/start_service.py: -------------------------------------------------------------------------------- 1 | # yapf: disable 2 | import argparse 3 | import json 4 | import mmcv 5 | import os 6 | from xrprimer.utils.log_utils import logging, setup_logger 7 | 8 | from xrmocap.service.builder import build_service 9 | from xrmocap.utils.date_utils import get_datetime_local, get_str_from_datetime 10 | 11 | # yapf: enable 12 | 13 | 14 | def main(args): 15 | # load config 16 | service_config = dict(mmcv.Config.fromfile(args.config_path)) 17 | service_name = service_config['name'] 18 | # setup logger 19 | if not args.disable_log_file: 20 | datetime = get_datetime_local() 21 | time_str = get_str_from_datetime(datetime) 22 | log_dir = os.path.join('logs', f'{service_name}_{time_str}') 23 | os.makedirs(log_dir) 24 | main_logger_path = None \ 25 | if args.disable_log_file\ 26 | else os.path.join(log_dir, f'{service_name}_log.txt') 27 | flask_logger_path = None \ 28 | if args.disable_log_file\ 29 | else os.path.join(log_dir, 'flask_log.txt') 30 | logger = setup_logger( 31 | logger_name=service_name, 32 | file_level=logging.DEBUG, 33 | console_level=logging.INFO, 34 | logger_path=main_logger_path) 35 | # logger for Flask 36 | flask_logger = setup_logger( 37 | logger_name='werkzeug', 38 | file_level=logging.DEBUG, 39 | console_level=logging.INFO, 40 | logger_path=flask_logger_path) 41 | logger.info('Main logger starts.') 42 | flask_logger.info('Flask logger starts.') 43 | # build service 44 | service_config_str = json.dumps(service_config, indent=4) 45 | logger.debug(f'\nservice_config:\n{service_config_str}') 46 | service_config['logger'] = logger 47 | service = build_service(service_config) 48 | service.run() 49 | 50 | 51 | def setup_parser(): 52 | parser = argparse.ArgumentParser() 53 | # input args 54 | parser.add_argument( 55 | '--config_path', 56 | type=str, 57 | help='Path to service config file.', 58 | default='configs/modules/service/base_service.py') 59 | # log args 60 | parser.add_argument( 61 | '--disable_log_file', 62 | action='store_true', 63 | help='If checked, log will not be written as file.', 64 | default=False) 65 | args = parser.parse_args() 66 | return args 67 | 68 | 69 | if __name__ == '__main__': 70 | args = setup_parser() 71 | main(args) 72 | -------------------------------------------------------------------------------- /tools/train_model.py: -------------------------------------------------------------------------------- 1 | # yapf:disable 2 | import argparse 3 | import mmcv 4 | from xrprimer.utils.log_utils import setup_logger 5 | 6 | from xrmocap.core.train.builder import build_trainer 7 | from xrmocap.utils.distribute_utils import ( 8 | init_distributed_mode, is_main_process, 9 | ) 10 | from xrmocap.utils.mvp_utils import get_directory 11 | 12 | # yapf:enable 13 | 14 | 15 | def parse_args(): 16 | parser = argparse.ArgumentParser(description='Train keypoints network') 17 | parser.add_argument( 18 | '--cfg', 19 | help='experiment configure file name', 20 | required=True, 21 | type=str) 22 | parser.add_argument( 23 | '--device', 24 | 
default='cuda', 25 | help='device to use for training / testing') 26 | parser.add_argument('--seed', default=42, type=int) 27 | # distributed training parameters 28 | parser.add_argument( 29 | '--world_size', 30 | default=1, 31 | type=int, 32 | help='number of distributed processes') 33 | parser.add_argument( 34 | '--dist_url', 35 | default='env://', 36 | help='url used to set up distributed training') 37 | 38 | args, rest = parser.parse_known_args() 39 | 40 | config = mmcv.Config.fromfile(args.cfg) 41 | 42 | return args, config 43 | 44 | 45 | def main(): 46 | args, config = parse_args() 47 | 48 | log_file, final_output_dir = get_directory( 49 | state='train', 50 | output_dir=config.output_dir, 51 | cfg_name=args.cfg, 52 | dataset=config.dataset, 53 | model=config.model, 54 | resnet_layer=config.backbone_layers) 55 | 56 | logger = setup_logger(logger_name='mvp_train', logger_path=log_file) 57 | 58 | distributed, gpu_idx = \ 59 | init_distributed_mode(args.world_size, 60 | args.dist_url, logger) 61 | 62 | config_dict = dict( 63 | type='MVPTrainer', 64 | logger=logger, 65 | device=args.device, 66 | seed=args.seed, 67 | distributed=distributed, 68 | model_path=None, 69 | gpu_idx=gpu_idx, 70 | final_output_dir=final_output_dir, 71 | ) 72 | config_dict.update(config.trainer_setup) 73 | 74 | if is_main_process(): 75 | logger.info(args) 76 | logger.info(config_dict) 77 | 78 | trainer = build_trainer(config_dict) 79 | trainer.train() 80 | 81 | 82 | if __name__ == '__main__': 83 | main() 84 | -------------------------------------------------------------------------------- /xrmocap/__init__.py: -------------------------------------------------------------------------------- 1 | from .version import __version__ 2 | 3 | __all__ = ['__version__'] 4 | -------------------------------------------------------------------------------- /xrmocap/client/__init__.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | # client does not require xrprimer.utils.log_utils 4 | # logger's level is set to INFO by default 5 | logging.basicConfig( 6 | level=logging.INFO, 7 | format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') 8 | -------------------------------------------------------------------------------- /xrmocap/core/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/xrmocap/core/__init__.py -------------------------------------------------------------------------------- /xrmocap/core/estimation/__init__.py: -------------------------------------------------------------------------------- 1 | # yapf: disable 2 | from .base_estimator import BaseEstimator 3 | from .mperson_smpl_estimator import MultiPersonSMPLEstimator 4 | from .mview_mperson_end2end_estimator import ( 5 | MultiViewMultiPersonEnd2EndEstimator, 6 | ) 7 | from .mview_mperson_topdown_estimator import ( 8 | MultiViewMultiPersonTopDownEstimator, 9 | ) 10 | from .mview_sperson_smpl_estimator import MultiViewSinglePersonSMPLEstimator 11 | 12 | # yapf: enable 13 | 14 | __all__ = [ 15 | 'BaseEstimator', 'MultiPersonSMPLEstimator', 16 | 'MultiViewMultiPersonEnd2EndEstimator', 17 | 'MultiViewMultiPersonTopDownEstimator', 18 | 'MultiViewSinglePersonSMPLEstimator' 19 | ] 20 | -------------------------------------------------------------------------------- /xrmocap/core/estimation/base_estimator.py: 
-------------------------------------------------------------------------------- 1 | import logging 2 | from typing import Union 3 | from xrprimer.utils.log_utils import get_logger 4 | 5 | 6 | class BaseEstimator: 7 | """Base Estimator.""" 8 | 9 | def __init__(self, 10 | work_dir: str, 11 | verbose: bool = True, 12 | logger: Union[None, str, logging.Logger] = None) -> None: 13 | self.work_dir = work_dir 14 | self.verbose = verbose 15 | self.logger = get_logger(logger) 16 | 17 | def run(self) -> None: 18 | ... 19 | -------------------------------------------------------------------------------- /xrmocap/core/estimation/builder.py: -------------------------------------------------------------------------------- 1 | # yapf: disable 2 | from mmcv.utils import Registry 3 | 4 | from .base_estimator import BaseEstimator 5 | from .mperson_smpl_estimator import MultiPersonSMPLEstimator 6 | from .mview_mperson_end2end_estimator import ( 7 | MultiViewMultiPersonEnd2EndEstimator, 8 | ) 9 | from .mview_mperson_topdown_estimator import ( 10 | MultiViewMultiPersonTopDownEstimator, 11 | ) 12 | from .mview_sperson_smpl_estimator import MultiViewSinglePersonSMPLEstimator 13 | 14 | # yapf: enable 15 | 16 | ESTIMATORS = Registry('estimator') 17 | ESTIMATORS.register_module( 18 | name='MultiViewSinglePersonSMPLEstimator', 19 | module=MultiViewSinglePersonSMPLEstimator) 20 | ESTIMATORS.register_module( 21 | name='MultiPersonSMPLEstimator', module=MultiPersonSMPLEstimator) 22 | ESTIMATORS.register_module( 23 | name='MultiViewMultiPersonTopDownEstimator', 24 | module=MultiViewMultiPersonTopDownEstimator) 25 | ESTIMATORS.register_module( 26 | name='MultiViewMultiPersonEnd2EndEstimator', 27 | module=MultiViewMultiPersonEnd2EndEstimator) 28 | 29 | 30 | def build_estimator(cfg) -> BaseEstimator: 31 | """Build estimator.""" 32 | return ESTIMATORS.build(cfg) 33 | -------------------------------------------------------------------------------- /xrmocap/core/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | # yapf: disable 2 | from xrmocap.core.evaluation.end2end_evaluation import End2EndEvaluation 3 | from xrmocap.core.evaluation.top_down_association_evaluation import ( 4 | TopDownAssociationEvaluation, 5 | ) 6 | 7 | # yapf: enable 8 | 9 | __all__ = ['End2EndEvaluation', 'TopDownAssociationEvaluation'] 10 | -------------------------------------------------------------------------------- /xrmocap/core/evaluation/base_evaluation.py: -------------------------------------------------------------------------------- 1 | # yapf: disable 2 | import logging 3 | import os 4 | from typing import List, Union 5 | from xrprimer.utils.log_utils import get_logger 6 | from xrprimer.utils.path_utils import prepare_output_path 7 | 8 | from xrmocap.data.data_visualization.builder import ( 9 | BaseDataVisualization, build_data_visualization, 10 | ) 11 | from xrmocap.data.dataset.builder import MviewMpersonDataset, build_dataset 12 | from .metric_manager import MetricManager 13 | from .metrics.base_metric import BaseMetric 14 | 15 | # yapf: enable 16 | 17 | 18 | class BaseEvaluation: 19 | 20 | def __init__( 21 | self, 22 | dataset: Union[dict, MviewMpersonDataset], 23 | output_dir: str, 24 | metric_list: List[Union[dict, BaseMetric]], 25 | pick_dict: Union[dict, None] = None, 26 | dataset_visualization: Union[None, dict, BaseDataVisualization] = None, 27 | eval_kps3d_convention: str = 'human_data', 28 | logger: Union[None, str, logging.Logger] = None, 29 | ) -> None: 30 | self.logger 
= get_logger(logger) 31 | self.output_dir = output_dir 32 | self.eval_kps3d_convention = eval_kps3d_convention 33 | self.metric_manager = MetricManager( 34 | metric_list=metric_list, pick_dict=pick_dict, logger=self.logger) 35 | 36 | if isinstance(dataset, dict): 37 | dataset['logger'] = self.logger 38 | self.dataset = build_dataset(dataset) 39 | else: 40 | self.dataset = dataset 41 | 42 | if isinstance(dataset_visualization, dict): 43 | dataset_visualization['logger'] = self.logger 44 | self.dataset_visualization = build_data_visualization( 45 | dataset_visualization) 46 | else: 47 | self.dataset_visualization = dataset_visualization 48 | 49 | def run(self, overwrite: bool = False): 50 | if not os.path.exists(self.output_dir): 51 | prepare_output_path( 52 | output_path=self.output_dir, 53 | allowed_suffix='', 54 | path_type='dir', 55 | overwrite=overwrite, 56 | logger=self.logger) 57 | -------------------------------------------------------------------------------- /xrmocap/core/evaluation/builder.py: -------------------------------------------------------------------------------- 1 | from mmcv.utils import Registry 2 | 3 | from .bottom_up_association_evaluation import BottomUpAssociationEvaluation 4 | from .end2end_evaluation import End2EndEvaluation 5 | from .top_down_association_evaluation import TopDownAssociationEvaluation 6 | 7 | EVALUATION = Registry('evaluation') 8 | 9 | EVALUATION.register_module( 10 | name='TopDownAssociationEvaluation', module=TopDownAssociationEvaluation) 11 | EVALUATION.register_module(name='End2EndEvaluation', module=End2EndEvaluation) 12 | EVALUATION.register_module( 13 | name='BottomUpAssociationEvaluation', module=BottomUpAssociationEvaluation) 14 | 15 | 16 | def build_evaluation(cfg): 17 | """Build an evaluation instance.""" 18 | return EVALUATION.build(cfg) 19 | -------------------------------------------------------------------------------- /xrmocap/core/evaluation/metrics/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/xrmocap/core/evaluation/metrics/__init__.py -------------------------------------------------------------------------------- /xrmocap/core/evaluation/metrics/base_metric.py: -------------------------------------------------------------------------------- 1 | # yapf: disable 2 | import logging 3 | from typing import Union 4 | from xrprimer.utils.log_utils import get_logger 5 | 6 | # yapf: enable 7 | 8 | 9 | class BaseMetric: 10 | RANK = 0 11 | 12 | def __init__( 13 | self, 14 | name: str, 15 | logger: Union[None, str, logging.Logger] = None, 16 | ) -> None: 17 | self.name = name 18 | self.logger = get_logger(logger) 19 | 20 | def __call__(self, *args, **kwargs): 21 | return dict() 22 | -------------------------------------------------------------------------------- /xrmocap/core/evaluation/metrics/builder.py: -------------------------------------------------------------------------------- 1 | from mmcv.utils import Registry 2 | 3 | from .base_metric import BaseMetric 4 | from .mpjpe_metric import MPJPEMetric 5 | from .pa_mpjpe_metric import PAMPJPEMetric 6 | from .pck_metric import PCKMetric 7 | from .pcp_metric import PCPMetric 8 | from .precision_recall_metric import PrecisionRecallMetric 9 | from .prediction_matcher import PredictionMatcher 10 | 11 | METRICS = Registry('metrics') 12 | 13 | METRICS.register_module(name='PredictionMatcher', module=PredictionMatcher) 14 |
METRICS.register_module(name='MPJPEMetric', module=MPJPEMetric) 15 | METRICS.register_module(name='PAMPJPEMetric', module=PAMPJPEMetric) 16 | METRICS.register_module(name='PCKMetric', module=PCKMetric) 17 | METRICS.register_module(name='PCPMetric', module=PCPMetric) 18 | METRICS.register_module( 19 | name='PrecisionRecallMetric', module=PrecisionRecallMetric) 20 | 21 | 22 | def build_metric(cfg) -> BaseMetric: 23 | """Build an evaluation metric.""" 24 | return METRICS.build(cfg) 25 | -------------------------------------------------------------------------------- /xrmocap/core/evaluation/metrics/pck_metric.py: -------------------------------------------------------------------------------- 1 | # yapf: disable 2 | import logging 3 | import numpy as np 4 | from typing import List, Union 5 | from xrprimer.data_structure import Keypoints 6 | 7 | from .base_metric import BaseMetric 8 | 9 | # yapf: enable 10 | 11 | 12 | class PCKMetric(BaseMetric): 13 | """The PCK metric measures the accuracy of body joint localization. 14 | 15 | This is a rank-2 metric; it depends on the rank-1 metric pa_mpjpe or mpjpe. 16 | """ 17 | RANK = 2 18 | 19 | def __init__( 20 | self, 21 | name: str, 22 | threshold: Union[List[int], List[float]] = [50, 100], 23 | use_pa_mpjpe: bool = False, 24 | logger: Union[None, str, logging.Logger] = None, 25 | ) -> None: 26 | """Init PCK metric evaluation. 27 | 28 | Args: 29 | name (str): 30 | Name of the metric. 31 | threshold (Union[List[int],List[float]]): 32 | A list of thresholds for PCK evaluation. 33 | use_pa_mpjpe (bool, optional): 34 | Whether to use PA-MPJPE instead of MPJPE. 35 | Defaults to False. 36 | logger (Union[None, str, logging.Logger], optional): 37 | Logger for logging. If None, root logger will be 38 | selected. Defaults to None. 39 | """ 40 | BaseMetric.__init__(self, name=name, logger=logger) 41 | self.threshold = threshold 42 | self.use_pa_mpjpe = use_pa_mpjpe 43 | 44 | def __call__(self, pred_keypoints3d: Keypoints, gt_keypoints3d: Keypoints, 45 | **kwargs): 46 | if self.use_pa_mpjpe and 'pa_mpjpe_value' in kwargs: 47 | raw_mpjpe_value = kwargs['pa_mpjpe_value'] 48 | elif not self.use_pa_mpjpe and 'mpjpe_value' in kwargs: 49 | raw_mpjpe_value = kwargs['mpjpe_value'] 50 | else: 51 | self.logger.error('No mpjpe metric found.
' 52 | 'Please add MPJPEMetric or PAMPJPEMetric ' 53 | 'in the config.') 54 | raise KeyError 55 | 56 | gt_mask = gt_keypoints3d.get_mask() 57 | masked_mpjpe_value = raw_mpjpe_value[np.where(gt_mask > 0)] 58 | 59 | pck_value = {} 60 | for thr in self.threshold: 61 | pck_thr = np.mean(masked_mpjpe_value <= thr) * 100 62 | pck_value[f'pck@{str(thr)}'] = pck_thr 63 | 64 | return pck_value 65 | -------------------------------------------------------------------------------- /xrmocap/core/hook/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/xrmocap/core/hook/__init__.py -------------------------------------------------------------------------------- /xrmocap/core/hook/smplify_hook/__init__.py: -------------------------------------------------------------------------------- 1 | # yapf: disable 2 | from xrmocap.core.hook.smplify_hook.smplify_base_hook import SMPLifyBaseHook 3 | from xrmocap.core.hook.smplify_hook.smplify_verbose_hook import ( 4 | SMPLifyVerboseHook, 5 | ) 6 | 7 | # yapf: enable 8 | 9 | __all__ = ['SMPLifyBaseHook', 'SMPLifyVerboseHook'] 10 | -------------------------------------------------------------------------------- /xrmocap/core/hook/smplify_hook/builder.py: -------------------------------------------------------------------------------- 1 | from mmcv.utils import Registry 2 | 3 | from .smplify_base_hook import SMPLifyBaseHook 4 | from .smplify_verbose_hook import SMPLifyVerboseHook 5 | 6 | SMPLIFY_HOOKS = Registry('smplify_hook') 7 | SMPLIFY_HOOKS.register_module( 8 | name='SMPLifyVerboseHook', module=SMPLifyVerboseHook) 9 | 10 | 11 | def build_smplify_hook(cfg) -> SMPLifyBaseHook: 12 | """Build a hook for smplify.""" 13 | return SMPLIFY_HOOKS.build(cfg) 14 | -------------------------------------------------------------------------------- /xrmocap/core/train/__init__.py: -------------------------------------------------------------------------------- 1 | from xrmocap.core.train.trainer import MVPTrainer, train_3d 2 | 3 | __all__ = ['MVPTrainer', 'train_3d'] 4 | -------------------------------------------------------------------------------- /xrmocap/core/train/builder.py: -------------------------------------------------------------------------------- 1 | from mmcv.utils import Registry 2 | 3 | from .trainer import MVPTrainer 4 | 5 | TRAINERS = Registry('trainers') 6 | 7 | TRAINERS.register_module(name='MVPTrainer', module=MVPTrainer) 8 | 9 | 10 | def build_trainer(cfg): 11 | """Build a trainer.""" 12 | return TRAINERS.build(cfg) 13 | -------------------------------------------------------------------------------- /xrmocap/data/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/xrmocap/data/__init__.py -------------------------------------------------------------------------------- /xrmocap/data/data_converter/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/xrmocap/data/data_converter/__init__.py -------------------------------------------------------------------------------- /xrmocap/data/data_converter/base_data_converter.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import Union 3 | from
xrprimer.utils.log_utils import get_logger 4 | from xrprimer.utils.path_utils import prepare_output_path 5 | 6 | 7 | class BaseDataCovnerter: 8 | 9 | def __init__(self, 10 | data_root: str, 11 | meta_path: str = 'xrmocap_meta', 12 | dataset_name: str = 'base_dataset', 13 | verbose: bool = True, 14 | logger: Union[None, str, logging.Logger] = None) -> None: 15 | """Base class of all data converters. 16 | 17 | It creates a dir at meta_path and puts dataset_name and meta-data into it. 18 | 19 | Args: 20 | data_root (str): 21 | Root path of the downloaded dataset. 22 | meta_path (str, optional): 23 | Path to the meta-data dir. Defaults to 'xrmocap_meta'. 24 | dataset_name (str, optional): 25 | Name of the dataset. Defaults to 'base_dataset'. 26 | verbose (bool, optional): 27 | Whether to print(logger.info) information. 28 | Defaults to True. 29 | logger (Union[None, str, logging.Logger], optional): 30 | Logger for logging. If None, root logger will be selected. 31 | Defaults to None. 32 | """ 33 | self.data_root = data_root 34 | self.meta_path = meta_path 35 | self.dataset_name = dataset_name 36 | self.verbose = verbose 37 | self.logger = get_logger(logger) 38 | 39 | def run(self, overwrite: bool = False) -> None: 40 | """Convert data from original dataset to meta-data defined by xrmocap. 41 | 42 | Args: 43 | overwrite (bool, optional): 44 | Whether to replace the files at 45 | self.meta_path. 46 | Defaults to False. 47 | """ 48 | prepare_output_path( 49 | output_path=self.meta_path, 50 | allowed_suffix='', 51 | path_type='dir', 52 | overwrite=overwrite, 53 | logger=self.logger) 54 | -------------------------------------------------------------------------------- /xrmocap/data/data_converter/builder.py: -------------------------------------------------------------------------------- 1 | # yapf: disable 2 | from mmcv.utils import Registry 3 | 4 | from .base_data_converter import BaseDataCovnerter 5 | from .campus_data_converter import CampusDataCovnerter 6 | from .humman_smc_data_converter import HummanSMCDataCovnerter 7 | from .panoptic_data_converter import PanopticDataCovnerter 8 | from .shelf_data_converter import ShelfDataCovnerter 9 | 10 | # yapf: enable 11 | 12 | DATA_CONVERTERS = Registry('data_converter') 13 | DATA_CONVERTERS.register_module( 14 | name='CampusDataCovnerter', module=CampusDataCovnerter) 15 | DATA_CONVERTERS.register_module( 16 | name='ShelfDataCovnerter', module=ShelfDataCovnerter) 17 | DATA_CONVERTERS.register_module( 18 | name='PanopticDataCovnerter', module=PanopticDataCovnerter) 19 | DATA_CONVERTERS.register_module( 20 | name='HummanSMCDataCovnerter', module=HummanSMCDataCovnerter) 21 | 22 | 23 | def build_data_converter(cfg) -> BaseDataCovnerter: 24 | """Build data_converter.""" 25 | return DATA_CONVERTERS.build(cfg) 26 | -------------------------------------------------------------------------------- /xrmocap/data/data_visualization/__init__.py: -------------------------------------------------------------------------------- 1 | from .mview_mperson_data_visualization import MviewMpersonDataVisualization 2 | 3 | __all__ = ['MviewMpersonDataVisualization'] 4 | -------------------------------------------------------------------------------- /xrmocap/data/data_visualization/base_data_visualization.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import Union 3 | from xrprimer.utils.log_utils import get_logger 4 | from xrprimer.utils.path_utils import prepare_output_path 5 | 6 | 7 | class
BaseDataVisualization: 8 | 9 | def __init__(self, 10 | data_root: str, 11 | output_dir: str, 12 | meta_path: str = 'xrmocap_meta', 13 | verbose: bool = True, 14 | logger: Union[None, str, logging.Logger] = None) -> None: 15 | """Base class of all data visualizations. 16 | 17 | Args: 18 | data_root (str): 19 | Root path of the downloaded dataset. 20 | output_dir (str): 21 | Path to the output dir. 22 | meta_path (str, optional): 23 | Path to the meta-data dir. Defaults to 'xrmocap_meta'. 24 | verbose (bool, optional): 25 | Whether to print(logger.info) information. 26 | Defaults to True. 27 | logger (Union[None, str, logging.Logger], optional): 28 | Logger for logging. If None, root logger will be selected. 29 | Defaults to None. 30 | """ 31 | self.data_root = data_root 32 | self.meta_path = meta_path 33 | self.output_dir = output_dir 34 | self.verbose = verbose 35 | self.logger = get_logger(logger) 36 | 37 | def run(self, overwrite: bool = False) -> None: 38 | """Visualize the dataset according to its meta-data, saving results below self.output_dir. 39 | 40 | Args: 41 | overwrite (bool, optional): 42 | Whether to replace the files at 43 | self.output_dir. 44 | Defaults to False. 45 | """ 46 | prepare_output_path( 47 | output_path=self.output_dir, 48 | allowed_suffix='', 49 | path_type='dir', 50 | overwrite=overwrite, 51 | logger=self.logger) 52 | -------------------------------------------------------------------------------- /xrmocap/data/data_visualization/builder.py: -------------------------------------------------------------------------------- 1 | # yapf: disable 2 | from mmcv.utils import Registry 3 | 4 | from .base_data_visualization import BaseDataVisualization 5 | from .mview_mperson_data_visualization import MviewMpersonDataVisualization 6 | 7 | # yapf: enable 8 | 9 | DATA_VISUALIZATION = Registry('data_visualization') 10 | DATA_VISUALIZATION.register_module( 11 | name='MviewMpersonDataVisualization', module=MviewMpersonDataVisualization) 12 | 13 | 14 | def build_data_visualization(cfg) -> BaseDataVisualization: 15 | """Build data_visualization.""" 16 | return DATA_VISUALIZATION.build(cfg) 17 | -------------------------------------------------------------------------------- /xrmocap/data/dataloader/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/xrmocap/data/dataloader/__init__.py -------------------------------------------------------------------------------- /xrmocap/data/dataloader/builder.py: -------------------------------------------------------------------------------- 1 | # yapf: disable 2 | from mmcv.utils import Registry 3 | from torch.utils.data.dataloader import DataLoader 4 | 5 | from xrmocap.data.dataset.builder import build_dataset 6 | 7 | # yapf: enable 8 | 9 | DATALOADERS = Registry('dataloader') 10 | DATALOADERS.register_module(name='DataLoader', module=DataLoader) 11 | 12 | 13 | def build_dataloader(cfg) -> DataLoader: 14 | """Build dataloader.""" 15 | dataset = cfg.get('dataset', None) 16 | if isinstance(dataset, dict): 17 | dataset = build_dataset(dataset) 18 | cfg['dataset'] = dataset 19 | return DATALOADERS.build(cfg) 20 | -------------------------------------------------------------------------------- /xrmocap/data/dataset/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/xrmocap/data/dataset/__init__.py
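A usage sketch for build_dataloader above: cfg['dataset'] is passed through build_dataset only when it is a config dict, so a ready-made torch Dataset instance is forwarded unchanged. The toy TensorDataset below is hypothetical and only demonstrates that pass-through path; a real config would instead nest a dataset dict such as dict(type='MviewMpersonDataset', ...).

import torch
from torch.utils.data import TensorDataset

from xrmocap.data.dataloader.builder import build_dataloader

# hypothetical in-memory dataset standing in for a real xrmocap dataset
toy_dataset = TensorDataset(torch.zeros(4, 3))
dataloader = build_dataloader(
    dict(type='DataLoader', dataset=toy_dataset, batch_size=2))
for (batch, ) in dataloader:
    # two batches, each collated to shape (2, 3)
    assert batch.shape == (2, 3)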
-------------------------------------------------------------------------------- /xrmocap/data/dataset/base_dataset.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from torch.utils.data import Dataset 3 | from torchvision.transforms import Compose 4 | from typing import Union 5 | from xrprimer.utils.log_utils import get_logger 6 | 7 | from xrmocap.transform.image.builder import build_image_transform 8 | 9 | 10 | class BaseDataset(Dataset): 11 | 12 | def __init__(self, 13 | data_root: str, 14 | img_pipeline: list, 15 | meta_path: str = 'xrmocap_meta', 16 | dataset_name: str = 'base_dataset', 17 | test_mode: bool = True, 18 | logger: Union[None, str, logging.Logger] = None) -> None: 19 | """Base class of all Dataset in XRMocap. It loads data from source 20 | dataset and meta-data from data converter. 21 | 22 | Args: 23 | data_root (str): 24 | Root path of the downloaded dataset. 25 | img_pipeline (list): 26 | A list of image transform instances. 27 | meta_path (str, optional): 28 | Path to the meta-data dir. Defaults to 'xrmocap_meta'. 29 | dataset_name (str, optional): 30 | Name of the dataset. Defaults to 'base_dataset'. 31 | test_mode (bool, optional): 32 | Whether this dataset is used to load testset. 33 | Defaults to True. 34 | logger (Union[None, str, logging.Logger], optional): 35 | Logger for logging. If None, root logger will be selected. 36 | Defaults to None. 37 | """ 38 | super().__init__() 39 | self.data_root = data_root 40 | self.meta_path = meta_path 41 | self.dataset_name = dataset_name 42 | self.test_mode = test_mode 43 | self.logger = get_logger(logger) 44 | self.img_pipeline = [] 45 | for transform in img_pipeline: 46 | if isinstance(transform, dict): 47 | transform = build_image_transform(transform) 48 | self.img_pipeline.append(transform) 49 | self.img_pipeline = Compose(self.img_pipeline) 50 | -------------------------------------------------------------------------------- /xrmocap/data/dataset/builder.py: -------------------------------------------------------------------------------- 1 | # yapf: disable 2 | from mmcv.utils import Registry 3 | 4 | from .base_dataset import BaseDataset 5 | from .bottom_up_mview_mperson_dataset import BottomUpMviewMpersonDataset 6 | from .mview_mperson_dataset import MviewMpersonDataset 7 | from .mvp_dataset import MVPDataset 8 | 9 | # yapf: enable 10 | 11 | DATASETS = Registry('dataset') 12 | DATASETS.register_module( 13 | name='MviewMpersonDataset', module=MviewMpersonDataset) 14 | DATASETS.register_module(name='MVPDataset', module=MVPDataset) 15 | DATASETS.register_module( 16 | name='BottomUpMviewMpersonDataset', module=BottomUpMviewMpersonDataset) 17 | 18 | 19 | def build_dataset(cfg) -> BaseDataset: 20 | """Build dataset.""" 21 | return DATASETS.build(cfg) 22 | -------------------------------------------------------------------------------- /xrmocap/data_structure/__init__.py: -------------------------------------------------------------------------------- 1 | from .body_model import SMPLData, SMPLXData, SMPLXDData 2 | from .keypoints import Keypoints 3 | from .limbs import Limbs 4 | from .smc_reader import SMCReader 5 | 6 | __all__ = [ 7 | 'Keypoints', 'Limbs', 'SMCReader', 'SMPLData', 'SMPLXDData', 'SMPLXData' 8 | ] 9 | -------------------------------------------------------------------------------- /xrmocap/data_structure/body_model/__init__.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from typing import 
Tuple, Union 3 | from xrprimer.utils.log_utils import get_logger, logging 4 | 5 | from .smpl_data import SMPLData 6 | from .smplx_data import SMPLXData 7 | from .smplxd_data import SMPLXDData 8 | 9 | __all__ = ['SMPLData', 'SMPLXData', 'SMPLXDData'] 10 | 11 | _SMPL_DATA_CLASS_DICT = dict( 12 | SMPLData=SMPLData, SMPLXData=SMPLXData, SMPLXDData=SMPLXDData) 13 | 14 | 15 | def auto_load_smpl_data( 16 | npz_path: str, 17 | logger: Union[None, str, logging.Logger] = None 18 | ) -> Tuple[Union[SMPLData, SMPLXData, SMPLXDData], str]: 19 | """Check which smpl data type the npz file contains, and use the correct 20 | class to load it. Useful when you forget the file type. 21 | 22 | Args: 23 | npz_path (str): 24 | Path to a dumped npz file. 25 | logger (Union[None, str, logging.Logger], optional): 26 | Logger for logging. If None, root logger will be selected. 27 | Defaults to None. 28 | 29 | Returns: 30 | Union[SMPLData, SMPLXData, SMPLXDData]: 31 | Loaded SMPL/SMPLX/SMPLXD Data instance. 32 | str: 33 | Type (class name) of this npz file. 34 | """ 35 | logger = get_logger(logger) 36 | unpacked_dict = dict() 37 | with np.load(npz_path, allow_pickle=True) as npz_file: 38 | tmp_data_dict = dict(npz_file) 39 | for key, value in tmp_data_dict.items(): 40 | if isinstance(value, np.ndarray) and\ 41 | len(value.shape) == 0: 42 | # value is not an ndarray before dump 43 | value = value.item() 44 | unpacked_dict[key] = value 45 | if 'fullpose' in unpacked_dict: 46 | fullpose_dim = unpacked_dict['fullpose'].shape[1] 47 | else: 48 | fullpose_dim = 0 49 | if 'displacement' in unpacked_dict and \ 50 | fullpose_dim == SMPLXDData.get_fullpose_dim(): 51 | type_str = 'SMPLXDData' 52 | elif 'expression' in unpacked_dict and \ 53 | fullpose_dim == SMPLXData.get_fullpose_dim(): 54 | type_str = 'SMPLXData' 55 | elif fullpose_dim == SMPLData.get_fullpose_dim(): 56 | type_str = 'SMPLData' 57 | else: 58 | logger.error(f'File at {npz_path} is not dumped' + 59 | f' by any of {list(_SMPL_DATA_CLASS_DICT.keys())}.') 60 | raise TypeError 61 | smpl_data_class = _SMPL_DATA_CLASS_DICT[type_str] 62 | smpl_data_instance = smpl_data_class.from_dict(unpacked_dict) 63 | return smpl_data_instance, type_str 64 | -------------------------------------------------------------------------------- /xrmocap/human_perception/__init__.py: -------------------------------------------------------------------------------- 1 | # yapf: disable 2 | from xrmocap.human_perception.bbox_detection.mmdet_detector import ( 3 | MMdetDetector, 4 | ) 5 | from xrmocap.human_perception.builder import DETECTORS 6 | from xrmocap.human_perception.keypoints_estimation.mmpose_top_down_estimator import \ 7 | MMposeTopDownEstimator # noqa:E501 8 | 9 | # yapf: enable 10 | 11 | __all__ = ['DETECTORS', 'MMdetDetector', 'MMposeTopDownEstimator'] 12 | -------------------------------------------------------------------------------- /xrmocap/human_perception/bbox_detection/__init__.py: -------------------------------------------------------------------------------- 1 | from .mmdet_detector import MMdetDetector, process_mmdet_results 2 | from .mmdet_trt_detector import MMdetTrtDetector 3 | from .mmtrack_detector import MMtrackDetector, process_mmtrack_results 4 | 5 | __all__ = [ 6 | 'MMdetDetector', 'MMtrackDetector', 'process_mmdet_results', 7 | 'process_mmtrack_results', 'MMdetTrtDetector' 8 | ] 9 | -------------------------------------------------------------------------------- /xrmocap/human_perception/builder.py: 
-------------------------------------------------------------------------------- 1 | # yapf: disable 2 | from mmcv.utils import Registry 3 | 4 | from .bbox_detection.mmdet_detector import MMdetDetector 5 | from .bbox_detection.mmdet_trt_detector import MMdetTrtDetector 6 | from .bbox_detection.mmtrack_detector import MMtrackDetector 7 | from .keypoints_estimation.mediapipe_estimator import MediapipeEstimator 8 | from .keypoints_estimation.mmpose_top_down_estimator import ( 9 | MMposeTopDownEstimator, 10 | ) 11 | from .keypoints_estimation.mmpose_trt_top_down_estimator import ( 12 | MMposeTrtTopDownEstimator, 13 | ) 14 | 15 | # yapf: enable 16 | 17 | DETECTORS = Registry('detector') 18 | DETECTORS.register_module( 19 | name=('MMposeTopDownEstimator'), module=MMposeTopDownEstimator) 20 | DETECTORS.register_module( 21 | name=('MMposeTrtTopDownEstimator'), module=MMposeTrtTopDownEstimator) 22 | DETECTORS.register_module( 23 | name=('MediapipeEstimator'), module=MediapipeEstimator) 24 | DETECTORS.register_module(name=('MMdetDetector'), module=MMdetDetector) 25 | DETECTORS.register_module(name=('MMdetTrtDetector'), module=MMdetTrtDetector) 26 | 27 | DETECTORS.register_module(name=('MMtrackDetector'), module=MMtrackDetector) 28 | 29 | 30 | def build_detector(cfg): 31 | """Build detector.""" 32 | return DETECTORS.build(cfg) 33 | -------------------------------------------------------------------------------- /xrmocap/human_perception/keypoints_estimation/__init__.py: -------------------------------------------------------------------------------- 1 | from .mmpose_top_down_estimator import MMposeTopDownEstimator 2 | from .mmpose_trt_top_down_estimator import MMposeTrtTopDownEstimator 3 | 4 | __all__ = ['MMposeTopDownEstimator', 'MMposeTrtTopDownEstimator'] 5 | -------------------------------------------------------------------------------- /xrmocap/io/__init__.py: -------------------------------------------------------------------------------- 1 | # yapf: disable 2 | from .camera import ( 3 | get_all_color_kinect_parameter_from_smc, 4 | get_color_camera_parameter_from_smc, 5 | load_camera_parameters_from_zoemotion_dir, 6 | ) 7 | from .image import ( 8 | get_n_frame_from_mview_src, load_clip_from_mview_src, 9 | load_multiview_images, 10 | ) 11 | 12 | # yapf: enable 13 | 14 | __all__ = [ 15 | 'get_all_color_kinect_parameter_from_smc', 16 | 'get_color_camera_parameter_from_smc', 'get_n_frame_from_mview_src', 17 | 'load_camera_parameters_from_zoemotion_dir', 'load_clip_from_mview_src', 18 | 'load_multiview_images' 19 | ] 20 | -------------------------------------------------------------------------------- /xrmocap/model/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/xrmocap/model/__init__.py -------------------------------------------------------------------------------- /xrmocap/model/architecture/__init__.py: -------------------------------------------------------------------------------- 1 | from .affinity_estimator import AppearanceAffinityEstimator 2 | from .base_architecture import BaseArchitecture 3 | from .multi_view_pose_transformer import MviewPoseTransformer 4 | 5 | __all__ = [ 6 | 'AppearanceAffinityEstimator', 'BaseArchitecture', 'MviewPoseTransformer' 7 | ] 8 | -------------------------------------------------------------------------------- /xrmocap/model/architecture/builder.py: 
-------------------------------------------------------------------------------- 1 | from mmcv.utils import Registry 2 | 3 | from .affinity_estimator import AppearanceAffinityEstimator 4 | from .multi_view_pose_transformer import MviewPoseTransformer 5 | 6 | ARCHITECTURES = Registry('architectures') 7 | 8 | ARCHITECTURES.register_module( 9 | name='AppearanceAffinityEstimator', module=AppearanceAffinityEstimator) 10 | ARCHITECTURES.register_module( 11 | name='MviewPoseTransformer', module=MviewPoseTransformer) 12 | 13 | 14 | def build_architecture(cfg): 15 | """Build framework.""" 16 | return ARCHITECTURES.build(cfg) 17 | -------------------------------------------------------------------------------- /xrmocap/model/body_model/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/xrmocap/model/body_model/__init__.py -------------------------------------------------------------------------------- /xrmocap/model/body_model/builder.py: -------------------------------------------------------------------------------- 1 | from mmcv.utils import Registry 2 | 3 | from .smpl import SMPL 4 | from .smplx import SMPLX 5 | 6 | BODYMODELS = Registry('body_model') 7 | 8 | BODYMODELS.register_module(name='SMPL', module=SMPL) 9 | BODYMODELS.register_module(name='SMPLX', module=SMPLX) 10 | 11 | 12 | def build_body_model(cfg): 13 | """Build body model.""" 14 | return BODYMODELS.build(cfg) 15 | -------------------------------------------------------------------------------- /xrmocap/model/loss/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/xrmocap/model/loss/__init__.py -------------------------------------------------------------------------------- /xrmocap/model/loss/builder.py: -------------------------------------------------------------------------------- 1 | # yapf: disable 2 | from mmcv.utils import Registry 3 | 4 | from .kp_loss import SetCriterion 5 | from .mse_loss import KeypointMSELoss 6 | from .prior_loss import ( 7 | JointPriorLoss, LimbLengthLoss, MaxMixturePriorLoss, PoseRegLoss, 8 | ShapePriorLoss, SmoothJointLoss, 9 | ) 10 | 11 | # yapf: enable 12 | 13 | LOSSES = Registry('loss') 14 | 15 | LOSSES.register_module(name='KeypointMSELoss', module=KeypointMSELoss) 16 | LOSSES.register_module(name='ShapePriorLoss', module=ShapePriorLoss) 17 | LOSSES.register_module(name='JointPriorLoss', module=JointPriorLoss) 18 | LOSSES.register_module(name='SmoothJointLoss', module=SmoothJointLoss) 19 | LOSSES.register_module(name='MaxMixturePriorLoss', module=MaxMixturePriorLoss) 20 | LOSSES.register_module(name='LimbLengthLoss', module=LimbLengthLoss) 21 | LOSSES.register_module(name='PoseRegLoss', module=PoseRegLoss) 22 | LOSSES.register_module(name='SetCriterion', module=SetCriterion) 23 | 24 | 25 | def build_loss(cfg): 26 | """Build loss.""" 27 | return LOSSES.build(cfg) 28 | -------------------------------------------------------------------------------- /xrmocap/model/loss/mapping.py: -------------------------------------------------------------------------------- 1 | LOSS_MAPPING = { 2 | 'keypoints3d_limb_len': ['betas'], 3 | 'keypoints3d_mse': ['body_pose'], 4 | 'keypoints2d_mse': ['body_pose'], 5 | 'shape_prior': ['betas'], 6 | 'joint_prior': ['body_pose'], 7 | 'smooth_joint': ['body_pose'], 8 | 'pose_reg': ['body_pose'], 9 | } 10 | 
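[Editor's note] LOSS_MAPPING above ties each handler key to the body-model parameters its loss term constrains: the shape terms act on betas, the pose-related terms on body_pose. Below is a hypothetical helper, not part of the repo, sketching how a fitting stage could consume the mapping to decide which parameters should stay optimizable.

from xrmocap.model.loss.mapping import LOSS_MAPPING


def params_for_losses(active_losses: list) -> set:
    """Collect the parameter names touched by the active loss terms."""
    param_names = set()
    for loss_name in active_losses:
        param_names.update(LOSS_MAPPING.get(loss_name, []))
    return param_names


# A shape prior plus a 3D MSE term touches {'betas', 'body_pose'}.
print(params_for_losses(['shape_prior', 'keypoints3d_mse']))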
-------------------------------------------------------------------------------- /xrmocap/model/mvp/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/xrmocap/model/mvp/__init__.py -------------------------------------------------------------------------------- /xrmocap/model/mvp/builder.py: -------------------------------------------------------------------------------- 1 | from mmcv.utils import Registry 2 | 3 | from .matcher import HungarianMatcher 4 | from .mvp_decoder import MLP, MvPDecoder, MvPDecoderLayer 5 | from .pose_resnet import PoseResNet 6 | from .position_encoding import PositionEmbeddingSine 7 | from .projattn import ProjAttn 8 | 9 | MODELS = Registry('models') 10 | 11 | MODELS.register_module(name='HungarianMatcher', module=HungarianMatcher) 12 | MODELS.register_module(name='MvPDecoderLayer', module=MvPDecoderLayer) 13 | MODELS.register_module(name='MvPDecoder', module=MvPDecoder) 14 | MODELS.register_module(name='MLP', module=MLP) 15 | MODELS.register_module(name='PoseResNet', module=PoseResNet) 16 | MODELS.register_module( 17 | name='PositionEmbeddingSine', module=PositionEmbeddingSine) 18 | MODELS.register_module(name='ProjAttn', module=ProjAttn) 19 | 20 | 21 | def build_model(cfg): 22 | """Build model.""" 23 | return MODELS.build(cfg) 24 | -------------------------------------------------------------------------------- /xrmocap/model/registrant/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/xrmocap/model/registrant/__init__.py -------------------------------------------------------------------------------- /xrmocap/model/registrant/builder.py: -------------------------------------------------------------------------------- 1 | from mmcv.utils import Registry 2 | 3 | from .smplify import SMPLify 4 | from .smplifyx import SMPLifyX 5 | from .smplifyxd import SMPLifyXD 6 | 7 | REGISTRANTS = Registry('registrant') 8 | REGISTRANTS.register_module(name='SMPLify', module=SMPLify) 9 | REGISTRANTS.register_module(name='SMPLifyX', module=SMPLifyX) 10 | REGISTRANTS.register_module(name='SMPLifyXD', module=SMPLifyXD) 11 | 12 | 13 | def build_registrant(cfg) -> SMPLify: 14 | """Build registrant.""" 15 | return REGISTRANTS.build(cfg) 16 | -------------------------------------------------------------------------------- /xrmocap/model/registrant/handler/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/xrmocap/model/registrant/handler/__init__.py -------------------------------------------------------------------------------- /xrmocap/model/registrant/handler/builder.py: -------------------------------------------------------------------------------- 1 | from mmcv.utils import Registry 2 | 3 | from .betas_prior_handler import BetasPriorHandler 4 | from .body_pose_prior_handler import BodyPosePriorHandler 5 | from .keypoint3d_limb_length_handler import ( # noqa:E501 6 | Keypoint3dLimbLenHandler, Keypoint3dLimbLenInput, 7 | ) 8 | from .keypoint3d_mse_handler import Keypoint3dMSEHandler, Keypoint3dMSEInput 9 | from .multiview_keypoint2d_mse_handler import ( # noqa:E501 10 | MultiviewKeypoint2dMSEHandler, MultiviewKeypoint2dMSEInput, 11 | ) 12 | 13 | REGISTRANT_HANDLERS = 
Registry('registrant_handler') 14 | REGISTRANT_HANDLERS.register_module( 15 | name='BetasPriorHandler', module=BetasPriorHandler) 16 | REGISTRANT_HANDLERS.register_module( 17 | name='BodyPosePriorHandler', module=BodyPosePriorHandler) 18 | REGISTRANT_HANDLERS.register_module( 19 | name='Keypoint3dMSEInput', module=Keypoint3dMSEInput) 20 | REGISTRANT_HANDLERS.register_module( 21 | name='Keypoint3dMSEHandler', module=Keypoint3dMSEHandler) 22 | REGISTRANT_HANDLERS.register_module( 23 | name='Keypoint3dLimbLenInput', module=Keypoint3dLimbLenInput) 24 | REGISTRANT_HANDLERS.register_module( 25 | name='Keypoint3dLimbLenHandler', module=Keypoint3dLimbLenHandler) 26 | REGISTRANT_HANDLERS.register_module( 27 | name='MultiviewKeypoint2dMSEInput', module=MultiviewKeypoint2dMSEInput) 28 | REGISTRANT_HANDLERS.register_module( 29 | name='MultiviewKeypoint2dMSEHandler', module=MultiviewKeypoint2dMSEHandler) 30 | 31 | 32 | def build_handler(cfg): 33 | """Build a handler for registrant.""" 34 | return REGISTRANT_HANDLERS.build(cfg) 35 | -------------------------------------------------------------------------------- /xrmocap/model/registrant/optimizable_parameters.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from typing import List 3 | 4 | 5 | class OptimizableParameters(): 6 | 7 | def __init__(self): 8 | """Collects parameters for optimization.""" 9 | self.opt_params = {} 10 | 11 | def add_param(self, 12 | key: str, 13 | param: torch.Tensor, 14 | fit_param: bool = True) -> None: 15 | """Set requires_grad, collect the parameter for optimization. 16 | 17 | Args: 18 | key (str): 19 | Key of the param. 20 | param (torch.Tensor): 21 | Model parameter. 22 | fit_param (bool): 23 | Whether to optimize this body model parameter. 24 | Defaults to True. 25 | """ 26 | if fit_param: 27 | param.requires_grad = True 28 | else: 29 | param.requires_grad = False 30 | self.opt_params[key] = param 31 | 32 | def set_param(self, key: str, fit_param: bool = True) -> None: 33 | """Set requires_grad of a param in self.opt_params. 34 | 35 | Args: 36 | key (str): 37 | Key of the param. 38 | fit_param (bool): 39 | Whether to optimize this body model parameter. 40 | Defaults to True. 41 | """ 42 | if fit_param: 43 | self.opt_params[key].requires_grad = True 44 | else: 45 | self.opt_params[key].requires_grad = False 46 | 47 | def parameters(self) -> List[torch.Tensor]: 48 | """Returns all parameters recorded by self. Compatible with mmcv's 49 | build_parameters(). 50 | 51 | Returns: 52 | List[torch.Tensor]: 53 | A list of body model parameters for optimization. 54 | """ 55 | ret_list = [] 56 | for _, value in self.opt_params.items(): 57 | ret_list.append(value) 58 | return ret_list 59 | -------------------------------------------------------------------------------- /xrmocap/model/registrant/smplifyxd.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from .smplifyx import SMPLifyX 4 | 5 | 6 | class SMPLifyXD(SMPLifyX): 7 | """Re-implementation of SMPLify-X with displacement.""" 8 | OPTIM_PARAM = SMPLifyX.OPTIM_PARAM + [ 9 | 'displacement', 10 | ] 11 | 12 | def __prepare_optimizable_parameters__(self, init_dict: dict, 13 | batch_size: int) -> dict: 14 | """Prepare optimizable parameters in batch for registrant. If some of 15 | the parameters can be found in init_dict, use them for initialization. 16 | 17 | Args: 18 | init_dict (dict): 19 | A dict of init parameters. 
init_dict.keys() is a 20 | sub-set of self.__class__.OPTIM_PARAM. 21 | batch_size (int) 22 | 23 | Returns: 24 | dict: 25 | A dict of optimizable parameters, whose keys are 26 | self.__class__.OPTIM_PARAM and values are 27 | Tensors in batch. 28 | """ 29 | smplx_init_dict = init_dict.copy() 30 | init_displacement = smplx_init_dict.pop('displacement', None) 31 | OPTIM_PARAM_backup = self.__class__.OPTIM_PARAM 32 | self.__class__.OPTIM_PARAM = SMPLifyX.OPTIM_PARAM 33 | ret_dict = SMPLifyX.__prepare_optimizable_parameters__( 34 | self, init_dict=smplx_init_dict, batch_size=batch_size) 35 | self.__class__.OPTIM_PARAM = OPTIM_PARAM_backup 36 | default_displacement = torch.zeros( 37 | size=(1, self.body_model.NUM_VERTS, 3), 38 | dtype=self.body_model.betas.dtype, 39 | device=self.device, 40 | requires_grad=True) 41 | displacement = self.__match_init_batch_size__( 42 | init_param=init_displacement, 43 | default_param=default_displacement, 44 | batch_size=batch_size) 45 | ret_dict['displacement'] = displacement 46 | return ret_dict 47 | -------------------------------------------------------------------------------- /xrmocap/ops/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/xrmocap/ops/__init__.py -------------------------------------------------------------------------------- /xrmocap/ops/bottom_up_association/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/xrmocap/ops/bottom_up_association/__init__.py -------------------------------------------------------------------------------- /xrmocap/ops/bottom_up_association/builder.py: -------------------------------------------------------------------------------- 1 | # yapf: disable 2 | from mmcv.utils import Registry 3 | 4 | from .fourdag_associator import FourDAGAssociator 5 | 6 | # yapf: enable 7 | 8 | BOTTOM_UP_ASSOCIATORS = Registry('bottom_up_associator') 9 | 10 | BOTTOM_UP_ASSOCIATORS.register_module( 11 | name='FourDAGAssociator', module=FourDAGAssociator) 12 | 13 | 14 | def build_bottom_up_associator(cfg) -> FourDAGAssociator: 15 | """Build bottom_up_associator.""" 16 | return BOTTOM_UP_ASSOCIATORS.build(cfg) 17 | -------------------------------------------------------------------------------- /xrmocap/ops/bottom_up_association/graph_solver/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/xrmocap/ops/bottom_up_association/graph_solver/__init__.py -------------------------------------------------------------------------------- /xrmocap/ops/bottom_up_association/graph_solver/builder.py: -------------------------------------------------------------------------------- 1 | from mmcv.utils import Registry 2 | 3 | from .graph_associate import GraphAssociate 4 | from .graph_construct import GraphConstruct 5 | 6 | GRAPHSOLVER = Registry('graph_solver') 7 | 8 | GRAPHSOLVER.register_module(name='GraphAssociate', module=GraphAssociate) 9 | GRAPHSOLVER.register_module(name='GraphConstruct', module=GraphConstruct) 10 | 11 | 12 | def build_graph_solver(cfg): 13 | """Build a graph solver instance.""" 14 | return GRAPHSOLVER.build(cfg) 15 | -------------------------------------------------------------------------------- 
/xrmocap/ops/projection/__init__.py: -------------------------------------------------------------------------------- 1 | from .aniposelib_projector import AniposelibProjector 2 | from .pytorch_projector import PytorchProjector 3 | 4 | __all__ = ['AniposelibProjector', 'PytorchProjector'] 5 | -------------------------------------------------------------------------------- /xrmocap/ops/projection/builder.py: -------------------------------------------------------------------------------- 1 | from xrprimer.ops.projection.builder import ( # noqa: F401 2 | PROJECTORS, BaseProjector, build_projector, 3 | ) 4 | 5 | from .aniposelib_projector import AniposelibProjector 6 | from .pytorch_projector import PytorchProjector 7 | 8 | PROJECTORS.register_module( 9 | name='AniposelibProjector', module=AniposelibProjector) 10 | PROJECTORS.register_module(name='PytorchProjector', module=PytorchProjector) 11 | -------------------------------------------------------------------------------- /xrmocap/ops/top_down_association/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/xrmocap/ops/top_down_association/__init__.py -------------------------------------------------------------------------------- /xrmocap/ops/top_down_association/body_tracking/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/xrmocap/ops/top_down_association/body_tracking/__init__.py -------------------------------------------------------------------------------- /xrmocap/ops/top_down_association/body_tracking/builder.py: -------------------------------------------------------------------------------- 1 | # yapf: disable 2 | from mmcv.utils import Registry 3 | 4 | from .kalman_tracking import KalmanTracking 5 | 6 | # yapf: enable 7 | 8 | KALMAN_TRACKING = Registry('kalman_tracking') 9 | KALMAN_TRACKING.register_module(name='KalmanTracking', module=KalmanTracking) 10 | 11 | 12 | def build_kalman_tracking(cfg) -> KalmanTracking: 13 | """Build kalman_tracking.""" 14 | return KALMAN_TRACKING.build(cfg) 15 | -------------------------------------------------------------------------------- /xrmocap/ops/top_down_association/body_tracking/kalman_tracker.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from filterpy.kalman import KalmanFilter 3 | 4 | 5 | class KalmanJointTracker(object): 6 | """This class represents the internal state of individual tracked objects 7 | observed as a joint set. 8 | 9 | State model: (x, y, z, dx, dy, dz); observation model: (x, y, z). 10 | As implemented in https://github.com/abewley/sort, but with some modifications. 11 | """ 12 | 13 | def __init__(self, kps3d: np.ndarray): 14 | """Initialises a tracker using initial body keypoints3d. 15 | 16 | Args: 17 | kps3d (np.ndarray): initial body keypoints3d, in shape 18 | (n_kps3d, 3). 19 | """ 20 | # define constant velocity model 21 | self.n_kps3d = kps3d.shape[0] 22 | self.kf = [] 23 | state_model = np.eye(6) 24 | state_model[:3, 3:] = np.eye(3) 25 | observation_model = np.eye(6)[:3] 26 | for i in range(self.n_kps3d): 27 | kf = KalmanFilter(dim_x=6, dim_z=3) 28 | kf.F = state_model 29 | kf.H = observation_model 30 | kf.P[3:, 3:] *= 100. 31 | kf.P *= 10. 
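            # [Editor's note, added comments] F above is a constant-velocity
            # transition (position integrates velocity each step) and H
            # observes position only. Scaling P gives the unobserved velocity
            # entries a high initial uncertainty; the small Q set below keeps
            # process noise low on the velocity block, assuming near-constant
            # joint velocity between frames.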
32 | kf.Q[3:, 3:] *= 0.01 33 | kf.x[:3] = np.expand_dims(kps3d[i], -1) 34 | self.kf.append(kf) 35 | 36 | def predict(self): 37 | """Advances the state vector of each joint filter to the predicted 38 | body keypoints3d estimate.""" 39 | for i in range(self.n_kps3d): 40 | self.kf[i].predict() 41 | 42 | def update(self, kps3d: np.ndarray): 43 | """Updates the state vector with observed body keypoints3d. 44 | 45 | Args: 46 | kps3d (np.ndarray): The measurement 3d keypoints. 47 | """ 48 | for i in range(self.n_kps3d): 49 | self.kf[i].update(kps3d[i].reshape(-1, 1)) 50 | 51 | def get_update(self) -> np.ndarray: 52 | """Returns the new estimate based on the last measurement. 53 | 54 | Returns: 55 | np.ndarray: The updated keypoints3d estimate per joint. 56 | """ 57 | estimate_kps3d_list = [] 58 | for i in range(self.n_kps3d): 59 | estimate_kps3d, _ = self.kf[i].get_update() 60 | estimate_kps3d_list.append(estimate_kps3d[:3]) 61 | return np.array(estimate_kps3d_list) 62 | 63 | def get_state(self) -> np.ndarray: 64 | """Returns the current keypoints3d estimate. 65 | 66 | Returns: 67 | np.ndarray: The current keypoints3d. 68 | """ 69 | keypoints3d = [] 70 | for i in range(self.n_kps3d): 71 | keypoints3d.append(self.kf[i].x[:3]) 72 | 73 | return np.array(keypoints3d).squeeze() 74 | -------------------------------------------------------------------------------- /xrmocap/ops/top_down_association/builder.py: -------------------------------------------------------------------------------- 1 | # yapf: disable 2 | from mmcv.utils import Registry 3 | 4 | from .mvpose_associator import MvposeAssociator 5 | 6 | # yapf: enable 7 | 8 | TOP_DOWN_ASSOCIATORS = Registry('top_down_associator') 9 | TOP_DOWN_ASSOCIATORS.register_module( 10 | name='MvposeAssociator', module=MvposeAssociator) 11 | 12 | 13 | def build_top_down_associator(cfg) -> MvposeAssociator: 14 | """Build top_down_associator.""" 15 | return TOP_DOWN_ASSOCIATORS.build(cfg) 16 | -------------------------------------------------------------------------------- /xrmocap/ops/top_down_association/identity_tracking/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/xrmocap/ops/top_down_association/identity_tracking/__init__.py -------------------------------------------------------------------------------- /xrmocap/ops/top_down_association/identity_tracking/base_tracking.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import List, Union 3 | from xrprimer.data_structure import Keypoints 4 | from xrprimer.utils.log_utils import get_logger 5 | 6 | 7 | class BaseTracking: 8 | 9 | def __init__(self, 10 | verbose: bool = False, 11 | logger: Union[None, str, logging.Logger] = None) -> None: 12 | """Base class for 3D identity tracking. 13 | 14 | Args: 15 | verbose (bool, optional): 16 | Whether to print(logger.info) information. 17 | Defaults to False. 18 | logger (Union[None, str, logging.Logger], optional): 19 | Logger for logging. If None, root logger will be selected. 20 | Defaults to None. 21 | """ 22 | self.verbose = verbose 23 | self.logger = get_logger(logger) 24 | 25 | def query(self, association_list: List[List[int]], keypoints3d: Keypoints, 26 | **kwargs: dict) -> List[int]: 27 | """Query identities: pass information about multi-person multi-view 28 | association as input, and get a list of identities. 
29 | 30 | Args: 31 | association_list (List[List[int]]): 32 | A nested list of association result, 33 | in shape [n_person, n_view], and 34 | association_list[i][j] = k means 35 | the k-th 2D perception in view j 36 | is a 2D observation of person i. 37 | keypoints3d (Keypoints): 38 | An instance of class Keypoints, 39 | whose n_person == len(association_list) 40 | and n_frame == 1. 41 | kwargs: 42 | Keyword args to be ignored. 43 | 44 | Returns: 45 | List[int]: 46 | A list of identities, whose length 47 | is equal to len(association_list). 48 | """ 49 | raise NotImplementedError 50 | -------------------------------------------------------------------------------- /xrmocap/ops/top_down_association/identity_tracking/builder.py: -------------------------------------------------------------------------------- 1 | from mmcv.utils import Registry 2 | 3 | from .base_tracking import BaseTracking 4 | from .keypoints_distance_tracking import KeypointsDistanceTracking 5 | from .perception2d_tracking import Perception2dTracking 6 | 7 | IDENTITY_TRACKINGS = Registry('identity_tracking') 8 | 9 | IDENTITY_TRACKINGS.register_module( 10 | name='Perception2dTracking', module=Perception2dTracking) 11 | IDENTITY_TRACKINGS.register_module( 12 | name='KeypointsDistanceTracking', module=KeypointsDistanceTracking) 13 | 14 | 15 | def build_identity_tracking(cfg) -> BaseTracking: 16 | """Build an identity tracking class.""" 17 | return IDENTITY_TRACKINGS.build(cfg) 18 | -------------------------------------------------------------------------------- /xrmocap/ops/top_down_association/matching/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/xrmocap/ops/top_down_association/matching/__init__.py -------------------------------------------------------------------------------- /xrmocap/ops/top_down_association/matching/base_matching.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import numpy as np 3 | from typing import Tuple, Union 4 | from xrprimer.utils.log_utils import get_logger 5 | 6 | 7 | class BaseMatching: 8 | 9 | def __init__(self, 10 | logger: Union[None, str, logging.Logger] = None) -> None: 11 | """Base class for association matching. 12 | 13 | Args: 14 | logger (Union[None, str, logging.Logger], optional): 15 | Logger for logging. If None, root logger will be selected. 16 | Defaults to None. 17 | """ 18 | self.logger = get_logger(logger) 19 | 20 | def __call__(self, mview_kps2d: np.ndarray) -> Tuple[list, list, list]: 21 | """Call method of a Matching instance. Given multi-view kps2d, it 22 | will return the match results. 23 | 24 | Args: 25 | mview_kps2d (np.ndarray): 26 | Multi-view keypoints 2d array, 27 | in shape [n_view, n_person, n_kps2d, 2]. 28 | 29 | Raises: 30 | NotImplementedError: 31 | BaseMatching has not been implemented. 32 | 33 | Returns: 34 | Tuple[list, list, list]: 35 | matched_kps2d (list): Matched human keypoints. 36 | matched_human_idx (list): Matched human index. 37 | matched_observation (list): Number of cameras in which 38 | each matched human is observed. 
39 | """ 40 | raise NotImplementedError 41 | -------------------------------------------------------------------------------- /xrmocap/ops/top_down_association/matching/builder.py: -------------------------------------------------------------------------------- 1 | from mmcv.utils import Registry 2 | 3 | from .base_matching import BaseMatching 4 | from .multi_way_matching import MultiWayMatching 5 | 6 | MATCHING = Registry('matching') 7 | 8 | MATCHING.register_module(name='MultiWayMatching', module=MultiWayMatching) 9 | 10 | 11 | def build_matching(cfg) -> BaseMatching: 12 | """Build a matching instance.""" 13 | return MATCHING.build(cfg) 14 | -------------------------------------------------------------------------------- /xrmocap/ops/top_down_association/matching/match_solver.py: -------------------------------------------------------------------------------- 1 | """This file is pytorch implementation of : 2 | 3 | Wang, Qianqian, Xiaowei Zhou, and Kostas Daniilidis. "Multi-Image Semantic 4 | Matching by Mining Consistent Features." arXiv preprint arXiv:1711.07641(2017). 5 | """ 6 | import torch 7 | 8 | 9 | def proj2dpam(Y, tol=1e-4): 10 | X0 = Y 11 | X = Y 12 | I2 = 0 13 | 14 | for iter_ in range(10): 15 | X1 = project((X0 + I2), 0) 16 | I1 = X1 - (X0 + I2) 17 | X2 = project((X0 + I1), 1) 18 | I2 = X2 - (X0 + I1) 19 | 20 | chg = torch.sum(torch.abs(X2[:] - X[:])) / X.numel() 21 | X = X2 22 | if chg < tol: 23 | return X 24 | return X 25 | 26 | 27 | def project(X, dim_to_project): 28 | if dim_to_project == 0: 29 | for i in range(X.shape[0]): 30 | X[i, :] = proj2pav(X[i, :]) 31 | elif dim_to_project == 1: 32 | for j in range(X.shape[1]): 33 | X[:, j] = proj2pav(X[:, j]) 34 | else: 35 | return None 36 | return X 37 | 38 | 39 | def proj2pav(y): 40 | y[y < 0] = 0 41 | x = torch.zeros_like(y) 42 | if torch.sum(y) < 1: 43 | x += y 44 | else: 45 | u, _ = torch.sort(y, descending=True) 46 | sv = torch.cumsum(u, 0) 47 | to_find = u > (sv - 1) / ( 48 | torch.arange(1, len(u) + 1, device=u.device, dtype=u.dtype)) 49 | rho = torch.nonzero(to_find.reshape(-1))[-1] 50 | theta = torch.max( 51 | torch.tensor(0, device=sv.device, dtype=sv.dtype), 52 | (sv[rho] - 1) / (rho.float() + 1)) 53 | x += torch.max(y - theta, 54 | torch.tensor(0, device=sv.device, dtype=y.dtype)) 55 | return x 56 | -------------------------------------------------------------------------------- /xrmocap/ops/top_down_association/matching/pictorial/__init__.py: -------------------------------------------------------------------------------- 1 | from .pictorial import get_conns, get_struct, infer_kps3d_max_product 2 | 3 | __all__ = ['get_conns', 'get_struct', 'infer_kps3d_max_product'] 4 | -------------------------------------------------------------------------------- /xrmocap/ops/triangulation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/xrmocap/ops/triangulation/__init__.py -------------------------------------------------------------------------------- /xrmocap/ops/triangulation/builder.py: -------------------------------------------------------------------------------- 1 | from xrprimer.ops.triangulation.builder import ( # noqa:F401 2 | TRIANGULATORS, BaseTriangulator, build_triangulator, 3 | ) 4 | 5 | from .aniposelib_triangulator import AniposelibTriangulator 6 | from .jacobi_triangulator import JacobiTriangulator 7 | 8 | TRIANGULATORS.register_module( 9 | name='AniposelibTriangulator', 
module=AniposelibTriangulator) 10 | 11 | TRIANGULATORS.register_module( 12 | name='JacobiTriangulator', module=JacobiTriangulator) 13 | -------------------------------------------------------------------------------- /xrmocap/ops/triangulation/point_selection/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/xrmocap/ops/triangulation/point_selection/__init__.py -------------------------------------------------------------------------------- /xrmocap/ops/triangulation/point_selection/base_selector.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import numpy as np 3 | from typing import Union 4 | from xrprimer.utils.log_utils import get_logger 5 | 6 | 7 | class BaseSelector(): 8 | 9 | def __init__(self, 10 | verbose: bool = True, 11 | logger: Union[None, str, logging.Logger] = None) -> None: 12 | self.verbose = verbose 13 | self.logger = get_logger(logger) 14 | 15 | def get_selection_mask( 16 | self, 17 | points: Union[np.ndarray, list, tuple], 18 | logger: Union[None, str, logging.Logger] = None) -> np.ndarray: 19 | raise NotImplementedError 20 | -------------------------------------------------------------------------------- /xrmocap/ops/triangulation/point_selection/builder.py: -------------------------------------------------------------------------------- 1 | from mmcv.utils import Registry 2 | 3 | from .auto_threshold_selector import AutoThresholdSelector 4 | from .base_selector import BaseSelector 5 | from .camera_error_selector import CameraErrorSelector 6 | from .hybrid_kps2d_selector import HybridKps2dSelector 7 | from .manual_threshold_selector import ManualThresholdSelector 8 | from .reprojection_error_point_selector import ReprojectionErrorPointSelector 9 | from .slow_camera_error_selector import SlowCameraErrorSelector 10 | 11 | POINTSELECTORS = Registry('point_selector') 12 | 13 | POINTSELECTORS.register_module( 14 | name='AutoThresholdSelector', module=AutoThresholdSelector) 15 | POINTSELECTORS.register_module( 16 | name='ManualThresholdSelector', module=ManualThresholdSelector) 17 | POINTSELECTORS.register_module( 18 | name='SlowCameraErrorSelector', module=SlowCameraErrorSelector) 19 | POINTSELECTORS.register_module( 20 | name='CameraErrorSelector', module=CameraErrorSelector) 21 | POINTSELECTORS.register_module( 22 | name='HybridKps2dSelector', module=HybridKps2dSelector) 23 | POINTSELECTORS.register_module( 24 | name='ReprojectionErrorPointSelector', 25 | module=ReprojectionErrorPointSelector) 26 | 27 | 28 | def build_point_selector(cfg) -> BaseSelector: 29 | """Build point selector.""" 30 | return POINTSELECTORS.build(cfg) 31 | -------------------------------------------------------------------------------- /xrmocap/service/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/xrmocap/service/__init__.py -------------------------------------------------------------------------------- /xrmocap/service/builder.py: -------------------------------------------------------------------------------- 1 | from mmcv.utils import Registry 2 | 3 | from .base_flask_service import BaseFlaskService 4 | from .smpl_stream_service import SMPLStreamService 5 | 6 | SERVICES = Registry('services') 7 | 8 | SERVICES.register_module(name='BaseFlaskService', 
module=BaseFlaskService) 9 | SERVICES.register_module(name='SMPLStreamService', module=SMPLStreamService) 10 | 11 | 12 | def build_service(cfg) -> BaseFlaskService: 13 | """Build a flask service.""" 14 | return SERVICES.build(cfg) 15 | -------------------------------------------------------------------------------- /xrmocap/transform/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/xrmocap/transform/__init__.py -------------------------------------------------------------------------------- /xrmocap/transform/bbox/__init__.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | 3 | try: 4 | from typing import Literal 5 | except ImportError: 6 | from typing_extensions import Literal 7 | 8 | 9 | def qsort_bbox_list(bbox_list: list, 10 | only_max: bool = False, 11 | bbox_convention: Literal['xyxy', 'xywh'] = 'xyxy'): 12 | """Sort a list of bboxes by their area in pixels (W*H). 13 | 14 | Args: 15 | bbox_list (list): 16 | A list of bboxes. Each item is a list of (x1, y1, x2, y2). 17 | only_max (bool, optional): 18 | If True, only assure the max element at first place, 19 | others may not be well sorted. 20 | If False, return a well sorted descending list. 21 | Defaults to False. 22 | bbox_convention (str, optional): 23 | Bbox type, xyxy or xywh. Defaults to 'xyxy'. 24 | 25 | Returns: 26 | list: 27 | A descending list, fully sorted only when only_max is False. 28 | """ 29 | if len(bbox_list) <= 1: 30 | return bbox_list 31 | else: 32 | bigger_list = [] 33 | less_list = [] 34 | anchor_index = int(len(bbox_list) / 2) 35 | anchor_bbox = bbox_list[anchor_index] 36 | anchor_area = get_area_of_bbox(anchor_bbox, bbox_convention) 37 | for i in range(len(bbox_list)): 38 | if i == anchor_index: 39 | continue 40 | tmp_bbox = bbox_list[i] 41 | tmp_area = get_area_of_bbox(tmp_bbox, bbox_convention) 42 | if tmp_area >= anchor_area: 43 | bigger_list.append(tmp_bbox) 44 | else: 45 | less_list.append(tmp_bbox) 46 | if only_max: 47 | return qsort_bbox_list(bigger_list, only_max, bbox_convention) + \ 48 | [anchor_bbox, ] + less_list 49 | else: 50 | return qsort_bbox_list(bigger_list, bbox_convention=bbox_convention) + \ 51 | [anchor_bbox, ] + qsort_bbox_list(less_list, bbox_convention=bbox_convention) 52 | 53 | 54 | def get_area_of_bbox( 55 | bbox: Union[list, tuple], 56 | bbox_convention: Literal['xyxy', 'xywh'] = 'xyxy') -> float: 57 | """Get the area of a bbox. 58 | 59 | Args: 60 | bbox (Union[list, tuple]): 61 | A list of [x1, y1, x2, y2] or [x, y, w, h]. 62 | bbox_convention (str, optional): 63 | Bbox type, xyxy or xywh. Defaults to 'xyxy'. 64 | 65 | Returns: 66 | float: 67 | Area of the bbox (|y2-y1|*|x2-x1| or w*h). 
68 | """ 69 | if bbox_convention == 'xyxy': 70 | return abs(bbox[2] - bbox[0]) * abs(bbox[3] - bbox[1]) 71 | elif bbox_convention == 'xywh': 72 | return abs(bbox[2] * bbox[3]) 73 | else: 74 | raise TypeError(f'Wrong bbox convention: {bbox_convention}') 75 | -------------------------------------------------------------------------------- /xrmocap/transform/convention/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/xrmocap/transform/convention/__init__.py -------------------------------------------------------------------------------- /xrmocap/transform/convention/joints_convention/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/xrmocap/transform/convention/joints_convention/__init__.py -------------------------------------------------------------------------------- /xrmocap/transform/convention/keypoints_convention/fourdag_19.py: -------------------------------------------------------------------------------- 1 | FOURDAG19_KEYPOINTS = [ 2 | 'pelvis_openpose', # 'mid_hip' 3 | 'neck_openpose', # 'upper_neck' 4 | 'right_hip_openpose', 5 | 'left_hip_openpose', 6 | 'nose_openpose', 7 | 'right_shoulder_openpose', 8 | 'left_shoulder_openpose', 9 | 'right_knee_openpose', 10 | 'left_knee_openpose', 11 | 'right_ear_openpose', 12 | 'left_ear_openpose', 13 | 'right_elbow_openpose', 14 | 'left_elbow_openpose', 15 | 'right_ankle_openpose', 16 | 'left_ankle_openpose', 17 | 'right_wrist_openpose', 18 | 'left_wrist_openpose', 19 | 'left_bigtoe_openpose', 20 | 'right_bigtoe_openpose', 21 | ] 22 | -------------------------------------------------------------------------------- /xrmocap/transform/convention/keypoints_convention/human_data.py: -------------------------------------------------------------------------------- 1 | # yapf: disable 2 | from xrprimer.transform.convention.keypoints_convention.human_data import ( 3 | HUMAN_DATA_PARTS, 4 | ) 5 | 6 | # yapf: enable 7 | 8 | HUMAN_DATA_LIMB_NAMES = { 9 | 'left_ankle': { 10 | 'left_knee': 'left_lower_leg' 11 | }, 12 | 'right_ankle': { 13 | 'right_knee': 'right_lower_leg' 14 | }, 15 | 'left_shoulder': { 16 | 'left_elbow': 'left_upperarm' 17 | }, 18 | 'right_shoulder': { 19 | 'right_elbow': 'right_upperarm' 20 | }, 21 | 'left_elbow': { 22 | 'left_wrist': 'left_forearm' 23 | }, 24 | 'right_elbow': { 25 | 'right_wrist': 'right_forearm' 26 | }, 27 | 'left_hip_extra': { 28 | 'left_knee': 'left_thigh' 29 | }, 30 | 'right_hip_extra': { 31 | 'right_knee': 'right_thigh' 32 | }, 33 | } 34 | 35 | HUMAN_DATA_FOOT = [ 36 | 'left_ankle', 'left_foot', 'left_heel', 'left_ankle_openpose', 37 | 'left_bigtoe_openpose', 'left_smalltoe_openpose', 'left_toe_3dhp', 38 | 'left_bigtoe', 'left_smalltoe', 'right_ankle', 'right_foot', 'right_heel', 39 | 'right_ankle_openpose', 'right_bigtoe_openpose', 'right_smalltoe_openpose', 40 | 'right_toe_3dhp', 'right_bigtoe', 'right_smalltoe' 41 | ] 42 | 43 | HUMAN_DATA_PARTS['foot'] = HUMAN_DATA_FOOT 44 | -------------------------------------------------------------------------------- /xrmocap/transform/convention/keypoints_convention/paf.py: -------------------------------------------------------------------------------- 1 | ALL_PAF_MAPPING = dict( 2 | openpose_25=dict( 3 | openpose_25=list(range(26)), 4 | fourdag_19=[ 5 | 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 
15, 20, 23 6 | ], 7 | coco=[ 8 | 17, 16, 19, 18, -14, -10, [-11, 7], 12, 8, 13, 9, [-11, 0, 4], 9 | [-7, 0, 3], [-4, 3], 5, 1, 6, 2 10 | ])) 11 | -------------------------------------------------------------------------------- /xrmocap/transform/convention/keypoints_convention/panoptic.py: -------------------------------------------------------------------------------- 1 | PANOPTIC_KEYPOINTS = [ 2 | 'neck_openpose', 3 | 'nose_openpose', 4 | 'pelvis_openpose', 5 | 'left_shoulder_openpose', 6 | 'left_elbow_openpose', 7 | 'left_wrist_openpose', 8 | 'left_hip_openpose', 9 | 'left_knee_openpose', 10 | 'left_ankle_openpose', 11 | 'right_shoulder_openpose', 12 | 'right_elbow_openpose', 13 | 'right_wrist_openpose', 14 | 'right_hip_openpose', 15 | 'right_knee_openpose', 16 | 'right_ankle_openpose', 17 | 'left_eye_openpose', 18 | 'left_ear_openpose', 19 | 'right_eye_openpose', 20 | 'right_ear_openpose', 21 | ] 22 | 23 | PANOPTIC15_KEYPOINTS = [ 24 | 'neck_openpose', 25 | 'nose_openpose', 26 | 'pelvis_openpose', 27 | 'left_shoulder_openpose', 28 | 'left_elbow_openpose', 29 | 'left_wrist_openpose', 30 | 'left_hip_openpose', 31 | 'left_knee_openpose', 32 | 'left_ankle_openpose', 33 | 'right_shoulder_openpose', 34 | 'right_elbow_openpose', 35 | 'right_wrist_openpose', 36 | 'right_hip_openpose', 37 | 'right_knee_openpose', 38 | 'right_ankle_openpose', 39 | ] 40 | -------------------------------------------------------------------------------- /xrmocap/transform/image/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/xrmocap/transform/image/__init__.py -------------------------------------------------------------------------------- /xrmocap/transform/image/base_image_transform.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import torch 3 | from typing import Any, Union 4 | from xrprimer.utils.log_utils import get_logger 5 | 6 | 7 | class BaseImageTransform(torch.nn.Module): 8 | 9 | def __init__(self, 10 | logger: Union[None, str, logging.Logger] = None) -> None: 11 | """Base class for image transform. 12 | 13 | Args: 14 | logger (Union[None, str, logging.Logger], optional): 15 | Logger for logging. If None, root logger will be selected. 16 | Defaults to None. 17 | """ 18 | super().__init__() 19 | self.logger = get_logger(logger) 20 | 21 | def forward(self, input: Any) -> Any: 22 | """Forward function of ImageTransform. 
23 | 24 | Args: 25 | input (Any) 26 | 27 | Returns: 28 | Any 29 | """ 30 | raise NotImplementedError 31 | -------------------------------------------------------------------------------- /xrmocap/transform/image/builder.py: -------------------------------------------------------------------------------- 1 | # yapf: disable 2 | from mmcv.utils import Registry 3 | from torchvision.transforms import Normalize, Resize, ToTensor 4 | 5 | from .color import BGR2RGB, RGB2BGR 6 | from .convert import CV2ToPIL 7 | from .load import LoadImageCV2, LoadImagePIL 8 | from .shape import WarpAffine 9 | 10 | # yapf: enable 11 | 12 | IMAGE_TRANSFORM = Registry('image_transform') 13 | IMAGE_TRANSFORM.register_module(name='BGR2RGB', module=BGR2RGB) 14 | IMAGE_TRANSFORM.register_module(name='RGB2BGR', module=RGB2BGR) 15 | IMAGE_TRANSFORM.register_module(name='LoadImageCV2', module=LoadImageCV2) 16 | IMAGE_TRANSFORM.register_module(name='LoadImagePIL', module=LoadImagePIL) 17 | IMAGE_TRANSFORM.register_module(name='CV2ToPIL', module=CV2ToPIL) 18 | IMAGE_TRANSFORM.register_module(name='Resize', module=Resize) 19 | IMAGE_TRANSFORM.register_module(name='ToTensor', module=ToTensor) 20 | IMAGE_TRANSFORM.register_module(name='Normalize', module=Normalize) 21 | IMAGE_TRANSFORM.register_module(name='WarpAffine', module=WarpAffine) 22 | 23 | 24 | def build_image_transform(cfg) -> None: 25 | """Build image_transform.""" 26 | return IMAGE_TRANSFORM.build(cfg) 27 | -------------------------------------------------------------------------------- /xrmocap/transform/image/convert.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import numpy as np 3 | from PIL import Image 4 | from typing import Union 5 | 6 | from .base_image_transform import BaseImageTransform 7 | 8 | 9 | class CV2ToPIL(BaseImageTransform): 10 | 11 | def __init__(self, 12 | logger: Union[None, str, logging.Logger] = None) -> None: 13 | """Convert cv2 image array to PIL.Image. 14 | 15 | Args: 16 | logger (Union[None, str, logging.Logger], optional): 17 | Logger for logging. If None, root logger will be selected. 18 | Defaults to None. 19 | """ 20 | BaseImageTransform.__init__(self, logger=logger) 21 | 22 | def forward(self, input: np.ndarray) -> Image: 23 | """Forward function of CV2ToPIL. 24 | 25 | Args: 26 | input (np.ndarray): 27 | Image array defined in cv2. 28 | 29 | Returns: 30 | Image: 31 | Image instance defined in PIL. 32 | """ 33 | pil_img = Image.fromarray(input) 34 | return pil_img 35 | -------------------------------------------------------------------------------- /xrmocap/transform/image/load.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import logging 3 | import numpy as np 4 | from PIL import Image 5 | from typing import Union 6 | 7 | from .base_image_transform import BaseImageTransform 8 | 9 | 10 | class LoadImageCV2(BaseImageTransform): 11 | 12 | def __init__(self, 13 | logger: Union[None, str, logging.Logger] = None) -> None: 14 | """Load image array from file by cv2. 15 | 16 | Args: 17 | logger (Union[None, str, logging.Logger], optional): 18 | Logger for logging. If None, root logger will be selected. 19 | Defaults to None. 20 | """ 21 | BaseImageTransform.__init__(self, logger=logger) 22 | 23 | def forward(self, input: str) -> np.ndarray: 24 | """Forward function of LoadImageCV2. 25 | 26 | Args: 27 | input (str): 28 | Path to the image file. 29 | 30 | Returns: 31 | np.ndarray: 32 | Image array defined in cv2 (BGR). 
33 | """ 34 | img = cv2.imread(input) 35 | return img 36 | 37 | 38 | class LoadImagePIL(BaseImageTransform): 39 | 40 | def __init__(self, 41 | logger: Union[None, str, logging.Logger] = None) -> None: 42 | """Load image array from file by PIL. 43 | 44 | Args: 45 | logger (Union[None, str, logging.Logger], optional): 46 | Logger for logging. If None, root logger will be selected. 47 | Defaults to None. 48 | """ 49 | BaseImageTransform.__init__(self, logger=logger) 50 | 51 | def forward(self, input: str) -> Image: 52 | """Forward function of LoadImagePIL. 53 | 54 | Args: 55 | input (str): 56 | Path to the image file. 57 | 58 | Returns: 59 | Image: 60 | Image instance defined in PIL. 61 | """ 62 | img = Image.open(input) 63 | return img 64 | -------------------------------------------------------------------------------- /xrmocap/transform/keypoints3d/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/xrmocap/transform/keypoints3d/__init__.py -------------------------------------------------------------------------------- /xrmocap/transform/keypoints3d/optim/__init__.py: -------------------------------------------------------------------------------- 1 | # yapf: disable 2 | from .aniposelib_optimizer import AniposelibOptimizer 3 | from .base_optimizer import BaseOptimizer 4 | from .median_smooth import MedianSmooth, median_filter_data 5 | from .nan_interpolation import ( 6 | NanInterpolation, count_masked_nan, interpolate_np_data, 7 | ) 8 | from .smpl_shape_aware_optimizer import SMPLShapeAwareOptimizer 9 | from .trajectory_optimizer import TrajectoryOptimizer 10 | 11 | # yapf: enable 12 | 13 | __all__ = [ 14 | 'AniposelibOptimizer', 'BaseOptimizer', 'MedianSmooth', 'NanInterpolation', 15 | 'SMPLShapeAwareOptimizer', 'TrajectoryOptimizer', 'count_masked_nan', 16 | 'interpolate_np_data', 'median_filter_data' 17 | ] 18 | -------------------------------------------------------------------------------- /xrmocap/transform/keypoints3d/optim/base_optimizer.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import Union 3 | from xrprimer.data_structure import Keypoints 4 | from xrprimer.utils.log_utils import get_logger 5 | 6 | 7 | class BaseOptimizer: 8 | 9 | def __init__(self, 10 | verbose: bool = True, 11 | logger: Union[None, str, logging.Logger] = None) -> None: 12 | """Base class for keypoints3d optimizer. 13 | 14 | Args: 15 | verbose (bool, optional): 16 | Whether to log info. 17 | Defaults to True. 18 | logger (Union[None, str, logging.Logger], optional): 19 | Logger for logging. If None, root logger will be selected. 20 | Defaults to None. 21 | """ 22 | self.verbose = verbose 23 | self.logger = get_logger(logger) 24 | 25 | def optimize_keypoints3d(self, keypoints3d: Keypoints, 26 | **kwargs: dict) -> Keypoints: 27 | """Forward function of keypoints3d optimizer. 28 | 29 | Args: 30 | keypoints3d (Keypoints): Input keypoints3d. 31 | kwargs: 32 | Redundant keyword arguments to be 33 | ignored, including: 34 | mview_keypoints2d 35 | 36 | Returns: 37 | Keypoints: The optimized keypoints3d. 
38 | """ 39 | raise NotImplementedError 40 | -------------------------------------------------------------------------------- /xrmocap/transform/keypoints3d/optim/builder.py: -------------------------------------------------------------------------------- 1 | from mmcv.utils import Registry 2 | 3 | from .aniposelib_optimizer import AniposelibOptimizer 4 | from .base_optimizer import BaseOptimizer 5 | from .fourdag_base_optimizer import FourDAGBaseOptimizer 6 | from .fourdag_optimization import FourDAGOptimizer 7 | from .median_smooth import MedianSmooth 8 | from .nan_interpolation import NanInterpolation 9 | from .prior_optimizer import PriorConstraint 10 | from .rm_duplicate import RemoveDuplicate 11 | from .smpl_shape_aware_optimizer import SMPLShapeAwareOptimizer 12 | from .trajectory_optimizer import TrajectoryOptimizer 13 | 14 | KEYPOINTS3D_OPTIMIZERS = Registry('keypoints3d_optimizer') 15 | 16 | KEYPOINTS3D_OPTIMIZERS.register_module( 17 | name='NanInterpolation', module=NanInterpolation) 18 | KEYPOINTS3D_OPTIMIZERS.register_module( 19 | name='TrajectoryOptimizer', module=TrajectoryOptimizer) 20 | KEYPOINTS3D_OPTIMIZERS.register_module( 21 | name='MedianSmooth', module=MedianSmooth) 22 | KEYPOINTS3D_OPTIMIZERS.register_module( 23 | name='AniposelibOptimizer', module=AniposelibOptimizer) 24 | KEYPOINTS3D_OPTIMIZERS.register_module( 25 | name='SMPLShapeAwareOptimizer', module=SMPLShapeAwareOptimizer) 26 | KEYPOINTS3D_OPTIMIZERS.register_module( 27 | name='RemoveDuplicate', module=RemoveDuplicate) 28 | KEYPOINTS3D_OPTIMIZERS.register_module( 29 | name='FourDAGBaseOptimizer', module=FourDAGBaseOptimizer) 30 | KEYPOINTS3D_OPTIMIZERS.register_module( 31 | name='FourDAGOptimizer', module=FourDAGOptimizer) 32 | KEYPOINTS3D_OPTIMIZERS.register_module( 33 | name='PriorConstraint', module=PriorConstraint) 34 | 35 | 36 | def build_keypoints3d_optimizer(cfg) -> BaseOptimizer: 37 | """Build keypoints3d optimizer.""" 38 | return KEYPOINTS3D_OPTIMIZERS.build(cfg) 39 | -------------------------------------------------------------------------------- /xrmocap/transform/point/__init__.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | from typing import Union 4 | 5 | 6 | def affine_transform_pts( 7 | pts: Union[list, torch.Tensor], 8 | t: Union[list, torch.Tensor]) -> Union[list, torch.Tensor]: 9 | """Affine transformation for points. 10 | 11 | Args: 12 | pts (Union[list, torch.Tensor]): 13 | Point(s) to be transferred. 14 | Nx2 or [batch_size, n_views, N, 2] 15 | t (Union[list, torch.Tensor]): 16 | Transformation. Nx2 or [batch_size, n_views, N, 2] 17 | 18 | Returns: 19 | pts_trans(Union[list, torch.Tensor]): 20 | Affine transformed point(s). 
21 | """ 22 | 23 | if not hasattr(pts, 'device'): 24 | if pts.ndim == 1: 25 | pts_trans = np.array([pts[0], pts[1], 1.]).T 26 | pts_trans = np.dot(t, pts_trans) 27 | return pts_trans[:2] 28 | else: 29 | raise NotImplementedError('Batch affine transform for \ 30 | point on CPU is not implemented') 31 | else: 32 | pts_homo = torch.cat( 33 | [pts, torch.ones(pts.shape[:-1] + (1, ), device=pts.device)], 34 | dim=-1) 35 | pts_trans = torch.matmul(pts_homo, t.transpose(2, 3)) 36 | return pts_trans 37 | -------------------------------------------------------------------------------- /xrmocap/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/xrmocap/utils/__init__.py -------------------------------------------------------------------------------- /xrmocap/utils/date_utils.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | from dateutil import tz 3 | 4 | 5 | def get_datetime_local() -> datetime: 6 | """Get datetime in local time zone. 7 | 8 | Returns: 9 | datetime: 10 | An instance of datetime 11 | in local time zone. 12 | """ 13 | datetime_src = datetime.now() 14 | # Auto-detect zones: 15 | # zone_src = tz.tzutc() 16 | zone_dst = tz.tzlocal() 17 | # # Tell the datetime object that it's in UTC time zone 18 | # datetime_src = datetime_src.replace(tzinfo=zone_src) 19 | # Convert time zone 20 | datetime_dst = datetime_src.astimezone(zone_dst) 21 | return datetime_dst 22 | 23 | 24 | def get_str_from_datetime(datetime_instance: datetime, 25 | format: str = '%Y.%m.%d_%H:%M:%S') -> str: 26 | """Get string from datetime instance. 27 | 28 | Args: 29 | datetime_instance (datetime): 30 | An instance of datetime. 31 | format (str): 32 | Format of the string. 33 | Defaults to '%Y.%m.%d_%H:%M:%S'. 34 | """ 35 | return datetime_instance.strftime(format) 36 | -------------------------------------------------------------------------------- /xrmocap/utils/service_utils.py: -------------------------------------------------------------------------------- 1 | import json 2 | from typing import Union 3 | 4 | 5 | def payload_to_dict(input_instance: Union[str, dict]) -> dict: 6 | """Convert flask payload to python dict. 7 | 8 | Args: 9 | input_instance (Union[str, dict]): 10 | Payload get from request.get_json(). 11 | 12 | Returns: 13 | dict: Payload in type dict. 14 | """ 15 | if isinstance(input_instance, dict): 16 | input_dict = input_instance 17 | else: 18 | input_dict = json.loads(s=input_instance) 19 | return input_dict 20 | -------------------------------------------------------------------------------- /xrmocap/version.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenXRLab. All rights reserved. 2 | 3 | __version__ = '0.8.0' 4 | 5 | 6 | def parse_version_info(version_str): 7 | """Parse a version string into a tuple. 8 | 9 | Args: 10 | version_str (str): The version string. 11 | Returns: 12 | tuple[int | str]: The version info, e.g., "1.3.0" is parsed into 13 | (1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1'). 
14 | """ 15 | version_info = [] 16 | for x in version_str.split('.'): 17 | if x.isdigit(): 18 | version_info.append(int(x)) 19 | elif x.find('rc') != -1: 20 | patch_version = x.split('rc') 21 | version_info.append(int(patch_version[0])) 22 | version_info.append(f'rc{patch_version[1]}') 23 | return tuple(version_info) 24 | 25 | 26 | version_info = parse_version_info(__version__) 27 | 28 | __all__ = ['__version__', 'version_info', 'parse_version_info'] 29 | -------------------------------------------------------------------------------- /xrmocap/visualization/__init__.py: -------------------------------------------------------------------------------- 1 | # yapf: disable 2 | from .visualize_keypoints2d import visualize_keypoints2d 3 | from .visualize_keypoints3d import visualize_keypoints3d_projected 4 | 5 | # yapf: enable 6 | __all__ = ['visualize_keypoints2d', 'visualize_keypoints3d_projected'] 7 | -------------------------------------------------------------------------------- /xrmocap/visualization/render/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openxrlab/xrmocap/2d227a7b27cfec5c43bfe975d23bc0a54d24541a/xrmocap/visualization/render/__init__.py --------------------------------------------------------------------------------