├── .circleci
├── config.yml
├── docker
│ └── Dockerfile
├── scripts
│ └── get_mmcv_var.sh
└── test.yml
├── .github
├── CODE_OF_CONDUCT.md
├── ISSUE_TEMPLATE
│ ├── 1-bug-report.yml
│ ├── 2-feature_request.yml
│ ├── 3-documentation.yml
│ └── config.yml
├── pull_request_template.md
└── workflows
│ ├── deploy.yml
│ ├── lint.yml
│ ├── merge_stage_test.yml
│ ├── pr_stage_test.yml
│ └── scripts
│ └── get_mmcv_var.sh
├── .gitignore
├── .pre-commit-config.yaml
├── .pylintrc
├── CITATION.cff
├── LICENSE
├── LICENSES.md
├── MANIFEST.in
├── README.md
├── README_CN.md
├── configs
├── _base_
│ ├── datasets
│ │ ├── 300vw.py
│ │ ├── 300w.py
│ │ ├── 300wlp.py
│ │ ├── aflw.py
│ │ ├── aic.py
│ │ ├── ak.py
│ │ ├── animalpose.py
│ │ ├── ap10k.py
│ │ ├── atrw.py
│ │ ├── campus.py
│ │ ├── coco.py
│ │ ├── coco_aic.py
│ │ ├── coco_openpose.py
│ │ ├── coco_wholebody.py
│ │ ├── coco_wholebody_face.py
│ │ ├── coco_wholebody_hand.py
│ │ ├── coco_wholebody_openpose.py
│ │ ├── cofw.py
│ │ ├── crowdpose.py
│ │ ├── deepfashion2.py
│ │ ├── deepfashion_full.py
│ │ ├── deepfashion_lower.py
│ │ ├── deepfashion_upper.py
│ │ ├── exlpose.py
│ │ ├── fly.py
│ │ ├── freihand2d.py
│ │ ├── h36m.py
│ │ ├── h3wb.py
│ │ ├── halpe.py
│ │ ├── halpe26.py
│ │ ├── horse10.py
│ │ ├── humanart.py
│ │ ├── humanart21.py
│ │ ├── humanart_aic.py
│ │ ├── interhand2d.py
│ │ ├── interhand3d.py
│ │ ├── jhmdb.py
│ │ ├── lapa.py
│ │ ├── locust.py
│ │ ├── macaque.py
│ │ ├── mhp.py
│ │ ├── mpi_inf_3dhp.py
│ │ ├── mpii.py
│ │ ├── mpii_trb.py
│ │ ├── ochuman.py
│ │ ├── onehand10k.py
│ │ ├── panoptic_body3d.py
│ │ ├── panoptic_hand2d.py
│ │ ├── posetrack18.py
│ │ ├── rhd2d.py
│ │ ├── shelf.py
│ │ ├── ubody2d.py
│ │ ├── ubody3d.py
│ │ ├── wflw.py
│ │ └── zebra.py
│ └── default_runtime.py
├── animal_2d_keypoint
│ ├── README.md
│ ├── rtmpose
│ │ ├── README.md
│ │ └── ap10k
│ │ │ ├── rtmpose-m_8xb64-210e_ap10k-256x256.py
│ │ │ ├── rtmpose_ap10k.md
│ │ │ └── rtmpose_ap10k.yml
│ └── topdown_heatmap
│ │ ├── README.md
│ │ ├── ak
│ │ │ ├── hrnet_animalkingdom.md
│ │ │ ├── hrnet_animalkingdom.yml
│ │ │ ├── td-hm_hrnet-w32_8xb32-300e_animalkingdom_P1-256x256.py
│ │ │ ├── td-hm_hrnet-w32_8xb32-300e_animalkingdom_P2-256x256.py
│ │ │ ├── td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_amphibian-256x256.py
│ │ │ ├── td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_bird-256x256.py
│ │ │ ├── td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_fish-256x256.py
│ │ │ ├── td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_mammal-256x256.py
│ │ │ └── td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_reptile-256x256.py
│ │ ├── animalpose
│ │ │ ├── hrnet_animalpose.md
│ │ │ ├── hrnet_animalpose.yml
│ │ │ ├── resnet_animalpose.md
│ │ │ ├── resnet_animalpose.yml
│ │ │ ├── td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py
│ │ │ ├── td-hm_hrnet-w48_8xb64-210e_animalpose-256x256.py
│ │ │ ├── td-hm_res101_8xb64-210e_animalpose-256x256.py
│ │ │ ├── td-hm_res152_8xb32-210e_animalpose-256x256.py
│ │ │ └── td-hm_res50_8xb64-210e_animalpose-256x256.py
│ │ ├── ap10k
│ │ │ ├── cspnext-m_udp_8xb64-210e_ap10k-256x256.py
│ │ │ ├── cspnext_udp_ap10k.md
│ │ │ ├── cspnext_udp_ap10k.yml
│ │ │ ├── hrnet_ap10k.md
│ │ │ ├── hrnet_ap10k.yml
│ │ │ ├── resnet_ap10k.md
│ │ │ ├── resnet_ap10k.yml
│ │ │ ├── td-hm_hrnet-w32_8xb64-210e_ap10k-256x256.py
│ │ │ ├── td-hm_hrnet-w48_8xb64-210e_ap10k-256x256.py
│ │ │ ├── td-hm_res101_8xb64-210e_ap10k-256x256.py
│ │ │ └── td-hm_res50_8xb64-210e_ap10k-256x256.py
│ │ ├── locust
│ │ │ ├── resnet_locust.md
│ │ │ ├── resnet_locust.yml
│ │ │ ├── td-hm_res101_8xb64-210e_locust-160x160.py
│ │ │ ├── td-hm_res152_8xb32-210e_locust-160x160.py
│ │ │ └── td-hm_res50_8xb64-210e_locust-160x160.py
│ │ └── zebra
│ │ │ ├── resnet_zebra.md
│ │ │ ├── resnet_zebra.yml
│ │ │ ├── td-hm_res101_8xb64-210e_zebra-160x160.py
│ │ │ ├── td-hm_res152_8xb32-210e_zebra-160x160.py
│ │ │ └── td-hm_res50_8xb64-210e_zebra-160x160.py
├── body_2d_keypoint
│ ├── README.md
│ ├── associative_embedding
│ │ ├── README.md
│ │ └── coco
│ │ │ ├── ae_hrnet-w32_8xb24-300e_coco-512x512.py
│ │ │ ├── hrnet_coco.md
│ │ │ └── hrnet_coco.yml
│ ├── cid
│ │ └── coco
│ │ │ ├── cid_hrnet-w32_8xb20-140e_coco-512x512.py
│ │ │ ├── cid_hrnet-w48_8xb20-140e_coco-512x512.py
│ │ │ ├── hrnet_coco.md
│ │ │ └── hrnet_coco.yml
│ ├── dekr
│ │ ├── README.md
│ │ ├── coco
│ │ │ ├── dekr_hrnet-w32_8xb10-140e_coco-512x512.py
│ │ │ ├── dekr_hrnet-w48_8xb10-140e_coco-640x640.py
│ │ │ ├── hrnet_coco.md
│ │ │ └── hrnet_coco.yml
│ │ └── crowdpose
│ │ │ ├── dekr_hrnet-w32_8xb10-300e_crowdpose-512x512.py
│ │ │ ├── dekr_hrnet-w48_8xb5-300e_crowdpose-640x640.py
│ │ │ ├── hrnet_crowdpose.md
│ │ │ └── hrnet_crowdpose.yml
│ ├── edpose
│ │ └── coco
│ │ │ ├── edpose_coco.md
│ │ │ ├── edpose_coco.yml
│ │ │ └── edpose_res50_8xb2-50e_coco-800x1333.py
│ ├── integral_regression
│ │ ├── README.md
│ │ └── coco
│ │ │ ├── ipr_res50_8xb64-210e_coco-256x256.py
│ │ │ ├── ipr_res50_debias-8xb64-210e_coco-256x256.py
│ │ │ ├── ipr_res50_dsnt-8xb64-210e_coco-256x256.py
│ │ │ ├── resnet_debias_coco.md
│ │ │ ├── resnet_debias_coco.yml
│ │ │ ├── resnet_dsnt_coco.md
│ │ │ ├── resnet_dsnt_coco.yml
│ │ │ ├── resnet_ipr_coco.md
│ │ │ └── resnet_ipr_coco.yml
│ ├── rtmo
│ │ ├── README.md
│ │ ├── body7
│ │ │ ├── rtmo-l_16xb16-600e_body7-640x640.py
│ │ │ ├── rtmo-m_16xb16-600e_body7-640x640.py
│ │ │ ├── rtmo-s_8xb32-600e_body7-640x640.py
│ │ │ ├── rtmo-t_8xb32-600e_body7-416x416.py
│ │ │ ├── rtmo_body7.md
│ │ │ └── rtmo_body7.yml
│ │ ├── coco
│ │ │ ├── rtmo-l_16xb16-600e_coco-640x640.py
│ │ │ ├── rtmo-m_16xb16-600e_coco-640x640.py
│ │ │ ├── rtmo-s_8xb32-600e_coco-640x640.py
│ │ │ ├── rtmo_coco.md
│ │ │ └── rtmo_coco.yml
│ │ └── crowdpose
│ │ │ ├── rtmo-l_16xb16-700e_body7-crowdpose-640x640.py
│ │ │ ├── rtmo-l_16xb16-700e_crowdpose-640x640.py
│ │ │ ├── rtmo-m_16xb16-700e_crowdpose-640x640.py
│ │ │ ├── rtmo-s_8xb32-700e_crowdpose-640x640.py
│ │ │ ├── rtmo_crowdpose.md
│ │ │ └── rtmo_crowdpose.yml
│ ├── rtmpose
│ │ ├── README.md
│ │ ├── body8
│ │ │ ├── rtmpose-l_8xb256-420e_body8-256x192.py
│ │ │ ├── rtmpose-l_8xb256-420e_body8-384x288.py
│ │ │ ├── rtmpose-l_8xb512-700e_body8-halpe26-256x192.py
│ │ │ ├── rtmpose-l_8xb512-700e_body8-halpe26-384x288.py
│ │ │ ├── rtmpose-m_8xb256-420e_body8-256x192.py
│ │ │ ├── rtmpose-m_8xb256-420e_body8-384x288.py
│ │ │ ├── rtmpose-m_8xb512-700e_body8-halpe26-256x192.py
│ │ │ ├── rtmpose-m_8xb512-700e_body8-halpe26-384x288.py
│ │ │ ├── rtmpose-s_8xb1024-700e_body8-halpe26-256x192.py
│ │ │ ├── rtmpose-s_8xb256-420e_body8-256x192.py
│ │ │ ├── rtmpose-t_8xb1024-700e_body8-halpe26-256x192.py
│ │ │ ├── rtmpose-t_8xb256-420e_body8-256x192.py
│ │ │ ├── rtmpose-x_8xb256-700e_body8-halpe26-384x288.py
│ │ │ ├── rtmpose_body8-coco.md
│ │ │ ├── rtmpose_body8-coco.yml
│ │ │ ├── rtmpose_body8-halpe26.md
│ │ │ └── rtmpose_body8-halpe26.yml
│ │ ├── coco
│ │ │ ├── rtmpose-l_8xb256-420e_aic-coco-256x192.py
│ │ │ ├── rtmpose-l_8xb256-420e_aic-coco-384x288.py
│ │ │ ├── rtmpose-l_8xb256-420e_coco-256x192.py
│ │ │ ├── rtmpose-m_8xb256-420e_aic-coco-256x192.py
│ │ │ ├── rtmpose-m_8xb256-420e_aic-coco-384x288.py
│ │ │ ├── rtmpose-m_8xb256-420e_coco-256x192.py
│ │ │ ├── rtmpose-s_8xb256-420e_aic-coco-256x192.py
│ │ │ ├── rtmpose-s_8xb256-420e_coco-256x192.py
│ │ │ ├── rtmpose-t_8xb256-420e_aic-coco-256x192.py
│ │ │ ├── rtmpose-t_8xb256-420e_coco-256x192.py
│ │ │ ├── rtmpose_coco.md
│ │ │ └── rtmpose_coco.yml
│ │ ├── crowdpose
│ │ │ ├── rtmpose-m_8xb64-210e_crowdpose-256x192.py
│ │ │ ├── rtmpose_crowdpose.md
│ │ │ └── rtmpose_crowdpose.yml
│ │ ├── humanart
│ │ │ ├── rtmpose-l_8xb256-420e_humanart-256x192.py
│ │ │ ├── rtmpose-m_8xb256-420e_humanart-256x192.py
│ │ │ ├── rtmpose-s_8xb256-420e_humanart-256x192.py
│ │ │ ├── rtmpose-t_8xb256-420e_humanart-256x192.py
│ │ │ ├── rtmpose_humanart.md
│ │ │ └── rtmpose_humanart.yml
│ │ └── mpii
│ │ │ ├── rtmpose-m_8xb64-210e_mpii-256x256.py
│ │ │ ├── rtmpose_mpii.md
│ │ │ └── rtmpose_mpii.yml
│ ├── simcc
│ │ ├── README.md
│ │ ├── coco
│ │ │ ├── mobilenetv2_coco.md
│ │ │ ├── mobilenetv2_coco.yml
│ │ │ ├── resnet_coco.md
│ │ │ ├── resnet_coco.yml
│ │ │ ├── simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192.py
│ │ │ ├── simcc_res50_8xb32-140e_coco-384x288.py
│ │ │ ├── simcc_res50_8xb64-210e_coco-256x192.py
│ │ │ ├── simcc_vipnas-mbv3_8xb64-210e_coco-256x192.py
│ │ │ ├── vipnas_coco.md
│ │ │ └── vipnas_coco.yml
│ │ └── mpii
│ │ │ └── simcc_res50_wo-deconv-8xb64-210e_mpii-256x256.py
│ ├── topdown_heatmap
│ │ ├── README.md
│ │ ├── aic
│ │ │ ├── hrnet_aic.md
│ │ │ ├── hrnet_aic.yml
│ │ │ ├── resnet_aic.md
│ │ │ ├── resnet_aic.yml
│ │ │ ├── td-hm_hrnet-w32_8xb64-210e_aic-256x192.py
│ │ │ └── td-hm_res101_8xb64-210e_aic-256x192.py
│ │ ├── coco
│ │ │ ├── alexnet_coco.md
│ │ │ ├── alexnet_coco.yml
│ │ │ ├── cpm_coco.md
│ │ │ ├── cpm_coco.yml
│ │ │ ├── cspnext-l_udp_8xb256-210e_aic-coco-256x192.py
│ │ │ ├── cspnext-l_udp_8xb256-210e_coco-256x192.py
│ │ │ ├── cspnext-m_udp_8xb256-210e_aic-coco-256x192.py
│ │ │ ├── cspnext-m_udp_8xb256-210e_coco-256x192.py
│ │ │ ├── cspnext-s_udp_8xb256-210e_aic-coco-256x192.py
│ │ │ ├── cspnext-s_udp_8xb256-210e_coco-256x192.py
│ │ │ ├── cspnext-tiny_udp_8xb256-210e_aic-coco-256x192.py
│ │ │ ├── cspnext-tiny_udp_8xb256-210e_coco-256x192.py
│ │ │ ├── cspnext_udp_coco.md
│ │ │ ├── cspnext_udp_coco.yml
│ │ │ ├── hourglass_coco.md
│ │ │ ├── hourglass_coco.yml
│ │ │ ├── hrformer_coco.md
│ │ │ ├── hrformer_coco.yml
│ │ │ ├── hrnet_augmentation_coco.md
│ │ │ ├── hrnet_augmentation_coco.yml
│ │ │ ├── hrnet_coco.md
│ │ │ ├── hrnet_coco.yml
│ │ │ ├── hrnet_coco_aic.md
│ │ │ ├── hrnet_dark_coco.md
│ │ │ ├── hrnet_dark_coco.yml
│ │ │ ├── hrnet_fp16_coco.md
│ │ │ ├── hrnet_udp_coco.md
│ │ │ ├── hrnet_udp_coco.yml
│ │ │ ├── litehrnet_coco.md
│ │ │ ├── litehrnet_coco.yml
│ │ │ ├── mobilenetv2_coco.md
│ │ │ ├── mobilenetv2_coco.yml
│ │ │ ├── mspn_coco.md
│ │ │ ├── mspn_coco.yml
│ │ │ ├── pvt_coco.md
│ │ │ ├── pvt_coco.yml
│ │ │ ├── resnest_coco.md
│ │ │ ├── resnest_coco.yml
│ │ │ ├── resnet_coco.md
│ │ │ ├── resnet_coco.yml
│ │ │ ├── resnet_dark_coco.md
│ │ │ ├── resnet_dark_coco.yml
│ │ │ ├── resnet_fp16_coco.md
│ │ │ ├── resnetv1d_coco.md
│ │ │ ├── resnetv1d_coco.yml
│ │ │ ├── resnext_coco.md
│ │ │ ├── resnext_coco.yml
│ │ │ ├── rsn_coco.md
│ │ │ ├── rsn_coco.yml
│ │ │ ├── scnet_coco.md
│ │ │ ├── scnet_coco.yml
│ │ │ ├── seresnet_coco.md
│ │ │ ├── seresnet_coco.yml
│ │ │ ├── shufflenetv1_coco.md
│ │ │ ├── shufflenetv1_coco.yml
│ │ │ ├── shufflenetv2_coco.md
│ │ │ ├── shufflenetv2_coco.yml
│ │ │ ├── swin_coco.md
│ │ │ ├── swin_coco.yml
│ │ │ ├── td-hm-vis_res50_8xb64-210e_coco-aic-256x192-merge.py
│ │ │ ├── td-hm_2xmspn50_8xb32-210e_coco-256x192.py
│ │ │ ├── td-hm_2xrsn50_8xb32-210e_coco-256x192.py
│ │ │ ├── td-hm_3xmspn50_8xb32-210e_coco-256x192.py
│ │ │ ├── td-hm_3xrsn50_8xb32-210e_coco-256x192.py
│ │ │ ├── td-hm_4xmspn50_8xb32-210e_coco-256x192.py
│ │ │ ├── td-hm_ViTPose-base-simple_8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_ViTPose-base_8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_ViTPose-huge-simple_8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_ViTPose-huge_8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_ViTPose-large-simple_8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_ViTPose-large_8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_ViTPose-small-simple_8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_ViTPose-small_8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_alexnet_8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_cpm_8xb32-210e_coco-384x288.py
│ │ │ ├── td-hm_cpm_8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_hourglass52_8xb32-210e_coco-256x256.py
│ │ │ ├── td-hm_hourglass52_8xb32-210e_coco-384x384.py
│ │ │ ├── td-hm_hrformer-base_8xb32-210e_coco-256x192.py
│ │ │ ├── td-hm_hrformer-base_8xb32-210e_coco-384x288.py
│ │ │ ├── td-hm_hrformer-small_8xb32-210e_coco-256x192.py
│ │ │ ├── td-hm_hrformer-small_8xb32-210e_coco-384x288.py
│ │ │ ├── td-hm_hrnet-w32_8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_hrnet-w32_8xb64-210e_coco-384x288.py
│ │ │ ├── td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-combine.py
│ │ │ ├── td-hm_hrnet-w32_8xb64-210e_coco-aic-256x192-merge.py
│ │ │ ├── td-hm_hrnet-w32_coarsedropout-8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_hrnet-w32_dark-8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_hrnet-w32_dark-8xb64-210e_coco-384x288.py
│ │ │ ├── td-hm_hrnet-w32_fp16-8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_hrnet-w32_gridmask-8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_hrnet-w32_photometric-8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_hrnet-w32_udp-8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_hrnet-w32_udp-8xb64-210e_coco-384x288.py
│ │ │ ├── td-hm_hrnet-w32_udp-regress-8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_hrnet-w48_8xb32-210e_coco-256x192.py
│ │ │ ├── td-hm_hrnet-w48_8xb32-210e_coco-384x288.py
│ │ │ ├── td-hm_hrnet-w48_dark-8xb32-210e_coco-256x192.py
│ │ │ ├── td-hm_hrnet-w48_dark-8xb32-210e_coco-384x288.py
│ │ │ ├── td-hm_hrnet-w48_udp-8xb32-210e_coco-256x192.py
│ │ │ ├── td-hm_hrnet-w48_udp-8xb32-210e_coco-384x288.py
│ │ │ ├── td-hm_litehrnet-18_8xb32-210e_coco-384x288.py
│ │ │ ├── td-hm_litehrnet-18_8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_litehrnet-30_8xb32-210e_coco-384x288.py
│ │ │ ├── td-hm_litehrnet-30_8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_mobilenetv2_8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_mobilenetv2_8xb64-210e_coco-384x288.py
│ │ │ ├── td-hm_mspn50_8xb32-210e_coco-256x192.py
│ │ │ ├── td-hm_pvt-s_8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_pvtv2-b2_8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_res101_8xb32-210e_coco-384x288.py
│ │ │ ├── td-hm_res101_8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_res101_dark-8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_res101_dark-8xb64-210e_coco-384x288.py
│ │ │ ├── td-hm_res152_8xb32-210e_coco-256x192.py
│ │ │ ├── td-hm_res152_8xb32-210e_coco-384x288.py
│ │ │ ├── td-hm_res152_dark-8xb32-210e_coco-256x192.py
│ │ │ ├── td-hm_res152_dark-8xb32-210e_coco-384x288.py
│ │ │ ├── td-hm_res50_8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_res50_8xb64-210e_coco-384x288.py
│ │ │ ├── td-hm_res50_dark-8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_res50_dark-8xb64-210e_coco-384x288.py
│ │ │ ├── td-hm_res50_fp16-8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_resnest101_8xb32-210e_coco-384x288.py
│ │ │ ├── td-hm_resnest101_8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_resnest200_8xb16-210e_coco-384x288.py
│ │ │ ├── td-hm_resnest200_8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_resnest269_8xb16-210e_coco-384x288.py
│ │ │ ├── td-hm_resnest269_8xb32-210e_coco-256x192.py
│ │ │ ├── td-hm_resnest50_8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_resnest50_8xb64-210e_coco-384x288.py
│ │ │ ├── td-hm_resnetv1d101_8xb32-210e_coco-384x288.py
│ │ │ ├── td-hm_resnetv1d101_8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_resnetv1d152_8xb32-210e_coco-256x192.py
│ │ │ ├── td-hm_resnetv1d152_8xb48-210e_coco-384x288.py
│ │ │ ├── td-hm_resnetv1d50_8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_resnetv1d50_8xb64-210e_coco-384x288.py
│ │ │ ├── td-hm_resnext101_8xb32-210e_coco-384x288.py
│ │ │ ├── td-hm_resnext101_8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_resnext152_8xb32-210e_coco-256x192.py
│ │ │ ├── td-hm_resnext152_8xb48-210e_coco-384x288.py
│ │ │ ├── td-hm_resnext50_8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_resnext50_8xb64-210e_coco-384x288.py
│ │ │ ├── td-hm_rsn18_8xb32-210e_coco-256x192.py
│ │ │ ├── td-hm_rsn50_8xb32-210e_coco-256x192.py
│ │ │ ├── td-hm_scnet101_8xb32-210e_coco-256x192.py
│ │ │ ├── td-hm_scnet101_8xb48-210e_coco-384x288.py
│ │ │ ├── td-hm_scnet50_8xb32-210e_coco-384x288.py
│ │ │ ├── td-hm_scnet50_8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_seresnet101_8xb32-210e_coco-384x288.py
│ │ │ ├── td-hm_seresnet101_8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_seresnet152_8xb32-210e_coco-256x192.py
│ │ │ ├── td-hm_seresnet152_8xb48-210e_coco-384x288.py
│ │ │ ├── td-hm_seresnet50_8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_seresnet50_8xb64-210e_coco-384x288.py
│ │ │ ├── td-hm_shufflenetv1_8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_shufflenetv1_8xb64-210e_coco-384x288.py
│ │ │ ├── td-hm_shufflenetv2_8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_shufflenetv2_8xb64-210e_coco-384x288.py
│ │ │ ├── td-hm_swin-b-p4-w7_8xb32-210e_coco-256x192.py
│ │ │ ├── td-hm_swin-b-p4-w7_8xb32-210e_coco-384x288.py
│ │ │ ├── td-hm_swin-l-p4-w7_8xb32-210e_coco-256x192.py
│ │ │ ├── td-hm_swin-l-p4-w7_8xb32-210e_coco-384x288.py
│ │ │ ├── td-hm_swin-t-p4-w7_8xb32-210e_coco-256x192.py
│ │ │ ├── td-hm_vgg16-bn_8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_vipnas-mbv3_8xb64-210e_coco-256x192.py
│ │ │ ├── td-hm_vipnas-res50_8xb64-210e_coco-256x192.py
│ │ │ ├── vgg_coco.md
│ │ │ ├── vgg_coco.yml
│ │ │ ├── vipnas_coco.md
│ │ │ ├── vipnas_coco.yml
│ │ │ ├── vitpose_coco.md
│ │ │ └── vitpose_coco.yml
│ │ ├── crowdpose
│ │ │ ├── cspnext-m_udp_8xb64-210e_crowpose-256x192.py
│ │ │ ├── cspnext_udp_crowdpose.md
│ │ │ ├── cspnext_udp_crowdpose.yml
│ │ │ ├── hrnet_crowdpose.md
│ │ │ ├── hrnet_crowdpose.yml
│ │ │ ├── resnet_crowdpose.md
│ │ │ ├── resnet_crowdpose.yml
│ │ │ ├── td-hm_hrnet-w32_8xb64-210e_crowdpose-256x192.py
│ │ │ ├── td-hm_res101_8xb64-210e_crowdpose-256x192.py
│ │ │ ├── td-hm_res101_8xb64-210e_crowdpose-320x256.py
│ │ │ ├── td-hm_res152_8xb64-210e_crowdpose-256x192.py
│ │ │ └── td-hm_res50_8xb64-210e_crowdpose-256x192.py
│ │ ├── exlpose
│ │ │ ├── hrnet_exlpose.md
│ │ │ ├── hrnet_exlpose.yml
│ │ │ └── td-hm_hrnet-w32_8xb64-210e_exlpose-256x192.py
│ │ ├── humanart
│ │ │ ├── hrnet_humanart.md
│ │ │ ├── hrnet_humanart.yml
│ │ │ ├── td-hm_ViTPose-base_8xb64-210e_humanart-256x192.py
│ │ │ ├── td-hm_ViTPose-huge_8xb64-210e_humanart-256x192.py
│ │ │ ├── td-hm_ViTPose-large_8xb64-210e_humanart-256x192.py
│ │ │ ├── td-hm_ViTPose-small_8xb64-210e_humanart-256x192.py
│ │ │ ├── td-hm_hrnet-w32_8xb64-210e_humanart-256x192.py
│ │ │ ├── td-hm_hrnet-w48_8xb32-210e_humanart-256x192.py
│ │ │ ├── vitpose_humanart.md
│ │ │ └── vitpose_humanart.yml
│ │ ├── jhmdb
│ │ │ ├── cpm_jhmdb.md
│ │ │ ├── cpm_jhmdb.yml
│ │ │ ├── resnet_jhmdb.md
│ │ │ ├── resnet_jhmdb.yml
│ │ │ ├── td-hm_cpm_8xb32-40e_jhmdb-sub1-368x368.py
│ │ │ ├── td-hm_cpm_8xb32-40e_jhmdb-sub2-368x368.py
│ │ │ ├── td-hm_cpm_8xb32-40e_jhmdb-sub3-368x368.py
│ │ │ ├── td-hm_res50-2deconv_8xb64-40e_jhmdb-sub1-256x256.py
│ │ │ ├── td-hm_res50-2deconv_8xb64-40e_jhmdb-sub2-256x256.py
│ │ │ ├── td-hm_res50-2deconv_8xb64-40e_jhmdb-sub3-256x256.py
│ │ │ ├── td-hm_res50_8xb64-20e_jhmdb-sub1-256x256.py
│ │ │ ├── td-hm_res50_8xb64-20e_jhmdb-sub2-256x256.py
│ │ │ └── td-hm_res50_8xb64-20e_jhmdb-sub3-256x256.py
│ │ ├── mpii
│ │ │ ├── cpm_mpii.md
│ │ │ ├── cpm_mpii.yml
│ │ │ ├── cspnext-m_udp_8xb64-210e_mpii-256x256.py
│ │ │ ├── cspnext_udp_mpii.md
│ │ │ ├── cspnext_udp_mpii.yml
│ │ │ ├── hourglass_mpii.md
│ │ │ ├── hourglass_mpii.yml
│ │ │ ├── hrnet_dark_mpii.md
│ │ │ ├── hrnet_dark_mpii.yml
│ │ │ ├── hrnet_mpii.md
│ │ │ ├── hrnet_mpii.yml
│ │ │ ├── litehrnet_mpii.md
│ │ │ ├── litehrnet_mpii.yml
│ │ │ ├── mobilenetv2_mpii.md
│ │ │ ├── mobilenetv2_mpii.yml
│ │ │ ├── resnet_mpii.md
│ │ │ ├── resnet_mpii.yml
│ │ │ ├── resnetv1d_mpii.md
│ │ │ ├── resnetv1d_mpii.yml
│ │ │ ├── resnext_mpii.md
│ │ │ ├── resnext_mpii.yml
│ │ │ ├── scnet_mpii.md
│ │ │ ├── scnet_mpii.yml
│ │ │ ├── seresnet_mpii.md
│ │ │ ├── seresnet_mpii.yml
│ │ │ ├── shufflenetv1_mpii.md
│ │ │ ├── shufflenetv1_mpii.yml
│ │ │ ├── shufflenetv2_mpii.md
│ │ │ ├── shufflenetv2_mpii.yml
│ │ │ ├── td-hm_cpm_8xb64-210e_mpii-368x368.py
│ │ │ ├── td-hm_hourglass52_8xb32-210e_mpii-384x384.py
│ │ │ ├── td-hm_hourglass52_8xb64-210e_mpii-256x256.py
│ │ │ ├── td-hm_hrnet-w32_8xb64-210e_mpii-256x256.py
│ │ │ ├── td-hm_hrnet-w32_dark-8xb64-210e_mpii-256x256.py
│ │ │ ├── td-hm_hrnet-w48_8xb64-210e_mpii-256x256.py
│ │ │ ├── td-hm_hrnet-w48_dark-8xb64-210e_mpii-256x256.py
│ │ │ ├── td-hm_litehrnet-18_8xb64-210e_mpii-256x256.py
│ │ │ ├── td-hm_litehrnet-30_8xb64-210e_mpii-256x256.py
│ │ │ ├── td-hm_mobilenetv2_8xb64-210e_mpii-256x256.py
│ │ │ ├── td-hm_res101_8xb64-210e_mpii-256x256.py
│ │ │ ├── td-hm_res152_8xb32-210e_mpii-256x256.py
│ │ │ ├── td-hm_res50_8xb64-210e_mpii-256x256.py
│ │ │ ├── td-hm_resnetv1d101_8xb64-210e_mpii-256x256.py
│ │ │ ├── td-hm_resnetv1d152_8xb64-210e_mpii-256x256.py
│ │ │ ├── td-hm_resnetv1d50_8xb64-210e_mpii-256x256.py
│ │ │ ├── td-hm_resnext152_8xb32-210e_mpii-256x256.py
│ │ │ ├── td-hm_scnet101_8xb64-210e_mpii-256x256.py
│ │ │ ├── td-hm_scnet50_8xb64-210e_mpii-256x256.py
│ │ │ ├── td-hm_seresnet101_8xb64-210e_mpii-256x256.py
│ │ │ ├── td-hm_seresnet152_8xb32-210e_mpii-256x256.py
│ │ │ ├── td-hm_seresnet50_8xb64-210e_mpii-256x256.py
│ │ │ ├── td-hm_shufflenetv1_8xb64-210e_mpii-256x256.py
│ │ │ └── td-hm_shufflenetv2_8xb64-210e_mpii-256x256.py
│ │ └── posetrack18
│ │ │ ├── hrnet_posetrack18.md
│ │ │ ├── hrnet_posetrack18.yml
│ │ │ ├── resnet_posetrack18.md
│ │ │ ├── resnet_posetrack18.yml
│ │ │ ├── td-hm_hrnet-w32_8xb64-20e_posetrack18-256x192.py
│ │ │ ├── td-hm_hrnet-w32_8xb64-20e_posetrack18-384x288.py
│ │ │ ├── td-hm_hrnet-w48_8xb64-20e_posetrack18-256x192.py
│ │ │ ├── td-hm_hrnet-w48_8xb64-20e_posetrack18-384x288.py
│ │ │ └── td-hm_res50_8xb64-20e_posetrack18-256x192.py
│ ├── topdown_regression
│ │ ├── README.md
│ │ ├── coco
│ │ │ ├── mobilenetv2_rle_coco.md
│ │ │ ├── mobilenetv2_rle_coco.yml
│ │ │ ├── resnet_coco.md
│ │ │ ├── resnet_coco.yml
│ │ │ ├── resnet_rle_coco.md
│ │ │ ├── resnet_rle_coco.yml
│ │ │ ├── td-reg_mobilenetv2_rle-pretrained-8xb64-210e_coco-256x192.py
│ │ │ ├── td-reg_res101_8xb64-210e_coco-256x192.py
│ │ │ ├── td-reg_res101_rle-8xb64-210e_coco-256x192.py
│ │ │ ├── td-reg_res152_8xb64-210e_coco-256x192.py
│ │ │ ├── td-reg_res152_rle-8xb64-210e_coco-256x192.py
│ │ │ ├── td-reg_res152_rle-8xb64-210e_coco-384x288.py
│ │ │ ├── td-reg_res50_8xb64-210e_coco-256x192.py
│ │ │ ├── td-reg_res50_rle-8xb64-210e_coco-256x192.py
│ │ │ └── td-reg_res50_rle-pretrained-8xb64-210e_coco-256x192.py
│ │ └── mpii
│ │ │ ├── resnet_mpii.md
│ │ │ ├── resnet_mpii.yml
│ │ │ ├── resnet_rle_mpii.md
│ │ │ ├── resnet_rle_mpii.yml
│ │ │ ├── td-reg_res101_8xb64-210e_mpii-256x256.py
│ │ │ ├── td-reg_res152_8xb64-210e_mpii-256x256.py
│ │ │ ├── td-reg_res50_8xb64-210e_mpii-256x256.py
│ │ │ └── td-reg_res50_rle-8xb64-210e_mpii-256x256.py
│ └── yoloxpose
│ │ ├── README.md
│ │ └── coco
│ │ │ ├── yoloxpose_coco.md
│ │ │ ├── yoloxpose_coco.yml
│ │ │ ├── yoloxpose_l_8xb32-300e_coco-640.py
│ │ │ ├── yoloxpose_m_8xb32-300e_coco-640.py
│ │ │ ├── yoloxpose_s_8xb32-300e_coco-640.py
│ │ │ └── yoloxpose_tiny_4xb64-300e_coco-416.py
├── body_3d_keypoint
│ ├── README.md
│ ├── image_pose_lift
│ │ ├── README.md
│ │ └── h36m
│ │ │ ├── image-pose-lift_tcn_8xb64-200e_h36m.py
│ │ │ ├── simplebaseline3d_h36m.md
│ │ │ └── simplebaseline3d_h36m.yml
│ ├── motionbert
│ │ ├── README.md
│ │ └── h36m
│ │ │ ├── motionbert_dstformer-243frm_8xb32-240e_h36m-original.py
│ │ │ ├── motionbert_dstformer-243frm_8xb32-240e_h36m.py
│ │ │ ├── motionbert_dstformer-ft-243frm_8xb32-120e_h36m-original.py
│ │ │ ├── motionbert_dstformer-ft-243frm_8xb32-120e_h36m.py
│ │ │ ├── motionbert_h36m.md
│ │ │ └── motionbert_h36m.yml
│ └── video_pose_lift
│ │ ├── README.md
│ │ └── h36m
│ │ │ ├── video-pose-lift_tcn-1frm-supv-cpn-ft_8xb128-160e_h36m.py
│ │ │ ├── video-pose-lift_tcn-243frm-supv-cpn-ft_8xb128-200e_h36m.py
│ │ │ ├── video-pose-lift_tcn-243frm-supv_8xb128-160e_h36m.py
│ │ │ ├── video-pose-lift_tcn-27frm-semi-supv-cpn-ft_8xb64-200e_h36m.py
│ │ │ ├── video-pose-lift_tcn-27frm-semi-supv_8xb64-200e_h36m.py
│ │ │ ├── video-pose-lift_tcn-27frm-supv_8xb128-160e_h36m.py
│ │ │ ├── video-pose-lift_tcn-81frm-supv_8xb128-160e_h36m.py
│ │ │ ├── videopose3d_h36m.md
│ │ │ └── videopose3d_h36m.yml
├── face_2d_keypoint
│ ├── README.md
│ ├── rtmpose
│ │ ├── README.md
│ │ ├── coco_wholebody_face
│ │ │ ├── rtmpose-m_8xb32-60e_coco-wholebody-face-256x256.py
│ │ │ ├── rtmpose_coco_wholebody_face.md
│ │ │ └── rtmpose_coco_wholebody_face.yml
│ │ ├── face6
│ │ │ ├── rtmpose-m_8xb256-120e_face6-256x256.py
│ │ │ ├── rtmpose-s_8xb256-120e_face6-256x256.py
│ │ │ ├── rtmpose-t_8xb256-120e_face6-256x256.py
│ │ │ ├── rtmpose_face6.md
│ │ │ └── rtmpose_face6.yml
│ │ ├── lapa
│ │ │ ├── rtmpose-m_8xb64-120e_lapa-256x256.py
│ │ │ ├── rtmpose_lapa.md
│ │ │ └── rtmpose_lapa.yml
│ │ └── wflw
│ │ │ ├── rtmpose-m_8xb64-60e_wflw-256x256.py
│ │ │ ├── rtmpose_wflw.md
│ │ │ └── rtmpose_wflw.yml
│ ├── topdown_heatmap
│ │ ├── 300w
│ │ │ ├── hrnetv2_300w.md
│ │ │ ├── hrnetv2_300w.yml
│ │ │ └── td-hm_hrnetv2-w18_8xb64-60e_300w-256x256.py
│ │ ├── 300wlp
│ │ │ ├── hrnetv2_300wlp.md
│ │ │ ├── hrnetv2_300wlp.yml
│ │ │ └── td-hm_hrnetv2-w18_8xb64-60e_300wlp-256x256.py
│ │ ├── README.md
│ │ ├── aflw
│ │ │ ├── hrnetv2_aflw.md
│ │ │ ├── hrnetv2_aflw.yml
│ │ │ ├── hrnetv2_dark_aflw.md
│ │ │ ├── hrnetv2_dark_aflw.yml
│ │ │ ├── td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py
│ │ │ └── td-hm_hrnetv2-w18_dark-8xb64-60e_aflw-256x256.py
│ │ ├── coco_wholebody_face
│ │ │ ├── hourglass_coco_wholebody_face.md
│ │ │ ├── hourglass_coco_wholebody_face.yml
│ │ │ ├── hrnetv2_coco_wholebody_face.md
│ │ │ ├── hrnetv2_coco_wholebody_face.yml
│ │ │ ├── hrnetv2_dark_coco_wholebody_face.md
│ │ │ ├── hrnetv2_dark_coco_wholebody_face.yml
│ │ │ ├── mobilenetv2_coco_wholebody_face.md
│ │ │ ├── mobilenetv2_coco_wholebody_face.yml
│ │ │ ├── resnet_coco_wholebody_face.md
│ │ │ ├── resnet_coco_wholebody_face.yml
│ │ │ ├── scnet_coco_wholebody_face.md
│ │ │ ├── scnet_coco_wholebody_face.yml
│ │ │ ├── td-hm_hourglass52_8xb32-60e_coco-wholebody-face-256x256.py
│ │ │ ├── td-hm_hrnetv2-w18_8xb32-60e_coco-wholebody-face-256x256.py
│ │ │ ├── td-hm_hrnetv2-w18_dark-8xb32-60e_coco-wholebody-face-256x256.py
│ │ │ ├── td-hm_mobilenetv2_8xb32-60e_coco-wholebody-face-256x256.py
│ │ │ ├── td-hm_res50_8xb32-60e_coco-wholebody-face-256x256.py
│ │ │ └── td-hm_scnet50_8xb32-60e_coco-wholebody-face-256x256.py
│ │ ├── cofw
│ │ │ ├── hrnetv2_cofw.md
│ │ │ ├── hrnetv2_cofw.yml
│ │ │ └── td-hm_hrnetv2-w18_8xb64-60e_cofw-256x256.py
│ │ └── wflw
│ │ │ ├── hrnetv2_awing_wflw.md
│ │ │ ├── hrnetv2_awing_wflw.yml
│ │ │ ├── hrnetv2_dark_wflw.md
│ │ │ ├── hrnetv2_dark_wflw.yml
│ │ │ ├── hrnetv2_wflw.md
│ │ │ ├── hrnetv2_wflw.yml
│ │ │ ├── td-hm_hrnetv2-w18_8xb64-60e_wflw-256x256.py
│ │ │ ├── td-hm_hrnetv2-w18_awing-8xb64-60e_wflw-256x256.py
│ │ │ └── td-hm_hrnetv2-w18_dark-8xb64-60e_wflw-256x256.py
│ └── topdown_regression
│ │ ├── README.md
│ │ └── wflw
│ │ │ ├── resnet_softwingloss_wflw.md
│ │ │ ├── resnet_softwingloss_wflw.yml
│ │ │ ├── resnet_wflw.md
│ │ │ ├── resnet_wflw.yml
│ │ │ ├── resnet_wingloss_wflw.md
│ │ │ ├── resnet_wingloss_wflw.yml
│ │ │ ├── td-reg_res50_8xb64-210e_wflw-256x256.py
│ │ │ ├── td-reg_res50_softwingloss_8xb64-210e_wflw-256x256.py
│ │ │ └── td-reg_res50_wingloss_8xb64-210e_wflw-256x256.py
├── fashion_2d_keypoint
│ ├── README.md
│ └── topdown_heatmap
│ │ ├── README.md
│ │ ├── deepfashion
│ │ │ ├── hrnet_deepfashion.md
│ │ │ ├── hrnet_deepfashion.yml
│ │ │ ├── resnet_deepfashion.md
│ │ │ ├── resnet_deepfashion.yml
│ │ │ ├── td-hm_hrnet-w32_8xb64-210e_deepfashion_full-256x192.py
│ │ │ ├── td-hm_hrnet-w32_8xb64-210e_deepfashion_lower-256x192.py
│ │ │ ├── td-hm_hrnet-w32_8xb64-210e_deepfashion_upper-256x192.py
│ │ │ ├── td-hm_hrnet-w32_udp_8xb64-210e_deepfashion_full-256x192.py
│ │ │ ├── td-hm_hrnet-w32_udp_8xb64-210e_deepfashion_lower-256x192.py
│ │ │ ├── td-hm_hrnet-w32_udp_8xb64-210e_deepfashion_upper-256x192.py
│ │ │ ├── td-hm_hrnet-w48_8xb32-210e_deepfashion_full-256x192.py
│ │ │ ├── td-hm_hrnet-w48_8xb32-210e_deepfashion_lower-256x192.py
│ │ │ ├── td-hm_hrnet-w48_8xb32-210e_deepfashion_upper-256x192.py
│ │ │ ├── td-hm_hrnet-w48_udp_8xb32-210e_deepfashion_full-256x192.py
│ │ │ ├── td-hm_hrnet-w48_udp_8xb32-210e_deepfashion_lower-256x192.py
│ │ │ ├── td-hm_hrnet-w48_udp_8xb32-210e_deepfashion_upper-256x192.py
│ │ │ ├── td-hm_res101_8xb64-210e_deepfashion_full-256x192.py
│ │ │ ├── td-hm_res101_8xb64-210e_deepfashion_lower-256x192.py
│ │ │ ├── td-hm_res101_8xb64-210e_deepfashion_upper-256x192.py
│ │ │ ├── td-hm_res152_8xb32-210e_deepfashion_full-256x192.py
│ │ │ ├── td-hm_res152_8xb32-210e_deepfashion_lower-256x192.py
│ │ │ ├── td-hm_res152_8xb32-210e_deepfashion_upper-256x192.py
│ │ │ ├── td-hm_res50_8xb64-210e_deepfashion_full-256x192.py
│ │ │ ├── td-hm_res50_8xb64-210e_deepfashion_lower-256x192.py
│ │ │ └── td-hm_res50_8xb64-210e_deepfashion_upper-256x192.py
│ │ └── deepfashion2
│ │ │ ├── res50_deepfashion2.md
│ │ │ ├── res50_deepfasion2.yml
│ │ │ ├── td-hm_res50_1xb64-210e_deepfasion2-long-sleeved-dress-256x192.py
│ │ │ ├── td-hm_res50_1xb64-210e_deepfasion2-skirt-256x192.py
│ │ │ ├── td-hm_res50_1xb64-210e_deepfasion2-vest-dress-256x192.py
│ │ │ ├── td-hm_res50_2xb64-210e_deepfasion2-trousers-256x192.py
│ │ │ ├── td-hm_res50_3xb64-210e_deepfasion2-shorts-256x192.py
│ │ │ ├── td-hm_res50_4xb64-210e_deepfasion2-short-sleeved-dress-256x192.py
│ │ │ ├── td-hm_res50_4xb64-210e_deepfasion2-sling-256x192.py
│ │ │ ├── td-hm_res50_4xb64-210e_deepfasion2-sling-dress-256x192.py
│ │ │ ├── td-hm_res50_4xb64-210e_deepfasion2-vest-256x192.py
│ │ │ ├── td-hm_res50_6xb64-210e_deepfasion2-short-sleeved-shirt-256x192.py
│ │ │ ├── td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-outwear-256x192.py
│ │ │ ├── td-hm_res50_8xb64-210e_deepfasion2-long-sleeved-shirt-256x192.py
│ │ │ └── td-hm_res50_8xb64-210e_deepfasion2-short-sleeved-outwear-256x192.py
├── hand_2d_keypoint
│ ├── README.md
│ ├── rtmpose
│ │ ├── README.md
│ │ ├── coco_wholebody_hand
│ │ │ ├── rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256.py
│ │ │ ├── rtmpose_coco_wholebody_hand.md
│ │ │ └── rtmpose_coco_wholebody_hand.yml
│ │ └── hand5
│ │ │ ├── rtmpose-m_8xb256-210e_hand5-256x256.py
│ │ │ ├── rtmpose_hand5.md
│ │ │ └── rtmpose_hand5.yml
│ ├── topdown_heatmap
│ │ ├── README.md
│ │ ├── coco_wholebody_hand
│ │ │ ├── hourglass_coco_wholebody_hand.md
│ │ │ ├── hourglass_coco_wholebody_hand.yml
│ │ │ ├── hrnetv2_coco_wholebody_hand.md
│ │ │ ├── hrnetv2_coco_wholebody_hand.yml
│ │ │ ├── hrnetv2_dark_coco_wholebody_hand.md
│ │ │ ├── hrnetv2_dark_coco_wholebody_hand.yml
│ │ │ ├── litehrnet_coco_wholebody_hand.md
│ │ │ ├── litehrnet_coco_wholebody_hand.yml
│ │ │ ├── mobilenetv2_coco_wholebody_hand.md
│ │ │ ├── mobilenetv2_coco_wholebody_hand.yml
│ │ │ ├── resnet_coco_wholebody_hand.md
│ │ │ ├── resnet_coco_wholebody_hand.yml
│ │ │ ├── scnet_coco_wholebody_hand.md
│ │ │ ├── scnet_coco_wholebody_hand.yml
│ │ │ ├── td-hm_hourglass52_8xb32-210e_coco-wholebody-hand-256x256.py
│ │ │ ├── td-hm_hrnetv2-w18_8xb32-210e_coco-wholebody-hand-256x256.py
│ │ │ ├── td-hm_hrnetv2-w18_dark-8xb32-210e_coco-wholebody-hand-256x256.py
│ │ │ ├── td-hm_litehrnet-w18_8xb32-210e_coco-wholebody-hand-256x256.py
│ │ │ ├── td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py
│ │ │ ├── td-hm_res50_8xb32-210e_coco-wholebody-hand-256x256.py
│ │ │ └── td-hm_scnet50_8xb32-210e_coco-wholebody-hand-256x256.py
│ │ ├── freihand2d
│ │ │ ├── resnet_freihand2d.md
│ │ │ ├── resnet_freihand2d.yml
│ │ │ └── td-hm_res50_8xb64-100e_freihand2d-224x224.py
│ │ ├── onehand10k
│ │ │ ├── hrnetv2_dark_onehand10k.md
│ │ │ ├── hrnetv2_dark_onehand10k.yml
│ │ │ ├── hrnetv2_onehand10k.md
│ │ │ ├── hrnetv2_onehand10k.yml
│ │ │ ├── hrnetv2_udp_onehand10k.md
│ │ │ ├── hrnetv2_udp_onehand10k.yml
│ │ │ ├── mobilenetv2_onehand10k.md
│ │ │ ├── mobilenetv2_onehand10k.yml
│ │ │ ├── resnet_onehand10k.md
│ │ │ ├── resnet_onehand10k.yml
│ │ │ ├── td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py
│ │ │ ├── td-hm_hrnetv2-w18_dark-8xb64-210e_onehand10k-256x256.py
│ │ │ ├── td-hm_hrnetv2-w18_udp-8xb64-210e_onehand10k-256x256.py
│ │ │ ├── td-hm_mobilenetv2_8xb64-210e_onehand10k-256x256.py
│ │ │ └── td-hm_res50_8xb32-210e_onehand10k-256x256.py
│ │ └── rhd2d
│ │ │ ├── hrnetv2_dark_rhd2d.md
│ │ │ ├── hrnetv2_dark_rhd2d.yml
│ │ │ ├── hrnetv2_rhd2d.md
│ │ │ ├── hrnetv2_rhd2d.yml
│ │ │ ├── hrnetv2_udp_rhd2d.md
│ │ │ ├── hrnetv2_udp_rhd2d.yml
│ │ │ ├── mobilenetv2_rhd2d.md
│ │ │ ├── mobilenetv2_rhd2d.yml
│ │ │ ├── resnet_rhd2d.md
│ │ │ ├── resnet_rhd2d.yml
│ │ │ ├── td-hm_hrnetv2-w18_8xb64-210e_rhd2d-256x256.py
│ │ │ ├── td-hm_hrnetv2-w18_dark-8xb64-210e_rhd2d-256x256.py
│ │ │ ├── td-hm_hrnetv2-w18_udp-8xb64-210e_rhd2d-256x256.py
│ │ │ ├── td-hm_mobilenetv2_8xb64-210e_rhd2d-256x256.py
│ │ │ └── td-hm_res50_8xb64-210e_rhd2d-256x256.py
│ └── topdown_regression
│ │ ├── README.md
│ │ ├── onehand10k
│ │ │ ├── resnet_onehand10k.md
│ │ │ ├── resnet_onehand10k.yml
│ │ │ └── td-reg_res50_8xb64-210e_onehand10k-256x256.py
│ │ └── rhd2d
│ │ │ ├── resnet_rhd2d.md
│ │ │ ├── resnet_rhd2d.yml
│ │ │ └── td-reg_res50_8xb64-210e_rhd2d-256x256.py
├── hand_3d_keypoint
│ ├── README.md
│ └── internet
│ │ ├── README.md
│ │ └── interhand3d
│ │ │ ├── internet_interhand3d.md
│ │ │ ├── internet_interhand3d.yml
│ │ │ └── internet_res50_4xb16-20e_interhand3d-256x256.py
├── hand_gesture
│ └── README.md
└── wholebody_2d_keypoint
│ ├── README.md
│ ├── dwpose
│ │ ├── README.md
│ │ ├── coco-wholebody
│ │ │ ├── s1_dis
│ │ │ │ ├── dwpose_l_dis_m_coco-256x192.py
│ │ │ │ └── dwpose_x_dis_l_coco-384x288.py
│ │ │ └── s2_dis
│ │ │ │ ├── dwpose_l-ll_coco-384x288.py
│ │ │ │ └── dwpose_m-mm_coco-256x192.py
│ │ └── ubody
│ │ │ ├── s1_dis
│ │ │ │ ├── dwpose_l_dis_m_coco-ubody-256x192.py
│ │ │ │ ├── dwpose_l_dis_s_coco-ubody-256x192.py
│ │ │ │ ├── dwpose_l_dis_t_coco-ubody-256x192.py
│ │ │ │ ├── dwpose_x_dis_l_coco-ubody-256x192.py
│ │ │ │ └── rtmpose_x_dis_l_coco-ubody-384x288.py
│ │ │ └── s2_dis
│ │ │ │ ├── dwpose_l-ll_coco-ubody-256x192.py
│ │ │ │ ├── dwpose_l-ll_coco-ubody-384x288.py
│ │ │ │ ├── dwpose_m-mm_coco-ubody-256x192.py
│ │ │ │ ├── dwpose_s-ss_coco-ubody-256x192.py
│ │ │ │ └── dwpose_t-tt_coco-ubody-256x192.py
│ ├── rtmpose
│ │ ├── README.md
│ │ ├── cocktail14
│ │ │ ├── rtmw-l_8xb1024-270e_cocktail14-256x192.py
│ │ │ ├── rtmw-l_8xb320-270e_cocktail14-384x288.py
│ │ │ ├── rtmw-m_8xb1024-270e_cocktail14-256x192.py
│ │ │ ├── rtmw-x_8xb320-270e_cocktail14-384x288.py
│ │ │ ├── rtmw-x_8xb704-270e_cocktail14-256x192.py
│ │ │ ├── rtmw_cocktail14.md
│ │ │ └── rtmw_cocktail14.yml
│ │ ├── coco-wholebody
│ │ │ ├── rtmpose-l_8xb32-270e_coco-wholebody-384x288.py
│ │ │ ├── rtmpose-l_8xb64-270e_coco-wholebody-256x192.py
│ │ │ ├── rtmpose-m_8xb64-270e_coco-wholebody-256x192.py
│ │ │ ├── rtmpose-x_8xb32-270e_coco-wholebody-384x288.py
│ │ │ ├── rtmpose_coco-wholebody.md
│ │ │ └── rtmpose_coco-wholebody.yml
│ │ └── ubody
│ │ │ ├── rtmpose-l_8xb32-270e_coco-ubody-wholebody-384x288.py
│ │ │ ├── rtmpose-l_8xb64-270e_coco-ubody-wholebody-256x192.py
│ │ │ ├── rtmpose-m_8xb64-270e_coco-ubody-wholebody-256x192.py
│ │ │ ├── rtmpose-s_8xb64-270e_coco-ubody-wholebody-256x192.py
│ │ │ ├── rtmpose-t_8xb64-270e_coco-ubody-wholebody-256x192.py
│ │ │ ├── rtmpose-x_8xb32-270e_coco-ubody-wholebody-384x288.py
│ │ │ └── rtmpose-x_8xb64-270e_coco-ubody-wholebody-256x192.py
│ └── topdown_heatmap
│ │ ├── README.md
│ │ ├── coco-wholebody
│ │ │ ├── cspnext-l_udp_8xb64-210e_coco-wholebody-256x192.py
│ │ │ ├── cspnext-m_udp_8xb64-210e_coco-wholebody-256x192.py
│ │ │ ├── cspnext_udp_coco-wholebody.md
│ │ │ ├── cspnext_udp_coco-wholebody.yml
│ │ │ ├── hrnet_coco-wholebody.md
│ │ │ ├── hrnet_coco-wholebody.yml
│ │ │ ├── hrnet_dark_coco-wholebody.md
│ │ │ ├── hrnet_dark_coco-wholebody.yml
│ │ │ ├── resnet_coco-wholebody.md
│ │ │ ├── resnet_coco-wholebody.yml
│ │ │ ├── td-hm_hrnet-w32_8xb64-210e_coco-wholebody-256x192.py
│ │ │ ├── td-hm_hrnet-w32_8xb64-210e_coco-wholebody-384x288.py
│ │ │ ├── td-hm_hrnet-w32_dark-8xb64-210e_coco-wholebody-256x192.py
│ │ │ ├── td-hm_hrnet-w48_8xb32-210e_coco-wholebody-256x192.py
│ │ │ ├── td-hm_hrnet-w48_8xb32-210e_coco-wholebody-384x288.py
│ │ │ ├── td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py
│ │ │ ├── td-hm_res101_8xb32-210e_coco-wholebody-256x192.py
│ │ │ ├── td-hm_res101_8xb32-210e_coco-wholebody-384x288.py
│ │ │ ├── td-hm_res152_8xb32-210e_coco-wholebody-256x192.py
│ │ │ ├── td-hm_res152_8xb32-210e_coco-wholebody-384x288.py
│ │ │ ├── td-hm_res50_8xb64-210e_coco-wholebody-256x192.py
│ │ │ ├── td-hm_res50_8xb64-210e_coco-wholebody-384x288.py
│ │ │ ├── td-hm_vipnas-mbv3_8xb64-210e_coco-wholebody-256x192.py
│ │ │ ├── td-hm_vipnas-mbv3_dark-8xb64-210e_coco-wholebody-256x192.py
│ │ │ ├── td-hm_vipnas-res50_8xb64-210e_coco-wholebody-256x192.py
│ │ │ ├── td-hm_vipnas-res50_dark-8xb64-210e_coco-wholebody-256x192.py
│ │ │ ├── vipnas_coco-wholebody.md
│ │ │ ├── vipnas_coco-wholebody.yml
│ │ │ ├── vipnas_dark_coco-wholebody.md
│ │ │ └── vipnas_dark_coco-wholebody.yml
│ │ └── ubody2d
│ │ │ ├── hrnet_coco-wholebody.yml
│ │ │ ├── hrnet_ubody-coco-wholebody.md
│ │ │ └── td-hm_hrnet-w32_8xb64-210e_ubody-256x192.py
├── dataset-index.yml
├── demo
├── MMPose_Tutorial.ipynb
├── body3d_pose_lifter_demo.py
├── bottomup_demo.py
├── docs
│ ├── en
│ │ ├── 2d_animal_demo.md
│ │ ├── 2d_face_demo.md
│ │ ├── 2d_hand_demo.md
│ │ ├── 2d_human_pose_demo.md
│ │ ├── 2d_wholebody_pose_demo.md
│ │ ├── 3d_hand_demo.md
│ │ ├── 3d_human_pose_demo.md
│ │ ├── mmdet_modelzoo.md
│ │ └── webcam_api_demo.md
│ └── zh_cn
│ │ ├── 2d_animal_demo.md
│ │ ├── 2d_face_demo.md
│ │ ├── 2d_hand_demo.md
│ │ ├── 2d_human_pose_demo.md
│ │ ├── 2d_wholebody_pose_demo.md
│ │ ├── 3d_human_pose_demo.md
│ │ ├── mmdet_modelzoo.md
│ │ └── webcam_api_demo.md
├── hand3d_internet_demo.py
├── image_demo.py
├── inferencer_demo.py
├── mmdetection_cfg
│ ├── cascade_rcnn_x101_64x4d_fpn_1class.py
│ ├── cascade_rcnn_x101_64x4d_fpn_coco.py
│ ├── faster_rcnn_r50_fpn_1class.py
│ ├── faster_rcnn_r50_fpn_coco.py
│ ├── mask_rcnn_r50_fpn_2x_coco.py
│ ├── rtmdet_m_640-8xb32_coco-person.py
│ ├── rtmdet_m_8xb32-300e_coco.py
│ ├── rtmdet_nano_320-8xb32_coco-person.py
│ ├── rtmdet_nano_320-8xb32_hand.py
│ ├── rtmdet_tiny_8xb32-300e_coco.py
│ ├── ssdlite_mobilenetv2-scratch_8xb24-600e_coco.py
│ ├── ssdlite_mobilenetv2_scratch_600e_onehand.py
│ ├── yolov3_d53_320_273e_coco.py
│ └── yolox-s_8xb8-300e_coco-face.py
├── mmtracking_cfg
│ ├── deepsort_faster-rcnn_fpn_4e_mot17-private-half.py
│ └── tracktor_faster-rcnn_r50_fpn_4e_mot17-private.py
├── resources
│ ├── demo.mp4
│ ├── demo_coco.gif
│ └── sunglasses.jpg
└── topdown_demo_with_mmdet.py
├── docker
├── Dockerfile
└── serve
│ ├── Dockerfile
│ ├── config.properties
│ └── entrypoint.sh
├── docs
├── en
│ ├── .readthedocs.yaml
│ ├── Makefile
│ ├── _static
│ │ ├── css
│ │ │ └── readthedocs.css
│ │ └── images
│ │ │ └── mmpose-logo.png
│ ├── advanced_guides
│ │ ├── codecs.md
│ │ ├── customize_datasets.md
│ │ ├── customize_evaluation.md
│ │ ├── customize_logging.md
│ │ ├── customize_optimizer.md
│ │ ├── customize_transforms.md
│ │ ├── dataflow.md
│ │ └── implement_new_models.md
│ ├── api.rst
│ ├── collect_modelzoo.py
│ ├── collect_projects.py
│ ├── conf.py
│ ├── contribution_guide.md
│ ├── dataset_zoo
│ │ ├── 2d_animal_keypoint.md
│ │ ├── 2d_body_keypoint.md
│ │ ├── 2d_face_keypoint.md
│ │ ├── 2d_fashion_landmark.md
│ │ ├── 2d_hand_keypoint.md
│ │ ├── 2d_wholebody_keypoint.md
│ │ ├── 3d_body_keypoint.md
│ │ ├── 3d_body_mesh.md
│ │ ├── 3d_hand_keypoint.md
│ │ └── 3d_wholebody_keypoint.md
│ ├── faq.md
│ ├── guide_to_framework.md
│ ├── index.rst
│ ├── installation.md
│ ├── make.bat
│ ├── merge_docs.sh
│ ├── migration.md
│ ├── notes
│ │ ├── benchmark.md
│ │ ├── changelog.md
│ │ ├── ecosystem.md
│ │ └── pytorch_2.md
│ ├── overview.md
│ ├── projects
│ │ └── projects.md
│ ├── quick_run.md
│ ├── stats.py
│ ├── switch_language.md
│ ├── user_guides
│ │ ├── configs.md
│ │ ├── dataset_tools.md
│ │ ├── how_to_deploy.md
│ │ ├── inference.md
│ │ ├── label_studio.md
│ │ ├── mixed_datasets.md
│ │ ├── model_analysis.md
│ │ ├── prepare_datasets.md
│ │ └── train_and_test.md
│ └── visualization.md
├── src
│ └── papers
│ │ ├── algorithms
│ │ │ ├── associative_embedding.md
│ │ │ ├── awingloss.md
│ │ │ ├── cid.md
│ │ │ ├── cpm.md
│ │ │ ├── dark.md
│ │ │ ├── debias_ipr.md
│ │ │ ├── deeppose.md
│ │ │ ├── dekr.md
│ │ │ ├── dsnt.md
│ │ │ ├── dwpose.md
│ │ │ ├── edpose.md
│ │ │ ├── higherhrnet.md
│ │ │ ├── hmr.md
│ │ │ ├── hourglass.md
│ │ │ ├── hrnet.md
│ │ │ ├── hrnetv2.md
│ │ │ ├── internet.md
│ │ │ ├── ipr.md
│ │ │ ├── litehrnet.md
│ │ │ ├── motionbert.md
│ │ │ ├── mspn.md
│ │ │ ├── posewarper.md
│ │ │ ├── rle.md
│ │ │ ├── rsn.md
│ │ │ ├── rtmo.md
│ │ │ ├── rtmpose.md
│ │ │ ├── scnet.md
│ │ │ ├── simcc.md
│ │ │ ├── simplebaseline2d.md
│ │ │ ├── simplebaseline3d.md
│ │ │ ├── softwingloss.md
│ │ │ ├── udp.md
│ │ │ ├── videopose3d.md
│ │ │ ├── vipnas.md
│ │ │ ├── vitpose.md
│ │ │ ├── voxelpose.md
│ │ │ ├── wingloss.md
│ │ │ └── yolopose.md
│ │ ├── backbones
│ │ │ ├── alexnet.md
│ │ │ ├── cpm.md
│ │ │ ├── higherhrnet.md
│ │ │ ├── hourglass.md
│ │ │ ├── hrformer.md
│ │ │ ├── hrnet.md
│ │ │ ├── hrnetv2.md
│ │ │ ├── litehrnet.md
│ │ │ ├── mobilenetv2.md
│ │ │ ├── mspn.md
│ │ │ ├── pvt.md
│ │ │ ├── pvtv2.md
│ │ │ ├── resnest.md
│ │ │ ├── resnet.md
│ │ │ ├── resnetv1d.md
│ │ │ ├── resnext.md
│ │ │ ├── rsn.md
│ │ │ ├── scnet.md
│ │ │ ├── seresnet.md
│ │ │ ├── shufflenetv1.md
│ │ │ ├── shufflenetv2.md
│ │ │ ├── swin.md
│ │ │ ├── vgg.md
│ │ │ └── vipnas.md
│ │ ├── datasets
│ │ │ ├── 300vw.md
│ │ │ ├── 300w.md
│ │ │ ├── 300wlp.md
│ │ │ ├── aflw.md
│ │ │ ├── aic.md
│ │ │ ├── animalkingdom.md
│ │ │ ├── animalpose.md
│ │ │ ├── ap10k.md
│ │ │ ├── atrw.md
│ │ │ ├── campus_and_shelf.md
│ │ │ ├── coco.md
│ │ │ ├── coco_wholebody.md
│ │ │ ├── coco_wholebody_face.md
│ │ │ ├── coco_wholebody_hand.md
│ │ │ ├── cofw.md
│ │ │ ├── crowdpose.md
│ │ │ ├── deepfashion.md
│ │ │ ├── exlpose.md
│ │ │ ├── fly.md
│ │ │ ├── freihand.md
│ │ │ ├── h36m.md
│ │ │ ├── halpe.md
│ │ │ ├── horse10.md
│ │ │ ├── human_art.md
│ │ │ ├── interhand.md
│ │ │ ├── jhmdb.md
│ │ │ ├── lapa.md
│ │ │ ├── locust.md
│ │ │ ├── macaque.md
│ │ │ ├── mhp.md
│ │ │ ├── mpi_inf_3dhp.md
│ │ │ ├── mpii.md
│ │ │ ├── mpii_trb.md
│ │ │ ├── ochuman.md
│ │ │ ├── onehand10k.md
│ │ │ ├── panoptic.md
│ │ │ ├── panoptic_body3d.md
│ │ │ ├── posetrack18.md
│ │ │ ├── rhd.md
│ │ │ ├── ubody.md
│ │ │ ├── wflw.md
│ │ │ └── zebra.md
│ │ └── techniques
│ │ │ ├── albumentations.md
│ │ │ ├── awingloss.md
│ │ │ ├── dark.md
│ │ │ ├── fp16.md
│ │ │ ├── fpn.md
│ │ │ ├── rle.md
│ │ │ ├── smoothnet.md
│ │ │ ├── softwingloss.md
│ │ │ ├── udp.md
│ │ │ └── wingloss.md
└── zh_cn
│ ├── .readthedocs.yaml
│ ├── Makefile
│ ├── _static
│ │ ├── css
│ │ │ └── readthedocs.css
│ │ └── images
│ │ │ └── mmpose-logo.png
│ ├── advanced_guides
│ │ ├── codecs.md
│ │ ├── customize_datasets.md
│ │ ├── customize_evaluation.md
│ │ ├── customize_logging.md
│ │ ├── customize_optimizer.md
│ │ ├── customize_transforms.md
│ │ ├── dataflow.md
│ │ └── implement_new_models.md
│ ├── api.rst
│ ├── collect_modelzoo.py
│ ├── collect_projects.py
│ ├── conf.py
│ ├── contribution_guide.md
│ ├── dataset_zoo
│ │ ├── 2d_animal_keypoint.md
│ │ ├── 2d_body_keypoint.md
│ │ ├── 2d_face_keypoint.md
│ │ ├── 2d_fashion_landmark.md
│ │ ├── 2d_hand_keypoint.md
│ │ ├── 2d_wholebody_keypoint.md
│ │ ├── 3d_body_keypoint.md
│ │ ├── 3d_body_mesh.md
│ │ └── 3d_hand_keypoint.md
│ ├── faq.md
│ ├── guide_to_framework.md
│ ├── index.rst
│ ├── installation.md
│ ├── make.bat
│ ├── merge_docs.sh
│ ├── migration.md
│ ├── notes
│ │ ├── changelog.md
│ │ ├── ecosystem.md
│ │ ├── projects.md
│ │ └── pytorch_2.md
│ ├── overview.md
│ ├── quick_run.md
│ ├── stats.py
│ ├── switch_language.md
│ └── user_guides
│ │ ├── configs.md
│ │ ├── dataset_tools.md
│ │ ├── how_to_deploy.md
│ │ ├── inference.md
│ │ ├── label_studio.md
│ │ ├── mixed_datasets.md
│ │ ├── model_analysis.md
│ │ ├── prepare_datasets.md
│ │ └── train_and_test.md
├── mmpose
├── __init__.py
├── apis
│ ├── __init__.py
│ ├── inference.py
│ ├── inference_3d.py
│ ├── inference_tracking.py
│ ├── inferencers
│ │ ├── __init__.py
│ │ ├── base_mmpose_inferencer.py
│ │ ├── hand3d_inferencer.py
│ │ ├── mmpose_inferencer.py
│ │ ├── pose2d_inferencer.py
│ │ ├── pose3d_inferencer.py
│ │ └── utils
│ │ │ ├── __init__.py
│ │ │ ├── default_det_models.py
│ │ │ └── get_model_alias.py
│ └── visualization.py
├── codecs
│ ├── __init__.py
│ ├── annotation_processors.py
│ ├── associative_embedding.py
│ ├── base.py
│ ├── decoupled_heatmap.py
│ ├── edpose_label.py
│ ├── hand_3d_heatmap.py
│ ├── image_pose_lifting.py
│ ├── integral_regression_label.py
│ ├── megvii_heatmap.py
│ ├── motionbert_label.py
│ ├── msra_heatmap.py
│ ├── regression_label.py
│ ├── simcc_label.py
│ ├── spr.py
│ ├── udp_heatmap.py
│ ├── utils
│ │ ├── __init__.py
│ │ ├── camera_image_projection.py
│ │ ├── gaussian_heatmap.py
│ │ ├── instance_property.py
│ │ ├── offset_heatmap.py
│ │ ├── post_processing.py
│ │ └── refinement.py
│ └── video_pose_lifting.py
├── configs
│ ├── _base_
│ │ └── default_runtime.py
│ ├── body_2d_keypoint
│ │ ├── rtmpose
│ │ │ └── coco
│ │ │ │ ├── rtmpose_m_8xb256-420e_coco-256x192.py
│ │ │ │ └── rtmpose_s_8xb256_420e_aic_coco_256x192.py
│ │ └── topdown_heatmap
│ │ │ └── coco
│ │ │ │ └── td-hm_hrnet-w48_udp-8xb32-210e_coco-256x192.py
│ └── wholebody_2d_keypoint
│ │ └── rtmpose
│ │ │ └── cocktail13
│ │ │ │ ├── rtmw-l_8xb1024-270e_cocktail14-256x192.py
│ │ │ │ ├── rtmw-l_8xb320-270e_cocktail14-384x288.py
│ │ │ │ ├── rtmw-m_8xb1024-270e_cocktail14-256x192.py
│ │ │ │ ├── rtmw-x_8xb320-270e_cocktail14-384x288.py
│ │ │ │ └── rtmw-x_8xb704-270e_cocktail14-256x192.py
├── datasets
│ ├── __init__.py
│ ├── builder.py
│ ├── dataset_wrappers.py
│ ├── datasets
│ │ ├── __init__.py
│ │ ├── animal
│ │ │ ├── __init__.py
│ │ │ ├── animalkingdom_dataset.py
│ │ │ ├── animalpose_dataset.py
│ │ │ ├── ap10k_dataset.py
│ │ │ ├── atrw_dataset.py
│ │ │ ├── fly_dataset.py
│ │ │ ├── horse10_dataset.py
│ │ │ ├── locust_dataset.py
│ │ │ ├── macaque_dataset.py
│ │ │ └── zebra_dataset.py
│ │ ├── base
│ │ │ ├── __init__.py
│ │ │ ├── base_coco_style_dataset.py
│ │ │ └── base_mocap_dataset.py
│ │ ├── body
│ │ │ ├── __init__.py
│ │ │ ├── aic_dataset.py
│ │ │ ├── coco_dataset.py
│ │ │ ├── crowdpose_dataset.py
│ │ │ ├── exlpose_dataset.py
│ │ │ ├── humanart21_dataset.py
│ │ │ ├── humanart_dataset.py
│ │ │ ├── jhmdb_dataset.py
│ │ │ ├── mhp_dataset.py
│ │ │ ├── mpii_dataset.py
│ │ │ ├── mpii_trb_dataset.py
│ │ │ ├── ochuman_dataset.py
│ │ │ ├── posetrack18_dataset.py
│ │ │ └── posetrack18_video_dataset.py
│ │ ├── body3d
│ │ │ ├── __init__.py
│ │ │ └── h36m_dataset.py
│ │ ├── face
│ │ │ ├── __init__.py
│ │ │ ├── aflw_dataset.py
│ │ │ ├── coco_wholebody_face_dataset.py
│ │ │ ├── cofw_dataset.py
│ │ │ ├── face_300vw_dataset.py
│ │ │ ├── face_300w_dataset.py
│ │ │ ├── face_300wlp_dataset.py
│ │ │ ├── lapa_dataset.py
│ │ │ └── wflw_dataset.py
│ │ ├── fashion
│ │ │ ├── __init__.py
│ │ │ ├── deepfashion2_dataset.py
│ │ │ └── deepfashion_dataset.py
│ │ ├── hand
│ │ │ ├── __init__.py
│ │ │ ├── coco_wholebody_hand_dataset.py
│ │ │ ├── freihand_dataset.py
│ │ │ ├── interhand2d_double_dataset.py
│ │ │ ├── onehand10k_dataset.py
│ │ │ ├── panoptic_hand2d_dataset.py
│ │ │ └── rhd2d_dataset.py
│ │ ├── hand3d
│ │ │ ├── __init__.py
│ │ │ └── interhand_3d_dataset.py
│ │ ├── utils.py
│ │ ├── wholebody
│ │ │ ├── __init__.py
│ │ │ ├── coco_wholebody_dataset.py
│ │ │ ├── halpe_dataset.py
│ │ │ └── ubody2d_dataset.py
│ │ └── wholebody3d
│ │ │ ├── __init__.py
│ │ │ ├── h3wb_dataset.py
│ │ │ └── ubody3d_dataset.py
│ ├── samplers.py
│ └── transforms
│ │ ├── __init__.py
│ │ ├── bottomup_transforms.py
│ │ ├── common_transforms.py
│ │ ├── converting.py
│ │ ├── formatting.py
│ │ ├── hand_transforms.py
│ │ ├── loading.py
│ │ ├── mix_img_transforms.py
│ │ ├── pose3d_transforms.py
│ │ └── topdown_transforms.py
├── engine
│ ├── __init__.py
│ ├── hooks
│ │ ├── __init__.py
│ │ ├── badcase_hook.py
│ │ ├── ema_hook.py
│ │ ├── mode_switch_hooks.py
│ │ ├── sync_norm_hook.py
│ │ └── visualization_hook.py
│ ├── optim_wrappers
│ │ ├── __init__.py
│ │ ├── force_default_constructor.py
│ │ └── layer_decay_optim_wrapper.py
│ └── schedulers
│ │ ├── __init__.py
│ │ ├── constant_lr.py
│ │ └── quadratic_warmup.py
├── evaluation
│ ├── __init__.py
│ ├── evaluators
│ │ ├── __init__.py
│ │ └── mutli_dataset_evaluator.py
│ ├── functional
│ │ ├── __init__.py
│ │ ├── keypoint_eval.py
│ │ ├── mesh_eval.py
│ │ ├── nms.py
│ │ └── transforms.py
│ └── metrics
│ │ ├── __init__.py
│ │ ├── coco_metric.py
│ │ ├── coco_wholebody_metric.py
│ │ ├── hand_metric.py
│ │ ├── keypoint_2d_metrics.py
│ │ ├── keypoint_3d_metrics.py
│ │ ├── keypoint_partition_metric.py
│ │ ├── posetrack18_metric.py
│ │ └── simple_keypoint_3d_metrics.py
├── models
│ ├── __init__.py
│ ├── backbones
│ │ ├── __init__.py
│ │ ├── alexnet.py
│ │ ├── base_backbone.py
│ │ ├── cpm.py
│ │ ├── csp_darknet.py
│ │ ├── cspnext.py
│ │ ├── dstformer.py
│ │ ├── hourglass.py
│ │ ├── hourglass_ae.py
│ │ ├── hrformer.py
│ │ ├── hrnet.py
│ │ ├── litehrnet.py
│ │ ├── mobilenet_v2.py
│ │ ├── mobilenet_v3.py
│ │ ├── mspn.py
│ │ ├── pvt.py
│ │ ├── regnet.py
│ │ ├── resnest.py
│ │ ├── resnet.py
│ │ ├── resnext.py
│ │ ├── rsn.py
│ │ ├── scnet.py
│ │ ├── seresnet.py
│ │ ├── seresnext.py
│ │ ├── shufflenet_v1.py
│ │ ├── shufflenet_v2.py
│ │ ├── swin.py
│ │ ├── tcn.py
│ │ ├── utils
│ │ │ ├── __init__.py
│ │ │ ├── channel_shuffle.py
│ │ │ ├── ckpt_convert.py
│ │ │ ├── inverted_residual.py
│ │ │ ├── make_divisible.py
│ │ │ ├── se_layer.py
│ │ │ └── utils.py
│ │ ├── v2v_net.py
│ │ ├── vgg.py
│ │ ├── vipnas_mbv3.py
│ │ └── vipnas_resnet.py
│ ├── builder.py
│ ├── data_preprocessors
│ │ ├── __init__.py
│ │ ├── batch_augmentation.py
│ │ └── data_preprocessor.py
│ ├── distillers
│ │ ├── __init__.py
│ │ └── dwpose_distiller.py
│ ├── heads
│ │ ├── __init__.py
│ │ ├── base_head.py
│ │ ├── coord_cls_heads
│ │ │ ├── __init__.py
│ │ │ ├── rtmcc_head.py
│ │ │ ├── rtmw_head.py
│ │ │ └── simcc_head.py
│ │ ├── heatmap_heads
│ │ │ ├── __init__.py
│ │ │ ├── ae_head.py
│ │ │ ├── cid_head.py
│ │ │ ├── cpm_head.py
│ │ │ ├── heatmap_head.py
│ │ │ ├── internet_head.py
│ │ │ ├── mspn_head.py
│ │ │ └── vipnas_head.py
│ │ ├── hybrid_heads
│ │ │ ├── __init__.py
│ │ │ ├── dekr_head.py
│ │ │ ├── rtmo_head.py
│ │ │ ├── vis_head.py
│ │ │ └── yoloxpose_head.py
│ │ ├── regression_heads
│ │ │ ├── __init__.py
│ │ │ ├── dsnt_head.py
│ │ │ ├── integral_regression_head.py
│ │ │ ├── motion_regression_head.py
│ │ │ ├── regression_head.py
│ │ │ ├── rle_head.py
│ │ │ ├── temporal_regression_head.py
│ │ │ └── trajectory_regression_head.py
│ │ └── transformer_heads
│ │ │ ├── __init__.py
│ │ │ ├── base_transformer_head.py
│ │ │ ├── edpose_head.py
│ │ │ └── transformers
│ │ │ │ ├── __init__.py
│ │ │ │ ├── deformable_detr_layers.py
│ │ │ │ ├── detr_layers.py
│ │ │ │ └── utils.py
│ ├── losses
│ │ ├── __init__.py
│ │ ├── ae_loss.py
│ │ ├── bbox_loss.py
│ │ ├── classification_loss.py
│ │ ├── fea_dis_loss.py
│ │ ├── heatmap_loss.py
│ │ ├── logit_dis_loss.py
│ │ ├── loss_wrappers.py
│ │ └── regression_loss.py
│ ├── necks
│ │ ├── __init__.py
│ │ ├── channel_mapper.py
│ │ ├── cspnext_pafpn.py
│ │ ├── fmap_proc_neck.py
│ │ ├── fpn.py
│ │ ├── gap_neck.py
│ │ ├── hybrid_encoder.py
│ │ ├── posewarper_neck.py
│ │ └── yolox_pafpn.py
│ ├── pose_estimators
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── bottomup.py
│ │ ├── pose_lifter.py
│ │ └── topdown.py
│ ├── task_modules
│ │ ├── __init__.py
│ │ ├── assigners
│ │ │ ├── __init__.py
│ │ │ ├── metric_calculators.py
│ │ │ └── sim_ota_assigner.py
│ │ └── prior_generators
│ │ │ ├── __init__.py
│ │ │ └── mlvl_point_generator.py
│ └── utils
│ │ ├── __init__.py
│ │ ├── check_and_update_config.py
│ │ ├── ckpt_convert.py
│ │ ├── csp_layer.py
│ │ ├── geometry.py
│ │ ├── misc.py
│ │ ├── ops.py
│ │ ├── realnvp.py
│ │ ├── regularizations.py
│ │ ├── reparam_layers.py
│ │ ├── rtmcc_block.py
│ │ ├── transformer.py
│ │ └── tta.py
├── registry.py
├── structures
│ ├── __init__.py
│ ├── bbox
│ │ ├── __init__.py
│ │ ├── bbox_overlaps.py
│ │ └── transforms.py
│ ├── keypoint
│ │ ├── __init__.py
│ │ └── transforms.py
│ ├── multilevel_pixel_data.py
│ ├── pose_data_sample.py
│ └── utils.py
├── testing
│ ├── __init__.py
│ └── _utils.py
├── utils
│ ├── __init__.py
│ ├── camera.py
│ ├── collect_env.py
│ ├── config_utils.py
│ ├── dist_utils.py
│ ├── hooks.py
│ ├── logger.py
│ ├── setup_env.py
│ ├── tensor_utils.py
│ ├── timer.py
│ └── typing.py
├── version.py
└── visualization
│ ├── __init__.py
│ ├── fast_visualizer.py
│ ├── local_visualizer.py
│ ├── local_visualizer_3d.py
│ ├── opencv_backend_visualizer.py
│ └── simcc_vis.py
├── model-index.yml
├── projects
├── README.md
├── awesome-mmpose
│ └── README.md
├── example_project
│ ├── README.md
│ ├── configs
│ │ └── example-head-loss_hrnet-w32_8xb64-210e_coco-256x192.py
│ └── models
│ │ ├── __init__.py
│ │ ├── example_head.py
│ │ └── example_loss.py
├── faq.md
├── just_dance
│ ├── README.md
│ ├── app.py
│ ├── calculate_similarity.py
│ ├── configs
│ │ ├── _base_
│ │ └── rtmdet-nano_one-person.py
│ ├── just_dance_demo.ipynb
│ ├── process_video.py
│ └── utils.py
├── mmpose4aigc
│ ├── README.md
│ ├── README_CN.md
│ ├── download_models.sh
│ ├── install_posetracker_linux.sh
│ ├── mmpose_openpose.sh
│ ├── mmpose_style_skeleton.sh
│ └── openpose_visualization.py
├── pose_anything
│ ├── README.md
│ ├── configs
│ │ ├── demo.py
│ │ └── demo_b.py
│ ├── datasets
│ │ ├── __init__.py
│ │ ├── builder.py
│ │ ├── datasets
│ │ │ ├── __init__.py
│ │ │ └── mp100
│ │ │ │ ├── __init__.py
│ │ │ │ ├── fewshot_base_dataset.py
│ │ │ │ ├── fewshot_dataset.py
│ │ │ │ ├── test_base_dataset.py
│ │ │ │ ├── test_dataset.py
│ │ │ │ ├── transformer_base_dataset.py
│ │ │ │ └── transformer_dataset.py
│ │ └── pipelines
│ │ │ ├── __init__.py
│ │ │ ├── post_transforms.py
│ │ │ └── top_down_transform.py
│ ├── demo.py
│ ├── models
│ │ ├── __init__.py
│ │ ├── backbones
│ │ │ ├── __init__.py
│ │ │ ├── simmim.py
│ │ │ ├── swin_mlp.py
│ │ │ ├── swin_transformer.py
│ │ │ ├── swin_transformer_moe.py
│ │ │ ├── swin_transformer_v2.py
│ │ │ └── swin_utils.py
│ │ ├── detectors
│ │ │ ├── __init__.py
│ │ │ └── pam.py
│ │ ├── keypoint_heads
│ │ │ ├── __init__.py
│ │ │ └── head.py
│ │ └── utils
│ │ │ ├── __init__.py
│ │ │ ├── builder.py
│ │ │ ├── encoder_decoder.py
│ │ │ ├── positional_encoding.py
│ │ │ └── transformer.py
│ └── tools
│ │ └── visualization.py
├── rtmo
│ └── README.md
├── rtmpose
│ ├── README.md
│ ├── README_CN.md
│ ├── app.py
│ ├── benchmark
│ │ ├── README.md
│ │ └── README_CN.md
│ ├── examples
│ │ ├── PoseTracker-Android-Prototype
│ │ │ └── README.md
│ │ ├── README.md
│ │ ├── RTMPose-Deploy
│ │ │ ├── README.md
│ │ │ ├── README_CN.md
│ │ │ └── Windows
│ │ │ │ ├── OnnxRumtime-CPU
│ │ │ │ │ └── src
│ │ │ │ │ │ └── RTMPoseOnnxRuntime
│ │ │ │ │ │ │ ├── characterset_convert.h
│ │ │ │ │ │ │ ├── main.cpp
│ │ │ │ │ │ │ ├── rtmdet_onnxruntime.cpp
│ │ │ │ │ │ │ ├── rtmdet_onnxruntime.h
│ │ │ │ │ │ │ ├── rtmpose_onnxruntime.cpp
│ │ │ │ │ │ │ ├── rtmpose_onnxruntime.h
│ │ │ │ │ │ │ ├── rtmpose_tracker_onnxruntime.cpp
│ │ │ │ │ │ │ ├── rtmpose_tracker_onnxruntime.h
│ │ │ │ │ │ │ └── rtmpose_utils.h
│ │ │ │ └── TensorRT
│ │ │ │ │ ├── README.md
│ │ │ │ │ ├── python
│ │ │ │ │ │ └── convert_rtmdet.py
│ │ │ │ │ └── src
│ │ │ │ │ │ └── RTMPoseTensorRT
│ │ │ │ │ │ │ ├── inference.cpp
│ │ │ │ │ │ │ ├── inference.h
│ │ │ │ │ │ │ ├── main.cpp
│ │ │ │ │ │ │ ├── rtmdet.cpp
│ │ │ │ │ │ │ ├── rtmdet.h
│ │ │ │ │ │ │ ├── rtmpose.cpp
│ │ │ │ │ │ │ ├── rtmpose.h
│ │ │ │ │ │ │ ├── utils.cpp
│ │ │ │ │ │ │ └── utils.h
│ │ ├── onnxruntime
│ │ │ ├── README.md
│ │ │ ├── README_CN.md
│ │ │ ├── human-pose.jpeg
│ │ │ ├── main.py
│ │ │ └── requirements.txt
│ │ └── rtmlib
│ │ │ └── README.md
│ ├── rtmdet
│ │ ├── README.md
│ │ ├── hand
│ │ │ └── rtmdet_nano_320-8xb32_hand.py
│ │ └── person
│ │ │ ├── humanart_detection.py
│ │ │ ├── rtmdet_l_8xb32-300e_humanart.py
│ │ │ ├── rtmdet_m_640-8xb32_coco-person.py
│ │ │ ├── rtmdet_m_8xb32-300e_humanart.py
│ │ │ ├── rtmdet_nano_320-8xb32_coco-person.py
│ │ │ ├── rtmdet_s_8xb32-300e_humanart.py
│ │ │ ├── rtmdet_tiny_8xb32-300e_humanart.py
│ │ │ └── rtmdet_x_8xb32-300e_humanart.py
│ ├── rtmpose
│ │ ├── animal_2d_keypoint
│ │ │ └── rtmpose-m_8xb64-210e_ap10k-256x256.py
│ │ ├── body_2d_keypoint
│ │ │ ├── rtmpose-l_8xb256-420e_coco-256x192.py
│ │ │ ├── rtmpose-l_8xb256-420e_coco-384x288.py
│ │ │ ├── rtmpose-l_8xb512-700e_body8-halpe26-256x192.py
│ │ │ ├── rtmpose-l_8xb512-700e_body8-halpe26-384x288.py
│ │ │ ├── rtmpose-m_8xb256-420e_coco-256x192.py
│ │ │ ├── rtmpose-m_8xb256-420e_coco-384x288.py
│ │ │ ├── rtmpose-m_8xb512-700e_body8-halpe26-256x192.py
│ │ │ ├── rtmpose-m_8xb512-700e_body8-halpe26-384x288.py
│ │ │ ├── rtmpose-s_8xb1024-700e_body8-halpe26-256x192.py
│ │ │ ├── rtmpose-s_8xb256-420e_coco-256x192.py
│ │ │ ├── rtmpose-t_8xb1024-700e_body8-halpe26-256x192.py
│ │ │ ├── rtmpose-t_8xb256-420e_coco-256x192.py
│ │ │ ├── rtmpose-x_8xb256-700e_body8-halpe26-384x288.py
│ │ │ └── rtmpose-x_8xb256-700e_coco-384x288.py
│ │ ├── face_2d_keypoint
│ │ │ ├── rtmpose-m_8xb256-120e_lapa-256x256.py
│ │ │ ├── rtmpose-s_8xb256-120e_lapa-256x256.py
│ │ │ └── rtmpose-t_8xb256-120e_lapa-256x256.py
│ │ ├── hand_2d_keypoint
│ │ │ └── rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256.py
│ │ ├── pretrain_cspnext_udp
│ │ │ ├── cspnext-l_udp_8xb256-210e_coco-256x192.py
│ │ │ ├── cspnext-m_udp_8xb256-210e_coco-256x192.py
│ │ │ ├── cspnext-s_udp_8xb256-210e_coco-256x192.py
│ │ │ └── cspnext-tiny_udp_8xb256-210e_coco-256x192.py
│ │ ├── pruning
│ │ │ ├── README.md
│ │ │ ├── README_CN.md
│ │ │ ├── group_fisher_deploy_rtmpose-s_8xb256-420e_aic-coco-256x192.py
│ │ │ ├── group_fisher_deploy_rtmpose-s_8xb256-420e_coco-256x192.py
│ │ │ ├── group_fisher_finetune_rtmpose-s_8xb256-420e_aic-coco-256x192.py
│ │ │ ├── group_fisher_finetune_rtmpose-s_8xb256-420e_coco-256x192.py
│ │ │ ├── group_fisher_prune_rtmpose-s_8xb256-420e_aic-coco-256x192.py
│ │ │ └── group_fisher_prune_rtmpose-s_8xb256-420e_coco-256x192.py
│ │ └── wholebody_2d_keypoint
│ │ │ ├── rtmpose-l_8xb32-270e_coco-wholebody-384x288.py
│ │ │ ├── rtmpose-l_8xb64-270e_coco-wholebody-256x192.py
│ │ │ ├── rtmpose-m_8xb64-270e_coco-wholebody-256x192.py
│ │ │ ├── rtmpose-s_8xb64-270e_coco-wholebody-256x192.py
│ │ │ ├── rtmpose-t_8xb64-270e_coco-wholebody-256x192.py
│ │ │ ├── rtmpose-x_8xb32-270e_coco-wholebody-384x288.py
│ │ │ ├── rtmw-l_8xb1024-270e_cocktail14-256x192.py
│ │ │ ├── rtmw-l_8xb320-270e_cocktail14-384x288.py
│ │ │ ├── rtmw-m_8xb1024-270e_cocktail14-256x192.py
│ │ │ ├── rtmw-x_8xb320-270e_cocktail14-384x288.py
│ │ │ └── rtmw-x_8xb704-270e_cocktail14-256x192.py
│ └── yolox
│ │ └── humanart
│ │ ├── yolox_l_8xb8-300e_humanart.py
│ │ ├── yolox_m_8xb8-300e_humanart.py
│ │ ├── yolox_nano_8xb8-300e_humanart.py
│ │ ├── yolox_s_8xb8-300e_humanart.py
│ │ ├── yolox_tiny_8xb8-300e_humanart.py
│ │ └── yolox_x_8xb8-300e_humanart.py
├── rtmpose3d
│ ├── README.md
│ ├── configs
│ │ ├── rtmw3d-l_8xb64_cocktail14-384x288.py
│ │ └── rtmw3d-x_8xb32_cocktail14-384x288.py
│ ├── demo
│ │ ├── body3d_img2pose_demo.py
│ │ └── rtmdet_m_640-8xb32_coco-person.py
│ └── rtmpose3d
│ │ ├── __init__.py
│ │ ├── loss.py
│ │ ├── pose_estimator.py
│ │ ├── rtmw3d_head.py
│ │ ├── simcc_3d_label.py
│ │ └── utils.py
├── skps
│ ├── README.md
│ ├── configs
│ │ ├── td-hm_hrnetv2-w18_skps-1xb16-160e_cofw-256x256.py
│ │ └── td-hm_hrnetv2-w18_skps-1xb64-80e_wflw-256x256.py
│ ├── custom_codecs
│ │ ├── __init__.py
│ │ └── skps_heatmap.py
│ └── models
│ │ ├── __init__.py
│ │ └── skps_head.py
├── uniformer
│ ├── README.md
│ ├── configs
│ │ ├── td-hm_uniformer-b-8xb128-210e_coco-256x192.py
│ │ ├── td-hm_uniformer-b-8xb32-210e_coco-384x288.py
│ │ ├── td-hm_uniformer-b-8xb32-210e_coco-448x320.py
│ │ ├── td-hm_uniformer-s-8xb128-210e_coco-256x192.py
│ │ ├── td-hm_uniformer-s-8xb128-210e_coco-384x288.py
│ │ └── td-hm_uniformer-s-8xb64-210e_coco-448x320.py
│ └── models
│ │ ├── __init__.py
│ │ └── uniformer.py
└── yolox_pose
│ ├── README.md
│ ├── configs
│ ├── _base_
│ │ ├── datasets
│ │ ├── default_runtime.py
│ │ └── py_default_runtime.py
│ ├── py_yolox_pose_s_8xb32_300e_coco.py
│ ├── yolox-pose_l_4xb64-300e_coco.py
│ ├── yolox-pose_m_4xb64-300e_coco.py
│ ├── yolox-pose_s_8xb32-300e_coco.py
│ └── yolox-pose_tiny_4xb64-300e_coco.py
│ ├── datasets
│ ├── __init__.py
│ ├── bbox_keypoint_structure.py
│ ├── coco_dataset.py
│ └── transforms.py
│ ├── demo
│ ├── models
│ ├── __init__.py
│ ├── assigner.py
│ ├── data_preprocessor.py
│ ├── oks_loss.py
│ ├── utils.py
│ └── yolox_pose_head.py
│ └── tools
├── pytest.ini
├── requirements.txt
├── requirements
├── albu.txt
├── build.txt
├── docs.txt
├── mminstall.txt
├── optional.txt
├── poseval.txt
├── readthedocs.txt
├── runtime.txt
└── tests.txt
├── resources
└── mmpose-logo.png
├── setup.cfg
├── setup.py
├── tests
├── data
│ ├── 300vw
│ │ ├── 401
│ │ │ ├── annot
│ │ │ │ ├── 000731.pts
│ │ │ │ └── 000732.pts
│ │ │ └── imgs
│ │ │ │ ├── 000731.jpg
│ │ │ │ └── 000732.jpg
│ │ ├── 001
│ │ │ ├── annot
│ │ │ │ ├── 000006.pts
│ │ │ │ └── 000009.pts
│ │ │ └── imgs
│ │ │ │ ├── 000006.jpg
│ │ │ │ └── 000009.jpg
│ │ ├── anno_300vw.json
│ │ └── broken_frames.npy
│ ├── 300w
│ │ ├── indoor_020.png
│ │ ├── indoor_029.png
│ │ └── test_300w.json
│ ├── 300wlp
│ │ ├── AFW_134212_1_0.jpg
│ │ ├── AFW_134212_2_0.jpg
│ │ └── test_300wlp.json
│ ├── aflw
│ │ ├── image04476.jpg
│ │ ├── image22568.jpg
│ │ └── test_aflw.json
│ ├── aic
│ │ ├── 054d9ce9201beffc76e5ff2169d2af2f027002ca.jpg
│ │ ├── fa436c914fe4a8ec1ec5474af4d3820b84d17561.jpg
│ │ ├── ff945ae2e729f24eea992814639d59b3bdec8bd8.jpg
│ │ └── test_aic.json
│ ├── ak
│ │ ├── AAOYRUDX
│ │ │ ├── AAOYRUDX_f000027.jpg
│ │ │ └── AAOYRUDX_f000028.jpg
│ │ └── test_animalkingdom.json
│ ├── animalpose
│ │ ├── ca110.jpeg
│ │ ├── ho105.jpeg
│ │ └── test_animalpose.json
│ ├── ap10k
│ │ ├── 000000000004.jpg
│ │ ├── 000000037516.jpg
│ │ └── test_ap10k.json
│ ├── atrw
│ │ ├── 000061.jpg
│ │ ├── 003464.jpg
│ │ └── test_atrw.json
│ ├── campus
│ │ ├── actorsGT.mat
│ │ ├── calibration_campus.json
│ │ ├── panoptic_training_pose.pkl
│ │ └── pred_campus_maskrcnn_hrnet_coco.pkl
│ ├── coco
│ │ ├── 000000000785.jpg
│ │ ├── 000000040083.jpg
│ │ ├── 000000196141.jpg
│ │ ├── 000000197388.jpg
│ │ ├── test_coco.json
│ │ ├── test_coco_det_AP_H_56.json
│ │ ├── test_coco_wholebody.json
│ │ └── test_keypoint_partition_metric.json
│ ├── cofw
│ │ ├── 001766.jpg
│ │ ├── 001805.jpg
│ │ └── test_cofw.json
│ ├── crowdpose
│ │ ├── 103319.jpg
│ │ ├── 106848.jpg
│ │ ├── test_crowdpose.json
│ │ └── test_crowdpose_det_AP_40.json
│ ├── deepfasion2
│ │ ├── 000264.jpg
│ │ ├── 000265.jpg
│ │ └── deepfasion2.json
│ ├── exlpose
│ │ ├── imgs_0212_hwangridan_vid000020_exp1200_dark_000052__gain_3.40_exposure_417.png
│ │ ├── imgs_0212_hwangridan_vid000020_exp400_dark_000052__gain_3.40_exposure_1250.png
│ │ └── test_exlpose.json
│ ├── fld
│ │ ├── img_00000128.jpg
│ │ ├── img_00000132.jpg
│ │ └── test_fld.json
│ ├── fly
│ │ ├── 1400.jpg
│ │ ├── 1450.jpg
│ │ └── test_fly.json
│ ├── freihand
│ │ ├── 00000355.jpg
│ │ ├── 00017620.jpg
│ │ ├── 00032915.jpg
│ │ ├── 00050180.jpg
│ │ ├── 00065475.jpg
│ │ ├── 00082740.jpg
│ │ ├── 00098035.jpg
│ │ ├── 00115300.jpg
│ │ └── test_freihand.json
│ ├── h36m
│ │ ├── BF_IUV_gt
│ │ │ ├── S1_Directions_1.54138969_000001_467_466.png
│ │ │ ├── S5_SittingDown.54138969_002061_478_619.png
│ │ │ ├── S7_Greeting.55011271_000396_365_433.png
│ │ │ └── S8_WalkDog_1.55011271_000026_592_382.png
│ │ ├── S1
│ │ │ └── S1_Directions_1.54138969
│ │ │ │ └── S1_Directions_1.54138969_000001.jpg
│ │ ├── S5
│ │ │ └── S5_SittingDown.54138969
│ │ │ │ └── S5_SittingDown.54138969_002061.jpg
│ │ ├── S7
│ │ │ └── S7_Greeting.55011271
│ │ │ │ └── S7_Greeting.55011271_000396.jpg
│ │ ├── S8
│ │ │ └── S8_WalkDog_1.55011271
│ │ │ │ └── S8_WalkDog_1.55011271_000026.jpg
│ │ ├── cameras.pkl
│ │ ├── h36m_coco.json
│ │ ├── test_h36m.npz
│ │ ├── test_h36m_2d_detection.npy
│ │ └── test_h36m_body3d.npz
│ ├── h3wb
│ │ └── h3wb_train_bbox_subset.npz
│ ├── halpe
│ │ └── test_halpe.json
│ ├── horse10
│ │ ├── 0244.png
│ │ ├── 0292.png
│ │ ├── 0465.png
│ │ └── test_horse10.json
│ ├── humanart
│ │ ├── 2D_virtual_human
│ │ │ └── digital_art
│ │ │ │ └── 000000001648.jpg
│ │ ├── 3D_virtual_human
│ │ │ └── garage_kits
│ │ │ │ └── 000000005603.jpg
│ │ ├── real_human
│ │ │ └── acrobatics
│ │ │ │ └── 000000000590.jpg
│ │ ├── test_humanart.json
│ │ └── test_humanart_det_AP_H_56.json
│ ├── interhand2.6m
│ │ ├── image2017.jpg
│ │ ├── image29590.jpg
│ │ ├── image44669.jpg
│ │ ├── image69148.jpg
│ │ ├── test_interhand2.6m_camera.json
│ │ ├── test_interhand2.6m_data.json
│ │ └── test_interhand2.6m_joint_3d.json
│ ├── jhmdb
│ │ ├── Frisbee_catch_f_cm_np1_ri_med_0
│ │ │ └── 00001.png
│ │ ├── Frisbee_catch_f_cm_np1_ri_med_1
│ │ │ └── 00001.png
│ │ ├── Goalkeeper_Training_Day_@_7_catch_f_cm_np1_ri_med_0
│ │ │ └── 00001.png
│ │ └── test_jhmdb_sub1.json
│ ├── lapa
│ │ ├── 10773046825_0.jpg
│ │ ├── 13609937564_5.jpg
│ │ └── test_lapa.json
│ ├── locust
│ │ ├── 630.jpg
│ │ ├── 650.jpg
│ │ └── test_locust.json
│ ├── macaque
│ │ ├── PRI_1473.jpg
│ │ ├── d47f1b1ee9d3217e.jpg
│ │ └── test_macaque.json
│ ├── mhp
│ │ ├── 10084.jpg
│ │ ├── 10112.jpg
│ │ └── test_mhp.json
│ ├── mosh
│ │ └── test_mosh.npz
│ ├── mpi_inf_3dhp
│ │ ├── S4_Seq2_Cam0_001033.jpg
│ │ ├── S8_Seq1_Cam8_002165.jpg
│ │ ├── TS1_002001.jpg
│ │ ├── TS2_001850.jpg
│ │ ├── cameras_test.pkl
│ │ ├── cameras_train.pkl
│ │ ├── test_3dhp_test.npz
│ │ └── test_3dhp_train.npz
│ ├── mpii
│ │ ├── 004645041.jpg
│ │ ├── 005808361.jpg
│ │ ├── 051423444.jpg
│ │ ├── 052475643.jpg
│ │ ├── 060754485.jpg
│ │ ├── test_mpii.json
│ │ └── test_mpii_trb.json
│ ├── ochuman
│ │ ├── 000817.jpg
│ │ ├── 003799.jpg
│ │ ├── 003896.jpg
│ │ └── test_ochuman.json
│ ├── onehand10k
│ │ ├── 1402.jpg
│ │ ├── 33.jpg
│ │ ├── 784.jpg
│ │ ├── 9.jpg
│ │ └── test_onehand10k.json
│ ├── panoptic
│ │ ├── 005880453_01_l.jpg
│ │ ├── 005880453_01_r.jpg
│ │ ├── ex2_2.flv_000040_l.jpg
│ │ ├── ex2_2.flv_000040_r.jpg
│ │ └── test_panoptic.json
│ ├── panoptic_body3d
│ │ ├── 160906_band1
│ │ │ ├── calibration_160906_band1.json
│ │ │ └── hdPose3d_stage1_coco19
│ │ │ │ ├── body3DScene_00000168.json
│ │ │ │ └── body3DScene_00000169.json
│ │ └── 160906_band2
│ │ │ ├── calibration_160906_band2.json
│ │ │ └── hdPose3d_stage1_coco19
│ │ │ ├── body3DScene_00000139.json
│ │ │ └── body3DScene_00000140.json
│ ├── posetrack18
│ │ ├── annotations
│ │ │ ├── test_posetrack18_human_detections.json
│ │ │ ├── test_posetrack18_val.json
│ │ │ └── val
│ │ │ │ ├── 003418_mpii_test.json
│ │ │ │ ├── 009473_mpii_test.json
│ │ │ │ └── 012834_mpii_test.json
│ │ ├── images
│ │ │ └── val
│ │ │ │ ├── 003418_mpii_test
│ │ │ │ └── 000000.jpg
│ │ │ │ ├── 009473_mpii_test
│ │ │ │ └── 000000.jpg
│ │ │ │ └── 012834_mpii_test
│ │ │ │ └── 000000.jpg
│ │ ├── mask
│ │ │ └── val
│ │ │ │ ├── 003418_mpii_test
│ │ │ │ └── 000000.jpg
│ │ │ │ ├── 009473_mpii_test
│ │ │ │ └── 000000.jpg
│ │ │ │ └── 012834_mpii_test
│ │ │ │ └── 000000.jpg
│ │ └── videos
│ │ │ └── 000001_mpiinew_test
│ │ │ ├── 000000.jpg
│ │ │ ├── 000001.jpg
│ │ │ ├── 000001_mpiinew_test.mp4
│ │ │ ├── 000002.jpg
│ │ │ ├── 000003.jpg
│ │ │ └── 000004.jpg
│ ├── rhd
│ │ ├── 00111.png
│ │ ├── 01111.png
│ │ ├── 11111.png
│ │ └── test_rhd.json
│ ├── shelf
│ │ ├── actorsGT.mat
│ │ ├── calibration_shelf.json
│ │ ├── panoptic_training_pose.pkl
│ │ └── pred_shelf_maskrcnn_hrnet_coco.pkl
│ ├── smpl
│ │ └── smpl_mean_params.npz
│ ├── ubody3d
│ │ └── ubody3d_train.json
│ ├── wflw
│ │ ├── 36_Football_americanfootball_ball_36_415.jpg
│ │ ├── 7_Cheering_Cheering_7_16.jpg
│ │ └── test_wflw.json
│ └── zebra
│ │ ├── 810.jpg
│ │ ├── 850.jpg
│ │ └── test_zebra.json
├── test_apis
│ ├── test_inference.py
│ └── test_inferencers
│ │ ├── test_hand3d_inferencer.py
│ │ ├── test_mmpose_inferencer.py
│ │ ├── test_pose2d_inferencer.py
│ │ └── test_pose3d_inferencer.py
├── test_codecs
│ ├── test_annotation_processors.py
│ ├── test_associative_embedding.py
│ ├── test_decoupled_heatmap.py
│ ├── test_edpose_label.py
│ ├── test_hand_3d_heatmap.py
│ ├── test_image_pose_lifting.py
│ ├── test_integral_regression_label.py
│ ├── test_megvii_heatmap.py
│ ├── test_motionbert_label.py
│ ├── test_msra_heatmap.py
│ ├── test_regression_label.py
│ ├── test_simcc_label.py
│ ├── test_spr.py
│ ├── test_udp_heatmap.py
│ └── test_video_pose_lifting.py
├── test_datasets
│ ├── test_datasets
│ │ ├── test_animal_datasets
│ │ │ ├── test_animalkingdom_dataset.py
│ │ │ ├── test_animalpose_dataset.py
│ │ │ ├── test_ap10k_dataset.py
│ │ │ ├── test_atrw_dataset.py
│ │ │ ├── test_fly_dataset.py
│ │ │ ├── test_horse10_dataset.py
│ │ │ ├── test_locust_dataset.py
│ │ │ ├── test_macaque_dataset.py
│ │ │ └── test_zebra_dataset.py
│ │ ├── test_body_datasets
│ │ │ ├── test_aic_dataset.py
│ │ │ ├── test_coco_dataset.py
│ │ │ ├── test_crowdpose_dataset.py
│ │ │ ├── test_exlpose_dataset.py
│ │ │ ├── test_h36m_dataset.py
│ │ │ ├── test_humanart21_dataset.py
│ │ │ ├── test_humanart_dataset.py
│ │ │ ├── test_jhmdb_dataset.py
│ │ │ ├── test_mhp_dataset.py
│ │ │ ├── test_mpii_dataset.py
│ │ │ ├── test_mpii_trb_dataset.py
│ │ │ ├── test_ochuman_dataset.py
│ │ │ ├── test_posetrack18_dataset.py
│ │ │ └── test_posetrack18_video_dataset.py
│ │ ├── test_dataset_wrappers
│ │ │ └── test_combined_dataset.py
│ │ ├── test_face_datasets
│ │ │ ├── test_aflw_dataset.py
│ │ │ ├── test_coco_wholebody_face_dataset.py
│ │ │ ├── test_cofw_dataset.py
│ │ │ ├── test_face_300vw_dataset.py
│ │ │ ├── test_face_300w_dataset.py
│ │ │ ├── test_face_300wlp_dataset.py
│ │ │ ├── test_lapa_dataset.py
│ │ │ └── test_wflw_dataset.py
│ │ ├── test_fashion_datasets
│ │ │ └── test_deepfashion_dataset.py
│ │ ├── test_hand_datasets
│ │ │ ├── test_coco_wholebody_hand_dataset.py
│ │ │ ├── test_freihand_dataset.py
│ │ │ ├── test_interhand2d_double_dataset.py
│ │ │ ├── test_interhand3d_dataset.py
│ │ │ ├── test_onehand10k_dataset.py
│ │ │ ├── test_panoptic_hand2d_dataset.py
│ │ │ └── test_rhd2d_dataset.py
│ │ └── test_wholebody_datasets
│ │ │ ├── test_coco_wholebody_dataset.py
│ │ │ ├── test_h3wb_dataset.py
│ │ │ ├── test_halpe_dataset.py
│ │ │ └── test_ubody_dataset.py
│ └── test_transforms
│ │ ├── test_bottomup_transforms.py
│ │ ├── test_common_transforms.py
│ │ ├── test_converting.py
│ │ ├── test_formatting.py
│ │ ├── test_loading.py
│ │ ├── test_mix_img_transform.py
│ │ ├── test_pose3d_transforms.py
│ │ └── test_topdown_transforms.py
├── test_engine
│ ├── test_hooks
│ │ ├── test_badcase_hook.py
│ │ ├── test_mode_switch_hooks.py
│ │ ├── test_sync_norm_hook.py
│ │ └── test_visualization_hook.py
│ └── test_schedulers
│ │ ├── test_lr_scheduler.py
│ │ └── test_quadratic_warmup.py
├── test_evaluation
│ ├── test_evaluator
│ │ └── test_multi_dataset_evaluator.py
│ ├── test_functional
│ │ ├── test_keypoint_eval.py
│ │ ├── test_nms.py
│ │ └── test_transforms.py
│ └── test_metrics
│ │ ├── test_coco_metric.py
│ │ ├── test_coco_wholebody_metric.py
│ │ ├── test_hand_metric.py
│ │ ├── test_keypoint_2d_metrics.py
│ │ ├── test_keypoint_3d_metrics.py
│ │ ├── test_keypoint_partition_metric.py
│ │ └── test_posetrack18_metric.py
├── test_external
│ └── test_mim.py
├── test_models
│ ├── test_backbones
│ │ ├── test_alexnet.py
│ │ ├── test_backbones_utils.py
│ │ ├── test_cpm.py
│ │ ├── test_csp_darknet.py
│ │ ├── test_dstformer.py
│ │ ├── test_hourglass.py
│ │ ├── test_hrformer.py
│ │ ├── test_hrnet.py
│ │ ├── test_litehrnet.py
│ │ ├── test_mobilenet_v2.py
│ │ ├── test_mobilenet_v3.py
│ │ ├── test_mspn.py
│ │ ├── test_pvt.py
│ │ ├── test_regnet.py
│ │ ├── test_resnest.py
│ │ ├── test_resnet.py
│ │ ├── test_resnext.py
│ │ ├── test_rsn.py
│ │ ├── test_scnet.py
│ │ ├── test_seresnet.py
│ │ ├── test_seresnext.py
│ │ ├── test_shufflenet_v1.py
│ │ ├── test_shufflenet_v2.py
│ │ ├── test_swin.py
│ │ ├── test_tcn.py
│ │ ├── test_v2v_net.py
│ │ ├── test_vgg.py
│ │ ├── test_vipnas_mbv3.py
│ │ └── test_vipnas_resnet.py
│ ├── test_data_preprocessors
│ │ └── test_data_preprocessor.py
│ ├── test_distillers
│ │ └── test_dwpose_distiller.py
│ ├── test_heads
│ │ ├── test_heatmap_heads
│ │ │ ├── test_ae_head.py
│ │ │ ├── test_cid_head.py
│ │ │ ├── test_cpm_head.py
│ │ │ ├── test_heatmap_head.py
│ │ │ ├── test_mspn_head.py
│ │ │ ├── test_rtmcc_head.py
│ │ │ ├── test_simcc_head.py
│ │ │ └── test_vipnas_head.py
│ │ ├── test_hybrid_heads
│ │ │ ├── test_dekr_head.py
│ │ │ └── test_vis_head.py
│ │ └── test_regression_heads
│ │ │ ├── test_dsnt_head.py
│ │ │ ├── test_integral_regression_head.py
│ │ │ ├── test_regression_head.py
│ │ │ └── test_rle_head.py
│ ├── test_losses
│ │ ├── test_ae_loss.py
│ │ ├── test_classification_losses.py
│ │ ├── test_heatmap_losses.py
│ │ └── test_regression_losses.py
│ ├── test_necks
│ │ ├── test_fmap_proc_neck.py
│ │ └── test_yolox_pafpn.py
│ ├── test_pose_estimators
│ │ ├── test_bottomup.py
│ │ └── test_topdown.py
│ └── test_utils
│ │ ├── test_check_and_update_config.py
│ │ └── test_transformers.py
├── test_structures
│ ├── test_bbox
│ │ ├── test_bbox_overlaps.py
│ │ └── test_bbox_transforms.py
│ ├── test_keypoint
│ │ └── test_keypoint_transforms.py
│ ├── test_multilevel_pixel_data.py
│ └── test_pose_data_sample.py
├── test_utils
│ └── test_setup_env.py
└── test_visualization
│ ├── test_fast_visualizer.py
│ └── test_pose_visualizer.py
└── tools
├── analysis_tools
├── analyze_logs.py
├── get_flops.py
└── print_config.py
├── dataset_converters
├── 300vw2coco.py
├── 300wlp2coco.py
├── h36m_to_coco.py
├── labelstudio2coco.py
├── lapa2coco.py
├── mat2json.py
├── parse_animalpose_dataset.py
├── parse_cofw_dataset.py
├── parse_deepposekit_dataset.py
├── parse_macaquepose_dataset.py
├── preprocess_h36m.py
├── preprocess_mpi_inf_3dhp.py
├── scripts
│ ├── preprocess_300w.sh
│ ├── preprocess_aic.sh
│ ├── preprocess_ap10k.sh
│ ├── preprocess_coco2017.sh
│ ├── preprocess_crowdpose.sh
│ ├── preprocess_freihand.sh
│ ├── preprocess_hagrid.sh
│ ├── preprocess_halpe.sh
│ ├── preprocess_lapa.sh
│ ├── preprocess_mpii.sh
│ ├── preprocess_onehand10k.sh
│ └── preprocess_wflw.sh
├── ubody_kpts_to_coco.py
├── ubody_smplx_to_coco.py
└── wflw2coco.py
├── dist_test.sh
├── dist_train.sh
├── misc
├── browse_dataset.py
├── generate_bbox_file.py
├── keypoints2coco_without_mmdet.py
├── pth_transfer.py
└── publish_model.py
├── slurm_test.sh
├── slurm_train.sh
├── test.py
├── torchserve
├── mmpose2torchserve.py
├── mmpose_handler.py
└── test_torchserver.py
└── train.py
/.circleci/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | ARG PYTORCH="1.7.1"
2 | ARG CUDA="11.0"
3 | ARG CUDNN="8"
4 |
5 | FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel
6 |
7 | # To fix GPG key error when running apt-get update
8 | RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub
9 | RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub
10 |
11 | RUN apt-get update && apt-get install -y ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx
12 |
--------------------------------------------------------------------------------
/.circleci/scripts/get_mmcv_var.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | TORCH=$1
4 | CUDA=$2
5 |
6 | # 10.2 -> cu102
7 | MMCV_CUDA="cu`echo ${CUDA} | tr -d '.'`"
8 |
9 | # MMCV only provides pre-compiled packages for torch 1.x.0
10 | # which works for any subversions of torch 1.x.
11 | # We force the torch version to be 1.x.0 to ease package searching
12 | # and avoid unnecessary rebuild during MMCV's installation.
13 | TORCH_VER_ARR=(${TORCH//./ })
14 | TORCH_VER_ARR[2]=0
15 | printf -v MMCV_TORCH "%s." "${TORCH_VER_ARR[@]}"
16 | MMCV_TORCH=${MMCV_TORCH%?} # Remove the last dot
17 |
18 | echo "export MMCV_CUDA=${MMCV_CUDA}" >> $BASH_ENV
19 | echo "export MMCV_TORCH=${MMCV_TORCH}" >> $BASH_ENV
20 |
--------------------------------------------------------------------------------
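To make the version mangling above concrete, here is a minimal Python sketch of the same transformation (`mmcv_vars` is a hypothetical helper that mirrors the shell logic; it is not part of the repository):

```python
# Sketch of the version mangling done by get_mmcv_var.sh, assuming
# inputs like TORCH="1.8.2" and CUDA="10.2".
def mmcv_vars(torch_version: str, cuda_version: str) -> dict:
    # "10.2" -> "cu102": drop the dot and prefix with "cu"
    mmcv_cuda = 'cu' + cuda_version.replace('.', '')
    # Force the patch version to 0, e.g. "1.8.2" -> "1.8.0", since
    # MMCV only ships pre-compiled wheels built against torch 1.x.0.
    major, minor, *_ = torch_version.split('.')
    mmcv_torch = f'{major}.{minor}.0'
    return {'MMCV_CUDA': mmcv_cuda, 'MMCV_TORCH': mmcv_torch}

assert mmcv_vars('1.8.2', '10.2') == {
    'MMCV_CUDA': 'cu102', 'MMCV_TORCH': '1.8.0'}
```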
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: false
2 |
3 | contact_links:
4 | - name: Common Issues
5 | url: https://mmpose.readthedocs.io/en/latest/faq.html
6 | about: Check if your issue already has solutions
7 | - name: MMPose Documentation
8 | url: https://mmpose.readthedocs.io/en/latest/
9 | about: Check if your question is answered in docs
10 |
--------------------------------------------------------------------------------
/.github/workflows/deploy.yml:
--------------------------------------------------------------------------------
1 | name: deploy
2 |
3 | on: push
4 |
5 | jobs:
6 | build-n-publish:
7 | runs-on: ubuntu-latest
8 | if: startsWith(github.event.ref, 'refs/tags')
9 | steps:
10 | - uses: actions/checkout@v2
11 | - name: Set up Python 3.7
12 | uses: actions/setup-python@v2
13 | with:
14 | python-version: 3.7
15 | - name: Build MMPose
16 | run: |
17 | pip install wheel
18 | python setup.py sdist bdist_wheel
19 | - name: Publish distribution to PyPI
20 | run: |
21 | pip install twine
22 | twine upload dist/* -u __token__ -p ${{ secrets.pypi_password }}
23 |
--------------------------------------------------------------------------------
/.github/workflows/lint.yml:
--------------------------------------------------------------------------------
1 | name: lint
2 |
3 | on: [push, pull_request]
4 |
5 | concurrency:
6 | group: ${{ github.workflow }}-${{ github.ref }}
7 | cancel-in-progress: true
8 |
9 | jobs:
10 | lint:
11 | runs-on: ubuntu-latest
12 | steps:
13 | - uses: actions/checkout@v2
14 | - name: Set up Python 3.7
15 | uses: actions/setup-python@v2
16 | with:
17 | python-version: 3.7
18 | - name: Install pre-commit hook
19 | run: |
20 | pip install pre-commit
21 | pre-commit install
22 | - name: Linting
23 | run: pre-commit run --all-files
24 | - name: Check docstring coverage
25 | run: |
26 | pip install interrogate
27 | interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-regex "__repr__" --fail-under 80 mmpose
28 |
--------------------------------------------------------------------------------
/.github/workflows/scripts/get_mmcv_var.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | TORCH=$1
4 | CUDA=$2
5 |
6 | # 10.2 -> cu102
7 | MMCV_CUDA="cu`echo ${CUDA} | tr -d '.'`"
8 |
9 | # MMCV only provides pre-compiled packages for torch 1.x.0
10 | # which works for any subversions of torch 1.x.
11 | # We force the torch version to be 1.x.0 to ease package searching
12 | # and avoid unnecessary rebuild during MMCV's installation.
13 | TORCH_VER_ARR=(${TORCH//./ })
14 | TORCH_VER_ARR[2]=0
15 | printf -v MMCV_TORCH "%s." "${TORCH_VER_ARR[@]}"
16 | MMCV_TORCH=${MMCV_TORCH%?} # Remove the last dot
17 |
18 | echo "MMCV_CUDA=${MMCV_CUDA}" >> $GITHUB_ENV
19 | echo "MMCV_TORCH=${MMCV_TORCH}" >> $GITHUB_ENV
20 |
--------------------------------------------------------------------------------
/CITATION.cff:
--------------------------------------------------------------------------------
1 | cff-version: 1.2.0
2 | message: "If you use this software, please cite it as below."
3 | authors:
4 | - name: "MMPose Contributors"
5 | title: "OpenMMLab Pose Estimation Toolbox and Benchmark"
6 | date-released: 2020-08-31
7 | url: "https://github.com/open-mmlab/mmpose"
8 | license: Apache-2.0
9 |
--------------------------------------------------------------------------------
/LICENSES.md:
--------------------------------------------------------------------------------
1 | # Licenses for special algorithms
2 |
3 | In this file, we list the algorithms that are released under licenses other than Apache 2.0. Users should be careful when adopting these algorithms for any commercial purposes.
4 |
5 | | Algorithm | Files | License |
6 | | :-------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------: |
7 | | EDPose | [mmpose/models/heads/transformer_heads/edpose_head.py](https://github.com/open-mmlab/mmpose/blob/main/mmpose/models/heads/transformer_heads/edpose_head.py) | IDEA License 1.0 |
8 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include requirements/*.txt
2 | include mmpose/.mim/model-index.yml
3 | include mmpose/.mim/dataset-index.yml
4 | recursive-include mmpose/.mim/configs *.py *.yml
5 | recursive-include mmpose/.mim/tools *.py *.sh
6 | recursive-include mmpose/.mim/demo *.py
7 |
--------------------------------------------------------------------------------
/configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose_ap10k.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/animal_2d_keypoint/rtmpose/ap10k/rtmpose-m_8xb64-210e_ap10k-256x256.py
3 | In Collection: RTMPose
4 | Alias: animal
5 | Metadata:
6 | Architecture:
7 | - RTMPose
8 | Training Data: AP-10K
9 | Name: rtmpose-m_8xb64-210e_ap10k-256x256
10 | Results:
11 | - Dataset: AP-10K
12 | Metrics:
13 | AP: 0.722
14 | AP@0.5: 0.939
15 | AP@0.75: 0.788
16 | AP (L): 0.728
17 | AP (M): 0.569
18 | Task: Animal 2D Keypoint
19 | Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-ap10k_pt-aic-coco_210e-256x256-7a041aa1_20230206.pth
20 |
--------------------------------------------------------------------------------
/configs/animal_2d_keypoint/topdown_heatmap/ap10k/cspnext_udp_ap10k.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/animal_2d_keypoint/topdown_heatmap/ap10k/cspnext-m_udp_8xb64-210e_ap10k-256x256.py
3 | In Collection: UDP
4 | Metadata:
5 | Architecture: &id001
6 | - UDP
7 | - CSPNeXt
8 | Training Data: AP-10K
9 | Name: cspnext-m_udp_8xb64-210e_ap10k-256x256
10 | Results:
11 | - Dataset: AP-10K
12 | Metrics:
13 | AP: 0.703
14 | AP@0.5: 0.944
15 | AP@0.75: 0.776
16 | AP (L): 0.71
17 | AP (M): 0.513
18 | Task: Animal 2D Keypoint
19 | Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-ap10k_pt-in1k_210e-256x256-1f2d947a_20230123.pth
20 |
--------------------------------------------------------------------------------
/configs/body_2d_keypoint/associative_embedding/README.md:
--------------------------------------------------------------------------------
1 | # Associative embedding: End-to-end learning for joint detection and grouping (AE)
2 |
3 | Associative Embedding is one of the most popular 2D bottom-up pose estimation approaches, which first detects all the keypoints and then groups/associates them into person instances.
4 |
5 | In order to group all the predicted keypoints into individuals, a tag is also predicted for each detected keypoint. Tags of the same person are similar, while tags of different people are different. Thus, the keypoints can be grouped according to the tags.
6 |
7 |
8 |
9 |
10 |
--------------------------------------------------------------------------------
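To make the tag-based grouping described in this README concrete, here is a minimal NumPy sketch of the idea (illustrative only; `group_by_tags` is a hypothetical helper, not MMPose's actual associative-embedding decoder): each detected keypoint is assigned to the person whose mean tag is nearest, and a new person is started when no existing tag is close enough.

```python
import numpy as np

def group_by_tags(keypoints, tags, thr=1.0):
    """Greedy grouping sketch. `keypoints` is a list over joint types,
    each entry an array of (x, y) detections; `tags` holds the matching
    scalar embeddings. Returns a list of person dicts."""
    persons = []  # each: {'tags': [...], 'kpts': {joint_id: (x, y)}}
    for joint_id, (kpts, kpt_tags) in enumerate(zip(keypoints, tags)):
        for (x, y), tag in zip(kpts, kpt_tags):
            if persons:
                means = np.array([np.mean(p['tags']) for p in persons])
                best = int(np.argmin(np.abs(means - tag)))
                # Join the closest person if its tag is similar enough
                # and that person has no keypoint of this type yet.
                if (abs(means[best] - tag) < thr
                        and joint_id not in persons[best]['kpts']):
                    persons[best]['tags'].append(tag)
                    persons[best]['kpts'][joint_id] = (x, y)
                    continue
            persons.append({'tags': [tag], 'kpts': {joint_id: (x, y)}})
    return persons

kpts = [np.array([[10, 20], [200, 40]]), np.array([[12, 60]])]
tags = [np.array([0.1, 2.0]), np.array([0.15])]
print(len(group_by_tags(kpts, tags)))  # -> 2 persons
```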
/configs/body_2d_keypoint/rtmpose/crowdpose/rtmpose_crowdpose.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/body_2d_keypoint/rtmpose/crowdpose/rtmpose-m_8xb64-210e_crowdpose-256x192.py
3 | In Collection: RTMPose
4 | Metadata:
5 | Architecture:
6 | - RTMPose
7 | Training Data: CrowdPose
8 | Name: rtmpose-m_8xb64-210e_crowdpose-256x192
9 | Results:
10 | - Dataset: CrowdPose
11 | Metrics:
12 | AP: 0.706
13 | AP@0.5: 0.841
14 | AP@0.75: 0.765
15 | AP (E): 0.799
16 | AP (M): 0.719
17 | AP (L): 0.582
18 | Task: Body 2D Keypoint
19 | Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-crowdpose_pt-aic-coco_210e-256x192-e6192cac_20230224.pth
20 |
--------------------------------------------------------------------------------
/configs/body_2d_keypoint/rtmpose/mpii/rtmpose_mpii.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/body_2d_keypoint/rtmpose/mpii/rtmpose-m_8xb64-210e_mpii-256x256.py
3 | In Collection: RTMPose
4 | Metadata:
5 | Architecture:
6 | - RTMPose
7 | Training Data: MPII
8 | Name: rtmpose-m_8xb64-210e_mpii-256x256
9 | Results:
10 | - Dataset: MPII
11 | Metrics:
12 | Mean: 0.907
13 | Mean@0.1: 0.348
14 | Task: Body 2D Keypoint
15 | Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-mpii_pt-aic-coco_210e-256x256-ec4dbec8_20230206.pth
16 |
--------------------------------------------------------------------------------
/configs/body_2d_keypoint/simcc/coco/mobilenetv2_coco.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/body_2d_keypoint/simcc/coco/simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192.py
3 | In Collection: SimCC
4 | Metadata:
5 | Architecture: &id001
6 | - SimCC
7 | - MobilenetV2
8 | Training Data: COCO
9 | Name: simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192
10 | Results:
11 | - Dataset: COCO
12 | Metrics:
13 | AP: 0.62
14 | AP@0.5: 0.855
15 | AP@0.75: 0.697
16 | AR: 0.678
17 | AR@0.5: 0.902
18 | Task: Body 2D Keypoint
19 | Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192-4b0703bb_20221010.pth
20 |
--------------------------------------------------------------------------------
/configs/body_2d_keypoint/simcc/coco/vipnas_coco.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/body_2d_keypoint/simcc/coco/simcc_vipnas-mbv3_8xb64-210e_coco-256x192.py
3 | In Collection: SimCC
4 | Metadata:
5 | Architecture: &id001
6 | - SimCC
7 | - ViPNAS
8 | Training Data: COCO
9 | Name: simcc_vipnas-mbv3_8xb64-210e_coco-256x192
10 | Results:
11 | - Dataset: COCO
12 | Metrics:
13 | AP: 0.695
14 | AP@0.5: 0.883
15 | AP@0.75: 0.772
16 | AR: 0.755
17 | AR@0.5: 0.927
18 | Task: Body 2D Keypoint
19 | Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/simcc/coco/simcc_vipnas-mbv3_8xb64-210e_coco-256x192-719f3489_20220922.pth
20 |
--------------------------------------------------------------------------------
/configs/body_2d_keypoint/topdown_heatmap/aic/hrnet_aic.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/body_2d_keypoint/topdown_heatmap/aic/td-hm_hrnet-w32_8xb64-210e_aic-256x192.py
3 | In Collection: HRNet
4 | Metadata:
5 | Architecture:
6 | - HRNet
7 | Training Data: AI Challenger
8 | Name: td-hm_hrnet-w32_8xb64-210e_aic-256x192
9 | Results:
10 | - Dataset: AI Challenger
11 | Metrics:
12 | AP: 0.323
13 | AP@0.5: 0.761
14 | AP@0.75: 0.218
15 | AR: 0.366
16 | AR@0.5: 0.789
17 | Task: Body 2D Keypoint
18 | Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_aic_256x192-30a4e465_20200826.pth
19 |
--------------------------------------------------------------------------------
/configs/body_2d_keypoint/topdown_heatmap/aic/resnet_aic.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/body_2d_keypoint/topdown_heatmap/aic/td-hm_res101_8xb64-210e_aic-256x192.py
3 | In Collection: SimpleBaseline2D
4 | Metadata:
5 | Architecture:
6 | - SimpleBaseline2D
7 | - ResNet
8 | Training Data: AI Challenger
9 | Name: td-hm_res101_8xb64-210e_aic-256x192
10 | Results:
11 | - Dataset: AI Challenger
12 | Metrics:
13 | AP: 0.294
14 | AP@0.5: 0.736
15 | AP@0.75: 0.172
16 | AR: 0.337
17 | AR@0.5: 0.762
18 | Task: Body 2D Keypoint
19 | Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res101_aic_256x192-79b35445_20200826.pth
20 |
--------------------------------------------------------------------------------
/configs/body_2d_keypoint/topdown_heatmap/coco/alexnet_coco.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_alexnet_8xb64-210e_coco-256x192.py
3 | In Collection: SimpleBaseline2D
4 | Metadata:
5 | Architecture:
6 | - SimpleBaseline2D
7 | - AlexNet
8 | Training Data: COCO
9 | Name: td-hm_alexnet_8xb64-210e_coco-256x192
10 | Results:
11 | - Dataset: COCO
12 | Metrics:
13 | AP: 0.448
14 | AP@0.5: 0.767
15 | AP@0.75: 0.461
16 | AR: 0.521
17 | AR@0.5: 0.829
18 | Task: Body 2D Keypoint
19 | Weights: https://download.openmmlab.com/mmpose/top_down/alexnet/alexnet_coco_256x192-a7b1fd15_20200727.pth
20 |
--------------------------------------------------------------------------------
/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_fp16-8xb64-210e_coco-256x192.py:
--------------------------------------------------------------------------------
1 | _base_ = ['./td-hm_hrnet-w32_8xb64-210e_coco-256x192.py']
2 |
3 | # fp16 settings
4 | optim_wrapper = dict(
5 | type='AmpOptimWrapper',
6 | loss_scale='dynamic',
7 | )
8 |
--------------------------------------------------------------------------------
/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_fp16-8xb64-210e_coco-256x192.py:
--------------------------------------------------------------------------------
1 | _base_ = ['./td-hm_res50_8xb64-210e_coco-256x192.py']
2 |
3 | # fp16 settings
4 | optim_wrapper = dict(
5 | type='AmpOptimWrapper',
6 | loss_scale='dynamic',
7 | )
8 |
--------------------------------------------------------------------------------
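The two fp16 configs above enable automatic mixed precision by swapping in `AmpOptimWrapper` with dynamic loss scaling. As a rough reference, this corresponds to the following plain-PyTorch pattern (a sketch of the equivalent `torch.cuda.amp` usage, assumed rather than taken from MMPose internals):

```python
import torch

scaler = torch.cuda.amp.GradScaler()  # dynamic loss scaling

def train_step(model, optimizer, inputs, targets, loss_fn):
    optimizer.zero_grad()
    with torch.cuda.amp.autocast():   # run the forward pass in mixed precision
        loss = loss_fn(model(inputs), targets)
    scaler.scale(loss).backward()     # scale the loss to avoid fp16 underflow
    scaler.step(optimizer)            # unscale grads, then optimizer step
    scaler.update()                   # adapt the scale factor dynamically
    return loss.detach()
```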
/configs/body_2d_keypoint/topdown_heatmap/coco/vgg_coco.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_vgg16-bn_8xb64-210e_coco-256x192.py
3 | In Collection: SimpleBaseline2D
4 | Metadata:
5 | Architecture:
6 | - SimpleBaseline2D
7 | - VGG
8 | Training Data: COCO
9 | Name: td-hm_vgg16-bn_8xb64-210e_coco-256x192
10 | Results:
11 | - Dataset: COCO
12 | Metrics:
13 | AP: 0.699
14 | AP@0.5: 0.89
15 | AP@0.75: 0.769
16 | AR: 0.754
17 | AR@0.5: 0.927
18 | Task: Body 2D Keypoint
19 | Weights: https://download.openmmlab.com/mmpose/top_down/vgg/vgg16_bn_coco_256x192-7e7c58d6_20210517.pth
20 |
--------------------------------------------------------------------------------
/configs/body_2d_keypoint/topdown_heatmap/crowdpose/cspnext_udp_crowdpose.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/body_2d_keypoint/topdown_heatmap/crowdpose/cspnext-m_udp_8xb64-210e_crowpose-256x192.py
3 | In Collection: UDP
4 | Metadata:
5 | Architecture:
6 | - UDP
7 | - CSPNeXt
8 | Training Data: CrowdPose
9 | Name: cspnext-m_udp_8xb64-210e_crowpose-256x192
10 | Results:
11 | - Dataset: CrowdPose
12 | Metrics:
13 | AP: 0.662
14 | AP (E): 0.759
15 | AP (H): 0.539
16 | AP (M): 0.675
17 | AP@0.5: 0.821
18 | AP@0.75: 0.723
19 | Task: Body 2D Keypoint
20 | Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-crowdpose_pt-in1k_210e-256x192-f591079f_20230123.pth
21 |
--------------------------------------------------------------------------------
/configs/body_2d_keypoint/topdown_heatmap/crowdpose/hrnet_crowdpose.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/body_2d_keypoint/topdown_heatmap/crowdpose/td-hm_hrnet-w32_8xb64-210e_crowdpose-256x192.py
3 | In Collection: HRNet
4 | Metadata:
5 | Architecture:
6 | - HRNet
7 | Training Data: CrowdPose
8 | Name: td-hm_hrnet-w32_8xb64-210e_crowdpose-256x192
9 | Results:
10 | - Dataset: CrowdPose
11 | Metrics:
12 | AP: 0.675
13 | AP (E): 0.77
14 | AP (H): 0.553
15 | AP (M): 0.687
16 | AP@0.5: 0.825
17 | AP@0.75: 0.729
18 | Task: Body 2D Keypoint
19 | Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_crowdpose_256x192-960be101_20201227.pth
20 |
--------------------------------------------------------------------------------
/configs/body_2d_keypoint/topdown_heatmap/exlpose/hrnet_exlpose.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/body_2d_keypoint/topdown_heatmap/exlpose/td-hm_hrnet-w32_8xb64-210e_exlpose-256x192.py
3 | In Collection: HRNet
4 | Metadata:
5 | Architecture:
6 | - HRNet
7 | Training Data: ExLPose-LL
8 | Name: td-hm_hrnet-w32_8xb64-210e_exlpose-256x192
9 | Results:
10 | - Dataset: ExLPose
11 | Metrics:
12 | AP: 0.401
13 | AP@0.5: 0.64
14 | AP@0.75: 0.40
15 | AR: 0.452
16 | AR@0.5: 0.693
17 | Task: Body 2D Keypoint
18 | Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/exlpose/td-hm_hrnet-w32_8xb64-210e_exlpose-ll-256x192.pth
19 |
--------------------------------------------------------------------------------
/configs/body_2d_keypoint/topdown_heatmap/mpii/cpm_mpii.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_cpm_8xb64-210e_mpii-368x368.py
3 | In Collection: CPM
4 | Metadata:
5 | Architecture:
6 | - CPM
7 | Training Data: MPII
8 | Name: td-hm_cpm_8xb64-210e_mpii-368x368
9 | Results:
10 | - Dataset: MPII
11 | Metrics:
12 | Mean: 0.876
13 | Mean@0.1: 0.285
14 | Task: Body 2D Keypoint
15 | Weights: https://download.openmmlab.com/mmpose/top_down/cpm/cpm_mpii_368x368-116e62b8_20200822.pth
16 |
--------------------------------------------------------------------------------
/configs/body_2d_keypoint/topdown_heatmap/mpii/cspnext_udp_mpii.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/body_2d_keypoint/topdown_heatmap/mpii/cspnext-m_udp_8xb64-210e_mpii-256x256.py
3 | In Collection: UDP
4 | Metadata:
5 | Architecture:
6 | - UDP
7 | - CSPNeXt
8 | Training Data: MPII
9 | Name: cspnext-m_udp_8xb64-210e_mpii-256x256
10 | Results:
11 | - Dataset: MPII
12 | Metrics:
13 | Mean: 0.902
14 | Mean@0.1: 0.303
15 | Task: Body 2D Keypoint
16 | Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-mpii_pt-in1k_210e-256x256-68d0402f_20230208.pth
17 |
--------------------------------------------------------------------------------
/configs/body_2d_keypoint/topdown_heatmap/mpii/mobilenetv2_mpii.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_mobilenetv2_8xb64-210e_mpii-256x256.py
3 | In Collection: SimpleBaseline2D
4 | Metadata:
5 | Architecture:
6 | - SimpleBaseline2D
7 | - MobilenetV2
8 | Training Data: MPII
9 | Name: td-hm_mobilenetv2_8xb64-210e_mpii-256x256
10 | Results:
11 | - Dataset: MPII
12 | Metrics:
13 | Mean: 0.854
14 | Mean@0.1: 0.234
15 | Task: Body 2D Keypoint
16 | Weights: https://download.openmmlab.com/mmpose/top_down/mobilenetv2/mobilenetv2_mpii_256x256-e068afa7_20200812.pth
17 |
--------------------------------------------------------------------------------
/configs/body_2d_keypoint/topdown_heatmap/mpii/resnext_mpii.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_resnext152_8xb32-210e_mpii-256x256.py
3 | In Collection: SimpleBaseline2D
4 | Metadata:
5 | Architecture:
6 | - SimpleBaseline2D
7 | - ResNext
8 | Training Data: MPII
9 | Name: td-hm_resnext152_8xb32-210e_mpii-256x256
10 | Results:
11 | - Dataset: MPII
12 | Metrics:
13 | Mean: 0.887
14 | Mean@0.1: 0.294
15 | Task: Body 2D Keypoint
16 | Weights: https://download.openmmlab.com/mmpose/top_down/resnext/resnext152_mpii_256x256-df302719_20200927.pth
17 |
--------------------------------------------------------------------------------
/configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv1_mpii.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_shufflenetv1_8xb64-210e_mpii-256x256.py
3 | In Collection: SimpleBaseline2D
4 | Metadata:
5 | Architecture:
6 | - SimpleBaseline2D
7 | - ShufflenetV1
8 | Training Data: MPII
9 | Name: td-hm_shufflenetv1_8xb64-210e_mpii-256x256
10 | Results:
11 | - Dataset: MPII
12 | Metrics:
13 | Mean: 0.824
14 | Mean@0.1: 0.195
15 | Task: Body 2D Keypoint
16 | Weights: https://download.openmmlab.com/mmpose/top_down/shufflenetv1/shufflenetv1_mpii_256x256-dcc1c896_20200925.pth
17 |
--------------------------------------------------------------------------------
/configs/body_2d_keypoint/topdown_heatmap/mpii/shufflenetv2_mpii.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/body_2d_keypoint/topdown_heatmap/mpii/td-hm_shufflenetv2_8xb64-210e_mpii-256x256.py
3 | In Collection: SimpleBaseline2D
4 | Metadata:
5 | Architecture:
6 | - SimpleBaseline2D
7 | - ShufflenetV2
8 | Training Data: MPII
9 | Name: td-hm_shufflenetv2_8xb64-210e_mpii-256x256
10 | Results:
11 | - Dataset: MPII
12 | Metrics:
13 | Mean: 0.828
14 | Mean@0.1: 0.205
15 | Task: Body 2D Keypoint
16 | Weights: https://download.openmmlab.com/mmpose/top_down/shufflenetv2/shufflenetv2_mpii_256x256-4fb9df2d_20200925.pth
17 |
--------------------------------------------------------------------------------
/configs/body_2d_keypoint/topdown_heatmap/posetrack18/resnet_posetrack18.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/body_2d_keypoint/topdown_heatmap/posetrack18/td-hm_res50_8xb64-20e_posetrack18-256x192.py
3 | In Collection: SimpleBaseline2D
4 | Metadata:
5 | Architecture: &id001
6 | - SimpleBaseline2D
7 | - ResNet
8 | Training Data: PoseTrack18
9 | Name: td-hm_res50_8xb64-20e_posetrack18-256x192
10 | Results:
11 | - Dataset: PoseTrack18
12 | Metrics:
13 | Ankl: 74.2
14 | Elb: 82.5
15 | Head: 86.5
16 | Hip: 80.1
17 | Knee: 78.8
18 | Shou: 87.7
19 | Total: 81.2
20 | Wri: 75.8
21 | Task: Body 2D Keypoint
22 | Weights: https://download.openmmlab.com/mmpose/top_down/resnet/res50_posetrack18_256x192-a62807c7_20201028.pth
23 |
--------------------------------------------------------------------------------
/configs/body_2d_keypoint/topdown_regression/coco/mobilenetv2_rle_coco.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/body_2d_keypoint/topdown_regression/coco/td-reg_mobilenetv2_rle-pretrained-8xb64-210e_coco-256x192.py
3 | In Collection: RLE
4 | Metadata:
5 | Architecture: &id001
6 | - DeepPose
7 | - RLE
8 | - MobileNet
9 | Training Data: COCO
10 | Name: td-reg_mobilenetv2_rle-pretrained-8xb64-210e_coco-256x192
11 | Results:
12 | - Dataset: COCO
13 | Metrics:
14 | AP: 0.593
15 | AP@0.5: 0.836
16 | AP@0.75: 0.66
17 | AR: 0.644
18 | AR@0.5: 0.877
19 | Task: Body 2D Keypoint
20 | Weights: https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_regression/coco/td-reg_mobilenetv2_rle-pretrained-8xb64-210e_coco-256x192-39b73bd5_20220922.pth
21 |
--------------------------------------------------------------------------------
/configs/body_2d_keypoint/topdown_regression/mpii/resnet_rle_mpii.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/body_2d_keypoint/topdown_regression/mpii/td-reg_res50_rle-8xb64-210e_mpii-256x256.py
3 | In Collection: RLE
4 | Metadata:
5 | Architecture:
6 | - DeepPose
7 | - RLE
8 | - ResNet
9 | Training Data: MPII
10 | Name: td-reg_res50_rle-8xb64-210e_mpii-256x256
11 | Results:
12 | - Dataset: MPII
13 | Metrics:
14 | Mean: 0.861
15 | Mean@0.1: 0.277
16 | Task: Body 2D Keypoint
17 | Weights: https://download.openmmlab.com/mmpose/top_down/deeppose/deeppose_res50_mpii_256x256_rle-5f92a619_20220504.pth
18 |
--------------------------------------------------------------------------------
/configs/body_2d_keypoint/yoloxpose/coco/yoloxpose_l_8xb32-300e_coco-640.py:
--------------------------------------------------------------------------------
1 | _base_ = './yoloxpose_s_8xb32-300e_coco-640.py'
2 |
3 | widen_factor = 1
4 | deepen_factor = 1
5 | checkpoint = 'https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_' \
6 | 'l_8x8_300e_coco/yolox_l_8x8_300e_coco_20211126_140236-d3bd2b23.pth'
7 |
8 | # model settings
9 | model = dict(
10 | backbone=dict(
11 | deepen_factor=deepen_factor,
12 | widen_factor=widen_factor,
13 | init_cfg=dict(checkpoint=checkpoint),
14 | ),
15 | neck=dict(
16 | in_channels=[256, 512, 1024], out_channels=256, num_csp_blocks=3),
17 | head=dict(head_module_cfg=dict(widen_factor=widen_factor)))
18 |
--------------------------------------------------------------------------------
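The `widen_factor`/`deepen_factor` pair is how these configs scale a single YOLOX-Pose architecture across sizes: channel counts are multiplied by `widen_factor` and block depths by `deepen_factor`. A small sketch of the channel arithmetic, assuming the usual YOLOX convention of scaling a fixed base (the assertions match the neck `in_channels` in the configs above and below):

```python
def scaled_channels(widen_factor, base=(256, 512, 1024)):
    # Neck in_channels for a given widen_factor (YOLOX-style scaling).
    return [int(c * widen_factor) for c in base]

assert scaled_channels(1.0) == [256, 512, 1024]   # yoloxpose_l
assert scaled_channels(0.75) == [192, 384, 768]   # yoloxpose_m
```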
/configs/body_2d_keypoint/yoloxpose/coco/yoloxpose_m_8xb32-300e_coco-640.py:
--------------------------------------------------------------------------------
1 | _base_ = './yoloxpose_s_8xb32-300e_coco-640.py'
2 |
3 | widen_factor = 0.75
4 | deepen_factor = 0.67
5 | checkpoint = 'https://download.openmmlab.com/mmpose/v1/pretrained_models/' \
6 | 'yolox_m_8x8_300e_coco_20230829.pth'
7 |
8 | # model settings
9 | model = dict(
10 | backbone=dict(
11 | deepen_factor=deepen_factor,
12 | widen_factor=widen_factor,
13 | init_cfg=dict(checkpoint=checkpoint),
14 | ),
15 | neck=dict(in_channels=[192, 384, 768], out_channels=192, num_csp_blocks=2),
16 | head=dict(head_module_cfg=dict(widen_factor=widen_factor)))
17 |
--------------------------------------------------------------------------------
/configs/body_3d_keypoint/README.md:
--------------------------------------------------------------------------------
1 | # Human Body 3D Pose Estimation
2 |
3 | 3D pose estimation is the detection and analysis of the X, Y, Z coordinates of human body joints from RGB images. For single-person 3D pose estimation from a monocular camera, existing works can be classified into three categories: (1) lifting from 2D poses to 3D poses (2D-to-3D pose lifting), (2) jointly learning 2D and 3D poses, and (3) directly regressing 3D poses from images.
4 |
5 | ## Data preparation
6 |
7 | Please follow [DATA Preparation](/docs/en/dataset_zoo/3d_body_keypoint.md) to prepare data.
8 |
9 | ## Demo
10 |
11 | Please follow [Demo](/demo/docs/en/3d_human_pose_demo.md) to run demos.
12 |
13 | 
14 |
--------------------------------------------------------------------------------
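As a concrete illustration of category (1), 2D-to-3D pose lifting, a lifting model can be as simple as a regressor from flattened 2D joint coordinates to 3D ones. The toy sketch below (not an MMPose model) shows the shape contract:

```python
import torch
import torch.nn as nn

class TinyLifter(nn.Module):
    """Toy 2D-to-3D lifting network: 17 joints in (x, y) -> (x, y, z)."""

    def __init__(self, num_joints=17, hidden=1024):
        super().__init__()
        self.num_joints = num_joints
        self.net = nn.Sequential(
            nn.Linear(num_joints * 2, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, num_joints * 3))

    def forward(self, kpts_2d):                 # (B, 17, 2)
        x = kpts_2d.flatten(1)                  # (B, 34)
        return self.net(x).view(-1, self.num_joints, 3)

out = TinyLifter()(torch.randn(4, 17, 2))      # -> (4, 17, 3)
```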
/configs/face_2d_keypoint/README.md:
--------------------------------------------------------------------------------
1 | # 2D Face Landmark Detection
2 |
3 | 2D face landmark detection (also referred to as face alignment) is defined as the task of detecting the face keypoints from an input image.
4 |
5 | Normally, the input images are cropped face images, where the face is located at the center;
6 | or the rough location (or the bounding box) of the face is provided.
7 |
8 | ## Data preparation
9 |
10 | Please follow [DATA Preparation](/docs/en/dataset_zoo/2d_face_keypoint.md) to prepare data.
11 |
12 | ## Demo
13 |
14 | Please follow [Demo](/demo/docs/en/2d_face_demo.md) to run demos.
15 |
16 | 
17 |
--------------------------------------------------------------------------------
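In a top-down pipeline, the provided bounding box is typically converted to a center and scale before cropping the model input. A generic sketch of that computation (an assumed common convention, not code copied from MMPose):

```python
import numpy as np

def bbox_to_center_scale(bbox, aspect_ratio=1.0, padding=1.25):
    """bbox = (x, y, w, h) -> center and scale used to crop the face.

    The box's aspect ratio is fixed to match the model input (e.g. 1.0
    for a 256x256 input) by enlarging the shorter side, then padded.
    """
    x, y, w, h = bbox
    center = np.array([x + w / 2, y + h / 2])
    if w > aspect_ratio * h:
        h = w / aspect_ratio
    else:
        w = h * aspect_ratio
    scale = np.array([w, h]) * padding
    return center, scale

center, scale = bbox_to_center_scale((40, 60, 100, 120))
# center = [90., 120.], scale = [150., 150.]
```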
/configs/face_2d_keypoint/rtmpose/coco_wholebody_face/rtmpose_coco_wholebody_face.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/face_2d_keypoint/rtmpose/coco_wholebody_face/rtmpose-m_8xb32-60e_coco-wholebody-face-256x256.py
3 | In Collection: RTMPose
4 | Metadata:
5 | Architecture:
6 | - RTMPose
7 | Training Data: COCO-WholeBody-Face
8 | Name: rtmpose-m_8xb32-60e_coco-wholebody-face-256x256
9 | Results:
10 | - Dataset: COCO-WholeBody-Face
11 | Metrics:
12 | NME: 0.0466
13 | Task: Face 2D Keypoint
14 | Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody-face_pt-aic-coco_60e-256x256-62026ef2_20230228.pth
15 |
--------------------------------------------------------------------------------
/configs/face_2d_keypoint/rtmpose/lapa/rtmpose_lapa.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/face_2d_keypoint/rtmpose/lapa/rtmpose-m_8xb64-120e_lapa-256x256.py
3 | In Collection: RTMPose
4 | Alias: face
5 | Metadata:
6 | Architecture:
7 | - RTMPose
8 | Training Data: LaPa
9 | Name: rtmpose-m_8xb64-120e_lapa-256x256
10 | Results:
11 | - Dataset: LaPa
12 | Metrics:
13 | NME: 1.29
14 | Task: Face 2D Keypoint
15 | Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-lapa_pt-aic-coco_120e-256x256-762b1ae2_20230422.pth
16 |
--------------------------------------------------------------------------------
/configs/face_2d_keypoint/rtmpose/wflw/rtmpose_wflw.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/face_2d_keypoint/rtmpose/wflw/rtmpose-m_8xb64-60e_wflw-256x256.py
3 | In Collection: RTMPose
4 | Metadata:
5 | Architecture:
6 | - RTMPose
7 | Training Data: WFLW
8 | Name: rtmpose-m_8xb64-60e_wflw-256x256
9 | Results:
10 | - Dataset: WFLW
11 | Metrics:
12 | NME: 4.01
13 | Task: Face 2D Keypoint
14 | Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-wflw_pt-aic-coco_60e-256x256-dc1dcdcf_20230228.pth
15 |
--------------------------------------------------------------------------------
/configs/face_2d_keypoint/topdown_heatmap/300wlp/hrnetv2_300wlp.yml:
--------------------------------------------------------------------------------
1 | Collections:
2 | - Name: HRNetv2
3 | Paper:
4 | Title: Deep High-Resolution Representation Learning for Visual Recognition
5 | URL: https://ieeexplore.ieee.org/abstract/document/9052469/
6 | README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/hrnetv2.md
7 | Models:
8 | - Config: configs/face_2d_keypoint/topdown_heatmap/300wlp/td-hm_hrnetv2-w18_8xb64-60e_300wlp-256x256.py
9 | In Collection: HRNetv2
10 | Metadata:
11 | Architecture:
12 | - HRNetv2
13 | Training Data: 300W-LP
14 | Name: td-hm_hrnetv2-w18_8xb64-60e_300wlp-256x256
15 | Results:
16 | - Dataset: 300W-LP
17 | Metrics:
18 | NME full: 0.0413
19 | Task: Face 2D Keypoint
20 | Weights: https://download.openmmlab.com/mmpose/v1/face_2d_keypoint/topdown_heatmap/300wlp/hrnetv2_w18_300wlp_256x256-fb433d21_20230922.pth
21 |
--------------------------------------------------------------------------------
/configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_aflw.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py
3 | In Collection: HRNetv2
4 | Metadata:
5 | Architecture:
6 | - HRNetv2
7 | Training Data: AFLW
8 | Name: td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256
9 | Results:
10 | - Dataset: AFLW
11 | Metrics:
12 | NME frontal: 1.27
13 | NME full: 1.41
14 | Task: Face 2D Keypoint
15 | Weights: https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256-f2bbc62b_20210125.pth
16 |
--------------------------------------------------------------------------------
/configs/face_2d_keypoint/topdown_heatmap/aflw/hrnetv2_dark_aflw.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_dark-8xb64-60e_aflw-256x256.py
3 | In Collection: DarkPose
4 | Metadata:
5 | Architecture:
6 | - HRNetv2
7 | - DarkPose
8 | Training Data: AFLW
9 | Name: td-hm_hrnetv2-w18_dark-8xb64-60e_aflw-256x256
10 | Results:
11 | - Dataset: AFLW
12 | Metrics:
13 | NME frontal: 1.19
14 | NME full: 1.34
15 | Task: Face 2D Keypoint
16 | Weights: https://download.openmmlab.com/mmpose/face/darkpose/hrnetv2_w18_aflw_256x256_dark-219606c0_20210125.pth
17 |
--------------------------------------------------------------------------------
/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hourglass_coco_wholebody_face.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hourglass52_8xb32-60e_coco-wholebody-face-256x256.py
3 | In Collection: Hourglass
4 | Metadata:
5 | Architecture:
6 | - Hourglass
7 | Training Data: COCO-WholeBody-Face
8 | Name: td-hm_hourglass52_8xb32-60e_coco-wholebody-face-256x256
9 | Results:
10 | - Dataset: COCO-WholeBody-Face
11 | Metrics:
12 | NME: 0.0587
13 | Task: Face 2D Keypoint
14 | Weights: https://download.openmmlab.com/mmpose/face/hourglass/hourglass52_coco_wholebody_face_256x256-6994cf2e_20210909.pth
15 |
--------------------------------------------------------------------------------
/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_coco_wholebody_face.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hrnetv2-w18_8xb32-60e_coco-wholebody-face-256x256.py
3 | In Collection: HRNetv2
4 | Metadata:
5 | Architecture:
6 | - HRNetv2
7 | Training Data: COCO-WholeBody-Face
8 | Name: td-hm_hrnetv2-w18_8xb32-60e_coco-wholebody-face-256x256
9 | Results:
10 | - Dataset: COCO-WholeBody-Face
11 | Metrics:
12 | NME: 0.0569
13 | Task: Face 2D Keypoint
14 | Weights: https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_coco_wholebody_face_256x256-c1ca469b_20210909.pth
15 |
--------------------------------------------------------------------------------
/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/hrnetv2_dark_coco_wholebody_face.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_hrnetv2-w18_dark-8xb32-60e_coco-wholebody-face-256x256.py
3 | In Collection: DarkPose
4 | Metadata:
5 | Architecture:
6 | - HRNetv2
7 | - DarkPose
8 | Training Data: COCO-WholeBody-Face
9 | Name: td-hm_hrnetv2-w18_dark-8xb32-60e_coco-wholebody-face-256x256
10 | Results:
11 | - Dataset: COCO-WholeBody-Face
12 | Metrics:
13 | NME: 0.0513
14 | Task: Face 2D Keypoint
15 | Weights: https://download.openmmlab.com/mmpose/face/darkpose/hrnetv2_w18_coco_wholebody_face_256x256_dark-3d9a334e_20210909.pth
16 |
--------------------------------------------------------------------------------
/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/mobilenetv2_coco_wholebody_face.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_mobilenetv2_8xb32-60e_coco-wholebody-face-256x256.py
3 | In Collection: SimpleBaseline2D
4 | Metadata:
5 | Architecture:
6 | - SimpleBaseline2D
7 | - MobilenetV2
8 | Training Data: COCO-WholeBody-Face
9 | Name: td-hm_mobilenetv2_8xb32-60e_coco-wholebody-face-256x256
10 | Results:
11 | - Dataset: COCO-WholeBody-Face
12 | Metrics:
13 | NME: 0.0611
14 | Task: Face 2D Keypoint
15 | Weights: https://download.openmmlab.com/mmpose/face/mobilenetv2/mobilenetv2_coco_wholebody_face_256x256-4a3f096e_20210909.pth
16 |
--------------------------------------------------------------------------------
/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/resnet_coco_wholebody_face.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_res50_8xb32-60e_coco-wholebody-face-256x256.py
3 | In Collection: SimpleBaseline2D
4 | Metadata:
5 | Architecture:
6 | - SimpleBaseline2D
7 | - ResNet
8 | Training Data: COCO-WholeBody-Face
9 | Name: td-hm_res50_8xb32-60e_coco-wholebody-face-256x256
10 | Results:
11 | - Dataset: COCO-WholeBody-Face
12 | Metrics:
13 | NME: 0.0582
14 | Task: Face 2D Keypoint
15 | Weights: https://download.openmmlab.com/mmpose/face/resnet/res50_coco_wholebody_face_256x256-5128edf5_20210909.pth
16 |
--------------------------------------------------------------------------------
/configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/scnet_coco_wholebody_face.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/face_2d_keypoint/topdown_heatmap/coco_wholebody_face/td-hm_scnet50_8xb32-60e_coco-wholebody-face-256x256.py
3 | In Collection: SimpleBaseline2D
4 | Metadata:
5 | Architecture:
6 | - SimpleBaseline2D
7 | - SCNet
8 | Training Data: COCO-WholeBody-Face
9 | Name: td-hm_scnet50_8xb32-60e_coco-wholebody-face-256x256
10 | Results:
11 | - Dataset: COCO-WholeBody-Face
12 | Metrics:
13 | NME: 0.0567
14 | Task: Face 2D Keypoint
15 | Weights: https://download.openmmlab.com/mmpose/face/scnet/scnet50_coco_wholebody_face_256x256-a0183f5f_20210909.pth
16 |
--------------------------------------------------------------------------------
/configs/face_2d_keypoint/topdown_heatmap/cofw/hrnetv2_cofw.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/face_2d_keypoint/topdown_heatmap/cofw/td-hm_hrnetv2-w18_8xb64-60e_cofw-256x256.py
3 | In Collection: HRNetv2
4 | Metadata:
5 | Architecture:
6 | - HRNetv2
7 | Training Data: COFW
8 | Name: td-hm_hrnetv2-w18_8xb64-60e_cofw-256x256
9 | Results:
10 | - Dataset: COFW
11 | Metrics:
12 | NME: 3.48
13 | Task: Face 2D Keypoint
14 | Weights: https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_cofw_256x256-49243ab8_20211019.pth
15 |
--------------------------------------------------------------------------------
/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_awing_wflw.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_awing-8xb64-60e_wflw-256x256.py
3 | In Collection: HRNetv2
4 | Metadata:
5 | Architecture:
6 | - HRNetv2
7 | - AdaptiveWingloss
8 | Training Data: WFLW
9 | Name: td-hm_hrnetv2-w18_awing-8xb64-60e_wflw-256x256
10 | Results:
11 | - Dataset: WFLW
12 | Metrics:
13 | NME blur: 4.59
14 | NME expression: 4.28
15 | NME illumination: 3.97
16 | NME makeup: 3.87
17 | NME occlusion: 4.78
18 | NME pose: 6.94
19 | NME test: 4.02
20 | Task: Face 2D Keypoint
21 | Weights: https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_wflw_256x256_awing-5af5055c_20211212.pth
22 |
--------------------------------------------------------------------------------
/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_dark_wflw.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_dark-8xb64-60e_wflw-256x256.py
3 | In Collection: DarkPose
4 | Metadata:
5 | Architecture:
6 | - HRNetv2
7 | - DarkPose
8 | Training Data: WFLW
9 | Name: td-hm_hrnetv2-w18_dark-8xb64-60e_wflw-256x256
10 | Results:
11 | - Dataset: WFLW
12 | Metrics:
13 | NME blur: 4.56
14 | NME expression: 4.29
15 | NME illumination: 3.96
16 | NME makeup: 3.89
17 | NME occlusion: 4.78
18 | NME pose: 6.98
19 | NME test: 3.98
20 | Task: Face 2D Keypoint
21 | Weights: https://download.openmmlab.com/mmpose/face/darkpose/hrnetv2_w18_wflw_256x256_dark-3f8e0c2c_20210125.pth
22 |
--------------------------------------------------------------------------------
/configs/face_2d_keypoint/topdown_heatmap/wflw/hrnetv2_wflw.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_8xb64-60e_wflw-256x256.py
3 | In Collection: HRNetv2
4 | Metadata:
5 | Architecture:
6 | - HRNetv2
7 | Training Data: WFLW
8 | Name: td-hm_hrnetv2-w18_8xb64-60e_wflw-256x256
9 | Results:
10 | - Dataset: WFLW
11 | Metrics:
12 | NME blur: 4.58
13 | NME expression: 4.33
14 | NME illumination: 3.99
15 | NME makeup: 3.94
16 | NME occlusion: 4.83
17 | NME pose: 6.97
18 | NME test: 4.06
19 | Task: Face 2D Keypoint
20 | Weights: https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_wflw_256x256-2bf032a6_20210125.pth
21 |
--------------------------------------------------------------------------------
/configs/face_2d_keypoint/topdown_regression/wflw/resnet_wflw.yml:
--------------------------------------------------------------------------------
1 | Collections:
2 | - Name: ResNet
3 | Paper:
4 | Title: Deep residual learning for image recognition
5 | URL: http://openaccess.thecvf.com/content_cvpr_2016/html/He_Deep_Residual_Learning_CVPR_2016_paper.html
6 | README: https://github.com/open-mmlab/mmpose/blob/main/docs/src/papers/backbones/resnet.md
7 | Models:
8 | - Config: configs/face_2d_keypoint/topdown_regression/wflw/td-reg_res50_8xb64-210e_wflw-256x256.py
9 | In Collection: ResNet
10 | Metadata:
11 | Architecture:
12 | - DeepPose
13 | - ResNet
14 | Training Data: WFLW
15 | Name: td-reg_res50_8xb64-210e_wflw-256x256
16 | Results:
17 | - Dataset: WFLW
18 | Metrics:
19 | NME: 4.88
20 | Task: Face 2D Keypoint
21 | Weights: https://download.openmmlab.com/mmpose/face/deeppose/deeppose_res50_wflw_256x256-92d0ba7f_20210303.pth
22 |
--------------------------------------------------------------------------------
/configs/fashion_2d_keypoint/README.md:
--------------------------------------------------------------------------------
1 | # 2D Fashion Landmark Detection
2 |
3 | 2D fashion landmark detection (also referred to as fashion alignment) aims to detect keypoints located at the functional regions of clothes, for example the neckline and the cuff.
4 |
5 | ## Data preparation
6 |
7 | Please follow [DATA Preparation](/docs/en/dataset_zoo/2d_fashion_landmark.md) to prepare data.
8 |
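9 | ## Training
10 |
11 | As a minimal sketch of how the configs in this folder are consumed (the config choice and
12 | work directory below are illustrative), training can be launched from Python with MMEngine's
13 | `Runner`:
14 |
15 | ```python
16 | from mmengine.config import Config
17 | from mmengine.runner import Runner
18 |
19 | # Load one of the DeepFashion configs shipped in this folder.
20 | cfg = Config.fromfile(
21 |     'configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/'
22 |     'td-hm_res50_8xb64-210e_deepfashion_full-256x192.py')
23 | cfg.work_dir = './work_dirs/deepfashion_full'  # Runner requires a work_dir
24 |
25 | runner = Runner.from_cfg(cfg)  # build model, dataloaders and hooks from the config
26 | runner.train()
27 | ```
28 |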
--------------------------------------------------------------------------------
/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res101_8xb64-210e_deepfashion_full-256x192.py:
--------------------------------------------------------------------------------
1 | _base_ = './td-hm_res50_8xb64-210e_deepfashion_full-256x192.py'
2 |
3 | model = dict(
4 | backbone=dict(
5 | type='ResNet',
6 | depth=101,
7 | init_cfg=dict(type='Pretrained',
8 | checkpoint='torchvision://resnet101')))
9 |
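10 | # Note: this config inherits all data, schedule, codec and head settings from the
11 | # ResNet-50 base config above and only swaps the backbone for a deeper ResNet-101
12 | # initialized from torchvision ImageNet weights; the sibling `lower` and `upper`
13 | # variants follow the same pattern.
14 |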
--------------------------------------------------------------------------------
/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res101_8xb64-210e_deepfashion_lower-256x192.py:
--------------------------------------------------------------------------------
1 | _base_ = './td-hm_res50_8xb64-210e_deepfashion_lower-256x192.py'
2 |
3 | model = dict(
4 | backbone=dict(
5 | type='ResNet',
6 | depth=101,
7 | init_cfg=dict(type='Pretrained',
8 | checkpoint='torchvision://resnet101')))
9 |
--------------------------------------------------------------------------------
/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res101_8xb64-210e_deepfashion_upper-256x192.py:
--------------------------------------------------------------------------------
1 | _base_ = './td-hm_res50_8xb64-210e_deepfashion_upper-256x192.py'
2 |
3 | model = dict(
4 | backbone=dict(
5 | type='ResNet',
6 | depth=101,
7 | init_cfg=dict(type='Pretrained',
8 | checkpoint='torchvision://resnet101')))
9 |
--------------------------------------------------------------------------------
/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res152_8xb32-210e_deepfashion_full-256x192.py:
--------------------------------------------------------------------------------
1 | _base_ = './td-hm_res50_8xb64-210e_deepfashion_full-256x192.py'
2 |
3 | # automatically scaling LR based on the actual training batch size
4 | auto_scale_lr = dict(base_batch_size=256)
5 |
6 | model = dict(
7 | backbone=dict(
8 | type='ResNet',
9 | depth=152,
10 | init_cfg=dict(type='Pretrained',
11 | checkpoint='torchvision://resnet152')))
12 |
13 | train_dataloader = dict(batch_size=32)
14 |
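15 | # Note: with 8 GPUs x 32 samples each, the effective batch size is 8 * 32 = 256.
16 | # When automatic LR scaling is enabled (typically via the `--auto-scale-lr` option
17 | # of the training script), MMEngine rescales the inherited learning rate linearly
18 | # by actual_batch_size / base_batch_size, so `base_batch_size=256` keeps the LR
19 | # unchanged at this batch size and adapts it when the GPU count or per-GPU batch
20 | # size changes.
21 |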
--------------------------------------------------------------------------------
/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res152_8xb32-210e_deepfashion_lower-256x192.py:
--------------------------------------------------------------------------------
1 | _base_ = './td-hm_res50_8xb64-210e_deepfashion_lower-256x192.py'
2 |
3 | # automatically scaling LR based on the actual training batch size
4 | auto_scale_lr = dict(base_batch_size=256)
5 |
6 | model = dict(
7 | backbone=dict(
8 | type='ResNet',
9 | depth=152,
10 | init_cfg=dict(type='Pretrained',
11 | checkpoint='torchvision://resnet152')))
12 |
13 | train_dataloader = dict(batch_size=32)
14 |
--------------------------------------------------------------------------------
/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion/td-hm_res152_8xb32-210e_deepfashion_upper-256x192.py:
--------------------------------------------------------------------------------
1 | _base_ = './td-hm_res50_8xb64-210e_deepfashion_upper-256x192.py'
2 |
3 | # automatically scaling LR based on the actual training batch size
4 | auto_scale_lr = dict(base_batch_size=256)
5 |
6 | model = dict(
7 | backbone=dict(
8 | type='ResNet',
9 | depth=152,
10 | init_cfg=dict(type='Pretrained',
11 | checkpoint='torchvision://resnet152')))
12 |
13 | train_dataloader = dict(batch_size=32)
14 |
--------------------------------------------------------------------------------
/configs/hand_2d_keypoint/README.md:
--------------------------------------------------------------------------------
1 | # 2D Hand Pose Estimation
2 |
3 | 2D hand pose estimation is defined as the task of detecting the poses (or keypoints) of the hand from an input image.
4 |
5 | Normally, the input images are cropped hand images, where the hand is located at the center,
6 | or the rough location (or the bounding box) of the hand is provided.
7 |
8 | ## Data preparation
9 |
10 | Please follow [DATA Preparation](/docs/en/dataset_zoo/2d_hand_keypoint.md) to prepare data.
11 |
12 | ## Demo
13 |
14 | Please follow [Demo](/demo/docs/en/2d_hand_demo.md) to run demos.
15 |
16 | 
17 |
18 | 
19 |
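20 | ## Quick inference
21 |
22 | As a minimal sketch (the `'hand'` model alias and the input path are assumptions to adapt),
23 | cropped hand images can also be processed with the high-level `MMPoseInferencer` API:
24 |
25 | ```python
26 | from mmpose.apis import MMPoseInferencer
27 |
28 | # 'hand' is a model alias that resolves to a pretrained 2D hand model.
29 | inferencer = MMPoseInferencer('hand')
30 |
31 | # The inferencer yields one result dict per input image.
32 | result = next(inferencer('path/to/cropped_hand.jpg', show=False))
33 | print(result['predictions'])  # predicted keypoints and scores
34 | ```
35 |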
--------------------------------------------------------------------------------
/configs/hand_2d_keypoint/rtmpose/coco_wholebody_hand/rtmpose_coco_wholebody_hand.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/hand_2d_keypoint/rtmpose/coco_wholebody_hand/rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256.py
3 | In Collection: RTMPose
4 | Metadata:
5 | Architecture:
6 | - RTMPose
7 | Training Data: COCO-WholeBody-Hand
8 | Name: rtmpose-m_8xb32-210e_coco-wholebody-hand-256x256
9 | Results:
10 | - Dataset: COCO-WholeBody-Hand
11 | Metrics:
12 | AUC: 0.815
13 | EPE: 4.51
14 | PCK@0.2: 0.837
15 | Task: Hand 2D Keypoint
16 | Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-coco-wholebody-hand_pt-aic-coco_210e-256x256-99477206_20230228.pth
17 |
--------------------------------------------------------------------------------
/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hourglass_coco_wholebody_hand.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hourglass52_8xb32-210e_coco-wholebody-hand-256x256.py
3 | In Collection: Hourglass
4 | Metadata:
5 | Architecture:
6 | - Hourglass
7 | Training Data: COCO-WholeBody-Hand
8 | Name: td-hm_hourglass52_8xb32-210e_coco-wholebody-hand-256x256
9 | Results:
10 | - Dataset: COCO-WholeBody-Hand
11 | Metrics:
12 | AUC: 0.835
13 | EPE: 4.54
14 | PCK@0.2: 0.804
15 | Task: Hand 2D Keypoint
16 | Weights: https://download.openmmlab.com/mmpose/hand/hourglass/hourglass52_coco_wholebody_hand_256x256-7b05c6db_20210909.pth
17 |
--------------------------------------------------------------------------------
/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_coco_wholebody_hand.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hrnetv2-w18_8xb32-210e_coco-wholebody-hand-256x256.py
3 | In Collection: HRNetv2
4 | Metadata:
5 | Architecture:
6 | - HRNetv2
7 | Training Data: COCO-WholeBody-Hand
8 | Name: td-hm_hrnetv2-w18_8xb32-210e_coco-wholebody-hand-256x256
9 | Results:
10 | - Dataset: COCO-WholeBody-Hand
11 | Metrics:
12 | AUC: 0.84
13 | EPE: 4.39
14 | PCK@0.2: 0.813
15 | Task: Hand 2D Keypoint
16 | Weights: https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_coco_wholebody_hand_256x256-1c028db7_20210908.pth
17 |
--------------------------------------------------------------------------------
/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/hrnetv2_dark_coco_wholebody_hand.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_hrnetv2-w18_dark-8xb32-210e_coco-wholebody-hand-256x256.py
3 | In Collection: DarkPose
4 | Metadata:
5 | Architecture:
6 | - HRNetv2
7 | - DarkPose
8 | Training Data: COCO-WholeBody-Hand
9 | Name: td-hm_hrnetv2-w18_dark-8xb32-210e_coco-wholebody-hand-256x256
10 | Results:
11 | - Dataset: COCO-WholeBody-Hand
12 | Metrics:
13 | AUC: 0.84
14 | EPE: 4.37
15 | PCK@0.2: 0.814
16 | Task: Hand 2D Keypoint
17 | Weights: https://download.openmmlab.com/mmpose/hand/dark/hrnetv2_w18_coco_wholebody_hand_256x256_dark-a9228c9c_20210908.pth
18 |
--------------------------------------------------------------------------------
/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/litehrnet_coco_wholebody_hand.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_litehrnet-w18_8xb32-210e_coco-wholebody-hand-256x256.py
3 | In Collection: LiteHRNet
4 | Metadata:
5 | Architecture:
6 | - LiteHRNet
7 | Training Data: COCO-WholeBody-Hand
8 | Name: td-hm_litehrnet-w18_8xb32-210e_coco-wholebody-hand-256x256
9 | Results:
10 | - Dataset: COCO-WholeBody-Hand
11 | Metrics:
12 | AUC: 0.83
13 | EPE: 4.77
14 | PCK@0.2: 0.795
15 | Task: Hand 2D Keypoint
16 | Weights: https://download.openmmlab.com/mmpose/hand/litehrnet/litehrnet_w18_coco_wholebody_hand_256x256-d6945e6a_20210908.pth
17 |
--------------------------------------------------------------------------------
/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/mobilenetv2_coco_wholebody_hand.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py
3 | In Collection: SimpleBaseline2D
4 | Metadata:
5 | Architecture:
6 | - SimpleBaseline2D
7 | - MobilenetV2
8 | Training Data: COCO-WholeBody-Hand
9 | Name: td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256
10 | Results:
11 | - Dataset: COCO-WholeBody-Hand
12 | Metrics:
13 | AUC: 0.829
14 | EPE: 4.77
15 | PCK@0.2: 0.795
16 | Task: Hand 2D Keypoint
17 | Weights: https://download.openmmlab.com/mmpose/hand/mobilenetv2/mobilenetv2_coco_wholebody_hand_256x256-06b8c877_20210909.pth
18 |
--------------------------------------------------------------------------------
/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/resnet_coco_wholebody_hand.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_res50_8xb32-210e_coco-wholebody-hand-256x256.py
3 | In Collection: SimpleBaseline2D
4 | Metadata:
5 | Architecture:
6 | - SimpleBaseline2D
7 | - ResNet
8 | Training Data: COCO-WholeBody-Hand
9 | Name: td-hm_res50_8xb32-210e_coco-wholebody-hand-256x256
10 | Results:
11 | - Dataset: COCO-WholeBody-Hand
12 | Metrics:
13 | AUC: 0.833
14 | EPE: 4.64
15 | PCK@0.2: 0.8
16 | Task: Hand 2D Keypoint
17 | Weights: https://download.openmmlab.com/mmpose/hand/resnet/res50_coco_wholebody_hand_256x256-8dbc750c_20210908.pth
18 |
--------------------------------------------------------------------------------
/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/scnet_coco_wholebody_hand.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_scnet50_8xb32-210e_coco-wholebody-hand-256x256.py
3 | In Collection: SimpleBaseline2D
4 | Metadata:
5 | Architecture:
6 | - SCNet
7 | Training Data: COCO-WholeBody-Hand
8 | Name: td-hm_scnet50_8xb32-210e_coco-wholebody-hand-256x256
9 | Results:
10 | - Dataset: COCO-WholeBody-Hand
11 | Metrics:
12 | AUC: 0.834
13 | EPE: 4.55
14 | PCK@0.2: 0.803
15 | Task: Hand 2D Keypoint
16 | Weights: https://download.openmmlab.com/mmpose/hand/scnet/scnet50_coco_wholebody_hand_256x256-e73414c7_20210909.pth
17 |
--------------------------------------------------------------------------------
/configs/hand_2d_keypoint/topdown_heatmap/freihand2d/resnet_freihand2d.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/hand_2d_keypoint/topdown_heatmap/freihand2d/td-hm_res50_8xb64-100e_freihand2d-224x224.py
3 | In Collection: SimpleBaseline2D
4 | Metadata:
5 | Architecture:
6 | - SimpleBaseline2D
7 | - ResNet
8 | Training Data: FreiHand
9 | Name: td-hm_res50_8xb64-100e_freihand2d-224x224
10 | Results:
11 | - Dataset: FreiHand
12 | Metrics:
13 | AUC: 0.868
14 | EPE: 3.27
15 | PCK@0.2: 0.999
16 | Task: Hand 2D Keypoint
17 | Weights: https://download.openmmlab.com/mmpose/hand/resnet/res50_freihand_224x224-ff0799bc_20200914.pth
18 |
--------------------------------------------------------------------------------
/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_dark_onehand10k.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_dark-8xb64-210e_onehand10k-256x256.py
3 | In Collection: DarkPose
4 | Metadata:
5 | Architecture:
6 | - HRNetv2
7 | - DarkPose
8 | Training Data: OneHand10K
9 | Name: td-hm_hrnetv2-w18_dark-8xb64-210e_onehand10k-256x256
10 | Results:
11 | - Dataset: OneHand10K
12 | Metrics:
13 | AUC: 0.572
14 | EPE: 23.96
15 | PCK@0.2: 0.99
16 | Task: Hand 2D Keypoint
17 | Weights: https://download.openmmlab.com/mmpose/hand/dark/hrnetv2_w18_onehand10k_256x256_dark-a2f80c64_20210330.pth
18 |
--------------------------------------------------------------------------------
/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_onehand10k.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py
3 | In Collection: HRNetv2
4 | Metadata:
5 | Architecture:
6 | - HRNetv2
7 | Training Data: OneHand10K
8 | Name: td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256
9 | Results:
10 | - Dataset: OneHand10K
11 | Metrics:
12 | AUC: 0.567
13 | EPE: 24.26
14 | PCK@0.2: 0.99
15 | Task: Hand 2D Keypoint
16 | Weights: https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth
17 |
--------------------------------------------------------------------------------
/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/hrnetv2_udp_onehand10k.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_udp-8xb64-210e_onehand10k-256x256.py
3 | In Collection: UDP
4 | Metadata:
5 | Architecture:
6 | - HRNetv2
7 | - UDP
8 | Training Data: OneHand10K
9 | Name: td-hm_hrnetv2-w18_udp-8xb64-210e_onehand10k-256x256
10 | Results:
11 | - Dataset: OneHand10K
12 | Metrics:
13 | AUC: 0.571
14 | EPE: 23.88
15 | PCK@0.2: 0.99
16 | Task: Hand 2D Keypoint
17 | Weights: https://download.openmmlab.com/mmpose/hand/udp/hrnetv2_w18_onehand10k_256x256_udp-0d1b515d_20210330.pth
18 |
--------------------------------------------------------------------------------
/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/mobilenetv2_onehand10k.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_mobilenetv2_8xb64-210e_onehand10k-256x256.py
3 | In Collection: SimpleBaseline2D
4 | Metadata:
5 | Architecture:
6 | - SimpleBaseline2D
7 | - MobilenetV2
8 | Training Data: OneHand10K
9 | Name: td-hm_mobilenetv2_8xb64-210e_onehand10k-256x256
10 | Results:
11 | - Dataset: OneHand10K
12 | Metrics:
13 | AUC: 0.537
14 | EPE: 28.56
15 | PCK@0.2: 0.986
16 | Task: Hand 2D Keypoint
17 | Weights: https://download.openmmlab.com/mmpose/hand/mobilenetv2/mobilenetv2_onehand10k_256x256-f3a3d90e_20210330.pth
18 |
--------------------------------------------------------------------------------
/configs/hand_2d_keypoint/topdown_heatmap/onehand10k/resnet_onehand10k.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_res50_8xb32-210e_onehand10k-256x256.py
3 | In Collection: SimpleBaseline2D
4 | Metadata:
5 | Architecture:
6 | - SimpleBaseline2D
7 | - ResNet
8 | Training Data: OneHand10K
9 | Name: td-hm_res50_8xb32-210e_onehand10k-256x256
10 | Results:
11 | - Dataset: OneHand10K
12 | Metrics:
13 | AUC: 0.555
14 | EPE: 25.16
15 | PCK@0.2: 0.989
16 | Task: Hand 2D Keypoint
17 | Weights: https://download.openmmlab.com/mmpose/hand/resnet/res50_onehand10k_256x256-739c8639_20210330.pth
18 |
--------------------------------------------------------------------------------
/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_dark_rhd2d.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_dark-8xb64-210e_rhd2d-256x256.py
3 | In Collection: DarkPose
4 | Metadata:
5 | Architecture:
6 | - HRNetv2
7 | - DarkPose
8 | Training Data: RHD
9 | Name: td-hm_hrnetv2-w18_dark-8xb64-210e_rhd2d-256x256
10 | Results:
11 | - Dataset: RHD
12 | Metrics:
13 | AUC: 0.903
14 | EPE: 2.18
15 | PCK@0.2: 0.992
16 | Task: Hand 2D Keypoint
17 | Weights: https://download.openmmlab.com/mmpose/hand/dark/hrnetv2_w18_rhd2d_256x256_dark-4df3a347_20210330.pth
18 |
--------------------------------------------------------------------------------
/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_rhd2d.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_8xb64-210e_rhd2d-256x256.py
3 | In Collection: HRNetv2
4 | Metadata:
5 | Architecture:
6 | - HRNetv2
7 | Training Data: RHD
8 | Name: td-hm_hrnetv2-w18_8xb64-210e_rhd2d-256x256
9 | Results:
10 | - Dataset: RHD
11 | Metrics:
12 | AUC: 0.902
13 | EPE: 2.21
14 | PCK@0.2: 0.992
15 | Task: Hand 2D Keypoint
16 | Weights: https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_rhd2d_256x256-95b20dd8_20210330.pth
17 |
--------------------------------------------------------------------------------
/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/hrnetv2_udp_rhd2d.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_hrnetv2-w18_udp-8xb64-210e_rhd2d-256x256.py
3 | In Collection: UDP
4 | Metadata:
5 | Architecture:
6 | - HRNetv2
7 | - UDP
8 | Training Data: RHD
9 | Name: td-hm_hrnetv2-w18_udp-8xb64-210e_rhd2d-256x256
10 | Results:
11 | - Dataset: RHD
12 | Metrics:
13 | AUC: 0.902
14 | EPE: 2.19
15 | PCK@0.2: 0.992
16 | Task: Hand 2D Keypoint
17 | Weights: https://download.openmmlab.com/mmpose/hand/udp/hrnetv2_w18_rhd2d_256x256_udp-63ba6007_20210330.pth
18 |
--------------------------------------------------------------------------------
/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/mobilenetv2_rhd2d.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_mobilenetv2_8xb64-210e_rhd2d-256x256.py
3 | In Collection: SimpleBaseline2D
4 | Metadata:
5 | Architecture:
6 | - SimpleBaseline2D
7 | - MobilenetV2
8 | Training Data: RHD
9 | Name: td-hm_mobilenetv2_8xb64-210e_rhd2d-256x256
10 | Results:
11 | - Dataset: RHD
12 | Metrics:
13 | AUC: 0.883
14 | EPE: 2.79
15 | PCK@0.2: 0.985
16 | Task: Hand 2D Keypoint
17 | Weights: https://download.openmmlab.com/mmpose/hand/mobilenetv2/mobilenetv2_rhd2d_256x256-85fa02db_20210330.pth
18 |
--------------------------------------------------------------------------------
/configs/hand_2d_keypoint/topdown_heatmap/rhd2d/resnet_rhd2d.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/hand_2d_keypoint/topdown_heatmap/rhd2d/td-hm_res50_8xb64-210e_rhd2d-256x256.py
3 | In Collection: SimpleBaseline2D
4 | Metadata:
5 | Architecture:
6 | - SimpleBaseline2D
7 | - ResNet
8 | Training Data: RHD
9 | Name: td-hm_res50_8xb64-210e_rhd2d-256x256
10 | Results:
11 | - Dataset: RHD
12 | Metrics:
13 | AUC: 0.898
14 | EPE: 2.32
15 | PCK@0.2: 0.991
16 | Task: Hand 2D Keypoint
17 | Weights: https://download.openmmlab.com/mmpose/hand/resnet/res50_rhd2d_256x256-5dc7e4cc_20210330.pth
18 |
--------------------------------------------------------------------------------
/configs/hand_2d_keypoint/topdown_regression/onehand10k/resnet_onehand10k.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/hand_2d_keypoint/topdown_regression/onehand10k/td-reg_res50_8xb64-210e_onehand10k-256x256.py
3 | In Collection: DeepPose
4 | Metadata:
5 | Architecture:
6 | - DeepPose
7 | - ResNet
8 | Training Data: OneHand10K
9 | Name: td-reg_res50_8xb64-210e_onehand10k-256x256
10 | Results:
11 | - Dataset: OneHand10K
12 | Metrics:
13 | AUC: 0.485
14 | EPE: 34.21
15 | PCK@0.2: 0.99
16 | Task: Hand 2D Keypoint
17 | Weights: https://download.openmmlab.com/mmpose/hand/deeppose/deeppose_res50_onehand10k_256x256-cbddf43a_20210330.pth
18 |
--------------------------------------------------------------------------------
/configs/hand_2d_keypoint/topdown_regression/rhd2d/resnet_rhd2d.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/hand_2d_keypoint/topdown_regression/rhd2d/td-reg_res50_8xb64-210e_rhd2d-256x256.py
3 | In Collection: DeepPose
4 | Metadata:
5 | Architecture:
6 | - DeepPose
7 | - ResNet
8 | Training Data: RHD
9 | Name: td-reg_res50_8xb64-210e_rhd2d-256x256
10 | Results:
11 | - Dataset: RHD
12 | Metrics:
13 | AUC: 0.865
14 | EPE: 3.32
15 | PCK@0.2: 0.988
16 | Task: Hand 2D Keypoint
17 | Weights: https://download.openmmlab.com/mmpose/hand/deeppose/deeppose_res50_rhd2d_256x256-37f1c4d3_20210330.pth
18 |
--------------------------------------------------------------------------------
/configs/hand_3d_keypoint/README.md:
--------------------------------------------------------------------------------
1 | # 3D Hand Pose Estimation
2 |
3 | 3D hand pose estimation is defined as the task of detecting the poses (or keypoints) of the hand from an input image.
4 |
5 | ## Data preparation
6 |
7 | Please follow [DATA Preparation](/docs/en/dataset_zoo/3d_hand_keypoint.md) to prepare data.
8 |
--------------------------------------------------------------------------------
/configs/hand_gesture/README.md:
--------------------------------------------------------------------------------
1 | # Gesture Recognition
2 |
3 | Gesture recognition aims to recognize hand gestures in videos, such as a thumbs-up.
4 |
5 | ## Data preparation
6 |
7 | Please follow [DATA Preparation](/docs/en/dataset_zoo/2d_hand_gesture.md) to prepare data.
8 |
9 | ## Demo
10 |
11 | Please follow [Demo](/demo/docs/en/gesture_recognition_demo.md) to run the demo.
12 |
13 |
14 |
--------------------------------------------------------------------------------
/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext_udp_coco-wholebody.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/cspnext-m_udp_8xb64-210e_coco-wholebody-256x192.py
3 | In Collection: UDP
4 | Metadata:
5 | Architecture: &id001
6 | - UDP
7 | - CSPNeXt
8 | Training Data: COCO-WholeBody
9 | Name: cspnext-m_udp_8xb64-210e_coco-wholebody-256x192
10 | Results:
11 | - Dataset: COCO-WholeBody
12 | Metrics:
13 | Body AP: 0.687
14 | Body AR: 0.735
15 | Face AP: 0.697
16 | Face AR: 0.755
17 | Foot AP: 0.680
18 | Foot AR: 0.763
19 | Hand AP: 0.46
20 | Hand AR: 0.567
21 | Whole AP: 0.567
22 | Whole AR: 0.641
23 | Task: Wholebody 2D Keypoint
24 | Weights: https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/cspnext-m_udp-coco-wholebody_pt-in1k_210e-256x192-320fa258_20230123.pth
25 |
--------------------------------------------------------------------------------
/configs/wholebody_2d_keypoint/topdown_heatmap/ubody2d/hrnet_coco-wholebody.yml:
--------------------------------------------------------------------------------
1 | Models:
2 | - Config: configs/wholebody_2d_keypoint/topdown_heatmap/ubody2d/td-hm_hrnet-w32_8xb64-210e_ubody-256x192.py
3 | In Collection: HRNet
4 | Metadata:
5 | Architecture: &id001
6 | - HRNet
7 | Training Data: UBody-COCO-WholeBody
8 | Name: td-hm_hrnet-w32_8xb64-210e_ubody-256x192
9 | Results:
10 | - Dataset: COCO-WholeBody
11 | Metrics:
12 | Body AP: 0.678
13 | Body AR: 0.755
14 | Face AP: 0.630
15 | Face AR: 0.708
16 | Foot AP: 0.543
17 | Foot AR: 0.661
18 | Hand AP: 0.467
19 | Hand AR: 0.566
20 | Whole AP: 0.536
21 | Whole AR: 0.636
22 | Task: Wholebody 2D Keypoint
23 | Weights: https://download.openmmlab.com/mmpose/v1/wholebody_2d_keypoint/ubody/td-hm_hrnet-w32_8xb64-210e_ubody-coco-256x192-7c227391_20230807.pth
24 |
--------------------------------------------------------------------------------
/demo/docs/zh_cn/3d_human_pose_demo.md:
--------------------------------------------------------------------------------
1 | Coming soon.
2 |
--------------------------------------------------------------------------------
/demo/mmdetection_cfg/rtmdet_m_640-8xb32_coco-person.py:
--------------------------------------------------------------------------------
1 | _base_ = 'mmdet::rtmdet/rtmdet_m_8xb32-300e_coco.py'
2 |
3 | checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth' # noqa
4 |
5 | model = dict(
6 | backbone=dict(
7 | init_cfg=dict(
8 | type='Pretrained', prefix='backbone.', checkpoint=checkpoint)),
9 | bbox_head=dict(num_classes=1),
10 | test_cfg=dict(
11 | nms_pre=1000,
12 | min_bbox_size=0,
13 | score_thr=0.05,
14 | nms=dict(type='nms', iou_threshold=0.6),
15 | max_per_img=100))
16 |
17 | train_dataloader = dict(dataset=dict(metainfo=dict(classes=('person', ))))
18 |
19 | val_dataloader = dict(dataset=dict(metainfo=dict(classes=('person', ))))
20 | test_dataloader = val_dataloader
21 |
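22 | # Note: this config adapts the generic RTMDet-M detector from MMDetection to the
23 | # single-class person detection used by top-down pose demos: the bbox head is
24 | # reduced to one class, the dataset metainfo is restricted to the 'person'
25 | # category, the backbone is initialized from an ImageNet-pretrained CSPNeXt
26 | # checkpoint, and explicit test-time score/NMS thresholds are set.
27 |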
--------------------------------------------------------------------------------
/demo/mmdetection_cfg/rtmdet_m_8xb32-300e_coco.py:
--------------------------------------------------------------------------------
1 | _base_ = 'mmdet::rtmdet/rtmdet_m_8xb32-300e_coco.py'
2 |
--------------------------------------------------------------------------------
/demo/mmdetection_cfg/rtmdet_tiny_8xb32-300e_coco.py:
--------------------------------------------------------------------------------
1 | _base_ = 'mmdet::rtmdet/rtmdet_tiny_8xb32-300e_coco.py'
2 |
--------------------------------------------------------------------------------
/demo/resources/demo.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/demo/resources/demo.mp4
--------------------------------------------------------------------------------
/demo/resources/demo_coco.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/demo/resources/demo_coco.gif
--------------------------------------------------------------------------------
/demo/resources/sunglasses.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/demo/resources/sunglasses.jpg
--------------------------------------------------------------------------------
/docker/serve/config.properties:
--------------------------------------------------------------------------------
1 | inference_address=http://0.0.0.0:8080
2 | management_address=http://0.0.0.0:8081
3 | metrics_address=http://0.0.0.0:8082
4 | model_store=/home/model-server/model-store
5 | load_models=all
6 |
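7 | # Ports 8080/8081/8082 expose the TorchServe inference, management and metrics
8 | # APIs respectively; all models found in model_store are loaded at startup.
9 |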
--------------------------------------------------------------------------------
/docker/serve/entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | if [[ "$1" = "serve" ]]; then
5 | shift 1
6 | torchserve --start --ts-config /home/model-server/config.properties
7 | else
8 | eval "$@"
9 | fi
10 |
11 | # prevent docker exit
12 | tail -f /dev/null
13 |
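14 | # Note: `torchserve --start` returns after daemonizing, and the `eval "$@"` branch
15 | # may also return, so the trailing `tail -f /dev/null` keeps PID 1 alive and
16 | # prevents the container from exiting once the foreground command finishes.
17 |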
--------------------------------------------------------------------------------
/docs/en/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | version: 2
2 |
3 | formats:
4 | - epub
5 |
6 | build:
7 | os: ubuntu-22.04
8 | tools:
9 | python: "3.8"
10 |
11 | sphinx:
12 | configuration: docs/en/conf.py
13 |
14 | python:
15 | install:
16 | - requirements: requirements/docs.txt
17 | - requirements: requirements/readthedocs.txt
18 |
--------------------------------------------------------------------------------
/docs/en/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/docs/en/_static/css/readthedocs.css:
--------------------------------------------------------------------------------
1 | .header-logo {
2 | background-image: url("../images/mmpose-logo.png");
3 | background-size: 120px 50px;
4 | height: 50px;
5 | width: 120px;
6 | }
7 |
8 | table.autosummary td {
9 | width: 35%
10 | }
11 |
--------------------------------------------------------------------------------
/docs/en/_static/images/mmpose-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/docs/en/_static/images/mmpose-logo.png
--------------------------------------------------------------------------------
/docs/en/advanced_guides/customize_evaluation.md:
--------------------------------------------------------------------------------
1 | # Customize Evaluation
2 |
3 | Coming soon.
4 |
5 | Currently, you can refer to [Evaluation Tutorial of MMEngine](https://mmengine.readthedocs.io/en/latest/tutorials/evaluation.html) to customize your own evaluation.
6 |
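7 | As a minimal sketch following that tutorial (the metric name, registry usage and
8 | data-sample field layout below are assumptions to verify against your own pipeline),
9 | a custom metric subclasses `BaseMetric`, accumulates per-batch results in `process()`,
10 | and reduces them in `compute_metrics()`:
11 |
12 | ```python
13 | import numpy as np
14 | from mmengine.evaluator import BaseMetric
15 |
16 | from mmpose.registry import METRICS
17 |
18 |
19 | @METRICS.register_module()
20 | class ToyEPEMetric(BaseMetric):
21 |     """Mean end-point error over all keypoints (illustrative only)."""
22 |
23 |     def process(self, data_batch, data_samples):
24 |         for sample in data_samples:
25 |             pred = np.asarray(sample['pred_instances']['keypoints'])
26 |             gt = np.asarray(sample['gt_instances']['keypoints'])
27 |             # Store one per-sample distance; aggregation happens later.
28 |             self.results.append(np.linalg.norm(pred - gt, axis=-1).mean())
29 |
30 |     def compute_metrics(self, results):
31 |         # `results` gathers everything appended in process(), across workers.
32 |         return {'EPE': float(np.mean(results))}
33 | ```
34 |
35 | The registered metric can then be referenced from a config, e.g.
36 | `val_evaluator = dict(type='ToyEPEMetric')`.
37 |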
--------------------------------------------------------------------------------
/docs/en/advanced_guides/customize_logging.md:
--------------------------------------------------------------------------------
1 | # Customize Logging
2 |
3 | Coming soon.
4 |
--------------------------------------------------------------------------------
/docs/en/advanced_guides/customize_optimizer.md:
--------------------------------------------------------------------------------
1 | # Customize Optimizer and Scheduler
2 |
3 | Coming soon.
4 |
--------------------------------------------------------------------------------
/docs/en/advanced_guides/dataflow.md:
--------------------------------------------------------------------------------
1 | # Dataflow in MMPose
2 |
3 | Coming soon.
4 |
--------------------------------------------------------------------------------
/docs/en/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=_build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/docs/en/notes/ecosystem.md:
--------------------------------------------------------------------------------
1 | # Ecosystem
2 |
3 | Coming soon.
4 |
--------------------------------------------------------------------------------
/docs/en/switch_language.md:
--------------------------------------------------------------------------------
1 | ## English
2 |
3 | ## 简体中文
4 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/300vw.md:
--------------------------------------------------------------------------------
1 | # The first facial landmark tracking in-the-wild challenge: Benchmark and results
2 |
3 |
4 |
5 |
6 | 300VW (ICCVW'2015)
7 |
8 | ```bibtex
9 | @inproceedings{shen2015first,
10 | title={The first facial landmark tracking in-the-wild challenge: Benchmark and results},
11 | author={Shen, Jie and Zafeiriou, Stefanos and Chrysos, Grigoris G and Kossaifi, Jean and Tzimiropoulos, Georgios and Pantic, Maja},
12 | booktitle={Proceedings of the IEEE international conference on computer vision workshops},
13 | pages={50--58},
14 | year={2015}
15 | }
16 | ```
17 |
18 |
19 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/300w.md:
--------------------------------------------------------------------------------
1 | # 300 faces in-the-wild challenge: Database and results
2 |
3 |
4 |
5 |
6 | 300W (IMAVIS'2016)
7 |
8 | ```bibtex
9 | @article{sagonas2016300,
10 | title={300 faces in-the-wild challenge: Database and results},
11 | author={Sagonas, Christos and Antonakos, Epameinondas and Tzimiropoulos, Georgios and Zafeiriou, Stefanos and Pantic, Maja},
12 | journal={Image and vision computing},
13 | volume={47},
14 | pages={3--18},
15 | year={2016},
16 | publisher={Elsevier}
17 | }
18 | ```
19 |
20 |
21 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/300wlp.md:
--------------------------------------------------------------------------------
1 | # Face Alignment Across Large Poses: A 3D Solution
2 |
3 |
4 |
5 |
6 | 300WLP (IEEE'2017)
7 |
8 | ```bibtex
9 | @article{zhu2017face,
10 | title={Face alignment in full pose range: A 3d total solution},
11 | author={Zhu, Xiangyu and Liu, Xiaoming and Lei, Zhen and Li, Stan Z},
12 | journal={IEEE transactions on pattern analysis and machine intelligence},
13 | year={2017},
14 | publisher={IEEE}
15 | }
16 | ```
17 |
18 |
19 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/aflw.md:
--------------------------------------------------------------------------------
1 | # Annotated facial landmarks in the wild: A large-scale, real-world database for facial landmark localization
2 |
3 |
4 |
5 |
6 | AFLW (ICCVW'2011)
7 |
8 | ```bibtex
9 | @inproceedings{koestinger2011annotated,
10 | title={Annotated facial landmarks in the wild: A large-scale, real-world database for facial landmark localization},
11 | author={Koestinger, Martin and Wohlhart, Paul and Roth, Peter M and Bischof, Horst},
12 | booktitle={2011 IEEE international conference on computer vision workshops (ICCV workshops)},
13 | pages={2144--2151},
14 | year={2011},
15 | organization={IEEE}
16 | }
17 | ```
18 |
19 |
20 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/aic.md:
--------------------------------------------------------------------------------
1 | # Ai challenger: A large-scale dataset for going deeper in image understanding
2 |
3 |
4 |
5 |
6 | AI Challenger (ArXiv'2017)
7 |
8 | ```bibtex
9 | @article{wu2017ai,
10 | title={Ai challenger: A large-scale dataset for going deeper in image understanding},
11 | author={Wu, Jiahong and Zheng, He and Zhao, Bo and Li, Yixin and Yan, Baoming and Liang, Rui and Wang, Wenjia and Zhou, Shipei and Lin, Guosen and Fu, Yanwei and others},
12 | journal={arXiv preprint arXiv:1711.06475},
13 | year={2017}
14 | }
15 | ```
16 |
17 |
18 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/animalkingdom.md:
--------------------------------------------------------------------------------
1 | # Animal Kingdom: A Large and Diverse Dataset for Animal Behavior Understanding
2 |
3 |
4 |
5 |
6 | Animal Kingdom (CVPR'2022)
7 |
8 | ```bibtex
9 | @InProceedings{Ng_2022_CVPR,
10 | author = {Ng, Xun Long and Ong, Kian Eng and Zheng, Qichen and Ni, Yun and Yeo, Si Yong and Liu, Jun},
11 | title = {Animal Kingdom: A Large and Diverse Dataset for Animal Behavior Understanding},
12 | booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
13 | month = {June},
14 | year = {2022},
15 | pages = {19023-19034}
16 | }
17 | ```
18 |
19 |
20 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/animalpose.md:
--------------------------------------------------------------------------------
1 | # Cross-Domain Adaptation for Animal Pose Estimation
2 |
3 |
4 |
5 |
6 | Animal-Pose (ICCV'2019)
7 |
8 | ```bibtex
9 | @InProceedings{Cao_2019_ICCV,
10 | author = {Cao, Jinkun and Tang, Hongyang and Fang, Hao-Shu and Shen, Xiaoyong and Lu, Cewu and Tai, Yu-Wing},
11 | title = {Cross-Domain Adaptation for Animal Pose Estimation},
12 | booktitle = {The IEEE International Conference on Computer Vision (ICCV)},
13 | month = {October},
14 | year = {2019}
15 | }
16 | ```
17 |
18 |
19 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/ap10k.md:
--------------------------------------------------------------------------------
1 | # AP-10K: A Benchmark for Animal Pose Estimation in the Wild
2 |
3 |
4 |
5 |
6 | AP-10K (NeurIPS'2021)
7 |
8 | ```bibtex
9 | @misc{yu2021ap10k,
10 | title={AP-10K: A Benchmark for Animal Pose Estimation in the Wild},
11 | author={Hang Yu and Yufei Xu and Jing Zhang and Wei Zhao and Ziyu Guan and Dacheng Tao},
12 | year={2021},
13 | eprint={2108.12617},
14 | archivePrefix={arXiv},
15 | primaryClass={cs.CV}
16 | }
17 | ```
18 |
19 |
20 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/atrw.md:
--------------------------------------------------------------------------------
1 | # ATRW: A Benchmark for Amur Tiger Re-identification in the Wild
2 |
3 |
4 |
5 |
6 | ATRW (ACM MM'2020)
7 |
8 | ```bibtex
9 | @inproceedings{li2020atrw,
10 | title={ATRW: A Benchmark for Amur Tiger Re-identification in the Wild},
11 | author={Li, Shuyuan and Li, Jianguo and Tang, Hanlin and Qian, Rui and Lin, Weiyao},
12 | booktitle={Proceedings of the 28th ACM International Conference on Multimedia},
13 | pages={2590--2598},
14 | year={2020}
15 | }
16 | ```
17 |
18 |
19 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/campus_and_shelf.md:
--------------------------------------------------------------------------------
1 | # 3D Pictorial Structures for Multiple Human Pose Estimation
2 |
3 |
4 |
5 |
6 | Campus and Shelf (CVPR'2014)
7 |
8 | ```bibtex
9 | @inproceedings {belagian14multi,
10 | title = {{3D} Pictorial Structures for Multiple Human Pose Estimation},
11 | author = {Belagiannis, Vasileios and Amin, Sikandar and Andriluka, Mykhaylo and Schiele, Bernt and
12 | Navab, Nassir and Ilic, Slobodan},
13 | booktitle = {IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR)},
14 | year = {2014},
15 | month = {June},
16 | organization={IEEE}
17 | }
18 | ```
19 |
20 |
21 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/coco.md:
--------------------------------------------------------------------------------
1 | # Microsoft coco: Common objects in context
2 |
3 |
4 |
5 |
6 | COCO (ECCV'2014)
7 |
8 | ```bibtex
9 | @inproceedings{lin2014microsoft,
10 | title={Microsoft coco: Common objects in context},
11 | author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence},
12 | booktitle={European conference on computer vision},
13 | pages={740--755},
14 | year={2014},
15 | organization={Springer}
16 | }
17 | ```
18 |
19 |
20 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/coco_wholebody.md:
--------------------------------------------------------------------------------
1 | # Whole-Body Human Pose Estimation in the Wild
2 |
3 |
4 |
5 |
6 | COCO-WholeBody (ECCV'2020)
7 |
8 | ```bibtex
9 | @inproceedings{jin2020whole,
10 | title={Whole-Body Human Pose Estimation in the Wild},
11 | author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping},
12 | booktitle={Proceedings of the European Conference on Computer Vision (ECCV)},
13 | year={2020}
14 | }
15 | ```
16 |
17 |
18 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/coco_wholebody_face.md:
--------------------------------------------------------------------------------
1 | # Whole-Body Human Pose Estimation in the Wild
2 |
3 |
4 |
5 |
6 | COCO-WholeBody-Face (ECCV'2020)
7 |
8 | ```bibtex
9 | @inproceedings{jin2020whole,
10 | title={Whole-Body Human Pose Estimation in the Wild},
11 | author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping},
12 | booktitle={Proceedings of the European Conference on Computer Vision (ECCV)},
13 | year={2020}
14 | }
15 | ```
16 |
17 |
18 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/coco_wholebody_hand.md:
--------------------------------------------------------------------------------
1 | # Whole-Body Human Pose Estimation in the Wild
2 |
3 |
4 |
5 |
6 | COCO-WholeBody-Hand (ECCV'2020)
7 |
8 | ```bibtex
9 | @inproceedings{jin2020whole,
10 | title={Whole-Body Human Pose Estimation in the Wild},
11 | author={Jin, Sheng and Xu, Lumin and Xu, Jin and Wang, Can and Liu, Wentao and Qian, Chen and Ouyang, Wanli and Luo, Ping},
12 | booktitle={Proceedings of the European Conference on Computer Vision (ECCV)},
13 | year={2020}
14 | }
15 | ```
16 |
17 |
18 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/cofw.md:
--------------------------------------------------------------------------------
1 | # Robust face landmark estimation under occlusion
2 |
3 |
4 |
5 |
6 | COFW (ICCV'2013)
7 |
8 | ```bibtex
9 | @inproceedings{burgos2013robust,
10 | title={Robust face landmark estimation under occlusion},
11 | author={Burgos-Artizzu, Xavier P and Perona, Pietro and Doll{\'a}r, Piotr},
12 | booktitle={Proceedings of the IEEE international conference on computer vision},
13 | pages={1513--1520},
14 | year={2013}
15 | }
16 | ```
17 |
18 |
19 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/crowdpose.md:
--------------------------------------------------------------------------------
1 | # CrowdPose: Efficient Crowded Scenes Pose Estimation and A New Benchmark
2 |
3 |
4 |
5 |
6 | CrowdPose (CVPR'2019)
7 |
8 | ```bibtex
9 | @article{li2018crowdpose,
10 | title={CrowdPose: Efficient Crowded Scenes Pose Estimation and A New Benchmark},
11 | author={Li, Jiefeng and Wang, Can and Zhu, Hao and Mao, Yihuan and Fang, Hao-Shu and Lu, Cewu},
12 | journal={arXiv preprint arXiv:1812.00324},
13 | year={2018}
14 | }
15 | ```
16 |
17 |
18 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/exlpose.md:
--------------------------------------------------------------------------------
1 | # Human Pose Estimation in Extremely Low-Light Conditions
2 |
3 |
4 |
5 |
6 | ExLPose (CVPR'2023)
7 |
8 | ```bibtex
9 | @inproceedings{ExLPose_2023_CVPR,
10 | title={Human Pose Estimation in Extremely Low-Light Conditions},
11 | author={Sohyun Lee and Jaesung Rim and Boseung Jeong and Geonu Kim and ByungJu Woo and Haechan Lee and Sunghyun Cho and Suha Kwak},
12 | booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
13 | year={2023}
14 | }
15 | ```
16 |
17 |
18 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/fly.md:
--------------------------------------------------------------------------------
1 | # Fast animal pose estimation using deep neural networks
2 |
3 |
4 |
5 |
6 | Vinegar Fly (Nature Methods'2019)
7 |
8 | ```bibtex
9 | @article{pereira2019fast,
10 | title={Fast animal pose estimation using deep neural networks},
11 | author={Pereira, Talmo D and Aldarondo, Diego E and Willmore, Lindsay and Kislin, Mikhail and Wang, Samuel S-H and Murthy, Mala and Shaevitz, Joshua W},
12 | journal={Nature methods},
13 | volume={16},
14 | number={1},
15 | pages={117--125},
16 | year={2019},
17 | publisher={Nature Publishing Group}
18 | }
19 | ```
20 |
21 |
22 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/freihand.md:
--------------------------------------------------------------------------------
1 | # Freihand: A dataset for markerless capture of hand pose and shape from single rgb images
2 |
3 |
4 |
5 |
6 | FreiHand (ICCV'2019)
7 |
8 | ```bibtex
9 | @inproceedings{zimmermann2019freihand,
10 | title={Freihand: A dataset for markerless capture of hand pose and shape from single rgb images},
11 | author={Zimmermann, Christian and Ceylan, Duygu and Yang, Jimei and Russell, Bryan and Argus, Max and Brox, Thomas},
12 | booktitle={Proceedings of the IEEE International Conference on Computer Vision},
13 | pages={813--822},
14 | year={2019}
15 | }
16 | ```
17 |
18 |
19 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/h36m.md:
--------------------------------------------------------------------------------
1 | # Human3.6M: Large Scale Datasets and Predictive Methods for 3D Human Sensing in Natural Environments
2 |
3 |
4 |
5 |
6 | Human3.6M (TPAMI'2014)
7 |
8 | ```bibtex
9 | @article{h36m_pami,
10 | author = {Ionescu, Catalin and Papava, Dragos and Olaru, Vlad and Sminchisescu, Cristian},
11 | title = {Human3.6M: Large Scale Datasets and Predictive Methods for 3D Human Sensing in Natural Environments},
12 | journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
13 | publisher = {IEEE Computer Society},
14 | volume = {36},
15 | number = {7},
16 | pages = {1325-1339},
17 | month = {jul},
18 | year = {2014}
19 | }
20 | ```
21 |
22 |
23 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/halpe.md:
--------------------------------------------------------------------------------
1 | # PaStaNet: Toward Human Activity Knowledge Engine
2 |
3 |
4 |
5 |
6 | Halpe (CVPR'2020)
7 |
8 | ```bibtex
9 | @inproceedings{li2020pastanet,
10 | title={PaStaNet: Toward Human Activity Knowledge Engine},
11 | author={Li, Yong-Lu and Xu, Liang and Liu, Xinpeng and Huang, Xijie and Xu, Yue and Wang, Shiyi and Fang, Hao-Shu and Ma, Ze and Chen, Mingyang and Lu, Cewu},
12 | booktitle={CVPR},
13 | year={2020}
14 | }
15 | ```
16 |
17 |
18 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/horse10.md:
--------------------------------------------------------------------------------
1 | # Pretraining boosts out-of-domain robustness for pose estimation
2 |
3 |
4 |
5 |
6 | Horse-10 (WACV'2021)
7 |
8 | ```bibtex
9 | @inproceedings{mathis2021pretraining,
10 | title={Pretraining boosts out-of-domain robustness for pose estimation},
11 | author={Mathis, Alexander and Biasi, Thomas and Schneider, Steffen and Yuksekgonul, Mert and Rogers, Byron and Bethge, Matthias and Mathis, Mackenzie W},
12 | booktitle={Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision},
13 | pages={1859--1868},
14 | year={2021}
15 | }
16 | ```
17 |
18 |
19 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/human_art.md:
--------------------------------------------------------------------------------
1 | # Human-Art: A Versatile Human-Centric Dataset Bridging Natural and Artificial Scenes
2 |
3 |
4 |
5 |
6 | Human-Art (CVPR'2023)
7 |
8 | ```bibtex
9 | @inproceedings{ju2023humanart,
10 | title={Human-Art: A Versatile Human-Centric Dataset Bridging Natural and Artificial Scenes},
11 | author={Ju, Xuan and Zeng, Ailing and Wang, Jianan and Xu, Qiang and Zhang, Lei},
12 | booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
13 | year={2023}
14 | }
15 | ```
16 |
17 |
18 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/interhand.md:
--------------------------------------------------------------------------------
1 | # InterHand2.6M: A dataset and baseline for 3D interacting hand pose estimation from a single RGB image
2 |
3 |
4 |
5 |
6 | InterHand2.6M (ECCV'2020)
7 |
8 | ```bibtex
9 | @article{moon2020interhand2,
10 | title={InterHand2.6M: A dataset and baseline for 3D interacting hand pose estimation from a single RGB image},
11 | author={Moon, Gyeongsik and Yu, Shoou-I and Wen, He and Shiratori, Takaaki and Lee, Kyoung Mu},
12 | journal={arXiv preprint arXiv:2008.09309},
13 | year={2020},
14 | publisher={Springer}
15 | }
16 | ```
17 |
18 |
19 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/jhmdb.md:
--------------------------------------------------------------------------------
1 | # Towards understanding action recognition
2 |
3 |
4 |
5 |
6 | JHMDB (ICCV'2013)
7 |
8 | ```bibtex
9 | @inproceedings{Jhuang:ICCV:2013,
10 | title = {Towards understanding action recognition},
11 | author = {H. Jhuang and J. Gall and S. Zuffi and C. Schmid and M. J. Black},
12 | booktitle = {International Conf. on Computer Vision (ICCV)},
13 | month = Dec,
14 | pages = {3192-3199},
15 | year = {2013}
16 | }
17 | ```
18 |
19 |
20 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/lapa.md:
--------------------------------------------------------------------------------
1 | # A New Dataset and Boundary-Attention Semantic Segmentation for Face Parsing
2 |
3 |
4 |
5 |
6 | LaPa (AAAI'2020)
7 |
8 | ```bibtex
9 | @inproceedings{liu2020new,
10 | title={A New Dataset and Boundary-Attention Semantic Segmentation for Face Parsing.},
11 | author={Liu, Yinglu and Shi, Hailin and Shen, Hao and Si, Yue and Wang, Xiaobo and Mei, Tao},
12 | booktitle={AAAI},
13 | pages={11637--11644},
14 | year={2020}
15 | }
16 | ```
17 |
18 |
19 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/locust.md:
--------------------------------------------------------------------------------
1 | # DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning
2 |
3 |
4 |
5 |
6 | Desert Locust (Elife'2019)
7 |
8 | ```bibtex
9 | @article{graving2019deepposekit,
10 | title={DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning},
11 | author={Graving, Jacob M and Chae, Daniel and Naik, Hemal and Li, Liang and Koger, Benjamin and Costelloe, Blair R and Couzin, Iain D},
12 | journal={Elife},
13 | volume={8},
14 | pages={e47994},
15 | year={2019},
16 | publisher={eLife Sciences Publications Limited}
17 | }
18 | ```
19 |
20 |
21 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/macaque.md:
--------------------------------------------------------------------------------
1 | # MacaquePose: A novel ‘in the wild’ macaque monkey pose dataset for markerless motion capture
2 |
3 |
4 |
5 |
6 | MacaquePose (bioRxiv'2020)
7 |
8 | ```bibtex
9 | @article{labuguen2020macaquepose,
10 | title={MacaquePose: A novel ‘in the wild’ macaque monkey pose dataset for markerless motion capture},
11 | author={Labuguen, Rollyn and Matsumoto, Jumpei and Negrete, Salvador and Nishimaru, Hiroshi and Nishijo, Hisao and Takada, Masahiko and Go, Yasuhiro and Inoue, Ken-ichi and Shibata, Tomohiro},
12 | journal={bioRxiv},
13 | year={2020},
14 | publisher={Cold Spring Harbor Laboratory}
15 | }
16 | ```
17 |
18 |
19 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/mhp.md:
--------------------------------------------------------------------------------
1 | # Understanding humans in crowded scenes: Deep nested adversarial learning and a new benchmark for multi-human parsing
2 |
3 |
4 |
5 |
6 | MHP (ACM MM'2018)
7 |
8 | ```bibtex
9 | @inproceedings{zhao2018understanding,
10 | title={Understanding humans in crowded scenes: Deep nested adversarial learning and a new benchmark for multi-human parsing},
11 | author={Zhao, Jian and Li, Jianshu and Cheng, Yu and Sim, Terence and Yan, Shuicheng and Feng, Jiashi},
12 | booktitle={Proceedings of the 26th ACM international conference on Multimedia},
13 | pages={792--800},
14 | year={2018}
15 | }
16 | ```
17 |
18 |
19 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/mpi_inf_3dhp.md:
--------------------------------------------------------------------------------
1 | # Monocular 3D Human Pose Estimation In The Wild Using Improved CNN Supervision
2 |
3 |
4 |
5 |
6 | MPI-INF-3DHP (3DV'2017)
7 |
8 | ```bibtex
9 | @inproceedings{mono-3dhp2017,
10 | author = {Mehta, Dushyant and Rhodin, Helge and Casas, Dan and Fua, Pascal and Sotnychenko, Oleksandr and Xu, Weipeng and Theobalt, Christian},
11 | title = {Monocular 3D Human Pose Estimation In The Wild Using Improved CNN Supervision},
12 | booktitle = {3D Vision (3DV), 2017 Fifth International Conference on},
13 | url = {http://gvv.mpi-inf.mpg.de/3dhp_dataset},
14 | year = {2017},
15 | organization={IEEE},
16 | doi={10.1109/3dv.2017.00064},
17 | }
18 | ```
19 |
20 |
21 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/mpii.md:
--------------------------------------------------------------------------------
1 | # 2D Human Pose Estimation: New Benchmark and State of the Art Analysis
2 |
3 |
4 |
5 |
6 | MPII (CVPR'2014)
7 |
8 | ```bibtex
9 | @inproceedings{andriluka14cvpr,
10 | author = {Mykhaylo Andriluka and Leonid Pishchulin and Peter Gehler and Bernt Schiele},
11 | title = {2D Human Pose Estimation: New Benchmark and State of the Art Analysis},
12 | booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
13 | year = {2014},
14 | month = {June}
15 | }
16 | ```
17 |
18 |
19 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/mpii_trb.md:
--------------------------------------------------------------------------------
1 | # TRB: A Novel Triplet Representation for Understanding 2D Human Body
2 |
3 |
4 |
5 |
6 | MPII-TRB (ICCV'2019)
7 |
8 | ```bibtex
9 | @inproceedings{duan2019trb,
10 | title={TRB: A Novel Triplet Representation for Understanding 2D Human Body},
11 | author={Duan, Haodong and Lin, Kwan-Yee and Jin, Sheng and Liu, Wentao and Qian, Chen and Ouyang, Wanli},
12 | booktitle={Proceedings of the IEEE International Conference on Computer Vision},
13 | pages={9479--9488},
14 | year={2019}
15 | }
16 | ```
17 |
18 |
19 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/ochuman.md:
--------------------------------------------------------------------------------
1 | # Pose2seg: Detection free human instance segmentation
2 |
3 |
4 |
5 |
6 | OCHuman (CVPR'2019)
7 |
8 | ```bibtex
9 | @inproceedings{zhang2019pose2seg,
10 | title={Pose2seg: Detection free human instance segmentation},
11 | author={Zhang, Song-Hai and Li, Ruilong and Dong, Xin and Rosin, Paul and Cai, Zixi and Han, Xi and Yang, Dingcheng and Huang, Haozhi and Hu, Shi-Min},
12 | booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition},
13 | pages={889--898},
14 | year={2019}
15 | }
16 | ```
17 |
18 |
19 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/onehand10k.md:
--------------------------------------------------------------------------------
1 | # Mask-pose cascaded cnn for 2d hand pose estimation from single color image
2 |
3 |
4 |
5 |
6 | OneHand10K (TCSVT'2019)
7 |
8 | ```bibtex
9 | @article{wang2018mask,
10 | title={Mask-pose cascaded cnn for 2d hand pose estimation from single color image},
11 | author={Wang, Yangang and Peng, Cong and Liu, Yebin},
12 | journal={IEEE Transactions on Circuits and Systems for Video Technology},
13 | volume={29},
14 | number={11},
15 | pages={3258--3268},
16 | year={2018},
17 | publisher={IEEE}
18 | }
19 | ```
20 |
21 |
22 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/panoptic.md:
--------------------------------------------------------------------------------
1 | # Hand keypoint detection in single images using multiview bootstrapping
2 |
3 |
4 |
5 |
6 | CMU Panoptic HandDB (CVPR'2017)
7 |
8 | ```bibtex
9 | @inproceedings{simon2017hand,
10 | title={Hand keypoint detection in single images using multiview bootstrapping},
11 | author={Simon, Tomas and Joo, Hanbyul and Matthews, Iain and Sheikh, Yaser},
12 | booktitle={Proceedings of the IEEE conference on Computer Vision and Pattern Recognition},
13 | pages={1145--1153},
14 | year={2017}
15 | }
16 | ```
17 |
18 |
19 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/panoptic_body3d.md:
--------------------------------------------------------------------------------
1 | # Panoptic Studio: A Massively Multiview System for Social Motion Capture
2 |
3 |
4 |
5 |
6 | CMU Panoptic (ICCV'2015)
7 |
8 | ```bibtex
9 | @inproceedings{joo_iccv_2015,
10 | author = {Hanbyul Joo and Hao Liu and Lei Tan and Lin Gui and Bart Nabbe and Iain Matthews and Takeo Kanade and Shohei Nobuhara and Yaser Sheikh},
11 | title = {Panoptic Studio: A Massively Multiview System for Social Motion Capture},
12 | booktitle = {ICCV},
13 | year = {2015}
14 | }
15 | ```
16 |
17 |
18 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/posetrack18.md:
--------------------------------------------------------------------------------
1 | # Posetrack: A benchmark for human pose estimation and tracking
2 |
3 |
4 |
5 |
6 | PoseTrack18 (CVPR'2018)
7 |
8 | ```bibtex
9 | @inproceedings{andriluka2018posetrack,
10 | title={Posetrack: A benchmark for human pose estimation and tracking},
11 | author={Andriluka, Mykhaylo and Iqbal, Umar and Insafutdinov, Eldar and Pishchulin, Leonid and Milan, Anton and Gall, Juergen and Schiele, Bernt},
12 | booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
13 | pages={5167--5176},
14 | year={2018}
15 | }
16 | ```
17 |
18 |
19 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/rhd.md:
--------------------------------------------------------------------------------
1 | # Learning to Estimate 3D Hand Pose from Single RGB Images
2 |
3 |
4 |
5 |
6 | RHD (ICCV'2017)
7 |
8 | ```bibtex
9 | @TechReport{zb2017hand,
10 | author={Christian Zimmermann and Thomas Brox},
11 | title={Learning to Estimate 3D Hand Pose from Single RGB Images},
12 | institution={arXiv:1705.01389},
13 | year={2017},
14 | note="https://arxiv.org/abs/1705.01389",
15 | url="https://lmb.informatik.uni-freiburg.de/projects/hand3d/"
16 | }
17 | ```
18 |
19 |
20 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/ubody.md:
--------------------------------------------------------------------------------
1 | # One-Stage 3D Whole-Body Mesh Recovery with Component Aware Transformer
2 |
3 |
4 |
5 |
6 | UBody (CVPR'2023)
7 |
8 | ```bibtex
9 | @inproceedings{lin2023one,
10 | title={One-Stage 3D Whole-Body Mesh Recovery with Component Aware Transformer},
11 | author={Lin, Jing and Zeng, Ailing and Wang, Haoqian and Zhang, Lei and Li, Yu},
12 | booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
13 | year={2023},
14 | }
15 | ```
16 |
17 |
18 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/wflw.md:
--------------------------------------------------------------------------------
1 | # Look at boundary: A boundary-aware face alignment algorithm
2 |
3 |
4 |
5 |
6 | WFLW (CVPR'2018)
7 |
8 | ```bibtex
9 | @inproceedings{wu2018look,
10 | title={Look at boundary: A boundary-aware face alignment algorithm},
11 | author={Wu, Wayne and Qian, Chen and Yang, Shuo and Wang, Quan and Cai, Yici and Zhou, Qiang},
12 | booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition},
13 | pages={2129--2138},
14 | year={2018}
15 | }
16 | ```
17 |
18 |
19 |
--------------------------------------------------------------------------------
/docs/src/papers/datasets/zebra.md:
--------------------------------------------------------------------------------
1 | # DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning
2 |
3 |
4 |
5 |
6 | Grévy’s Zebra (Elife'2019)
7 |
8 | ```bibtex
9 | @article{graving2019deepposekit,
10 | title={DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning},
11 | author={Graving, Jacob M and Chae, Daniel and Naik, Hemal and Li, Liang and Koger, Benjamin and Costelloe, Blair R and Couzin, Iain D},
12 | journal={Elife},
13 | volume={8},
14 | pages={e47994},
15 | year={2019},
16 | publisher={eLife Sciences Publications Limited}
17 | }
18 | ```
19 |
20 |
21 |
--------------------------------------------------------------------------------
/docs/src/papers/techniques/albumentations.md:
--------------------------------------------------------------------------------
1 | # Albumentations: fast and flexible image augmentations
2 |
3 |
4 |
5 |
6 | Albumentations (Information'2020)
7 |
8 | ```bibtex
9 | @article{buslaev2020albumentations,
10 | title={Albumentations: fast and flexible image augmentations},
11 | author={Buslaev, Alexander and Iglovikov, Vladimir I and Khvedchenya, Eugene and Parinov, Alex and Druzhinin, Mikhail and Kalinin, Alexandr A},
12 | journal={Information},
13 | volume={11},
14 | number={2},
15 | pages={125},
16 | year={2020},
17 | publisher={Multidisciplinary Digital Publishing Institute}
18 | }
19 | ```
20 |
21 |
22 |
--------------------------------------------------------------------------------
/docs/src/papers/techniques/fp16.md:
--------------------------------------------------------------------------------
1 | # Mixed Precision Training
2 |
3 |
4 |
5 |
6 | FP16 (ArXiv'2017)
7 |
8 | ```bibtex
9 | @article{micikevicius2017mixed,
10 | title={Mixed precision training},
11 | author={Micikevicius, Paulius and Narang, Sharan and Alben, Jonah and Diamos, Gregory and Elsen, Erich and Garcia, David and Ginsburg, Boris and Houston, Michael and Kuchaiev, Oleksii and Venkatesh, Ganesh and others},
12 | journal={arXiv preprint arXiv:1710.03740},
13 | year={2017}
14 | }
15 | ```
16 |
17 |
18 |
--------------------------------------------------------------------------------
/docs/zh_cn/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | version: 2
2 |
3 | formats:
4 | - epub
5 |
6 | build:
7 | os: ubuntu-22.04
8 | tools:
9 | python: "3.8"
10 |
11 | sphinx:
12 | configuration: docs/zh_cn/conf.py
13 |
14 | python:
15 | install:
16 | - requirements: requirements/docs.txt
17 | - requirements: requirements/readthedocs.txt
18 |
--------------------------------------------------------------------------------
/docs/zh_cn/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/docs/zh_cn/_static/css/readthedocs.css:
--------------------------------------------------------------------------------
1 | .header-logo {
2 | background-image: url("../images/mmpose-logo.png");
3 | background-size: 120px 50px;
4 | height: 50px;
5 | width: 120px;
6 | }
7 |
8 | table.autosummary td {
9 | width: 35%;
10 | }
11 |
--------------------------------------------------------------------------------
/docs/zh_cn/_static/images/mmpose-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/docs/zh_cn/_static/images/mmpose-logo.png
--------------------------------------------------------------------------------
/docs/zh_cn/advanced_guides/customize_evaluation.md:
--------------------------------------------------------------------------------
1 | # Customize Evaluation
2 |
3 | Coming soon.
4 |
5 | Currently, you can refer to [Evaluation Tutorial of MMEngine](https://mmengine.readthedocs.io/en/latest/tutorials/evaluation.html) to customize your own evaluation.
6 |
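In the meantime, here is a minimal hedged sketch of a custom metric following the MMEngine `BaseMetric` interface from that tutorial. The class name `ToyPCK`, the fixed 10-pixel threshold, and the keypoint field layout are illustrative assumptions, not an MMPose API.

```python
# A hedged sketch assuming the BaseMetric interface described in the
# MMEngine tutorial; `ToyPCK` and its 10-pixel threshold are hypothetical.
import numpy as np
from mmengine.evaluator import BaseMetric
from mmpose.registry import METRICS


@METRICS.register_module()
class ToyPCK(BaseMetric):

    def process(self, data_batch, data_samples):
        # collect (prediction, ground truth) keypoint pairs per sample
        for sample in data_samples:
            pred = np.asarray(sample['pred_instances']['keypoints'])
            gt = np.asarray(sample['gt_instances']['keypoints'])
            self.results.append((pred, gt))

    def compute_metrics(self, results):
        # fraction of keypoints within 10 pixels of the ground truth
        dists = [np.linalg.norm(p - g, axis=-1) for p, g in results]
        correct = sum((d < 10).sum() for d in dists)
        total = sum(d.size for d in dists)
        return dict(toy_pck=correct / total)
```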
--------------------------------------------------------------------------------
/docs/zh_cn/advanced_guides/customize_logging.md:
--------------------------------------------------------------------------------
1 | # Customize Logging
2 |
3 | Coming soon.
4 |
--------------------------------------------------------------------------------
/docs/zh_cn/advanced_guides/customize_optimizer.md:
--------------------------------------------------------------------------------
1 | # Customize Optimizer and Scheduler
2 |
3 | Coming soon.
4 |
--------------------------------------------------------------------------------
/docs/zh_cn/advanced_guides/dataflow.md:
--------------------------------------------------------------------------------
1 | # Dataflow in MMPose
2 |
3 | Coming soon.
4 |
--------------------------------------------------------------------------------
/docs/zh_cn/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=_build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/docs/zh_cn/notes/ecosystem.md:
--------------------------------------------------------------------------------
1 | # Ecosystem
2 |
3 | Coming soon.
4 |
--------------------------------------------------------------------------------
/docs/zh_cn/switch_language.md:
--------------------------------------------------------------------------------
1 | ## Simplified Chinese
2 |
3 | ## English
4 |
--------------------------------------------------------------------------------
/mmpose/apis/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .inference import (collect_multi_frames, inference_bottomup,
3 | inference_topdown, init_model)
4 | from .inference_3d import (collate_pose_sequence, convert_keypoint_definition,
5 | extract_pose_sequence, inference_pose_lifter_model)
6 | from .inference_tracking import _compute_iou, _track_by_iou, _track_by_oks
7 | from .inferencers import MMPoseInferencer, Pose2DInferencer
8 | from .visualization import visualize
9 |
10 | __all__ = [
11 | 'init_model', 'inference_topdown', 'inference_bottomup',
12 | 'collect_multi_frames', 'Pose2DInferencer', 'MMPoseInferencer',
13 | '_track_by_iou', '_track_by_oks', '_compute_iou',
14 | 'inference_pose_lifter_model', 'extract_pose_sequence',
15 | 'convert_keypoint_definition', 'collate_pose_sequence', 'visualize'
16 | ]
17 |
--------------------------------------------------------------------------------
/mmpose/apis/inferencers/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .hand3d_inferencer import Hand3DInferencer
3 | from .mmpose_inferencer import MMPoseInferencer
4 | from .pose2d_inferencer import Pose2DInferencer
5 | from .pose3d_inferencer import Pose3DInferencer
6 | from .utils import get_model_aliases
7 |
8 | __all__ = [
9 | 'Pose2DInferencer', 'MMPoseInferencer', 'get_model_aliases',
10 | 'Pose3DInferencer', 'Hand3DInferencer'
11 | ]
12 |
--------------------------------------------------------------------------------
/mmpose/apis/inferencers/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .default_det_models import default_det_models
3 | from .get_model_alias import get_model_aliases
4 |
5 | __all__ = ['default_det_models', 'get_model_aliases']
6 |
--------------------------------------------------------------------------------
/mmpose/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .builder import build_dataset
3 | from .dataset_wrappers import CombinedDataset
4 | from .datasets import * # noqa
5 | from .samplers import MultiSourceSampler
6 | from .transforms import * # noqa
7 |
8 | __all__ = ['build_dataset', 'CombinedDataset', 'MultiSourceSampler']
9 |
--------------------------------------------------------------------------------
/mmpose/datasets/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .animal import * # noqa: F401, F403
3 | from .base import * # noqa: F401, F403
4 | from .body import * # noqa: F401, F403
5 | from .body3d import * # noqa: F401, F403
6 | from .face import * # noqa: F401, F403
7 | from .fashion import * # noqa: F401, F403
8 | from .hand import * # noqa: F401, F403
9 | from .hand3d import * # noqa: F401, F403
10 | from .wholebody import * # noqa: F401, F403
11 | from .wholebody3d import * # noqa: F401, F403
12 |
--------------------------------------------------------------------------------
/mmpose/datasets/datasets/animal/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .animalkingdom_dataset import AnimalKingdomDataset
3 | from .animalpose_dataset import AnimalPoseDataset
4 | from .ap10k_dataset import AP10KDataset
5 | from .atrw_dataset import ATRWDataset
6 | from .fly_dataset import FlyDataset
7 | from .horse10_dataset import Horse10Dataset
8 | from .locust_dataset import LocustDataset
9 | from .macaque_dataset import MacaqueDataset
10 | from .zebra_dataset import ZebraDataset
11 |
12 | __all__ = [
13 | 'AnimalPoseDataset', 'AP10KDataset', 'Horse10Dataset', 'MacaqueDataset',
14 | 'FlyDataset', 'LocustDataset', 'ZebraDataset', 'ATRWDataset',
15 | 'AnimalKingdomDataset'
16 | ]
17 |
--------------------------------------------------------------------------------
/mmpose/datasets/datasets/base/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .base_coco_style_dataset import BaseCocoStyleDataset
3 | from .base_mocap_dataset import BaseMocapDataset
4 |
5 | __all__ = ['BaseCocoStyleDataset', 'BaseMocapDataset']
6 |
--------------------------------------------------------------------------------
/mmpose/datasets/datasets/body3d/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .h36m_dataset import Human36mDataset
3 |
4 | __all__ = ['Human36mDataset']
5 |
--------------------------------------------------------------------------------
/mmpose/datasets/datasets/face/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .aflw_dataset import AFLWDataset
3 | from .coco_wholebody_face_dataset import CocoWholeBodyFaceDataset
4 | from .cofw_dataset import COFWDataset
5 | from .face_300vw_dataset import Face300VWDataset
6 | from .face_300w_dataset import Face300WDataset
7 | from .face_300wlp_dataset import Face300WLPDataset
8 | from .lapa_dataset import LapaDataset
9 | from .wflw_dataset import WFLWDataset
10 |
11 | __all__ = [
12 | 'Face300WDataset', 'WFLWDataset', 'AFLWDataset', 'COFWDataset',
13 | 'CocoWholeBodyFaceDataset', 'LapaDataset', 'Face300WLPDataset',
14 | 'Face300VWDataset'
15 | ]
16 |
--------------------------------------------------------------------------------
/mmpose/datasets/datasets/fashion/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .deepfashion2_dataset import DeepFashion2Dataset
3 | from .deepfashion_dataset import DeepFashionDataset
4 |
5 | __all__ = ['DeepFashionDataset', 'DeepFashion2Dataset']
6 |
--------------------------------------------------------------------------------
/mmpose/datasets/datasets/fashion/deepfashion2_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmpose.registry import DATASETS
3 | from ..base import BaseCocoStyleDataset
4 |
5 |
6 | @DATASETS.register_module(name='DeepFashion2Dataset')
7 | class DeepFashion2Dataset(BaseCocoStyleDataset):
8 | """DeepFashion2 dataset for fashion landmark detection."""
9 |
10 | METAINFO: dict = dict(from_file='configs/_base_/datasets/deepfashion2.py')
11 |
--------------------------------------------------------------------------------
/mmpose/datasets/datasets/hand/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .coco_wholebody_hand_dataset import CocoWholeBodyHandDataset
3 | from .freihand_dataset import FreiHandDataset
4 | from .interhand2d_double_dataset import InterHand2DDoubleDataset
5 | from .onehand10k_dataset import OneHand10KDataset
6 | from .panoptic_hand2d_dataset import PanopticHand2DDataset
7 | from .rhd2d_dataset import Rhd2DDataset
8 |
9 | __all__ = [
10 | 'OneHand10KDataset', 'FreiHandDataset', 'PanopticHand2DDataset',
11 | 'Rhd2DDataset', 'CocoWholeBodyHandDataset', 'InterHand2DDoubleDataset'
12 | ]
13 |
--------------------------------------------------------------------------------
/mmpose/datasets/datasets/hand3d/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .interhand_3d_dataset import InterHand3DDataset
3 |
4 | __all__ = ['InterHand3DDataset']
5 |
--------------------------------------------------------------------------------
/mmpose/datasets/datasets/wholebody/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .coco_wholebody_dataset import CocoWholeBodyDataset
3 | from .halpe_dataset import HalpeDataset
4 | from .ubody2d_dataset import UBody2dDataset
5 |
6 | __all__ = ['CocoWholeBodyDataset', 'HalpeDataset', 'UBody2dDataset']
7 |
--------------------------------------------------------------------------------
/mmpose/datasets/datasets/wholebody3d/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .h3wb_dataset import H36MWholeBodyDataset
3 | from .ubody3d_dataset import UBody3dDataset
4 |
5 | __all__ = ['UBody3dDataset', 'H36MWholeBodyDataset']
6 |
--------------------------------------------------------------------------------
/mmpose/engine/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .hooks import * # noqa: F401, F403
3 | from .optim_wrappers import * # noqa: F401, F403
4 | from .schedulers import * # noqa: F401, F403
5 |
--------------------------------------------------------------------------------
/mmpose/engine/hooks/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .badcase_hook import BadCaseAnalysisHook
3 | from .ema_hook import ExpMomentumEMA
4 | from .mode_switch_hooks import RTMOModeSwitchHook, YOLOXPoseModeSwitchHook
5 | from .sync_norm_hook import SyncNormHook
6 | from .visualization_hook import PoseVisualizationHook
7 |
8 | __all__ = [
9 | 'PoseVisualizationHook', 'ExpMomentumEMA', 'BadCaseAnalysisHook',
10 | 'YOLOXPoseModeSwitchHook', 'SyncNormHook', 'RTMOModeSwitchHook'
11 | ]
12 |
--------------------------------------------------------------------------------
/mmpose/engine/optim_wrappers/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .force_default_constructor import ForceDefaultOptimWrapperConstructor
3 | from .layer_decay_optim_wrapper import LayerDecayOptimWrapperConstructor
4 |
5 | __all__ = [
6 | 'LayerDecayOptimWrapperConstructor', 'ForceDefaultOptimWrapperConstructor'
7 | ]
8 |
--------------------------------------------------------------------------------
/mmpose/engine/schedulers/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .constant_lr import ConstantLR
3 | from .quadratic_warmup import (QuadraticWarmupLR, QuadraticWarmupMomentum,
4 | QuadraticWarmupParamScheduler)
5 |
6 | __all__ = [
7 | 'QuadraticWarmupParamScheduler', 'QuadraticWarmupMomentum',
8 | 'QuadraticWarmupLR', 'ConstantLR'
9 | ]
10 |
--------------------------------------------------------------------------------
/mmpose/evaluation/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .evaluators import * # noqa: F401,F403
3 | from .functional import * # noqa: F401,F403
4 | from .metrics import * # noqa: F401,F403
5 |
--------------------------------------------------------------------------------
/mmpose/evaluation/evaluators/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .mutli_dataset_evaluator import MultiDatasetEvaluator
3 |
4 | __all__ = ['MultiDatasetEvaluator']
5 |
--------------------------------------------------------------------------------
/mmpose/evaluation/functional/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .keypoint_eval import (keypoint_auc, keypoint_epe, keypoint_mpjpe,
3 | keypoint_nme, keypoint_pck_accuracy,
4 | multilabel_classification_accuracy,
5 | pose_pck_accuracy, simcc_pck_accuracy)
6 | from .nms import nearby_joints_nms, nms, nms_torch, oks_nms, soft_oks_nms
7 | from .transforms import transform_ann, transform_pred, transform_sigmas
8 |
9 | __all__ = [
10 | 'keypoint_pck_accuracy', 'keypoint_auc', 'keypoint_nme', 'keypoint_epe',
11 | 'pose_pck_accuracy', 'multilabel_classification_accuracy',
12 | 'simcc_pck_accuracy', 'nms', 'oks_nms', 'soft_oks_nms', 'keypoint_mpjpe',
13 | 'nms_torch', 'transform_ann', 'transform_sigmas', 'transform_pred',
14 | 'nearby_joints_nms'
15 | ]
16 |
--------------------------------------------------------------------------------
/mmpose/evaluation/metrics/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .coco_metric import CocoMetric
3 | from .coco_wholebody_metric import CocoWholeBodyMetric
4 | from .hand_metric import InterHandMetric
5 | from .keypoint_2d_metrics import (AUC, EPE, NME, JhmdbPCKAccuracy,
6 | MpiiPCKAccuracy, PCKAccuracy)
7 | from .keypoint_3d_metrics import MPJPE
8 | from .keypoint_partition_metric import KeypointPartitionMetric
9 | from .posetrack18_metric import PoseTrack18Metric
10 | from .simple_keypoint_3d_metrics import SimpleMPJPE
11 |
12 | __all__ = [
13 | 'CocoMetric', 'PCKAccuracy', 'MpiiPCKAccuracy', 'JhmdbPCKAccuracy', 'AUC',
14 | 'EPE', 'NME', 'PoseTrack18Metric', 'CocoWholeBodyMetric',
15 | 'KeypointPartitionMetric', 'MPJPE', 'InterHandMetric', 'SimpleMPJPE'
16 | ]
17 |
--------------------------------------------------------------------------------
/mmpose/models/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .backbones import * # noqa
3 | from .builder import (BACKBONES, HEADS, LOSSES, NECKS, build_backbone,
4 | build_head, build_loss, build_neck, build_pose_estimator,
5 | build_posenet)
6 | from .data_preprocessors import * # noqa
7 | from .distillers import * # noqa
8 | from .heads import * # noqa
9 | from .losses import * # noqa
10 | from .necks import * # noqa
11 | from .pose_estimators import * # noqa
12 |
13 | __all__ = [
14 | 'BACKBONES',
15 | 'HEADS',
16 | 'NECKS',
17 | 'LOSSES',
18 | 'build_backbone',
19 | 'build_head',
20 | 'build_loss',
21 | 'build_posenet',
22 | 'build_neck',
23 | 'build_pose_estimator',
24 | ]
25 |
--------------------------------------------------------------------------------
/mmpose/models/backbones/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .channel_shuffle import channel_shuffle
3 | from .inverted_residual import InvertedResidual
4 | from .make_divisible import make_divisible
5 | from .se_layer import SELayer
6 | from .utils import get_state_dict, load_checkpoint
7 |
8 | __all__ = [
9 | 'channel_shuffle', 'make_divisible', 'InvertedResidual', 'SELayer',
10 | 'load_checkpoint', 'get_state_dict'
11 | ]
12 |
--------------------------------------------------------------------------------
/mmpose/models/data_preprocessors/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .batch_augmentation import BatchSyncRandomResize
3 | from .data_preprocessor import PoseDataPreprocessor
4 |
5 | __all__ = [
6 | 'PoseDataPreprocessor',
7 | 'BatchSyncRandomResize',
8 | ]
9 |
--------------------------------------------------------------------------------
/mmpose/models/distillers/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .dwpose_distiller import DWPoseDistiller
3 |
4 | __all__ = ['DWPoseDistiller']
5 |
--------------------------------------------------------------------------------
/mmpose/models/heads/coord_cls_heads/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .rtmcc_head import RTMCCHead
3 | from .rtmw_head import RTMWHead
4 | from .simcc_head import SimCCHead
5 |
6 | __all__ = ['SimCCHead', 'RTMCCHead', 'RTMWHead']
7 |
--------------------------------------------------------------------------------
/mmpose/models/heads/heatmap_heads/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .ae_head import AssociativeEmbeddingHead
3 | from .cid_head import CIDHead
4 | from .cpm_head import CPMHead
5 | from .heatmap_head import HeatmapHead
6 | from .internet_head import InternetHead
7 | from .mspn_head import MSPNHead
8 | from .vipnas_head import ViPNASHead
9 |
10 | __all__ = [
11 | 'HeatmapHead', 'CPMHead', 'MSPNHead', 'ViPNASHead',
12 | 'AssociativeEmbeddingHead', 'CIDHead', 'InternetHead'
13 | ]
14 |
--------------------------------------------------------------------------------
/mmpose/models/heads/hybrid_heads/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .dekr_head import DEKRHead
3 | from .rtmo_head import RTMOHead
4 | from .vis_head import VisPredictHead
5 | from .yoloxpose_head import YOLOXPoseHead
6 |
7 | __all__ = ['DEKRHead', 'VisPredictHead', 'YOLOXPoseHead', 'RTMOHead']
8 |
--------------------------------------------------------------------------------
/mmpose/models/heads/regression_heads/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .dsnt_head import DSNTHead
3 | from .integral_regression_head import IntegralRegressionHead
4 | from .motion_regression_head import MotionRegressionHead
5 | from .regression_head import RegressionHead
6 | from .rle_head import RLEHead
7 | from .temporal_regression_head import TemporalRegressionHead
8 | from .trajectory_regression_head import TrajectoryRegressionHead
9 |
10 | __all__ = [
11 | 'RegressionHead', 'IntegralRegressionHead', 'DSNTHead', 'RLEHead',
12 | 'TemporalRegressionHead', 'TrajectoryRegressionHead',
13 | 'MotionRegressionHead'
14 | ]
15 |
--------------------------------------------------------------------------------
/mmpose/models/necks/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .channel_mapper import ChannelMapper
3 | from .cspnext_pafpn import CSPNeXtPAFPN
4 | from .fmap_proc_neck import FeatureMapProcessor
5 | from .fpn import FPN
6 | from .gap_neck import GlobalAveragePooling
7 | from .hybrid_encoder import HybridEncoder
8 | from .posewarper_neck import PoseWarperNeck
9 | from .yolox_pafpn import YOLOXPAFPN
10 |
11 | __all__ = [
12 | 'GlobalAveragePooling', 'PoseWarperNeck', 'FPN', 'FeatureMapProcessor',
13 | 'ChannelMapper', 'YOLOXPAFPN', 'CSPNeXtPAFPN', 'HybridEncoder'
14 | ]
15 |
--------------------------------------------------------------------------------
/mmpose/models/pose_estimators/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .bottomup import BottomupPoseEstimator
3 | from .pose_lifter import PoseLifter
4 | from .topdown import TopdownPoseEstimator
5 |
6 | __all__ = ['TopdownPoseEstimator', 'BottomupPoseEstimator', 'PoseLifter']
7 |
--------------------------------------------------------------------------------
/mmpose/models/task_modules/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .assigners import * # noqa
3 | from .prior_generators import * # noqa
4 |
--------------------------------------------------------------------------------
/mmpose/models/task_modules/assigners/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .metric_calculators import BBoxOverlaps2D, PoseOKS
3 | from .sim_ota_assigner import SimOTAAssigner
4 |
5 | __all__ = ['SimOTAAssigner', 'PoseOKS', 'BBoxOverlaps2D']
6 |
--------------------------------------------------------------------------------
/mmpose/models/task_modules/prior_generators/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .mlvl_point_generator import MlvlPointGenerator # noqa
3 |
--------------------------------------------------------------------------------
/mmpose/models/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .check_and_update_config import check_and_update_config
3 | from .ckpt_convert import pvt_convert
4 | from .csp_layer import CSPLayer
5 | from .misc import filter_scores_and_topk
6 | from .ops import FrozenBatchNorm2d, inverse_sigmoid
7 | from .reparam_layers import RepVGGBlock
8 | from .rtmcc_block import RTMCCBlock, rope
9 | from .transformer import (DetrTransformerEncoder, GAUEncoder, PatchEmbed,
10 | SinePositionalEncoding, nchw_to_nlc, nlc_to_nchw)
11 |
12 | __all__ = [
13 | 'PatchEmbed', 'nchw_to_nlc', 'nlc_to_nchw', 'pvt_convert', 'RTMCCBlock',
14 | 'rope', 'check_and_update_config', 'filter_scores_and_topk', 'CSPLayer',
15 | 'FrozenBatchNorm2d', 'inverse_sigmoid', 'GAUEncoder',
16 | 'SinePositionalEncoding', 'RepVGGBlock', 'DetrTransformerEncoder'
17 | ]
18 |
--------------------------------------------------------------------------------
/mmpose/structures/bbox/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .bbox_overlaps import bbox_overlaps
3 | from .transforms import (bbox_clip_border, bbox_corner2xyxy, bbox_cs2xywh,
4 | bbox_cs2xyxy, bbox_xywh2cs, bbox_xywh2xyxy,
5 | bbox_xyxy2corner, bbox_xyxy2cs, bbox_xyxy2xywh,
6 | flip_bbox, get_pers_warp_matrix, get_udp_warp_matrix,
7 | get_warp_matrix)
8 |
9 | __all__ = [
10 | 'bbox_cs2xywh', 'bbox_cs2xyxy', 'bbox_xywh2cs', 'bbox_xywh2xyxy',
11 | 'bbox_xyxy2cs', 'bbox_xyxy2xywh', 'flip_bbox', 'get_udp_warp_matrix',
12 | 'get_warp_matrix', 'bbox_overlaps', 'bbox_clip_border', 'bbox_xyxy2corner',
13 | 'bbox_corner2xyxy', 'get_pers_warp_matrix'
14 | ]
15 |
--------------------------------------------------------------------------------
/mmpose/structures/keypoint/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 |
3 | from .transforms import (flip_keypoints, flip_keypoints_custom_center,
4 | keypoint_clip_border)
5 |
6 | __all__ = [
7 | 'flip_keypoints', 'flip_keypoints_custom_center', 'keypoint_clip_border'
8 | ]
9 |
--------------------------------------------------------------------------------
/mmpose/testing/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from ._utils import (get_coco_sample, get_config_file, get_packed_inputs,
3 | get_pose_estimator_cfg, get_repo_dir)
4 |
5 | __all__ = [
6 | 'get_packed_inputs', 'get_coco_sample', 'get_config_file',
7 | 'get_pose_estimator_cfg', 'get_repo_dir'
8 | ]
9 |
--------------------------------------------------------------------------------
/mmpose/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .camera import SimpleCamera, SimpleCameraTorch
3 | from .collect_env import collect_env
4 | from .config_utils import adapt_mmdet_pipeline
5 | from .dist_utils import reduce_mean
6 | from .logger import get_root_logger
7 | from .setup_env import register_all_modules, setup_multi_processes
8 | from .timer import StopWatch
9 |
10 | __all__ = [
11 | 'get_root_logger', 'collect_env', 'StopWatch', 'setup_multi_processes',
12 | 'register_all_modules', 'SimpleCamera', 'SimpleCameraTorch',
13 | 'adapt_mmdet_pipeline', 'reduce_mean'
14 | ]
15 |
--------------------------------------------------------------------------------
/mmpose/utils/collect_env.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmengine.utils import get_git_hash
3 | from mmengine.utils.dl_utils import collect_env as collect_base_env
4 |
5 | import mmpose
6 |
7 |
8 | def collect_env():
9 | env_info = collect_base_env()
10 | env_info['MMPose'] = (mmpose.__version__ + '+' + get_git_hash(digits=7))
11 | return env_info
12 |
13 |
14 | if __name__ == '__main__':
15 | for name, val in collect_env().items():
16 | print(f'{name}: {val}')
17 |
--------------------------------------------------------------------------------
/mmpose/utils/config_utils.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmpose.utils.typing import ConfigDict
3 |
4 |
5 | def adapt_mmdet_pipeline(cfg: ConfigDict) -> ConfigDict:
6 | """Converts pipeline types in MMDetection's test dataloader to use the
7 | 'mmdet' namespace.
8 |
9 | Args:
10 | cfg (ConfigDict): Configuration dictionary for MMDetection.
11 |
12 | Returns:
13 | ConfigDict: Configuration dictionary with updated pipeline types.
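
Example (a hedged sketch; the config path is hypothetical)::

>>> from mmengine.config import Config
>>> cfg = Config.fromfile('path/to/mmdet_detector_config.py')
>>> cfg = adapt_mmdet_pipeline(cfg)
>>> # transforms that mmdet defines (e.g. 'Resize') now read 'mmdet.Resize'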
14 | """
15 | # use lazy import to avoid hard dependence on mmdet
16 | from mmdet.datasets import transforms
17 |
18 | if 'test_dataloader' not in cfg:
19 | return cfg
20 |
21 | pipeline = cfg.test_dataloader.dataset.pipeline
22 | for trans in pipeline:
23 | if trans['type'] in dir(transforms):
24 | trans['type'] = 'mmdet.' + trans['type']
25 |
26 | return cfg
27 |
--------------------------------------------------------------------------------
/mmpose/utils/dist_utils.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import torch.distributed as dist
3 |
4 |
5 | def reduce_mean(tensor):
6 | """"Obtain the mean of tensor on different GPUs."""
7 | if not (dist.is_available() and dist.is_initialized()):
8 | return tensor
9 | tensor = tensor.clone()
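# dividing by world_size locally, then SUM-reducing, leaves every rank holding the global mean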
10 | dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)
11 | return tensor
12 |
--------------------------------------------------------------------------------
/mmpose/visualization/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .fast_visualizer import FastVisualizer
3 | from .local_visualizer import PoseLocalVisualizer
4 | from .local_visualizer_3d import Pose3dLocalVisualizer
5 |
6 | __all__ = ['PoseLocalVisualizer', 'FastVisualizer', 'Pose3dLocalVisualizer']
7 |
--------------------------------------------------------------------------------
/projects/example_project/configs/example-head-loss_hrnet-w32_8xb64-210e_coco-256x192.py:
--------------------------------------------------------------------------------
1 | # Directly inherit the entire recipe you want to use.
2 | _base_ = 'mmpose::body_2d_keypoint/topdown_heatmap/coco/' \
3 | 'td-hm_hrnet-w32_8xb64-210e_coco-256x192.py'
4 |
5 | # This line is to import your own modules.
6 | custom_imports = dict(imports='models')
7 |
8 | # Modify the model to use your own head and loss.
9 | _base_['model']['head'] = dict(
10 | type='ExampleHead',
11 | in_channels=32,
12 | out_channels=17,
13 | deconv_out_channels=None,
14 | loss=dict(type='ExampleLoss', use_target_weight=True),
15 | decoder=_base_['codec'])
16 |
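# Usage note (hypothetical invocation; adjust paths to your checkout). The
# `custom_imports` above needs the local `models` package to be importable,
# e.g. with projects/example_project as the working directory on PYTHONPATH:
#   PYTHONPATH=. python ../../tools/train.py configs/example-head-loss_hrnet-w32_8xb64-210e_coco-256x192.py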
--------------------------------------------------------------------------------
/projects/example_project/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .example_head import ExampleHead
2 | from .example_loss import ExampleLoss
3 |
4 | __all__ = ['ExampleHead', 'ExampleLoss']
5 |
--------------------------------------------------------------------------------
/projects/just_dance/configs/_base_:
--------------------------------------------------------------------------------
1 | ../../../configs/_base_
2 |
--------------------------------------------------------------------------------
/projects/just_dance/configs/rtmdet-nano_one-person.py:
--------------------------------------------------------------------------------
1 | _base_ = '../../rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py'
2 |
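# keep at most one person: no score filtering, a single pre-NMS candidate, one box per image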
3 | model = dict(test_cfg=dict(nms_pre=1, score_thr=0.0, max_per_img=1))
4 |
--------------------------------------------------------------------------------
/projects/mmpose4aigc/download_models.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright (c) OpenMMLab. All rights reserved.
3 |
4 | # Create models folder
5 | mkdir models
6 |
7 | # Go to models folder
8 | cd models
9 |
10 | # Download det model
11 | wget https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth
12 |
13 | # Download pose model
14 | wget https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth
15 |
16 | # Go back to the mmpose4aigc folder
17 | cd ..
18 |
19 | # Success
20 | echo "Download completed."
21 |
--------------------------------------------------------------------------------
/projects/mmpose4aigc/install_posetracker_linux.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright (c) OpenMMLab. All rights reserved.
3 |
4 | # Download pre-compiled files
5 | wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64-cxx11abi.tar.gz
6 |
7 | # Unzip files
8 | tar -xzvf mmdeploy-1.0.0-linux-x86_64-cxx11abi.tar.gz
9 |
10 | # Go to the sdk folder
11 | cd mmdeploy-1.0.0-linux-x86_64-cxx11abi
12 |
13 | # Init environment
14 | source set_env.sh
15 |
16 | # If opencv 3+ is not installed on your system, execute the following command.
17 | # If it is installed, skip this command
18 | bash install_opencv.sh
19 |
20 | # Compile executable programs
21 | bash build_sdk.sh
22 |
23 | # Download models
24 | wget https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmpose-cpu.zip
25 |
26 | # Unzip files
27 | unzip rtmpose-cpu.zip
28 |
29 | # Success
30 | echo "Installation completed."
31 |
--------------------------------------------------------------------------------
/projects/mmpose4aigc/mmpose_openpose.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright (c) OpenMMLab. All rights reserved.
3 |
4 | INPUT_IMAGE=$1
5 |
6 | python openpose_visualization.py \
7 | ../rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \
8 | models/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \
9 | ../rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-256x192.py \
10 | models/rtmpose-m_simcc-aic-coco_pt-aic-coco_420e-256x192-63eb25f7_20230126.pth \
11 | --input $INPUT_IMAGE \
12 | --device cuda:0
13 |
--------------------------------------------------------------------------------
/projects/mmpose4aigc/mmpose_style_skeleton.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright (c) OpenMMLab. All rights reserved.
3 |
4 | WORKSPACE=mmdeploy-1.0.0-linux-x86_64-cxx11abi
5 | export LD_LIBRARY_PATH=${WORKSPACE}/lib:${WORKSPACE}/thirdparty/onnxruntime/lib:$LD_LIBRARY_PATH
6 |
7 | INPUT_IMAGE=$1
8 |
9 | ${WORKSPACE}/bin/pose_tracker \
10 | ${WORKSPACE}/rtmpose-ort/rtmdet-nano \
11 | ${WORKSPACE}/rtmpose-ort/rtmpose-m \
12 | $INPUT_IMAGE \
13 | --background black \
14 | --skeleton ${WORKSPACE}/rtmpose-ort/t2i-adapter_skeleton.txt \
15 | --output ./skeleton_res.jpg \
16 | --pose_kpt_thr 0.4 \
17 | --show -1
18 |
--------------------------------------------------------------------------------
/projects/pose_anything/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | from .pipelines import * # noqa
2 |
--------------------------------------------------------------------------------
/projects/pose_anything/datasets/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | from .mp100 import (FewShotBaseDataset, FewShotKeypointDataset,
2 | TransformerBaseDataset, TransformerPoseDataset)
3 |
4 | __all__ = [
5 | 'FewShotBaseDataset', 'FewShotKeypointDataset', 'TransformerBaseDataset',
6 | 'TransformerPoseDataset'
7 | ]
8 |
--------------------------------------------------------------------------------
/projects/pose_anything/datasets/datasets/mp100/__init__.py:
--------------------------------------------------------------------------------
1 | from .fewshot_base_dataset import FewShotBaseDataset
2 | from .fewshot_dataset import FewShotKeypointDataset
3 | from .test_base_dataset import TestBaseDataset
4 | from .test_dataset import TestPoseDataset
5 | from .transformer_base_dataset import TransformerBaseDataset
6 | from .transformer_dataset import TransformerPoseDataset
7 |
8 | __all__ = [
9 | 'FewShotKeypointDataset', 'FewShotBaseDataset', 'TransformerPoseDataset',
10 | 'TransformerBaseDataset', 'TestBaseDataset', 'TestPoseDataset'
11 | ]
12 |
--------------------------------------------------------------------------------
/projects/pose_anything/datasets/pipelines/__init__.py:
--------------------------------------------------------------------------------
1 | from .top_down_transform import TopDownGenerateTargetFewShot
2 |
3 | __all__ = ['TopDownGenerateTargetFewShot']
4 |
--------------------------------------------------------------------------------
/projects/pose_anything/datasets/pipelines/post_transforms.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/projects/pose_anything/datasets/pipelines/post_transforms.py
--------------------------------------------------------------------------------
/projects/pose_anything/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .backbones import * # noqa
2 | from .detectors import * # noqa
3 | from .keypoint_heads import * # noqa
4 | from .utils import * # noqa
5 |
--------------------------------------------------------------------------------
/projects/pose_anything/models/backbones/__init__.py:
--------------------------------------------------------------------------------
1 | from .swin_transformer_v2 import SwinTransformerV2 # noqa
2 |
--------------------------------------------------------------------------------
/projects/pose_anything/models/detectors/__init__.py:
--------------------------------------------------------------------------------
1 | from .pam import PoseAnythingModel
2 |
3 | __all__ = ['PoseAnythingModel']
4 |
--------------------------------------------------------------------------------
/projects/pose_anything/models/keypoint_heads/__init__.py:
--------------------------------------------------------------------------------
1 | from .head import PoseHead
2 |
3 | __all__ = ['PoseHead']
4 |
--------------------------------------------------------------------------------
/projects/rtmpose/examples/PoseTracker-Android-Prototype/README.md:
--------------------------------------------------------------------------------
1 | # PoseTracker-Android-Prototype
2 |
3 | PoseTracker Android Demo Prototype, which is based on [mmdeploy](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x).
4 |
5 | Please refer to [Original Repository](https://github.com/hanrui1sensetime/PoseTracker-Android-Prototype).
6 |
--------------------------------------------------------------------------------
/projects/rtmpose/examples/README.md:
--------------------------------------------------------------------------------
1 | ## List of examples
2 |
3 | ### 1. RTMPose-Deploy
4 |
5 | RTMPose-Deploy is a C++ code example for RTMPose localized deployment.
6 |
7 | - [ONNXRuntime-CPU](https://github.com/HW140701/RTMPose-Deploy)
8 | - [TensorRT](https://github.com/Dominic23331/rtmpose_tensorrt)
9 |
10 | ### 2. RTMPose inference with ONNXRuntime (Python)
11 |
12 | This example shows how to run RTMPose inference with ONNXRuntime in Python.
13 |
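Below is a minimal hedged sketch of that flow, not the bundled script (which additionally handles person detection, affine cropping, and SimCC decoding); the model filename and the 256x192 input size are assumptions to adapt to your exported model.

```python
# Minimal sketch: run an exported RTMPose ONNX model on one pre-cropped image.
# 'rtmpose-m.onnx' and the 256x192 input size are assumptions.
import cv2
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession('rtmpose-m.onnx', providers=['CPUExecutionProvider'])
img = cv2.imread('human-pose.jpeg')                    # sample image in onnxruntime/
inp = cv2.resize(img, (192, 256))                      # (width, height)
inp = inp.transpose(2, 0, 1)[None].astype(np.float32)  # HWC -> NCHW
out = sess.run(None, {sess.get_inputs()[0].name: inp})
print([o.shape for o in out])                          # SimCC x/y logits per keypoint
```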
14 | ### 3. PoseTracker Android Demo
15 |
16 | PoseTracker Android Demo Prototype based on mmdeploy.
17 |
18 | - [Original Repository](https://github.com/hanrui1sensetime/PoseTracker-Android-Prototype)
19 |
20 | ### 4. rtmlib
21 |
22 | rtmlib is a super lightweight library for running pose estimation with RTMPose models, WITHOUT any dependencies such as mmcv, mmpose, or mmdet.
23 |
24 | - [Original Repository](https://github.com/Tau-J/rtmlib/tree/main)
25 |
--------------------------------------------------------------------------------
/projects/rtmpose/examples/RTMPose-Deploy/README.md:
--------------------------------------------------------------------------------
1 | # RTMPose-Deploy
2 |
3 | [Chinese documentation](./README_CN.md)
4 |
5 | RTMPose-Deploy is a C++ code example for RTMPose localized deployment.
6 |
7 | At present, RTMPose-Deploy supports deploying RTMDet and RTMPose on Windows using ONNXRuntime-CPU and TensorRT.
8 |
9 | | Deployment Framework | Repo |
10 | | -------------------- | -------------------------------------------------------------------- |
11 | | ONNXRuntime-CPU | [RTMPose-Deploy](https://github.com/HW140701/RTMPose-Deploy) |
12 | | TensorRT | [rtmpose_tensorrt](https://github.com/Dominic23331/rtmpose_tensorrt) |
13 |
--------------------------------------------------------------------------------
/projects/rtmpose/examples/RTMPose-Deploy/README_CN.md:
--------------------------------------------------------------------------------
1 | # RTMPose-Deploy
2 |
3 | RTMPose-Deploy is a C++ code example for RTMPose localized deployment.
4 |
5 | At present, RTMPose-Deploy supports deploying RTMDet and RTMPose on Windows using ONNXRuntime-CPU and TensorRT.
6 |
7 | | Deployment Framework | Repo |
8 | | -------------------- | -------------------------------------------------------------------- |
9 | | ONNXRuntime-CPU | [RTMPose-Deploy](https://github.com/HW140701/RTMPose-Deploy) |
10 | | TensorRT | [rtmpose_tensorrt](https://github.com/Dominic23331/rtmpose_tensorrt) |
11 |
--------------------------------------------------------------------------------
/projects/rtmpose/examples/RTMPose-Deploy/Windows/OnnxRumtime-CPU/src/RTMPoseOnnxRuntime/rtmdet_onnxruntime.h:
--------------------------------------------------------------------------------
1 | #ifndef _RTM_DET_ONNX_RUNTIME_H_
2 | #define _RTM_DET_ONNX_RUNTIME_H_
3 |
4 | #include <string>  // std::string is used in the constructor below
5 |
6 | #include "opencv2/opencv.hpp"
7 |
8 | #include "onnxruntime_cxx_api.h"
9 | #include "cpu_provider_factory.h"
10 | #include "rtmpose_utils.h"
11 |
12 |
13 | class RTMDetOnnxruntime
14 | {
15 | public:
16 | RTMDetOnnxruntime() = delete;
17 | RTMDetOnnxruntime(const std::string& onnx_model_path);
18 | virtual ~RTMDetOnnxruntime();
19 |
20 | public:
21 | DetectBox Inference(const cv::Mat& input_mat);
22 |
23 | private:
24 | void PrintModelInfo(Ort::Session& session);
25 |
26 | private:
27 | Ort::Env m_env;
28 | Ort::Session m_session;
29 |
30 | };
31 |
32 | #endif // !_RTM_DET_ONNX_RUNTIME_H_
33 |
--------------------------------------------------------------------------------
/projects/rtmpose/examples/RTMPose-Deploy/Windows/TensorRT/src/RTMPoseTensorRT/inference.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include <vector>              // std::vector used in the signature below
3 |
4 | #include <opencv2/opencv.hpp>  // cv::Mat
5 |
6 | #include "rtmdet.h"
7 | #include "rtmpose.h"
8 | #include "utils.h"
9 |
10 | // element type of the nested vector assumed from the structs in utils.h
11 | std::vector<std::vector<PosePoint>> inference(cv::Mat& image, RTMDet& detect_model, RTMPose& pose_model);
15 |
--------------------------------------------------------------------------------
/projects/rtmpose/examples/onnxruntime/human-pose.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/projects/rtmpose/examples/onnxruntime/human-pose.jpeg
--------------------------------------------------------------------------------
/projects/rtmpose/examples/onnxruntime/requirements.txt:
--------------------------------------------------------------------------------
1 | loguru==0.6.0
2 | numpy==1.21.6
3 | onnxruntime==1.14.1
4 | onnxruntime-gpu==1.8.1
5 |
--------------------------------------------------------------------------------
/projects/rtmpose/rtmdet/README.md:
--------------------------------------------------------------------------------
1 | # Welcome to RTMDet Project of MMPose
2 |
3 | **Highlight:** If you are deploying `projects/rtmpose/rtmdet` with [deploee](https://platform.openmmlab.com/deploee), please input the [full http download link of train_config](https://raw.githubusercontent.com/open-mmlab/mmpose/main/projects/rtmpose/rtmdet/hand/rtmdet_nano_320-8xb32_hand.py) instead of a relative path, since deploee cannot parse mmdet configs within the mmpose repo.
4 |
--------------------------------------------------------------------------------
/projects/rtmpose/rtmdet/person/rtmdet_m_640-8xb32_coco-person.py:
--------------------------------------------------------------------------------
1 | _base_ = 'mmdet::rtmdet/rtmdet_m_8xb32-300e_coco.py'
2 |
3 | checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth' # noqa
4 |
5 | model = dict(
6 | backbone=dict(
7 | init_cfg=dict(
8 | type='Pretrained', prefix='backbone.', checkpoint=checkpoint)),
9 | bbox_head=dict(num_classes=1),
10 | test_cfg=dict(
11 | nms_pre=1000,
12 | min_bbox_size=0,
13 | score_thr=0.05,
14 | nms=dict(type='nms', iou_threshold=0.6),
15 | max_per_img=100))
16 |
17 | train_dataloader = dict(dataset=dict(metainfo=dict(classes=('person', ))))
18 |
19 | val_dataloader = dict(dataset=dict(metainfo=dict(classes=('person', ))))
20 | test_dataloader = val_dataloader
21 |
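
A minimal sketch of how a one-class person detector built from this config is commonly paired with a top-down pose model. `init_detector`/`inference_detector` are standard mmdet 3.x APIs; the checkpoint path below is a placeholder, not a file in this repo:

    from mmdet.apis import init_detector, inference_detector

    # placeholder checkpoint path; use a released rtmdet-m person checkpoint
    detector = init_detector(
        'projects/rtmpose/rtmdet/person/rtmdet_m_640-8xb32_coco-person.py',
        'rtmdet_m_person.pth', device='cpu')
    result = inference_detector(detector, 'demo.jpg')
    # keep boxes well above the low score_thr (0.05) set in test_cfg
    bboxes = result.pred_instances.bboxes[result.pred_instances.scores > 0.3]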
--------------------------------------------------------------------------------
/projects/rtmpose/rtmdet/person/rtmdet_m_8xb32-300e_humanart.py:
--------------------------------------------------------------------------------
1 | _base_ = './rtmdet_l_8xb32-300e_humanart.py'
2 |
3 | model = dict(
4 | backbone=dict(deepen_factor=0.67, widen_factor=0.75),
5 | neck=dict(in_channels=[192, 384, 768], out_channels=192, num_csp_blocks=2),
6 | bbox_head=dict(in_channels=192, feat_channels=192))
7 |
--------------------------------------------------------------------------------
/projects/rtmpose/rtmdet/person/rtmdet_x_8xb32-300e_humanart.py:
--------------------------------------------------------------------------------
1 | _base_ = './rtmdet_l_8xb32-300e_humanart.py'
2 |
3 | model = dict(
4 | backbone=dict(deepen_factor=1.33, widen_factor=1.25),
5 | neck=dict(
6 | in_channels=[320, 640, 1280], out_channels=320, num_csp_blocks=4),
7 | bbox_head=dict(in_channels=320, feat_channels=320))
8 |
--------------------------------------------------------------------------------
/projects/rtmpose/yolox/humanart/yolox_l_8xb8-300e_humanart.py:
--------------------------------------------------------------------------------
1 | _base_ = './yolox_s_8xb8-300e_humanart.py'
2 |
3 | # model settings
4 | model = dict(
5 | backbone=dict(deepen_factor=1.0, widen_factor=1.0),
6 | neck=dict(
7 | in_channels=[256, 512, 1024], out_channels=256, num_csp_blocks=3),
8 | bbox_head=dict(in_channels=256, feat_channels=256))
9 |
--------------------------------------------------------------------------------
/projects/rtmpose/yolox/humanart/yolox_m_8xb8-300e_humanart.py:
--------------------------------------------------------------------------------
1 | _base_ = './yolox_s_8xb8-300e_humanart.py'
2 |
3 | # model settings
4 | model = dict(
5 | backbone=dict(deepen_factor=0.67, widen_factor=0.75),
6 | neck=dict(in_channels=[192, 384, 768], out_channels=192, num_csp_blocks=2),
7 | bbox_head=dict(in_channels=192, feat_channels=192),
8 | )
9 |
--------------------------------------------------------------------------------
/projects/rtmpose/yolox/humanart/yolox_nano_8xb8-300e_humanart.py:
--------------------------------------------------------------------------------
1 | _base_ = './yolox_tiny_8xb8-300e_humanart.py'
2 |
3 | # model settings
4 | model = dict(
5 | backbone=dict(deepen_factor=0.33, widen_factor=0.25, use_depthwise=True),
6 | neck=dict(
7 | in_channels=[64, 128, 256],
8 | out_channels=64,
9 | num_csp_blocks=1,
10 | use_depthwise=True),
11 | bbox_head=dict(in_channels=64, feat_channels=64, use_depthwise=True))
12 |
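
The scale variants in this directory differ only in `deepen_factor`/`widen_factor`, and each config's channel lists are the base YOLOX widths scaled by `widen_factor`. A quick check of that arithmetic (base widths taken from the l/s configs above):

    # neck in_channels at widen_factor = 1.0
    base = [256, 512, 1024]
    for name, widen in [('nano', 0.25), ('m', 0.75), ('l', 1.0), ('x', 1.25)]:
        print(name, [int(c * widen) for c in base])
    # nano [64, 128, 256], m [192, 384, 768], l [256, 512, 1024], x [320, 640, 1280]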
--------------------------------------------------------------------------------
/projects/rtmpose/yolox/humanart/yolox_x_8xb8-300e_humanart.py:
--------------------------------------------------------------------------------
1 | _base_ = './yolox_s_8xb8-300e_humanart.py'
2 |
3 | # model settings
4 | model = dict(
5 | backbone=dict(deepen_factor=1.33, widen_factor=1.25),
6 | neck=dict(
7 | in_channels=[320, 640, 1280], out_channels=320, num_csp_blocks=4),
8 | bbox_head=dict(in_channels=320, feat_channels=320))
9 |
--------------------------------------------------------------------------------
/projects/rtmpose3d/demo/rtmdet_m_640-8xb32_coco-person.py:
--------------------------------------------------------------------------------
1 | _base_ = 'mmdet::rtmdet/rtmdet_m_8xb32-300e_coco.py'
2 |
3 | checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth' # noqa
4 |
5 | model = dict(
6 | backbone=dict(
7 | init_cfg=dict(
8 | type='Pretrained', prefix='backbone.', checkpoint=checkpoint)),
9 | bbox_head=dict(num_classes=1),
10 | test_cfg=dict(
11 | nms_pre=1000,
12 | min_bbox_size=0,
13 | score_thr=0.05,
14 | nms=dict(type='nms', iou_threshold=0.6),
15 | max_per_img=100))
16 |
17 | train_dataloader = dict(dataset=dict(metainfo=dict(classes=('person', ))))
18 |
19 | val_dataloader = dict(dataset=dict(metainfo=dict(classes=('person', ))))
20 | test_dataloader = val_dataloader
21 |
--------------------------------------------------------------------------------
/projects/rtmpose3d/rtmpose3d/__init__.py:
--------------------------------------------------------------------------------
1 | from .loss import KLDiscretLossWithWeight
2 | from .pose_estimator import TopdownPoseEstimator3D
3 | from .rtmw3d_head import RTMW3DHead
4 | from .simcc_3d_label import SimCC3DLabel
5 |
6 | __all__ = [
7 | 'TopdownPoseEstimator3D', 'RTMW3DHead', 'SimCC3DLabel',
8 | 'KLDiscretLossWithWeight'
9 | ]
10 |
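
These imports exist to trigger registry registration as a side effect. A hedged sketch of how a config would pull them in via mmengine's `custom_imports` mechanism; the module path is assumed from the directory layout:

    # in a config file
    custom_imports = dict(
        imports=['projects.rtmpose3d.rtmpose3d'],  # runs the __init__ above
        allow_failed_imports=False)

    model = dict(type='TopdownPoseEstimator3D')  # now resolvable in the registry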
--------------------------------------------------------------------------------
/projects/skps/custom_codecs/__init__.py:
--------------------------------------------------------------------------------
1 | from .skps_heatmap import SKPSHeatmap
2 |
3 | __all__ = ['SKPSHeatmap']
4 |
--------------------------------------------------------------------------------
/projects/skps/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .skps_head import SKPSHead
2 |
3 | __all__ = ['SKPSHead']
4 |
--------------------------------------------------------------------------------
/projects/uniformer/configs/td-hm_uniformer-s-8xb128-210e_coco-256x192.py:
--------------------------------------------------------------------------------
1 | _base_ = ['./td-hm_uniformer-b-8xb128-210e_coco-256x192.py']
2 |
3 | # automatically scaling LR based on the actual training batch size
4 | auto_scale_lr = dict(base_batch_size=1024)
5 |
6 | model = dict(
7 | backbone=dict(
8 | depths=[3, 4, 8, 3],
9 | drop_path_rate=0.2,
10 | init_cfg=dict(
11 | type='Pretrained',
12 | checkpoint='https://download.openmmlab.com/mmpose/v1/projects/'
13 | 'uniformer/uniformer_small_in1k.pth' # noqa
14 | )))
15 |
16 | train_dataloader = dict(batch_size=32)
17 | val_dataloader = dict(batch_size=256)
18 |
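
`auto_scale_lr` rescales the optimizer lr by actual batch size over `base_batch_size` when LR auto-scaling is enabled on the command line. A sanity check of this config's numbers, assuming 8 GPUs from the filename (note `train_dataloader` overrides the per-GPU batch to 32):

    gpus, per_gpu, base = 8, 32, 1024   # batch_size=32 overrides the name's b128
    factor = gpus * per_gpu / base
    print(factor)                       # 0.25 -> lr is multiplied by 0.25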
--------------------------------------------------------------------------------
/projects/uniformer/configs/td-hm_uniformer-s-8xb128-210e_coco-384x288.py:
--------------------------------------------------------------------------------
1 | _base_ = ['./td-hm_uniformer-b-8xb32-210e_coco-384x288.py']
2 |
3 | # optimizer
4 | optim_wrapper = dict(optimizer=dict(
5 | type='Adam',
6 | lr=2e-3,
7 | ))
8 |
9 | # automatically scaling LR based on the actual training batch size
10 | auto_scale_lr = dict(base_batch_size=1024)
11 |
12 | model = dict(
13 | backbone=dict(
14 | depths=[3, 4, 8, 3],
15 | drop_path_rate=0.2,
16 | init_cfg=dict(
17 | type='Pretrained',
18 | checkpoint='https://download.openmmlab.com/mmpose/v1/projects/'
19 | 'uniformer/uniformer_small_in1k.pth' # noqa
20 | )))
21 |
22 | train_dataloader = dict(batch_size=128)
23 | val_dataloader = dict(batch_size=256)
24 |
--------------------------------------------------------------------------------
/projects/uniformer/configs/td-hm_uniformer-s-8xb64-210e_coco-448x320.py:
--------------------------------------------------------------------------------
1 | _base_ = ['./td-hm_uniformer-b-8xb32-210e_coco-448x320.py']
2 |
3 | # optimizer
4 | optim_wrapper = dict(optimizer=dict(
5 | type='Adam',
6 | lr=1.0e-3,
7 | ))
8 |
9 | # automatically scaling LR based on the actual training batch size
10 | auto_scale_lr = dict(base_batch_size=512)
11 |
12 | model = dict(
13 | backbone=dict(
14 | depths=[3, 4, 8, 3],
15 | drop_path_rate=0.2,
16 | init_cfg=dict(
17 | type='Pretrained',
18 | checkpoint='https://download.openmmlab.com/mmpose/v1/projects/'
19 | 'uniformer/uniformer_small_in1k.pth')))
20 |
21 | train_dataloader = dict(batch_size=64)
22 | val_dataloader = dict(batch_size=256)
23 |
--------------------------------------------------------------------------------
/projects/uniformer/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .uniformer import * # noqa
2 |
--------------------------------------------------------------------------------
/projects/yolox_pose/configs/_base_/datasets:
--------------------------------------------------------------------------------
1 | ../../../../configs/_base_/datasets
2 |
--------------------------------------------------------------------------------
/projects/yolox_pose/configs/yolox-pose_l_4xb64-300e_coco.py:
--------------------------------------------------------------------------------
1 | _base_ = ['./yolox-pose_s_8xb32-300e_coco.py']
2 |
3 | # model settings
4 | model = dict(
5 | init_cfg=dict(checkpoint='https://download.openmmlab.com/mmyolo/v0/yolox/'
6 | 'yolox_l_fast_8xb8-300e_coco/yolox_l_fast_8xb8-300e_'
7 | 'coco_20230213_160715-c731eb1c.pth'),
8 | backbone=dict(
9 | deepen_factor=1.0,
10 | widen_factor=1.0,
11 | ),
12 | neck=dict(
13 | deepen_factor=1.0,
14 | widen_factor=1.0,
15 | ),
16 | bbox_head=dict(head_module=dict(widen_factor=1.0)))
17 |
18 | train_dataloader = dict(batch_size=64)
19 |
--------------------------------------------------------------------------------
/projects/yolox_pose/configs/yolox-pose_m_4xb64-300e_coco.py:
--------------------------------------------------------------------------------
1 | _base_ = ['./yolox-pose_s_8xb32-300e_coco.py']
2 |
3 | # model settings
4 | model = dict(
5 | init_cfg=dict(checkpoint='https://download.openmmlab.com/mmyolo/v0/yolox/'
6 | 'yolox_m_fast_8xb32-300e-rtmdet-hyp_coco/yolox_m_fast_8xb32'
7 | '-300e-rtmdet-hyp_coco_20230210_144328-e657e182.pth'),
8 | backbone=dict(
9 | deepen_factor=0.67,
10 | widen_factor=0.75,
11 | ),
12 | neck=dict(
13 | deepen_factor=0.67,
14 | widen_factor=0.75,
15 | ),
16 | bbox_head=dict(head_module=dict(widen_factor=0.75)))
17 |
18 | train_dataloader = dict(batch_size=64)
19 |
--------------------------------------------------------------------------------
/projects/yolox_pose/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | import mmengine
2 | import mmyolo
3 |
4 | compatible_version = '0.5.0'
5 | if mmengine.digit_version(mmyolo.__version__) > \
6 |         mmengine.digit_version(compatible_version):  # full tuple, not just minor
7 |     print(f'This project is only compatible with mmyolo {compatible_version} '
8 |           f'or lower. Please install the required version via: '
9 |           f'pip install mmyolo=={compatible_version}')
10 |
11 | from .bbox_keypoint_structure import * # noqa
12 | from .coco_dataset import * # noqa
13 | from .transforms import * # noqa
14 |
--------------------------------------------------------------------------------
/projects/yolox_pose/datasets/coco_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from typing import Any
3 |
4 | from mmengine.dataset import force_full_init
5 | from mmyolo.registry import DATASETS
6 |
7 | from mmpose.datasets import CocoDataset as MMPoseCocoDataset
8 |
9 |
10 | @DATASETS.register_module()
11 | class CocoDataset(MMPoseCocoDataset):
12 |
13 | @force_full_init
14 | def prepare_data(self, idx) -> Any:
15 | data_info = self.get_data_info(idx)
16 | data_info['dataset'] = self
17 | return self.pipeline(data_info)
18 |
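
The only change over MMPose's `CocoDataset` is stashing the dataset object into each sample, so downstream pipeline transforms can reach dataset-level information. A hedged sketch of a transform consuming that field (the transform itself is illustrative, not part of this project):

    from mmcv.transforms import BaseTransform
    from mmyolo.registry import TRANSFORMS

    @TRANSFORMS.register_module()
    class PrintFlipPairs(BaseTransform):
        """Illustrative transform: reads metainfo via the injected dataset."""

        def transform(self, results: dict) -> dict:
            dataset = results['dataset']  # set in CocoDataset.prepare_data above
            print(dataset.metainfo['flip_indices'])
            return results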
--------------------------------------------------------------------------------
/projects/yolox_pose/demo:
--------------------------------------------------------------------------------
1 | ../../demo
2 |
--------------------------------------------------------------------------------
/projects/yolox_pose/models/__init__.py:
--------------------------------------------------------------------------------
1 | import mmengine
2 | import mmyolo
3 |
4 | compatible_version = '0.5.0'
5 | if mmengine.digit_version(mmyolo.__version__) > \
6 |         mmengine.digit_version(compatible_version):  # full tuple, not just minor
7 |     print(f'This project is only compatible with mmyolo {compatible_version} '
8 |           f'or lower. Please install the required version via: '
9 |           f'pip install mmyolo=={compatible_version}')
10 |
11 | from .assigner import * # noqa
12 | from .data_preprocessor import * # noqa
13 | from .oks_loss import * # noqa
14 | from .utils import * # noqa
15 | from .yolox_pose_head import * # noqa
16 |
--------------------------------------------------------------------------------
/projects/yolox_pose/tools:
--------------------------------------------------------------------------------
1 | ../../tools
2 |
--------------------------------------------------------------------------------
/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | addopts = --xdoctest --xdoctest-style=auto
3 | norecursedirs = .git ignore build __pycache__ data docker docs .eggs .mim tests/legacy
4 |
5 | filterwarnings = default
6 |     ignore:.*No cfgstr given in Cacher constructor or call.*:Warning
7 |     ignore:.*Define the __nice__ method for.*:Warning
8 |
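
`--xdoctest` makes pytest collect doctests from docstrings in addition to regular tests. A minimal example of the kind of snippet it would pick up (illustrative function, not from the codebase):

    def flip_x(x, width):
        """Mirror an x-coordinate inside an image of the given width.

        Example:
            >>> flip_x(10, 100)
            89
        """
        return width - 1 - x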
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | -r requirements/build.txt
2 | -r requirements/runtime.txt
3 | -r requirements/tests.txt
4 | -r requirements/optional.txt
5 |
--------------------------------------------------------------------------------
/requirements/albu.txt:
--------------------------------------------------------------------------------
1 | albumentations>=0.3.2 --no-binary qudida,albumentations
2 |
--------------------------------------------------------------------------------
/requirements/build.txt:
--------------------------------------------------------------------------------
1 | # These must be installed before building mmpose
2 | numpy
3 | torch>=1.8
4 |
--------------------------------------------------------------------------------
/requirements/docs.txt:
--------------------------------------------------------------------------------
1 | docutils==0.16.0
2 | markdown
3 | myst-parser
4 | -e git+https://github.com/gaotongxiao/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme
5 | sphinx==4.5.0
6 | sphinx_copybutton
7 | sphinx_markdown_tables
8 | urllib3<2.0.0
9 |
--------------------------------------------------------------------------------
/requirements/mminstall.txt:
--------------------------------------------------------------------------------
1 | mmcv>=2.0.0,<3.0.0
2 | mmdet>=3.0.0,<3.3.0
3 | mmengine>=0.4.0,<1.0.0
4 |
--------------------------------------------------------------------------------
/requirements/optional.txt:
--------------------------------------------------------------------------------
1 | requests
2 |
--------------------------------------------------------------------------------
/requirements/poseval.txt:
--------------------------------------------------------------------------------
1 | poseval@git+https://github.com/svenkreiss/poseval.git
2 | shapely==1.8.4
3 |
--------------------------------------------------------------------------------
/requirements/readthedocs.txt:
--------------------------------------------------------------------------------
1 | mmcv>=2.0.0rc4
2 | mmengine>=0.6.0,<1.0.0
3 | munkres
4 | regex
5 | scipy
6 | titlecase
7 | torch>1.6
8 | torchvision
9 | xtcocotools>=1.13
10 |
--------------------------------------------------------------------------------
/requirements/runtime.txt:
--------------------------------------------------------------------------------
1 | chumpy
2 | json_tricks
3 | matplotlib
4 | munkres
5 | numpy
6 | opencv-python
7 | pillow
8 | scipy
9 | torchvision
10 | xtcocotools>=1.12
11 |
--------------------------------------------------------------------------------
/requirements/tests.txt:
--------------------------------------------------------------------------------
1 | coverage
2 | flake8
3 | interrogate
4 | isort==4.3.21
5 | parameterized
6 | pytest
7 | pytest-runner
8 | xdoctest>=0.10.0
9 | yapf
10 |
--------------------------------------------------------------------------------
/resources/mmpose-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/resources/mmpose-logo.png
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [bdist_wheel]
2 | universal=1
3 |
4 | [aliases]
5 | test=pytest
6 |
7 | [yapf]
8 | based_on_style = pep8
9 | blank_line_before_nested_class_or_def = true
10 | split_before_expression_after_opening_paren = true
11 | split_penalty_import_names=0
12 | SPLIT_PENALTY_AFTER_OPENING_BRACKET=800
13 |
14 | [isort]
15 | line_length = 79
16 | multi_line_output = 0
17 | extra_standard_library = pkg_resources,setuptools
18 | known_first_party = mmpose
19 | known_third_party = PIL,cv2,h5py,json_tricks,matplotlib,mmcv,munkres,numpy,pytest,pytorch_sphinx_theme,requests,scipy,seaborn,spacepy,titlecase,torch,torchvision,webcam_apis,xmltodict,xtcocotools
20 | no_lines_before = STDLIB,LOCALFOLDER
21 | default_section = THIRDPARTY
22 |
23 | [flake8]
24 | per-file-ignores =
25 | mmpose/configs/*: F401,F403,F405
26 | projects/*/configs/*: F401,F403,F405
27 |
--------------------------------------------------------------------------------
/tests/data/300vw/001/imgs/000006.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/300vw/001/imgs/000006.jpg
--------------------------------------------------------------------------------
/tests/data/300vw/001/imgs/000009.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/300vw/001/imgs/000009.jpg
--------------------------------------------------------------------------------
/tests/data/300vw/401/imgs/000731.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/300vw/401/imgs/000731.jpg
--------------------------------------------------------------------------------
/tests/data/300vw/401/imgs/000732.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/300vw/401/imgs/000732.jpg
--------------------------------------------------------------------------------
/tests/data/300vw/broken_frames.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/300vw/broken_frames.npy
--------------------------------------------------------------------------------
/tests/data/300w/indoor_020.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/300w/indoor_020.png
--------------------------------------------------------------------------------
/tests/data/300w/indoor_029.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/300w/indoor_029.png
--------------------------------------------------------------------------------
/tests/data/300wlp/AFW_134212_1_0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/300wlp/AFW_134212_1_0.jpg
--------------------------------------------------------------------------------
/tests/data/300wlp/AFW_134212_2_0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/300wlp/AFW_134212_2_0.jpg
--------------------------------------------------------------------------------
/tests/data/aflw/image04476.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/aflw/image04476.jpg
--------------------------------------------------------------------------------
/tests/data/aflw/image22568.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/aflw/image22568.jpg
--------------------------------------------------------------------------------
/tests/data/aic/054d9ce9201beffc76e5ff2169d2af2f027002ca.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/aic/054d9ce9201beffc76e5ff2169d2af2f027002ca.jpg
--------------------------------------------------------------------------------
/tests/data/aic/fa436c914fe4a8ec1ec5474af4d3820b84d17561.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/aic/fa436c914fe4a8ec1ec5474af4d3820b84d17561.jpg
--------------------------------------------------------------------------------
/tests/data/aic/ff945ae2e729f24eea992814639d59b3bdec8bd8.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/aic/ff945ae2e729f24eea992814639d59b3bdec8bd8.jpg
--------------------------------------------------------------------------------
/tests/data/ak/AAOYRUDX/AAOYRUDX_f000027.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/ak/AAOYRUDX/AAOYRUDX_f000027.jpg
--------------------------------------------------------------------------------
/tests/data/ak/AAOYRUDX/AAOYRUDX_f000028.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/ak/AAOYRUDX/AAOYRUDX_f000028.jpg
--------------------------------------------------------------------------------
/tests/data/animalpose/ca110.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/animalpose/ca110.jpeg
--------------------------------------------------------------------------------
/tests/data/animalpose/ho105.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/animalpose/ho105.jpeg
--------------------------------------------------------------------------------
/tests/data/ap10k/000000000004.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/ap10k/000000000004.jpg
--------------------------------------------------------------------------------
/tests/data/ap10k/000000037516.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/ap10k/000000037516.jpg
--------------------------------------------------------------------------------
/tests/data/atrw/000061.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/atrw/000061.jpg
--------------------------------------------------------------------------------
/tests/data/atrw/003464.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/atrw/003464.jpg
--------------------------------------------------------------------------------
/tests/data/campus/actorsGT.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/campus/actorsGT.mat
--------------------------------------------------------------------------------
/tests/data/campus/panoptic_training_pose.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/campus/panoptic_training_pose.pkl
--------------------------------------------------------------------------------
/tests/data/campus/pred_campus_maskrcnn_hrnet_coco.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/campus/pred_campus_maskrcnn_hrnet_coco.pkl
--------------------------------------------------------------------------------
/tests/data/coco/000000000785.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/coco/000000000785.jpg
--------------------------------------------------------------------------------
/tests/data/coco/000000040083.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/coco/000000040083.jpg
--------------------------------------------------------------------------------
/tests/data/coco/000000196141.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/coco/000000196141.jpg
--------------------------------------------------------------------------------
/tests/data/coco/000000197388.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/coco/000000197388.jpg
--------------------------------------------------------------------------------
/tests/data/cofw/001766.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/cofw/001766.jpg
--------------------------------------------------------------------------------
/tests/data/cofw/001805.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/cofw/001805.jpg
--------------------------------------------------------------------------------
/tests/data/crowdpose/103319.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/crowdpose/103319.jpg
--------------------------------------------------------------------------------
/tests/data/crowdpose/106848.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/crowdpose/106848.jpg
--------------------------------------------------------------------------------
/tests/data/deepfasion2/000264.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/deepfasion2/000264.jpg
--------------------------------------------------------------------------------
/tests/data/deepfasion2/000265.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/deepfasion2/000265.jpg
--------------------------------------------------------------------------------
/tests/data/exlpose/imgs_0212_hwangridan_vid000020_exp1200_dark_000052__gain_3.40_exposure_417.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/exlpose/imgs_0212_hwangridan_vid000020_exp1200_dark_000052__gain_3.40_exposure_417.png
--------------------------------------------------------------------------------
/tests/data/exlpose/imgs_0212_hwangridan_vid000020_exp400_dark_000052__gain_3.40_exposure_1250.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/exlpose/imgs_0212_hwangridan_vid000020_exp400_dark_000052__gain_3.40_exposure_1250.png
--------------------------------------------------------------------------------
/tests/data/fld/img_00000128.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/fld/img_00000128.jpg
--------------------------------------------------------------------------------
/tests/data/fld/img_00000132.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/fld/img_00000132.jpg
--------------------------------------------------------------------------------
/tests/data/fly/1400.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/fly/1400.jpg
--------------------------------------------------------------------------------
/tests/data/fly/1450.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/fly/1450.jpg
--------------------------------------------------------------------------------
/tests/data/freihand/00000355.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/freihand/00000355.jpg
--------------------------------------------------------------------------------
/tests/data/freihand/00017620.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/freihand/00017620.jpg
--------------------------------------------------------------------------------
/tests/data/freihand/00032915.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/freihand/00032915.jpg
--------------------------------------------------------------------------------
/tests/data/freihand/00050180.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/freihand/00050180.jpg
--------------------------------------------------------------------------------
/tests/data/freihand/00065475.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/freihand/00065475.jpg
--------------------------------------------------------------------------------
/tests/data/freihand/00082740.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/freihand/00082740.jpg
--------------------------------------------------------------------------------
/tests/data/freihand/00098035.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/freihand/00098035.jpg
--------------------------------------------------------------------------------
/tests/data/freihand/00115300.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/freihand/00115300.jpg
--------------------------------------------------------------------------------
/tests/data/h36m/BF_IUV_gt/S1_Directions_1.54138969_000001_467_466.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/h36m/BF_IUV_gt/S1_Directions_1.54138969_000001_467_466.png
--------------------------------------------------------------------------------
/tests/data/h36m/BF_IUV_gt/S5_SittingDown.54138969_002061_478_619.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/h36m/BF_IUV_gt/S5_SittingDown.54138969_002061_478_619.png
--------------------------------------------------------------------------------
/tests/data/h36m/BF_IUV_gt/S7_Greeting.55011271_000396_365_433.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/h36m/BF_IUV_gt/S7_Greeting.55011271_000396_365_433.png
--------------------------------------------------------------------------------
/tests/data/h36m/BF_IUV_gt/S8_WalkDog_1.55011271_000026_592_382.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/h36m/BF_IUV_gt/S8_WalkDog_1.55011271_000026_592_382.png
--------------------------------------------------------------------------------
/tests/data/h36m/S1/S1_Directions_1.54138969/S1_Directions_1.54138969_000001.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/h36m/S1/S1_Directions_1.54138969/S1_Directions_1.54138969_000001.jpg
--------------------------------------------------------------------------------
/tests/data/h36m/S5/S5_SittingDown.54138969/S5_SittingDown.54138969_002061.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/h36m/S5/S5_SittingDown.54138969/S5_SittingDown.54138969_002061.jpg
--------------------------------------------------------------------------------
/tests/data/h36m/S7/S7_Greeting.55011271/S7_Greeting.55011271_000396.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/h36m/S7/S7_Greeting.55011271/S7_Greeting.55011271_000396.jpg
--------------------------------------------------------------------------------
/tests/data/h36m/S8/S8_WalkDog_1.55011271/S8_WalkDog_1.55011271_000026.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/h36m/S8/S8_WalkDog_1.55011271/S8_WalkDog_1.55011271_000026.jpg
--------------------------------------------------------------------------------
/tests/data/h36m/cameras.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/h36m/cameras.pkl
--------------------------------------------------------------------------------
/tests/data/h36m/test_h36m.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/h36m/test_h36m.npz
--------------------------------------------------------------------------------
/tests/data/h36m/test_h36m_2d_detection.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/h36m/test_h36m_2d_detection.npy
--------------------------------------------------------------------------------
/tests/data/h36m/test_h36m_body3d.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/h36m/test_h36m_body3d.npz
--------------------------------------------------------------------------------
/tests/data/h3wb/h3wb_train_bbox_subset.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/h3wb/h3wb_train_bbox_subset.npz
--------------------------------------------------------------------------------
/tests/data/horse10/0244.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/horse10/0244.png
--------------------------------------------------------------------------------
/tests/data/horse10/0292.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/horse10/0292.png
--------------------------------------------------------------------------------
/tests/data/horse10/0465.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/horse10/0465.png
--------------------------------------------------------------------------------
/tests/data/humanart/2D_virtual_human/digital_art/000000001648.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/humanart/2D_virtual_human/digital_art/000000001648.jpg
--------------------------------------------------------------------------------
/tests/data/humanart/3D_virtual_human/garage_kits/000000005603.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/humanart/3D_virtual_human/garage_kits/000000005603.jpg
--------------------------------------------------------------------------------
/tests/data/humanart/real_human/acrobatics/000000000590.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/humanart/real_human/acrobatics/000000000590.jpg
--------------------------------------------------------------------------------
/tests/data/interhand2.6m/image2017.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/interhand2.6m/image2017.jpg
--------------------------------------------------------------------------------
/tests/data/interhand2.6m/image29590.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/interhand2.6m/image29590.jpg
--------------------------------------------------------------------------------
/tests/data/interhand2.6m/image44669.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/interhand2.6m/image44669.jpg
--------------------------------------------------------------------------------
/tests/data/interhand2.6m/image69148.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/interhand2.6m/image69148.jpg
--------------------------------------------------------------------------------
/tests/data/jhmdb/Frisbee_catch_f_cm_np1_ri_med_0/00001.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/jhmdb/Frisbee_catch_f_cm_np1_ri_med_0/00001.png
--------------------------------------------------------------------------------
/tests/data/jhmdb/Frisbee_catch_f_cm_np1_ri_med_1/00001.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/jhmdb/Frisbee_catch_f_cm_np1_ri_med_1/00001.png
--------------------------------------------------------------------------------
/tests/data/jhmdb/Goalkeeper_Training_Day_@_7_catch_f_cm_np1_ri_med_0/00001.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/jhmdb/Goalkeeper_Training_Day_@_7_catch_f_cm_np1_ri_med_0/00001.png
--------------------------------------------------------------------------------
/tests/data/lapa/10773046825_0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/lapa/10773046825_0.jpg
--------------------------------------------------------------------------------
/tests/data/lapa/13609937564_5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/lapa/13609937564_5.jpg
--------------------------------------------------------------------------------
/tests/data/locust/630.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/locust/630.jpg
--------------------------------------------------------------------------------
/tests/data/locust/650.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/locust/650.jpg
--------------------------------------------------------------------------------
/tests/data/macaque/PRI_1473.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/macaque/PRI_1473.jpg
--------------------------------------------------------------------------------
/tests/data/macaque/d47f1b1ee9d3217e.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/macaque/d47f1b1ee9d3217e.jpg
--------------------------------------------------------------------------------
/tests/data/mhp/10084.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/mhp/10084.jpg
--------------------------------------------------------------------------------
/tests/data/mhp/10112.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/mhp/10112.jpg
--------------------------------------------------------------------------------
/tests/data/mosh/test_mosh.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/mosh/test_mosh.npz
--------------------------------------------------------------------------------
/tests/data/mpi_inf_3dhp/S4_Seq2_Cam0_001033.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/mpi_inf_3dhp/S4_Seq2_Cam0_001033.jpg
--------------------------------------------------------------------------------
/tests/data/mpi_inf_3dhp/S8_Seq1_Cam8_002165.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/mpi_inf_3dhp/S8_Seq1_Cam8_002165.jpg
--------------------------------------------------------------------------------
/tests/data/mpi_inf_3dhp/TS1_002001.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/mpi_inf_3dhp/TS1_002001.jpg
--------------------------------------------------------------------------------
/tests/data/mpi_inf_3dhp/TS2_001850.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/mpi_inf_3dhp/TS2_001850.jpg
--------------------------------------------------------------------------------
/tests/data/mpi_inf_3dhp/cameras_test.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/mpi_inf_3dhp/cameras_test.pkl
--------------------------------------------------------------------------------
/tests/data/mpi_inf_3dhp/cameras_train.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/mpi_inf_3dhp/cameras_train.pkl
--------------------------------------------------------------------------------
/tests/data/mpi_inf_3dhp/test_3dhp_test.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/mpi_inf_3dhp/test_3dhp_test.npz
--------------------------------------------------------------------------------
/tests/data/mpi_inf_3dhp/test_3dhp_train.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/mpi_inf_3dhp/test_3dhp_train.npz
--------------------------------------------------------------------------------
/tests/data/mpii/004645041.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/mpii/004645041.jpg
--------------------------------------------------------------------------------
/tests/data/mpii/005808361.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/mpii/005808361.jpg
--------------------------------------------------------------------------------
/tests/data/mpii/051423444.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/mpii/051423444.jpg
--------------------------------------------------------------------------------
/tests/data/mpii/052475643.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/mpii/052475643.jpg
--------------------------------------------------------------------------------
/tests/data/mpii/060754485.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/mpii/060754485.jpg
--------------------------------------------------------------------------------
/tests/data/ochuman/000817.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/ochuman/000817.jpg
--------------------------------------------------------------------------------
/tests/data/ochuman/003799.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/ochuman/003799.jpg
--------------------------------------------------------------------------------
/tests/data/ochuman/003896.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/ochuman/003896.jpg
--------------------------------------------------------------------------------
/tests/data/onehand10k/1402.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/onehand10k/1402.jpg
--------------------------------------------------------------------------------
/tests/data/onehand10k/33.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/onehand10k/33.jpg
--------------------------------------------------------------------------------
/tests/data/onehand10k/784.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/onehand10k/784.jpg
--------------------------------------------------------------------------------
/tests/data/onehand10k/9.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/onehand10k/9.jpg
--------------------------------------------------------------------------------
/tests/data/panoptic/005880453_01_l.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/panoptic/005880453_01_l.jpg
--------------------------------------------------------------------------------
/tests/data/panoptic/005880453_01_r.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/panoptic/005880453_01_r.jpg
--------------------------------------------------------------------------------
/tests/data/panoptic/ex2_2.flv_000040_l.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/panoptic/ex2_2.flv_000040_l.jpg
--------------------------------------------------------------------------------
/tests/data/panoptic/ex2_2.flv_000040_r.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/panoptic/ex2_2.flv_000040_r.jpg
--------------------------------------------------------------------------------
/tests/data/posetrack18/images/val/003418_mpii_test/000000.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/posetrack18/images/val/003418_mpii_test/000000.jpg
--------------------------------------------------------------------------------
/tests/data/posetrack18/images/val/009473_mpii_test/000000.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/posetrack18/images/val/009473_mpii_test/000000.jpg
--------------------------------------------------------------------------------
/tests/data/posetrack18/images/val/012834_mpii_test/000000.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/posetrack18/images/val/012834_mpii_test/000000.jpg
--------------------------------------------------------------------------------
/tests/data/posetrack18/mask/val/003418_mpii_test/000000.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/posetrack18/mask/val/003418_mpii_test/000000.jpg
--------------------------------------------------------------------------------
/tests/data/posetrack18/mask/val/009473_mpii_test/000000.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/posetrack18/mask/val/009473_mpii_test/000000.jpg
--------------------------------------------------------------------------------
/tests/data/posetrack18/mask/val/012834_mpii_test/000000.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/posetrack18/mask/val/012834_mpii_test/000000.jpg
--------------------------------------------------------------------------------
/tests/data/posetrack18/videos/000001_mpiinew_test/000000.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/posetrack18/videos/000001_mpiinew_test/000000.jpg
--------------------------------------------------------------------------------
/tests/data/posetrack18/videos/000001_mpiinew_test/000001.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/posetrack18/videos/000001_mpiinew_test/000001.jpg
--------------------------------------------------------------------------------
/tests/data/posetrack18/videos/000001_mpiinew_test/000001_mpiinew_test.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/posetrack18/videos/000001_mpiinew_test/000001_mpiinew_test.mp4
--------------------------------------------------------------------------------
/tests/data/posetrack18/videos/000001_mpiinew_test/000002.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/posetrack18/videos/000001_mpiinew_test/000002.jpg
--------------------------------------------------------------------------------
/tests/data/posetrack18/videos/000001_mpiinew_test/000003.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/posetrack18/videos/000001_mpiinew_test/000003.jpg
--------------------------------------------------------------------------------
/tests/data/posetrack18/videos/000001_mpiinew_test/000004.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/posetrack18/videos/000001_mpiinew_test/000004.jpg
--------------------------------------------------------------------------------
/tests/data/rhd/00111.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/rhd/00111.png
--------------------------------------------------------------------------------
/tests/data/rhd/01111.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/rhd/01111.png
--------------------------------------------------------------------------------
/tests/data/rhd/11111.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/rhd/11111.png
--------------------------------------------------------------------------------
/tests/data/shelf/actorsGT.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/shelf/actorsGT.mat
--------------------------------------------------------------------------------
/tests/data/shelf/panoptic_training_pose.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/shelf/panoptic_training_pose.pkl
--------------------------------------------------------------------------------
/tests/data/shelf/pred_shelf_maskrcnn_hrnet_coco.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/shelf/pred_shelf_maskrcnn_hrnet_coco.pkl
--------------------------------------------------------------------------------
/tests/data/smpl/smpl_mean_params.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/smpl/smpl_mean_params.npz
--------------------------------------------------------------------------------
/tests/data/wflw/36_Football_americanfootball_ball_36_415.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/wflw/36_Football_americanfootball_ball_36_415.jpg
--------------------------------------------------------------------------------
/tests/data/wflw/7_Cheering_Cheering_7_16.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/wflw/7_Cheering_Cheering_7_16.jpg
--------------------------------------------------------------------------------
/tests/data/zebra/810.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/zebra/810.jpg
--------------------------------------------------------------------------------
/tests/data/zebra/850.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmpose/71ec36ebd63c475ab589afc817868e749a61491f/tests/data/zebra/850.jpg
--------------------------------------------------------------------------------
/tests/test_external/test_mim.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import os
3 | from tempfile import TemporaryDirectory
4 | from unittest import TestCase
5 |
6 | from mim.commands import download
7 |
8 |
9 | class TestMIM(TestCase):
10 |
11 | def test_download(self):
12 | with TemporaryDirectory() as tmp_dir:
13 | ckpts = download(
14 | 'mmpose',
15 | configs=['td-hm_hrnet-w48_8xb32-210e_coco-256x192'],
16 | dest_root=tmp_dir)
17 |
18 | self.assertEqual(len(ckpts), 1)
19 | self.assertIn('td-hm_hrnet-w48_8xb32-210e_coco-256x192.py',
20 | os.listdir(tmp_dir))
21 | self.assertIn(ckpts[0], os.listdir(tmp_dir))
22 |
--------------------------------------------------------------------------------
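Note: the test above exercises MIM's programmatic download API, which returns the filenames of the checkpoints it fetched. As a minimal sketch (not part of the repository), the same call can feed mmpose's high-level init_model helper; the config alias is the one used in the test, and mmpose/mim are assumed to be installed:

    # Sketch: download a config/checkpoint pair with MIM, then build the model.
    import os
    from tempfile import TemporaryDirectory

    from mim.commands import download
    from mmpose.apis import init_model

    with TemporaryDirectory() as tmp_dir:
        ckpts = download(
            'mmpose',
            configs=['td-hm_hrnet-w48_8xb32-210e_coco-256x192'],
            dest_root=tmp_dir)
        cfg = os.path.join(tmp_dir, 'td-hm_hrnet-w48_8xb32-210e_coco-256x192.py')
        model = init_model(cfg, os.path.join(tmp_dir, ckpts[0]), device='cpu')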
/tests/test_models/test_backbones/test_alexnet.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from unittest import TestCase
3 |
4 | import torch
5 |
6 | from mmpose.models.backbones import AlexNet
7 |
8 |
9 | class TestAlexNet(TestCase):
10 |
11 | def test_alexnet_backbone(self):
12 | """Test alexnet backbone."""
13 | model = AlexNet(-1)
14 | model.train()
15 |
16 | imgs = torch.randn(1, 3, 256, 192)
17 | feat = model(imgs)
18 | self.assertIsInstance(feat, tuple)
19 | self.assertEqual(feat[-1].shape, (1, 256, 7, 5))
20 |
21 | model = AlexNet(1)
22 | model.train()
23 |
24 | imgs = torch.randn(1, 3, 224, 224)
25 | feat = model(imgs)
26 | self.assertIsInstance(feat, tuple)
27 | self.assertEqual(feat[-1].shape, (1, 1))
28 |
--------------------------------------------------------------------------------
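Note: the expected shapes encode two behaviours of the backbone: a non-positive num_classes (AlexNet(-1)) returns the final conv feature map, while a positive one appends the classifier and returns (N, num_classes). The 7x5 map for a 256x192 input follows from plain stride arithmetic, assuming the stack mirrors torchvision's AlexNet feature extractor (conv1 k=11/s=4/p=2, three k=3/s=2 max-pools, remaining convs size-preserving):

    # Sketch: reproduce the 7x5 feature size from conv/pool arithmetic.
    def out_size(n, k, s, p=0):
        return (n + 2 * p - k) // s + 1

    def alexnet_feat_size(n):
        n = out_size(n, k=11, s=4, p=2)  # conv1
        n = out_size(n, k=3, s=2)        # pool1
        n = out_size(n, k=3, s=2)        # pool2 (conv2 preserves size)
        n = out_size(n, k=3, s=2)        # pool3 (conv3-5 preserve size)
        return n

    assert alexnet_feat_size(256) == 7
    assert alexnet_feat_size(192) == 5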
/tests/test_models/test_backbones/test_v2v_net.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from unittest import TestCase
3 |
4 | import torch
5 |
6 | from mmpose.models.backbones import V2VNet
7 |
8 |
9 | class TestV2VNet(TestCase):
10 |
11 |     def test_v2v_net(self):
12 |         """Test V2VNet."""
13 |         model = V2VNet(input_channels=17, output_channels=15)
14 |         inputs = torch.randn(2, 17, 32, 32, 32)  # avoid shadowing builtin `input`
15 |         output = model(inputs)
16 |         self.assertIsInstance(output, tuple)
17 |         self.assertEqual(output[-1].shape, (2, 15, 32, 32, 32))
18 |
--------------------------------------------------------------------------------
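Note: V2VNet consumes and produces volumetric feature maps at the same resolution, here mapping 17 input channels to 15 per-joint volumes over a 32^3 grid. A typical downstream step is decoding each volume into a 3D coordinate; the soft-argmax below is a generic decoding sketch, not V2VNet's own API:

    # Sketch: decode per-joint heatmap volumes into 3D voxel coordinates
    # via a soft-argmax (expectation over a softmax-normalised volume).
    import torch

    def soft_argmax_3d(volumes):
        """volumes: (B, K, D, H, W) -> coords: (B, K, 3) in voxel units."""
        b, k, d, h, w = volumes.shape
        probs = volumes.flatten(2).softmax(-1).reshape(b, k, d, h, w)
        z = (probs.sum(dim=(3, 4)) * torch.arange(d)).sum(-1)  # expectation over depth
        y = (probs.sum(dim=(2, 4)) * torch.arange(h)).sum(-1)  # expectation over height
        x = (probs.sum(dim=(2, 3)) * torch.arange(w)).sum(-1)  # expectation over width
        return torch.stack([x, y, z], dim=-1)

    coords = soft_argmax_3d(torch.randn(2, 15, 32, 32, 32))
    assert coords.shape == (2, 15, 3)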
/tools/analysis_tools/print_config.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import argparse
3 |
4 | from mmengine import Config, DictAction
5 |
6 |
7 | def parse_args():
8 | parser = argparse.ArgumentParser(description='Print the whole config')
9 | parser.add_argument('config', help='config file path')
10 | parser.add_argument(
11 | '--options', nargs='+', action=DictAction, help='arguments in dict')
12 | args = parser.parse_args()
13 |
14 | return args
15 |
16 |
17 | def main():
18 | args = parse_args()
19 |
20 | cfg = Config.fromfile(args.config)
21 | if args.options is not None:
22 | cfg.merge_from_dict(args.options)
23 | print(f'Config:\n{cfg.pretty_text}')
24 |
25 |
26 | if __name__ == '__main__':
27 | main()
28 |
--------------------------------------------------------------------------------
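Note: the tool resolves a config file (including its _base_ inheritance chain) and prints the fully merged result, with --options applying dotted-key overrides through mmengine's DictAction, e.g. python tools/analysis_tools/print_config.py configs/_base_/default_runtime.py --options default_hooks.logger.interval=10. The same override can be applied programmatically; the dotted key below is illustrative and assumed to exist in the loaded config:

    # Sketch: resolve a config and apply a dotted-key override with mmengine.
    from mmengine import Config

    cfg = Config.fromfile('configs/_base_/default_runtime.py')
    cfg.merge_from_dict({'default_hooks.logger.interval': 10})
    print(cfg.pretty_text)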
/tools/dataset_converters/scripts/preprocess_300w.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | DOWNLOAD_DIR=$1
4 | DATA_ROOT=$2
5 |
6 | tar -zxvf $DOWNLOAD_DIR/OpenDataLab___300w/raw/300w.tar.gz.00 -C $DOWNLOAD_DIR/
7 | tar -xvf $DOWNLOAD_DIR/300w/300w.tar.00 -C $DATA_ROOT/
8 | rm -rf $DOWNLOAD_DIR/300w $DOWNLOAD_DIR/OpenDataLab___300w
9 |
--------------------------------------------------------------------------------
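Note: the preprocess_*.sh scripts in this directory share one contract: the first argument is the directory holding the raw OpenDataLab download and the second is the target data root, e.g. bash tools/dataset_converters/scripts/preprocess_300w.sh ~/downloads data/300w. Datasets shipped as split archives (the .tar.gz.00/.tar.00 names above) are unpacked in two stages: the gzipped part is extracted into DOWNLOAD_DIR to recover an inner tar, which is then extracted into DATA_ROOT before the intermediates are deleted. Single-tarball datasets (AI Challenger, CrowdPose, FreiHAND, LaPa, MPII) extract straight into DATA_ROOT, COCO 2017 uses unzip, and HaGRID reassembles many parts with cat (see the comments in that script).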
/tools/dataset_converters/scripts/preprocess_aic.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | DOWNLOAD_DIR=$1
4 | DATA_ROOT=$2
5 |
6 | tar -zxvf $DOWNLOAD_DIR/OpenDataLab___AI_Challenger/raw/AI_Challenger.tar.gz -C $DATA_ROOT
7 | rm -rf $DOWNLOAD_DIR/OpenDataLab___AI_Challenger
8 |
--------------------------------------------------------------------------------
/tools/dataset_converters/scripts/preprocess_ap10k.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | DOWNLOAD_DIR=$1
4 | DATA_ROOT=$2
5 |
6 | tar -zxvf $DOWNLOAD_DIR/OpenDataLab___AP-10K/raw/AP-10K.tar.gz.00 -C $DOWNLOAD_DIR/
7 | tar -xvf $DOWNLOAD_DIR/AP-10K/AP-10K.tar.00 -C $DATA_ROOT/
8 | rm -rf $DOWNLOAD_DIR/AP-10K $DOWNLOAD_DIR/OpenDataLab___AP-10K
9 |
--------------------------------------------------------------------------------
/tools/dataset_converters/scripts/preprocess_coco2017.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | DOWNLOAD_DIR=$1
4 | DATA_ROOT=$2
5 |
6 | unzip $DOWNLOAD_DIR/OpenDataLab___COCO_2017/raw/Images/val2017.zip -d $DATA_ROOT
7 | unzip $DOWNLOAD_DIR/OpenDataLab___COCO_2017/raw/Images/train2017.zip -d $DATA_ROOT
8 | unzip $DOWNLOAD_DIR/OpenDataLab___COCO_2017/raw/Annotations/annotations_trainval2017.zip -d $DATA_ROOT
9 | rm -rf $DOWNLOAD_DIR/OpenDataLab___COCO_2017
10 |
--------------------------------------------------------------------------------
/tools/dataset_converters/scripts/preprocess_crowdpose.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | DOWNLOAD_DIR=$1
4 | DATA_ROOT=$2
5 |
6 | tar -zxvf $DOWNLOAD_DIR/OpenDataLab___CrowdPose/raw/CrowdPose.tar.gz -C $DATA_ROOT
7 | rm -rf $DOWNLOAD_DIR/OpenDataLab___CrowdPose
8 |
--------------------------------------------------------------------------------
/tools/dataset_converters/scripts/preprocess_freihand.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | DOWNLOAD_DIR=$1
4 | DATA_ROOT=$2
5 |
6 | tar -zxvf $DOWNLOAD_DIR/OpenDataLab___FreiHAND/raw/FreiHAND.tar.gz -C $DATA_ROOT
7 | rm -rf $DOWNLOAD_DIR/OpenDataLab___FreiHAND
8 |
--------------------------------------------------------------------------------
/tools/dataset_converters/scripts/preprocess_hagrid.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | DOWNLOAD_DIR=$1
4 | DATA_ROOT=$2
5 |
6 | # HaGRID ships as many split .tar.gz parts: concatenate them and stream
7 | # the result to tar, extracting into the parent of DATA_ROOT.
8 | cat $DOWNLOAD_DIR/OpenDataLab___HaGRID/raw/*.tar.gz.* | tar -xvz -C $DATA_ROOT/..
9 | # Unpack the inner HaGRID.tar, then remove the raw download.
10 | tar -xvf $DATA_ROOT/HaGRID.tar -C $DATA_ROOT/..
11 | rm -rf $DOWNLOAD_DIR/OpenDataLab___HaGRID
12 |
--------------------------------------------------------------------------------
/tools/dataset_converters/scripts/preprocess_halpe.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | DOWNLOAD_DIR=$1
4 | DATA_ROOT=$2
5 |
6 | tar -zxvf $DOWNLOAD_DIR/OpenDataLab___Halpe/raw/Halpe.tar.gz.00 -C $DOWNLOAD_DIR/
7 | tar -xvf $DOWNLOAD_DIR/Halpe/Halpe.tar.00 -C $DATA_ROOT/
8 | rm -rf $DOWNLOAD_DIR/Halpe $DOWNLOAD_DIR/OpenDataLab___Halpe
9 |
--------------------------------------------------------------------------------
/tools/dataset_converters/scripts/preprocess_lapa.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | DOWNLOAD_DIR=$1
4 | DATA_ROOT=$2
5 |
6 | tar -zxvf $DOWNLOAD_DIR/OpenDataLab___LaPa/raw/LaPa.tar.gz -C $DATA_ROOT
7 | rm -rf $DOWNLOAD_DIR/OpenDataLab___LaPa
8 |
--------------------------------------------------------------------------------
/tools/dataset_converters/scripts/preprocess_mpii.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | DOWNLOAD_DIR=$1
4 | DATA_ROOT=$2
5 |
6 | tar -zxvf $DOWNLOAD_DIR/OpenDataLab___MPII_Human_Pose/raw/MPII_Human_Pose.tar.gz -C $DATA_ROOT
7 | rm -rf $DOWNLOAD_DIR/OpenDataLab___MPII_Human_Pose
8 |
--------------------------------------------------------------------------------
/tools/dataset_converters/scripts/preprocess_onehand10k.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | DOWNLOAD_DIR=$1
4 | DATA_ROOT=$2
5 |
6 | tar -zxvf $DOWNLOAD_DIR/OpenDataLab___OneHand10K/raw/OneHand10K.tar.gz.00 -C $DOWNLOAD_DIR/
7 | tar -xvf $DOWNLOAD_DIR/OneHand10K/OneHand10K.tar.00 -C $DATA_ROOT/
8 | rm -rf $DOWNLOAD_DIR/OneHand10K $DOWNLOAD_DIR/OpenDataLab___OneHand10K
9 |
--------------------------------------------------------------------------------
/tools/dataset_converters/scripts/preprocess_wflw.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | DOWNLOAD_DIR=$1
4 | DATA_ROOT=$2
5 |
6 | tar -zxvf $DOWNLOAD_DIR/OpenDataLab___WFLW/raw/WFLW.tar.gz.00 -C $DOWNLOAD_DIR/
7 | tar -xvf $DOWNLOAD_DIR/WFLW/WFLW.tar.00 -C $DATA_ROOT/
8 | rm -rf $DOWNLOAD_DIR/WFLW $DOWNLOAD_DIR/OpenDataLab___WFLW
9 |
--------------------------------------------------------------------------------
/tools/dist_test.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # Copyright (c) OpenMMLab. All rights reserved.
3 |
4 | CONFIG=$1
5 | CHECKPOINT=$2
6 | GPUS=$3
7 | NNODES=${NNODES:-1}
8 | NODE_RANK=${NODE_RANK:-0}
9 | PORT=${PORT:-29500}
10 | MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"}
11 |
12 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
13 | python -m torch.distributed.launch \
14 | --nnodes=$NNODES \
15 | --node_rank=$NODE_RANK \
16 | --master_addr=$MASTER_ADDR \
17 | --nproc_per_node=$GPUS \
18 | --master_port=$PORT \
19 | $(dirname "$0")/test.py \
20 | $CONFIG \
21 | $CHECKPOINT \
22 | --launcher pytorch \
23 | ${@:4}
24 |
--------------------------------------------------------------------------------
/tools/dist_train.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # Copyright (c) OpenMMLab. All rights reserved.
3 |
4 | CONFIG=$1
5 | GPUS=$2
6 | NNODES=${NNODES:-1}
7 | NODE_RANK=${NODE_RANK:-0}
8 | PORT=${PORT:-29500}
9 | MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"}
10 |
11 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
12 | python -m torch.distributed.launch \
13 | --nnodes=$NNODES \
14 | --node_rank=$NODE_RANK \
15 | --master_addr=$MASTER_ADDR \
16 | --nproc_per_node=$GPUS \
17 | --master_port=$PORT \
18 | $(dirname "$0")/train.py \
19 | $CONFIG \
20 | --launcher pytorch ${@:3}
21 |
--------------------------------------------------------------------------------
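Note: dist_test.sh and dist_train.sh share the same launcher interface: NNODES, NODE_RANK, MASTER_ADDR and PORT are environment variables defaulting to a single node on port 29500, and all arguments after the positional ones are forwarded to test.py/train.py. A single-node run is e.g. bash tools/dist_train.sh <config.py> 8; for two nodes, run the same command on each machine with NNODES=2 MASTER_ADDR=<node0-ip> and NODE_RANK set to 0 or 1. The config, checkpoint and forwarded arguments are expanded unquoted, so paths containing spaces will be word-split.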
/tools/slurm_test.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # Copyright (c) OpenMMLab. All rights reserved.
3 |
4 | set -x
5 |
6 | PARTITION=$1
7 | JOB_NAME=$2
8 | CONFIG=$3
9 | CHECKPOINT=$4
10 | GPUS=${GPUS:-8}
11 | GPUS_PER_NODE=${GPUS_PER_NODE:-8}
12 | CPUS_PER_TASK=${CPUS_PER_TASK:-5}
13 | SRUN_ARGS=${SRUN_ARGS:-""}
14 |
15 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
16 | srun -p ${PARTITION} \
17 | --job-name=${JOB_NAME} \
18 | --gres=gpu:${GPUS_PER_NODE} \
19 | --ntasks=${GPUS} \
20 | --ntasks-per-node=${GPUS_PER_NODE} \
21 | --cpus-per-task=${CPUS_PER_TASK} \
22 | --kill-on-bad-exit=1 \
23 | ${SRUN_ARGS} \
24 | python -u tools/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${@:5}
25 |
--------------------------------------------------------------------------------
/tools/slurm_train.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # Copyright (c) OpenMMLab. All rights reserved.
3 |
4 | set -x
5 |
6 | PARTITION=$1
7 | JOB_NAME=$2
8 | CONFIG=$3
9 | WORK_DIR=$4
10 | GPUS=${GPUS:-8}
11 | GPUS_PER_NODE=${GPUS_PER_NODE:-8}
12 | CPUS_PER_TASK=${CPUS_PER_TASK:-5}
13 | SRUN_ARGS=${SRUN_ARGS:-""}
14 |
15 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
16 | srun -p ${PARTITION} \
17 | --job-name=${JOB_NAME} \
18 | --gres=gpu:${GPUS_PER_NODE} \
19 | --ntasks=${GPUS} \
20 | --ntasks-per-node=${GPUS_PER_NODE} \
21 | --cpus-per-task=${CPUS_PER_TASK} \
22 | --kill-on-bad-exit=1 \
23 | ${SRUN_ARGS} \
24 | python -u tools/train.py ${CONFIG} --work-dir=${WORK_DIR} --launcher="slurm" ${@:5}
25 |
--------------------------------------------------------------------------------
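Note: the Slurm wrappers mirror the distributed pair but delegate process placement to srun: GPUS, GPUS_PER_NODE and CPUS_PER_TASK default to a single 8-GPU node, and SRUN_ARGS passes extra scheduler options through verbatim. For example, GPUS=16 GPUS_PER_NODE=8 bash tools/slurm_train.sh <partition> <job-name> <config.py> <work-dir> requests two 8-GPU nodes, with anything after the work dir forwarded to train.py. set -x echoes each expanded command, so the generated srun invocation can be audited in the job log.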