├── .DS_Store
├── .gitignore
├── Dockerfile
├── LICENSE.md
├── README.md
├── active_configs
└── Hi.md
├── configs
├── eval_3dpw_challenge.yml
├── eval_3dpw_test.yml
├── eval_3dpw_test_ft.yml
├── eval_3dpw_test_resnet.yml
├── eval_3dpw_test_resnet_ft.yml
├── eval_cmu_panoptic.yml
├── eval_crowdpose_test.yml
├── eval_crowdpose_val.yml
├── image.yml
├── test.yml
├── v1.yml
├── v1_hrnet_3dpw_ft.yml
├── v1_resnet.yml
├── v1_resnet_3dpw_ft.yml
├── v6_ft.yml
├── v6_train.yml
├── video-batch.yml
├── video.yml
├── webcam.yml
└── webcam_blender.yml
├── demo
└── images
│ ├── 3dpw_sit_on_street.jpg
│ ├── Cristiano_Ronaldo.jpg
│ └── Leo_Messi.jpg
├── docs
├── basic_installation.md
├── batch_videos.md
├── bev_evaluation.md
├── bugs.md
├── config_guide.md
├── contributor.md
├── dataset.md
├── docker.md
├── export.md
├── features.md
├── installation.md
├── old_readme.md
├── romp_evaluation.md
├── trace_dataset.md
├── train.md
├── updates.md
└── visualization.md
├── requirements.txt
├── romp
├── .DS_Store
├── __init__.py
├── _init_paths_.py
├── base.py
├── eval.py
├── exports
│ └── blender_mocap.py
├── lib
│ ├── .DS_Store
│ ├── config.py
│ ├── constants.py
│ ├── dataset
│ │ ├── AICH.py
│ │ ├── MuCo.py
│ │ ├── MuPoTS.py
│ │ ├── __init__.py
│ │ ├── agora.py
│ │ ├── base.py
│ │ ├── camera_parameters.py
│ │ ├── cmu_panoptic_eval.py
│ │ ├── coco14.py
│ │ ├── crowdhuman.py
│ │ ├── crowdpose.py
│ │ ├── h36m.py
│ │ ├── image_base.py
│ │ ├── image_base_relative.py
│ │ ├── internet.py
│ │ ├── lsp.py
│ │ ├── mixed_dataset.py
│ │ ├── mpi_inf_3dhp.py
│ │ ├── mpi_inf_3dhp_test.py
│ │ ├── mpi_inf_3dhp_validation.py
│ │ ├── mpii.py
│ │ ├── posetrack.py
│ │ ├── posetrack21.py
│ │ ├── preprocess
│ │ │ └── h36m_extract_frames.py
│ │ ├── pw3d.py
│ │ ├── relative_human.py
│ │ └── up.py
│ ├── evaluation
│ │ ├── .DS_Store
│ │ ├── __init__.py
│ │ ├── collect_3DPW_results.py
│ │ ├── collect_CRMH_3DPW_results.py
│ │ ├── collect_VIBE_3DPW_results.py
│ │ ├── crowdpose-api
│ │ │ ├── PythonAPI
│ │ │ │ ├── crowdposetools
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── _mask.c
│ │ │ │ │ ├── _mask.pyx
│ │ │ │ │ ├── coco.py
│ │ │ │ │ ├── cocoeval.py
│ │ │ │ │ └── mask.py
│ │ │ │ ├── demo.py
│ │ │ │ └── setup.py
│ │ │ └── common
│ │ │ │ ├── gason.cpp
│ │ │ │ ├── gason.h
│ │ │ │ ├── maskApi.c
│ │ │ │ └── maskApi.h
│ │ ├── eval_CRMH_results.py
│ │ ├── eval_ds_utils.py
│ │ ├── eval_pckh.py
│ │ ├── evaluation_matrix.py
│ │ └── pw3d_eval
│ │ │ ├── SMPL.py
│ │ │ ├── __init__.py
│ │ │ ├── evaluate.py
│ │ │ ├── readme.md
│ │ │ ├── scripts
│ │ │ └── install_prep.sh
│ │ │ └── utils.py
│ ├── loss_funcs
│ │ ├── __init__.py
│ │ ├── calc_loss.py
│ │ ├── keypoints_loss.py
│ │ ├── learnable_loss.py
│ │ ├── maps_loss.py
│ │ ├── params_loss.py
│ │ ├── prior_loss.py
│ │ └── relative_loss.py
│ ├── maps_utils
│ │ ├── __init__.py
│ │ ├── centermap.py
│ │ ├── debug_utils.py
│ │ ├── kp_group.py
│ │ ├── result_parser.py
│ │ └── target_generators.py
│ ├── models
│ │ ├── CoordConv.py
│ │ ├── __init__.py
│ │ ├── balanced_dataparallel.py
│ │ ├── base.py
│ │ ├── basic_modules.py
│ │ ├── bev_model.py
│ │ ├── build.py
│ │ ├── hrnet_32.py
│ │ ├── resnet_50.py
│ │ └── romp_model.py
│ ├── smpl_family
│ │ ├── __init__.py
│ │ ├── smpl.py
│ │ ├── smpl_regressor.py
│ │ ├── smpl_wrapper.py
│ │ ├── smpl_wrapper_relative.py
│ │ └── smpla.py
│ ├── tracking
│ │ ├── __init__.py
│ │ ├── basetrack.py
│ │ ├── matching.py
│ │ ├── tracker.py
│ │ └── tracking_utils
│ │ │ ├── evaluation.py
│ │ │ ├── io.py
│ │ │ ├── kalman_filter.py
│ │ │ ├── log.py
│ │ │ ├── nms.py
│ │ │ ├── parse_config.py
│ │ │ ├── timer.py
│ │ │ ├── utils.py
│ │ │ └── visualization.py
│ ├── utils
│ │ ├── __init__.py
│ │ ├── augments.py
│ │ ├── cam_utils.py
│ │ ├── center_utils.py
│ │ ├── demo_utils.py
│ │ ├── projection.py
│ │ ├── rot_6D.py
│ │ ├── temporal_optimization.py
│ │ ├── train_utils.py
│ │ └── util.py
│ └── visualization
│ │ ├── __init__.py
│ │ ├── create_meshes.py
│ │ ├── open3d_visualizer.py
│ │ ├── renderer_pt3d.py
│ │ ├── renderer_pyrd.py
│ │ ├── socket_utils.py
│ │ ├── vedo_visualizer.py
│ │ ├── vis_cfgs
│ │ ├── o3d_scene_o3d13.yml
│ │ ├── o3d_scene_py36_o3d9.yml
│ │ └── render_option.json
│ │ ├── vis_platform
│ │ ├── vis_client.py
│ │ ├── vis_server.py
│ │ ├── vis_server_o3d13.py
│ │ ├── vis_server_py36_o3d9.py
│ │ ├── vis_utils_o3d13.py
│ │ └── vis_utils_py36_o3d9.py
│ │ ├── visualization.py
│ │ └── web_vis.py
├── predict
│ ├── __init__.py
│ ├── base_predictor.py
│ ├── image.py
│ ├── video.py
│ └── webcam.py
├── pretrain.py
├── test.py
└── train.py
├── scripts
├── V1_hrnet_3dpwft.sh
├── V1_resnet_3dpwft.sh
├── V1_train.sh
├── V1_train_resnet.sh
├── V6_ft.sh
├── V6_train.sh
├── export_fbx.sh
├── image.sh
├── launch_vis_server.sh
├── pretrain.sh
├── run.sh
├── setup.sh
├── test.sh
├── test_3dpwchallenge.sh
├── train_distributed.sh
├── video.sh
├── webcam.sh
└── webcam_blender.sh
├── simple_romp
├── LICENSE
├── MANIFEST.in
├── README.md
├── __init__.py
├── bev
│ ├── __init__.py
│ ├── main.py
│ ├── model.py
│ ├── pack_smil_info.py
│ ├── post_parser.py
│ └── split2process.py
├── build.sh
├── doc
│ └── export.md
├── evaluation
│ ├── RH_evaluation
│ │ ├── __init__.py
│ │ ├── evaluation.py
│ │ └── matching.py
│ ├── __init__.py
│ ├── eval_AGORA.py
│ ├── eval_Relative_Human.py
│ └── eval_cmu_panoptic.py
├── export.sh
├── reinstall.sh
├── requirements.txt
├── romp
│ ├── __init__.py
│ ├── main.py
│ ├── model.py
│ ├── pack_smpl_info.py
│ ├── post_parser.py
│ ├── smpl.py
│ └── utils.py
├── run.sh
├── setup.py
├── setup_trace.py
├── tools
│ ├── convert2fbx.py
│ └── convert_checkpoints.py
├── trace2
│ ├── PMPJPE_BUG_REPORT.md
│ ├── README.md
│ ├── __init__.py
│ ├── eval.py
│ ├── evaluation
│ │ ├── TrackEval
│ │ │ ├── .gitignore
│ │ │ ├── LICENSE
│ │ │ ├── Readme.md
│ │ │ ├── docs
│ │ │ │ ├── How_To
│ │ │ │ │ └── Add_a_new_metric.md
│ │ │ │ ├── MOTChallenge-Official
│ │ │ │ │ └── Readme.md
│ │ │ │ └── RobMOTS-Official
│ │ │ │ │ └── Readme.md
│ │ │ ├── minimum_requirements.txt
│ │ │ ├── pyproject.toml
│ │ │ ├── requirements.txt
│ │ │ ├── scripts
│ │ │ │ ├── comparison_plots.py
│ │ │ │ ├── run_bdd.py
│ │ │ │ ├── run_davis.py
│ │ │ │ ├── run_headtracking_challenge.py
│ │ │ │ ├── run_kitti.py
│ │ │ │ ├── run_kitti_mots.py
│ │ │ │ ├── run_mot_challenge.py
│ │ │ │ ├── run_mots_challenge.py
│ │ │ │ ├── run_rob_mots.py
│ │ │ │ ├── run_tao.py
│ │ │ │ └── run_youtube_vis.py
│ │ │ ├── setup.cfg
│ │ │ ├── setup.py
│ │ │ ├── tests
│ │ │ │ ├── test_all_quick.py
│ │ │ │ ├── test_davis.py
│ │ │ │ ├── test_metrics.py
│ │ │ │ ├── test_mot17.py
│ │ │ │ └── test_mots.py
│ │ │ └── trackeval
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _timing.py
│ │ │ │ ├── baselines
│ │ │ │ ├── __init__.py
│ │ │ │ ├── baseline_utils.py
│ │ │ │ ├── non_overlap.py
│ │ │ │ ├── pascal_colormap.py
│ │ │ │ ├── stp.py
│ │ │ │ ├── thresholder.py
│ │ │ │ └── vizualize.py
│ │ │ │ ├── datasets
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _base_dataset.py
│ │ │ │ ├── bdd100k.py
│ │ │ │ ├── davis.py
│ │ │ │ ├── head_tracking_challenge.py
│ │ │ │ ├── kitti_2d_box.py
│ │ │ │ ├── kitti_mots.py
│ │ │ │ ├── mot_challenge_2d_box.py
│ │ │ │ ├── mots_challenge.py
│ │ │ │ ├── rob_mots.py
│ │ │ │ ├── rob_mots_classmap.py
│ │ │ │ ├── run_rob_mots.py
│ │ │ │ ├── tao.py
│ │ │ │ └── youtube_vis.py
│ │ │ │ ├── eval.py
│ │ │ │ ├── metrics
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _base_metric.py
│ │ │ │ ├── clear.py
│ │ │ │ ├── count.py
│ │ │ │ ├── hota.py
│ │ │ │ ├── identity.py
│ │ │ │ ├── ideucl.py
│ │ │ │ ├── j_and_f.py
│ │ │ │ ├── track_map.py
│ │ │ │ └── vace.py
│ │ │ │ ├── plotting.py
│ │ │ │ └── utils.py
│ │ ├── __init__.py
│ │ ├── dynacam_evaluation
│ │ │ ├── __init__.py
│ │ │ ├── evalute_ate.py
│ │ │ ├── loading_data.py
│ │ │ └── utils.py
│ │ ├── eval_3DPW.py
│ │ ├── eval_dynacam.py
│ │ ├── evaluate_tracking.py
│ │ ├── evaluation.py
│ │ └── smpl.py
│ ├── install.sh
│ ├── main.py
│ ├── models
│ │ ├── TempTracker.py
│ │ ├── basic_modules.py
│ │ ├── debug_utils.py
│ │ ├── deform_conv
│ │ │ ├── __init__.py
│ │ │ ├── functions
│ │ │ │ ├── __init__.py
│ │ │ │ ├── deform_conv.py
│ │ │ │ └── deform_pool.py
│ │ │ ├── install.sh
│ │ │ ├── modules
│ │ │ │ ├── __init__.py
│ │ │ │ ├── deform_conv.py
│ │ │ │ └── deform_pool.py
│ │ │ ├── setup.py
│ │ │ └── src
│ │ │ │ ├── deform_conv_cuda.cpp
│ │ │ │ ├── deform_conv_cuda_kernel.cu
│ │ │ │ ├── deform_pool_cuda.cpp
│ │ │ │ └── deform_pool_cuda_kernel.cu
│ │ ├── hrnet_32.py
│ │ ├── model.py
│ │ ├── raft
│ │ │ ├── __init__.py
│ │ │ ├── corr.py
│ │ │ ├── extractor.py
│ │ │ ├── process.py
│ │ │ ├── raft.py
│ │ │ ├── update.py
│ │ │ └── utils
│ │ │ │ ├── __init__.py
│ │ │ │ ├── augmentor.py
│ │ │ │ ├── flow_viz.py
│ │ │ │ ├── frame_utils.py
│ │ │ │ └── utils.py
│ │ └── smpl.py
│ ├── results_parser
│ │ ├── __init__.py
│ │ ├── centermap.py
│ │ ├── smpl_wrapper_relative_temp.py
│ │ └── temp_result_parser.py
│ ├── show.py
│ ├── track.py
│ ├── tracker
│ │ ├── __init__.py
│ │ ├── basetrack.py
│ │ ├── matching_3dcenter.py
│ │ └── tracker3D.py
│ └── utils
│ │ ├── eval_utils.py
│ │ ├── infer_settings.py
│ │ ├── infer_utils.py
│ │ ├── load_data.py
│ │ ├── open3d_gui.py
│ │ ├── utils.py
│ │ ├── visualize_maps.py
│ │ └── visualize_results.py
├── tracker
│ ├── __init__.py
│ ├── basetrack.py
│ ├── byte_tracker_3dcenter.py
│ ├── kalman_filter_3dcenter.py
│ └── matching.py
├── upload_pypi.sh
└── vis_human
│ ├── __init__.py
│ ├── main.py
│ ├── pyrenderer.py
│ ├── sim3drender
│ ├── __init__.py
│ ├── lib
│ │ ├── rasterize.cpp
│ │ ├── rasterize.h
│ │ ├── rasterize.pyx
│ │ └── rasterize_kernel.cpp
│ └── renderer.py
│ ├── vedo_vis.py
│ └── vis_utils.py
└── trace
├── README.md
├── __init__.py
├── _init_paths_.py
├── base.py
├── configs
└── trace.yml
├── install.sh
├── lib
├── config.py
├── constants.py
├── datasets
│ ├── DynaCamRotation.py
│ ├── DynaCamTranslation.py
│ ├── __init__.py
│ ├── base.py
│ ├── camera_parameters.py
│ ├── h36m.py
│ ├── image_base.py
│ ├── image_base_relative.py
│ ├── internet_video.py
│ ├── mixed_dataset.py
│ ├── mpi_inf_3dhp.py
│ ├── mpi_inf_3dhp_test.py
│ ├── mpi_inf_3dhp_validation.py
│ ├── penn_action.py
│ ├── pw3d.py
│ └── video_base_relative.py
├── epropnp
│ ├── __init__.py
│ ├── camera.py
│ ├── common.py
│ ├── cost_fun.py
│ ├── distributions.py
│ ├── epropnp.py
│ └── levenberg_marquardt.py
├── evaluation
│ ├── __init__.py
│ ├── eval_3DMPB.py
│ ├── eval_ds_utils.py
│ ├── evaluation_matrix.py
│ ├── mupots_util
│ │ ├── __init__.py
│ │ ├── datautil.py
│ │ ├── evaluate.py
│ │ ├── load_dep_pred.py
│ │ ├── matcher.py
│ │ └── mpii_get_joints.py
│ └── smpl.py
├── loss_funcs
│ ├── __init__.py
│ ├── calc_loss.py
│ ├── keypoints_loss.py
│ ├── learnable_loss.py
│ ├── maps_loss.py
│ ├── matching.py
│ ├── params_loss.py
│ ├── prior_loss.py
│ ├── relative_loss.py
│ └── video_loss.py
├── maps_utils
│ ├── __init__.py
│ ├── centermap.py
│ ├── debug_utils.py
│ ├── matching.py
│ ├── matching_utils.py
│ ├── relative_parser.py
│ ├── result_parser.py
│ ├── suppress_duplication.py
│ ├── target_generators.py
│ └── temp_result_parser.py
├── models
│ ├── CoordConv.py
│ ├── EProPnP6DoFSolver.py
│ ├── GRU.py
│ ├── TempRegressor.py
│ ├── TempTracker.py
│ ├── TemporalRegressor.py
│ ├── __init__.py
│ ├── base.py
│ ├── basic_modules.py
│ ├── build.py
│ ├── deform_conv
│ │ ├── __init__.py
│ │ ├── deform_conv_cuda.cpython-39-x86_64-linux-gnu.so
│ │ ├── deform_pool_cuda.cpython-39-x86_64-linux-gnu.so
│ │ ├── functions
│ │ │ ├── __init__.py
│ │ │ ├── deform_conv.py
│ │ │ └── deform_pool.py
│ │ ├── install.sh
│ │ ├── modules
│ │ │ ├── __init__.py
│ │ │ ├── deform_conv.py
│ │ │ └── deform_pool.py
│ │ ├── setup.py
│ │ └── src
│ │ │ ├── deform_conv_cuda.cpp
│ │ │ ├── deform_conv_cuda_kernel.cu
│ │ │ ├── deform_pool_cuda.cpp
│ │ │ └── deform_pool_cuda_kernel.cu
│ ├── hrnet_32.py
│ ├── modelv1.py
│ ├── modelv6.py
│ ├── resnet_50.py
│ ├── tempGRU.py
│ ├── trace.py
│ └── video_base.py
├── parallel
│ ├── balanced_dataparallel.py
│ └── criterion_parallel.py
├── raft
│ ├── __init__.py
│ ├── corr.py
│ ├── extractor.py
│ ├── process.py
│ ├── raft.py
│ ├── update.py
│ └── utils
│ │ ├── __init__.py
│ │ ├── augmentor.py
│ │ ├── flow_viz.py
│ │ ├── frame_utils.py
│ │ └── utils.py
├── smpl_family
│ ├── __init__.py
│ ├── create_smpl_models.py
│ ├── flame.py
│ ├── mano.py
│ ├── pack_smpl_params
│ │ ├── pack_smil_info.py
│ │ ├── pack_smpl_info.py
│ │ ├── pack_smpla_info.py
│ │ ├── pack_smplx_info.py
│ │ ├── pack_smplx_info_sparse.py
│ │ └── pack_smplxa_info_sparse.py
│ ├── smpl.py
│ ├── smpl_regressor.py
│ ├── smpl_transfer_config_files
│ │ ├── smpl2smpl.yaml
│ │ ├── smpl2smplh.yaml
│ │ ├── smpl2smplx.yaml
│ │ ├── smplh2smpl.yaml
│ │ ├── smplh2smplx.yaml
│ │ ├── smplh2smplx_as.yaml
│ │ ├── smplh2smplx_onepose.yaml
│ │ ├── smplx2smpl.yaml
│ │ └── smplx2smplh.yaml
│ ├── smpl_wrapper.py
│ ├── smpl_wrapper_relative.py
│ ├── smpl_wrapper_relative_temp.py
│ ├── smpla.py
│ ├── smplx.py
│ └── transfer_smpl_parameters.py
├── tracker
│ ├── basetrack.py
│ ├── byte_tracker_3dcenter_combined.py
│ ├── cython_bbox
│ │ ├── .gitignore
│ │ ├── LICENSE
│ │ ├── MANIFEST.in
│ │ ├── README.md
│ │ ├── setup.py
│ │ └── src
│ │ │ ├── cython_bbox.c
│ │ │ └── cython_bbox.pyx
│ ├── kalman_filter_3dcenter.py
│ └── matching.py
├── utils
│ ├── __init__.py
│ ├── augments.py
│ ├── cam_utils.py
│ ├── center_utils.py
│ ├── convert2deepmimic.py
│ ├── debug.py
│ ├── demo_utils.py
│ ├── fetch_remote_file.py
│ ├── gpu_memory_log.py
│ ├── make_demo.py
│ ├── projection.py
│ ├── quaternion_operations.py
│ ├── rot_6D.py
│ ├── rotation_transform.py
│ ├── smooth_filter.py
│ ├── smpl_header.txt
│ ├── temporal_optimization.py
│ ├── train_utils.py
│ ├── transformation.py
│ ├── transforms.py
│ ├── util.py
│ ├── utils_pybullet.py
│ ├── video_utils.py
│ └── vis_utils.py
└── visualization
│ ├── __init__.py
│ ├── plotly_volume_viewer.py
│ ├── renderer_pt3d.py
│ ├── trajectory_viewer.py
│ ├── vis_utils.py
│ ├── visualization.py
│ ├── visualize_maps.py
│ ├── web_vis.py
│ └── world_vis.py
├── track.py
├── train.sh
└── train_video.py
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Arthur151/ROMP/a8558aed480af850756f84e2a7c787e359bddbd0/.DS_Store
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | #src/
2 | trained_models/
3 | #models/
4 | demo/videos/
5 | demo/animation/
6 | model_data/
7 | demo/*_results*/
8 | active_configs/*
9 | configs/*_self.yml
10 | build/
11 | dist/
12 | *.egg-info/
13 | .eggs/
14 | __pycache__/
15 | demo/images/
16 | log/*
17 | simple_romp/.DS_Store
18 | .DS_Store
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.7.13-slim-buster
2 |
3 | RUN apt-get update -y
4 | RUN apt install gcc g++ git wget -y
5 | RUN apt-get install ffmpeg libsm6 libxext6 -
6 | RUN pip install setuptools cython numpy
7 |
8 | WORKDIR /workspace
9 | RUN git clone https://github.com/Arthur151/ROMP.git
10 |
11 | WORKDIR /workspace/ROMP/simple_romp
12 | RUN python setup.py install
13 |
14 | # run this part to download weights automaticly
15 | WORKDIR /
16 | RUN wget http://im.rediff.com/sports/2011/aug/13pic1.jpg
17 | RUN romp --mode=image --input 13pic1.jpg -o . --render_mesh
18 | RUN romp --mode=image --input 13pic1.jpg -o . --render_mesh --onnx
19 |
20 | ENTRYPOINT [ "romp" ]
21 |
--------------------------------------------------------------------------------
/active_configs/Hi.md:
--------------------------------------------------------------------------------
1 | Hi, Github developer~
2 | Welcome to ROMP.
3 | I am very happy to meet you at ROMP and look forward to your valuable advices.
4 |
5 |
--------------------------------------------------------------------------------
/configs/eval_3dpw_challenge.yml:
--------------------------------------------------------------------------------
1 | ARGS:
2 | tab: 'pw3d_challenge'
3 | eval_dataset: 'pw3d_challenge'
4 | GPUS: 0
5 | model_version: 1
6 |
7 | val_batch_size: 16
8 | nw_eval: 2
9 | eval: True
10 | collision_aware_centermap: False
11 | collision_factor: 0.2
12 | calc_PVE_error: False
13 |
14 | backbone: 'hrnet'
15 | centermap_size: 64
16 | centermap_conf_thresh: 0.2
17 | model_precision: 'fp16'
18 | smpl_mesh_root_align: False
19 |
20 | model_path: trained_models/ROMP_HRNet32_V1.pkl
21 | dataset_rootdir: "/media/yusun/Extreme SSD/dataset" #'/path/to/folder that contains 3DPW dataset'
22 | output_dir: '/media/yusun/Extreme SSD/ROMP/results/' #'/path/to/vibe_db where 3dpw_test_db.pt located'
--------------------------------------------------------------------------------
/configs/eval_3dpw_test.yml:
--------------------------------------------------------------------------------
1 | ARGS:
2 | tab: 'pw3d_test'
3 | eval_dataset: 'pw3d_test'
4 | GPUS: 0
5 | model_version: 1
6 |
7 | val_batch_size: 16
8 | nw_eval: 2
9 | eval: True
10 | collision_aware_centermap: False
11 | collision_factor: 0.2
12 | calc_PVE_error: False
13 |
14 | backbone: 'hrnet'
15 | centermap_size: 64
16 | centermap_conf_thresh: 0.2
17 | model_precision: 'fp32'
18 | smpl_mesh_root_align: False
19 |
20 | model_path: trained_models/ROMP_HRNet32_V1.pkl
21 | dataset_rootdir: '/media/yusun/Extreme SSD/dataset' #'/path/to/dataset/3DPW'
--------------------------------------------------------------------------------
/configs/eval_3dpw_test_ft.yml:
--------------------------------------------------------------------------------
1 | ARGS:
2 | tab: 'pw3d_test'
3 | eval_dataset: 'pw3d_test'
4 | GPUS: 0
5 | model_version: 1
6 |
7 | val_batch_size: 16
8 | nw_eval: 2
9 | eval: True
10 | collision_aware_centermap: False
11 | collision_factor: 0.2
12 | calc_PVE_error: False
13 |
14 | backbone: 'hrnet'
15 | centermap_size: 64
16 | centermap_conf_thresh: 0.2
17 | model_precision: 'fp32'
18 | smpl_mesh_root_align: False
19 |
20 | model_path: trained_models/ROMP_HRNet32_V1_ft_3DPW.pkl
21 | dataset_rootdir: '/media/yusun/Extreme SSD/dataset' #'/path/to/dataset/3DPW'
--------------------------------------------------------------------------------
/configs/eval_3dpw_test_resnet.yml:
--------------------------------------------------------------------------------
1 | ARGS:
2 | tab: 'pw3d_test'
3 | eval_dataset: 'pw3d_test'
4 | GPUS: 0
5 | model_version: 1
6 |
7 | val_batch_size: 32
8 | nw_eval: 2
9 | eval: True
10 | collision_aware_centermap: False
11 | collision_factor: 0.2
12 | calc_PVE_error: False
13 |
14 | backbone: 'resnet'
15 | centermap_size: 64
16 | centermap_conf_thresh: 0.2
17 | model_precision: 'fp32'
18 | smpl_mesh_root_align: True
19 |
20 | model_path: trained_models/ROMP_ResNet50_V1.pkl
21 | dataset_rootdir: '/media/yusun/Extreme SSD/dataset' #'/path/to/dataset/3DPW'
--------------------------------------------------------------------------------
/configs/eval_3dpw_test_resnet_ft.yml:
--------------------------------------------------------------------------------
1 | ARGS:
2 | tab: 'pw3d_test'
3 | eval_dataset: 'pw3d_test'
4 | GPUS: 0
5 | model_version: 1
6 |
7 | val_batch_size: 32
8 | nw_eval: 2
9 | eval: True
10 | collision_aware_centermap: False
11 | collision_factor: 0.2
12 | calc_PVE_error: False
13 |
14 | backbone: 'resnet'
15 | centermap_size: 64
16 | centermap_conf_thresh: 0.2
17 | model_precision: 'fp32'
18 | smpl_mesh_root_align: True
19 |
20 | model_path: trained_models/ROMP_ResNet50_V1_ft_3DPW.pkl
21 | dataset_rootdir: '/media/yusun/Extreme SSD/dataset' #'/path/to/dataset/3DPW'
--------------------------------------------------------------------------------
/configs/eval_cmu_panoptic.yml:
--------------------------------------------------------------------------------
1 | ARGS:
2 | tab: 'cmup'
3 | eval_dataset: 'cmup'
4 | GPUS: 0
5 | model_version: 1
6 |
7 | val_batch_size: 32
8 | nw_eval: 2
9 | eval: True
10 | collision_aware_centermap: False
11 | collision_factor: 0.2
12 | calc_PVE_error: False
13 |
14 | backbone: 'resnet'
15 | centermap_size: 64
16 | centermap_conf_thresh: 0.2
17 | smpl_mesh_root_align: True
18 |
19 | model_path: trained_models/ROMP_ResNet50_V1.pkl
20 | dataset_rootdir: '/media/yusun/Extreme SSD/dataset' #'/path/to/dataset/3DPW'
--------------------------------------------------------------------------------
/configs/eval_crowdpose_test.yml:
--------------------------------------------------------------------------------
1 | ARGS:
2 | tab: 'crowdpose_test'
3 | eval_dataset: 'crowdpose_test'
4 | GPUS: 0
5 | model_version: 1
6 |
7 | val_batch_size: 32
8 | nw_eval: 2
9 | eval: True
10 | collision_aware_centermap: False
11 | collision_factor: 0.2
12 | calc_PVE_error: False
13 |
14 | backbone: 'resnet' #'hrnet'
15 | centermap_size: 64
16 | centermap_conf_thresh: 0.2
17 | smpl_mesh_root_align: True # False
18 |
19 | model_path: trained_models/ROMP_ResNet50_V1.pkl #trained_models/ROMP_HRNet32_V1_CAR.pkl
20 | dataset_rootdir: '/media/yusun/Extreme SSD/dataset' #'/path/to/dataset/3DPW'
--------------------------------------------------------------------------------
/configs/eval_crowdpose_val.yml:
--------------------------------------------------------------------------------
1 | ARGS:
2 | tab: 'crowdpose_val'
3 | eval_dataset: 'crowdpose_val'
4 | GPUS: 0
5 | model_version: 1
6 |
7 | val_batch_size: 32
8 | nw_eval: 2
9 | eval: True
10 | collision_aware_centermap: False
11 | collision_factor: 0.2
12 | calc_PVE_error: False
13 |
14 | backbone: 'resnet' #'hrnet'
15 | centermap_size: 64
16 | centermap_conf_thresh: 0.2
17 | smpl_mesh_root_align: True # False
18 |
19 | model_path: trained_models/ROMP_ResNet50_V1.pkl # trained_models/ROMP_HRNet32_V1_CAR.pkl
20 | dataset_rootdir: '/media/yusun/Extreme SSD/dataset' #'/path/to/dataset/3DPW'
--------------------------------------------------------------------------------
/configs/image.yml:
--------------------------------------------------------------------------------
1 |
2 | ARGS:
3 | tab: 'process_images'
4 | GPUS: 0
5 |
6 | backbone: 'hrnet' #'resnet' #
7 | model_precision: 'fp32' # 'fp16'
8 | val_batch_size: 4
9 | nw: 4
10 | model_path: trained_models/ROMP_HRNet32_V1.pkl # 'trained_models/ROMP_ResNet50_V1.pkl'
11 | save_visualization_on_img: True
12 | smpl_mesh_root_align: False
13 | show_mesh_stand_on_image: False
14 | soi_camera: 'far' # 'close'
15 | interactive_vis: False
16 | renderer: 'pyrender' # 'pyrender' 'pytorch3d'
17 |
18 | # default: run on demo/images and the results would be saved at demo/images_results
19 | # for video demo/videos/Messi_1
20 | inputs: '/path/to/image_folder' #'demo/videos/Messi_1'
21 | collect_subdirs: False # whether to collect images from the sub-folder of the input path.
22 | output_dir: 'demo/image_results'
23 | save_mesh: True
24 | save_centermap: False
25 | save_dict_results: True
26 | mesh_cloth: 'ghostwhite' #'LightCyan'
27 |
--------------------------------------------------------------------------------
/configs/test.yml:
--------------------------------------------------------------------------------
1 | ARGS:
2 | tab: 'pw3d_test_train_state'
3 | # pw3d_test pw3d_pc pw3d_nc pw3d_oc crowdpose_val crowdpose_test cmup MuPoTs runtime
4 | eval_dataset: pw3d_test #'mpiinf_test' 'agora' 'pw3d_test' # mupots pw3d_test cmup pw3d_nc pw3d_oc
5 | test_tracking: False
6 | GPUS: 1,2,3,0
7 | model_version: 1
8 |
9 | val_batch_size: 64
10 | nw_eval: 4
11 | eval: True
12 | collision_factor: 0.2
13 | calc_PVE_error: True
14 | acquire_pa_trans_scale: False
15 |
16 | backbone: 'resnet' # 'hrnet' #
17 | centermap_size: 64
18 | centermap_conf_thresh: 0.2
19 | visualize_all_results: False
20 |
21 | head_block_num: 2
22 | merge_smpl_camera_head: False
23 | fine_tune: True
24 |
25 | model_path: /export/home/suny/CenterMesh/trained_models/resnet_cm64_V1_resnet_vibe_88.79_52.53_h36m,mpiinf,coco,mpii,lsp,muco,crowdpose.pkl
--------------------------------------------------------------------------------
/configs/v1.yml:
--------------------------------------------------------------------------------
1 |
2 | ARGS:
3 | tab: 'V1_hrnet'
4 | dataset: 'h36m,mpiinf,coco,mpii,lsp,muco,crowdpose'
5 | gpu: 0
6 | distributed_training: False
7 | model_version: 1
8 | match_preds_to_gts_for_supervision: True
9 |
10 | master_batch_size: -1
11 | val_batch_size: 16
12 | batch_size: 32
13 | nw: 4
14 | nw_eval: 2
15 | lr: 0.00005
16 |
17 | fine_tune: False
18 | fix_backbone_training_scratch: False
19 | eval: False
20 | supervise_global_rot: False
21 |
22 | model_return_loss: True
23 | collision_aware_centermap: True
24 | collision_factor: 0.2
25 | homogenize_pose_space: True
26 | shuffle_crop_mode: True
27 | shuffle_crop_ratio_2d: 0.1
28 | shuffle_crop_ratio_3d: 0.4
29 |
30 | merge_smpl_camera_head: False
31 | head_block_num: 2
32 |
33 | backbone: 'hrnet'
34 | centermap_size: 64
35 | centermap_conf_thresh: 0.2
36 |
37 | model_path: None
38 |
39 | loss_weight:
40 | MPJPE: 200.
41 | PAMPJPE: 360.
42 | P_KP2D: 400.
43 | Pose: 80.
44 | Shape: 6.
45 | Prior: 1.6
46 | CenterMap: 160.
47 | Cam: 200.
48 |
49 | sample_prob:
50 | h36m: 0.2
51 | mpiinf: 0.16
52 | coco: 0.2
53 | lsp: 0.06
54 | mpii: 0.1
55 | muco: 0.14
56 | crowdpose: 0.14
--------------------------------------------------------------------------------
/configs/v1_hrnet_3dpw_ft.yml:
--------------------------------------------------------------------------------
1 |
2 | ARGS:
3 | tab: 'V1_hrnet_pw3d_ft'
4 | dataset: 'crowdpose,pw3d'
5 | gpu: 0
6 | distributed_training: False
7 | model_version: 1
8 | epoch: 1
9 |
10 | master_batch_size: -1
11 | val_batch_size: 64
12 | batch_size: 64
13 | nw: 6
14 | nw_eval: 2
15 | lr: 0.00005
16 | adjust_lr_factor: 0.2
17 |
18 | fine_tune: True
19 | fix_backbone_training_scratch: False
20 | match_preds_to_gts_for_supervision: True
21 | eval: False
22 | supervise_global_rot: False
23 | calc_PVE_error: True
24 |
25 | model_return_loss: True
26 | collision_aware_centermap: True
27 | collision_factor: 0.2
28 | homogenize_pose_space: False
29 | shuffle_crop_mode: False
30 | shuffle_crop_ratio_2d: 0.1
31 | shuffle_crop_ratio_3d: 0.2
32 |
33 | merge_smpl_camera_head: False
34 | head_block_num: 2
35 |
36 | backbone: 'hrnet'
37 | centermap_size: 64
38 | centermap_conf_thresh: 0.2
39 |
40 | model_path: trained_models/ROMP_HRNet32_V1.pkl
41 |
42 | loss_weight:
43 | MPJPE: 200.
44 | PAMPJPE: 360.
45 | P_KP2D: 400.
46 | Pose: 80.
47 | Shape: 6.
48 | Prior: 1.6
49 | CenterMap: 160.
50 | Cam: 200.
51 |
52 | sample_prob:
53 | crowdpose: 0.2
54 | pw3d: 0.8
--------------------------------------------------------------------------------
/configs/v1_resnet.yml:
--------------------------------------------------------------------------------
1 |
2 | ARGS:
3 | tab: 'V1_resnet'
4 | dataset: 'h36m,mpiinf,coco,mpii,lsp,muco,crowdpose'
5 | gpu: 0
6 | distributed_training: False
7 | model_version: 1
8 | match_preds_to_gts_for_supervision: True
9 |
10 | master_batch_size: -1
11 | val_batch_size: 16
12 | batch_size: 32
13 | nw: 4
14 | nw_eval: 2
15 | lr: 0.00005
16 |
17 | fine_tune: False
18 | fix_backbone_training_scratch: False
19 | eval: False
20 | supervise_global_rot: False
21 |
22 | model_return_loss: True
23 | collision_aware_centermap: True
24 | collision_factor: 0.2
25 | homogenize_pose_space: True
26 | shuffle_crop_mode: True
27 | shuffle_crop_ratio_2d: 0.1
28 | shuffle_crop_ratio_3d: 0.4
29 |
30 | merge_smpl_camera_head: False
31 | head_block_num: 2
32 |
33 | backbone: 'resnet'
34 | centermap_size: 64
35 | centermap_conf_thresh: 0.2
36 |
37 | model_path: None
38 |
39 | loss_weight:
40 | MPJPE: 200.
41 | PAMPJPE: 360.
42 | P_KP2D: 400.
43 | Pose: 80.
44 | Shape: 6.
45 | Prior: 1.6
46 | CenterMap: 160.
47 | Cam: 200.
48 |
49 | sample_prob:
50 | h36m: 0.2
51 | mpiinf: 0.16
52 | coco: 0.2
53 | lsp: 0.06
54 | mpii: 0.1
55 | muco: 0.14
56 | crowdpose: 0.14
--------------------------------------------------------------------------------
/configs/v1_resnet_3dpw_ft.yml:
--------------------------------------------------------------------------------
1 |
2 | ARGS:
3 | tab: 'V1_resnet_pw3d_ft'
4 | dataset: 'crowdpose,pw3d'
5 | gpu: 0
6 | distributed_training: False
7 | model_version: 1
8 | epoch: 1
9 |
10 | master_batch_size: -1
11 | val_batch_size: 64
12 | batch_size: 64
13 | nw: 6
14 | nw_eval: 2
15 | lr: 0.00005
16 | adjust_lr_factor: 0.2
17 |
18 | fine_tune: True
19 | fix_backbone_training_scratch: False
20 | match_preds_to_gts_for_supervision: True
21 | eval: False
22 | supervise_global_rot: False
23 | calc_PVE_error: True
24 |
25 | model_return_loss: True
26 | collision_aware_centermap: True
27 | collision_factor: 0.2
28 | homogenize_pose_space: False
29 | shuffle_crop_mode: False
30 | shuffle_crop_ratio_2d: 0.1
31 | shuffle_crop_ratio_3d: 0.2
32 |
33 | merge_smpl_camera_head: False
34 | head_block_num: 2
35 |
36 | backbone: 'resnet'
37 | centermap_size: 64
38 | centermap_conf_thresh: 0.2
39 |
40 | model_path: trained_models/ROMP_ResNet50_V1.pkl
41 |
42 | loss_weight:
43 | MPJPE: 200.
44 | PAMPJPE: 360.
45 | P_KP2D: 400.
46 | Pose: 80.
47 | Shape: 6.
48 | Prior: 1.6
49 | CenterMap: 160.
50 | Cam: 200.
51 |
52 | sample_prob:
53 | crowdpose: 0.2
54 | pw3d: 0.8
--------------------------------------------------------------------------------
/configs/v6_ft.yml:
--------------------------------------------------------------------------------
1 | ARGS:
2 | tab: 'V6_hrnet_relative_train'
3 | dataset: h36m,muco,coco,lsp,agora,pw3d,relative_human,posetrack
4 | eval_datasets: 'relative,agora'
5 | gpu: 0
6 | distributed_training: False
7 | model_version: 6
8 | centermap_size: 128
9 | model_return_loss: True
10 | image_loading_mode: 'image_relative'
11 |
12 | fine_tune: True
13 | new_training: False
14 | new_training_iters: 600
15 | fast_eval_iter: 100
16 | print_freq: 50
17 |
18 | learn_relative: True
19 | learn_relative_age: True
20 | learn_relative_depth: True
21 | depth_loss_type: 'Piecewise'
22 | add_depth_encoding: True
23 |
24 | master_batch_size: -1
25 | val_batch_size: 16
26 | batch_size: 32
27 | nw: 4
28 | nw_eval: 2
29 | lr: 0.00005
30 |
31 | cam_dim: 3
32 | head_block_num: 1
33 | perspective_proj: True
34 | max_supervise_num: 64
35 |
36 | model_path: trained_models/BEV_HRNet32_V6.pkl
37 |
38 | match_preds_to_gts_for_supervision: True
39 | fix_backbone_training_scratch: False
40 | eval: False
41 | supervise_global_rot: False # agora
42 | supervise_cam_params: True
43 |
44 | collision_aware_centermap: True
45 | homogenize_pose_space: True
46 | shuffle_crop_ratio_2d: 0.6
47 | shuffle_crop_ratio_3d: 0.6
48 | Synthetic_occlusion_ratio: 0
49 |
50 | backbone: 'hrnet' # 'resnet' #
51 | centermap_conf_thresh: 0.12 #0.2
52 |
53 | loss_weight:
54 | MPJPE: 200.
55 | PAMPJPE: 360.
56 | P_KP2D: 400.
57 | Pose: 80.
58 | Shape: 60.
59 | Prior: 1.6
60 | KP2D: 10.
61 | CenterMap: 100.
62 | CenterMap_3D: 1000.
63 | Cam: 200.
64 | R_Age: 4000.
65 | R_Depth: 400.
66 |
67 | sample_prob:
68 | h36m: 0.14
69 | muco: 0.14
70 | coco: 0.12
71 | lsp: 0.06
72 | agora: 0.14
73 | pw3d: 0.14
74 | relative_human: 0.16
75 | posetrack: 0.1
--------------------------------------------------------------------------------
/configs/v6_train.yml:
--------------------------------------------------------------------------------
1 | ARGS:
2 | tab: 'V6_hrnet_relative_train'
3 | dataset: h36m,muco,coco,lsp,agora,pw3d,relative_human,posetrack
4 | eval_datasets: 'relative,agora'
5 | gpu: 0
6 | distributed_training: False
7 | model_version: 6
8 | centermap_size: 128
9 | model_return_loss: True
10 | rotate_prob: 0.2
11 | image_loading_mode: 'image_relative'
12 |
13 | fine_tune: False
14 | new_training: True
15 | new_training_iters: 600
16 | fast_eval_iter: 2000
17 | print_freq: 50
18 |
19 | learn_relative: True
20 | learn_relative_age: True
21 | learn_relative_depth: True
22 | depth_loss_type: 'Piecewise'
23 | add_depth_encoding: True
24 |
25 | master_batch_size: -1
26 | val_batch_size: 16
27 | batch_size: 32 # 64
28 | nw: 4
29 | nw_eval: 2
30 | lr: 0.00005
31 |
32 | cam_dim: 3
33 | head_block_num: 1
34 | perspective_proj: True
35 | max_supervise_num: 64
36 |
37 | model_path: None
38 |
39 | match_preds_to_gts_for_supervision: True
40 | fix_backbone_training_scratch: False
41 | eval: False
42 | supervise_global_rot: False # agora
43 | supervise_cam_params: True
44 |
45 | collision_aware_centermap: True
46 | homogenize_pose_space: True
47 | shuffle_crop_ratio_2d: 0.6
48 | shuffle_crop_ratio_3d: 0.6
49 | Synthetic_occlusion_ratio: 0
50 |
51 | backbone: 'hrnet' # 'resnet' #
52 | centermap_conf_thresh: 0.12 #0.2
53 |
54 | loss_weight:
55 | MPJPE: 200.
56 | PAMPJPE: 360.
57 | P_KP2D: 400.
58 | Pose: 80.
59 | Shape: 60.
60 | Prior: 1.6
61 | KP2D: 10.
62 | CenterMap: 100.
63 | CenterMap_3D: 1000.
64 | Cam: 200.
65 | R_Age: 4000.
66 | R_Depth: 400.
67 |
68 | sample_prob:
69 | h36m: 0.14
70 | muco: 0.14
71 | coco: 0.12
72 | lsp: 0.06
73 | agora: 0.14
74 | pw3d: 0.14
75 | relative_human: 0.16
76 | posetrack: 0.1
--------------------------------------------------------------------------------
/configs/video-batch.yml:
--------------------------------------------------------------------------------
1 |
2 | ARGS:
3 | tab: 'test'
4 | GPUS: 0
5 | multi_person: True
6 | kernel_sizes: [5]
7 | use_coordmaps: True
8 | fine_tune: True
9 | eval: False
10 |
11 | backbone: 'hrnet'
12 | input_size: 512
13 | centermap_size: 64
14 | model_precision: 'fp32'
15 | val_batch_size: 1
16 | nw: 4
17 | model_path: trained_models/ROMP_HRNet32_V1.pkl # '../trained_models/ROMP_ResNet50_V1.pkl'
18 | smpl_mesh_root_align: False
19 |
20 | video_or_frame: True
21 | input_video_path: '$FPATH' # None
22 | output_dir: '$OUTPATH'
23 | webcam_mesh_color: 'ghostwhite' #'LightCyan'
24 | save_mesh: False
25 | save_centermap: False
26 | save_dict_results: True
27 | save_video_results: True
28 | webcam: False
29 | fps_save: 30
30 | multiprocess: False
31 |
--------------------------------------------------------------------------------
/configs/video.yml:
--------------------------------------------------------------------------------
1 |
2 | ARGS:
3 | tab: 'process_video'
4 | gpu: 0
5 |
6 | backbone: 'hrnet'
7 | model_precision: 'fp32'
8 | val_batch_size: 8
9 | nw: 4
10 | model_path: trained_models/ROMP_HRNet32_V1.pkl # '../trained_models/ROMP_ResNet50_V1.pkl'
11 | smpl_mesh_root_align: False
12 | centermap_conf_thresh: 0.25
13 |
14 | make_tracking: False
15 | temporal_optimization: False
16 | smooth_coeff: 4.
17 | show_largest_person_only: False
18 | show_mesh_stand_on_image: False
19 | soi_camera: 'far' # 'close' # 'far'
20 | renderer: 'pyrender' # 'pyrender' 'pytorch3d'
21 |
22 | inputs: 'demo/videos/sample_video.mp4' # None
23 | output_dir: 'demo/sample_video_results/'
24 | save_mesh: False
25 | save_centermap: False
26 | save_dict_results: True
27 | save_visualization_on_img: True
28 | fps_save: 24
29 | mesh_cloth: 'ghostwhite' #'LightCyan'
--------------------------------------------------------------------------------
/configs/webcam.yml:
--------------------------------------------------------------------------------
1 |
2 | ARGS:
3 | tab: 'webcam'
4 | gpu: 0
5 |
6 | backbone: 'hrnet'
7 | model_precision: 'fp32'
8 | val_batch_size: 1
9 | model_path: trained_models/ROMP_HRNet32_V1.pkl # '../trained_models/ROMP_ResNet50_V1.pkl'
10 | save_mesh: False
11 | save_centermap: False
12 | save_dict_results: False
13 | smpl_mesh_root_align: False
14 | save_visualization_on_img: False
15 | centermap_conf_thresh: 0.25
16 |
17 | make_tracking: True
18 | temporal_optimization: False
19 | smooth_coeff: 4.
20 | show_largest_person_only: False
21 | visulize_platform: 'integrated' # 'blender' #
22 | tracker: 'norfair'
23 | tracking_target: 'centers' # 'keypoints' #
24 | add_trans: True
25 | FOV: 60
26 |
27 | webcam: True
28 | cam_id: 0
29 | multiprocess: False
30 | run_on_remote_server: False
31 | server_ip: 'localhost'
32 | server_port: 10086
33 |
34 | # for picking up a specific cloth from the wardrobe in model_data, please refer to romp/lib/constants.py
35 | # 'ghostwhite'/'LightCyan' for using the single color texture.
36 | mesh_cloth: '001' # 'random' #
37 | character: 'smpl' # 'nvxia' #
--------------------------------------------------------------------------------
/configs/webcam_blender.yml:
--------------------------------------------------------------------------------
1 |
2 | ARGS:
3 | tab: 'webcam'
4 | gpu: 0
5 |
6 | backbone: 'hrnet'
7 | model_precision: 'fp32'
8 | val_batch_size: 1
9 | model_path: trained_models/ROMP_HRNet32_V1.pkl # '../trained_models/ROMP_ResNet50_V1.pkl'
10 | save_mesh: False
11 | save_centermap: False
12 | save_dict_results: False
13 | smpl_mesh_root_align: False
14 | save_visualization_on_img: False
15 | centermap_conf_thresh: 0.25
16 |
17 | make_tracking: True
18 | temporal_optimization: True
19 | smooth_coeff: 4.
20 | show_largest_person_only: True
21 | visulize_platform: 'blender' # 'integrated' #
22 | tracker: 'norfair'
23 | tracking_target: 'centers' # 'keypoints' #
24 | add_trans: True
25 | FOV: 60
26 |
27 | webcam: True
28 | cam_id: 0
29 | multiprocess: False
30 | run_on_remote_server: False
31 | server_ip: 'localhost'
32 | server_port: 10086
33 |
34 | # for picking up a specific cloth from the wardrobe in model_data, please refer to romp/lib/constants.py
35 | # 'ghostwhite'/'LightCyan' for using the single color texture.
36 | mesh_cloth: '001' # 'random' #
37 | character: 'smpl' # 'nvxia' #
38 | nvxia_model_path: 'model_data/characters/nvxia'
--------------------------------------------------------------------------------
/demo/images/3dpw_sit_on_street.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Arthur151/ROMP/a8558aed480af850756f84e2a7c787e359bddbd0/demo/images/3dpw_sit_on_street.jpg
--------------------------------------------------------------------------------
/demo/images/Cristiano_Ronaldo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Arthur151/ROMP/a8558aed480af850756f84e2a7c787e359bddbd0/demo/images/Cristiano_Ronaldo.jpg
--------------------------------------------------------------------------------
/demo/images/Leo_Messi.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Arthur151/ROMP/a8558aed480af850756f84e2a7c787e359bddbd0/demo/images/Leo_Messi.jpg
--------------------------------------------------------------------------------
/docs/basic_installation.md:
--------------------------------------------------------------------------------
1 | ## Basic Installation
2 |
3 | To run the demo code, please install via
4 | ```
5 | pip install simple-romp
6 | ```
7 |
8 | ### (Optional) Install Pytorch GPU
9 |
10 | If you have a GPU on your computer, we recommend installing the [Pytorch](https://pytorch.org/) GPU version for real-time inference.
11 |
12 | Here we give an example of installing pytorch 1.10.0.
13 |
14 | #### 1. Install [Pytorch](https://pytorch.org/).
15 | Please choose one of the following 4 options to install Pytorch via [conda](https://docs.conda.io/en/latest/miniconda.html) or [pip](https://pip.pypa.io/en/stable).
16 | Here, we support installation with Python 3.9, 3.8, or 3.7.
17 | We recommend installing via conda (Options 1-3) so that the ROMP env stays clean and will not affect other repos.
18 |
19 | ##### Option 1) to install conda env with python 3.9, please run
20 | ```
21 | conda create -n ROMP python=3.9
22 | conda activate ROMP
23 | conda install -n ROMP pytorch==1.10.0 torchvision==0.11.1 cudatoolkit=10.2 -c pytorch
24 | ```
25 | ##### Option 2) to install conda env with python 3.8, please run
26 | ```
27 | conda create -n ROMP python==3.8.8
28 | conda activate ROMP
29 | conda install -n ROMP pytorch==1.10.0 torchvision==0.11.1 cudatoolkit=10.2 -c pytorch
30 | ```
31 |
32 | ##### Option 3) to install conda env with python 3.7, please run
33 | ```
34 | conda create -n ROMP python==3.7.6
35 | conda activate ROMP
36 | conda install -n ROMP pytorch==1.10.0 torchvision==0.11.1 cudatoolkit=10.2 -c pytorch
37 | ```
38 |
39 | ##### Option 4) install via pip
40 | To directly install via pip, you need to install CUDA 10.2 first (for Ubuntu, run `sudo apt-get install cuda-10-2`).
41 | Then install pytorch via:
42 | ```
43 | pip install torch==1.10.0+cu102 torchvision==0.11.1+cu102 -f https://download.pytorch.org/whl/torch_stable.html
44 | ```
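45 |
46 | To quickly confirm that the GPU build is being used, a minimal check (assuming one of the PyTorch installs above succeeded) is:
47 | ```python
48 | import torch
49 |
50 | # Expected on a working CUDA 10.2 setup: "1.10.0 True"
51 | print(torch.__version__, torch.cuda.is_available())
52 | ```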
--------------------------------------------------------------------------------
/docs/bev_evaluation.md:
--------------------------------------------------------------------------------
1 | ## BEV Benchmark Evaluation
2 |
3 | The evaluation code of BEV is integrated into the `evaluation` folder of `simple-romp`.
4 |
5 | To run the evaluation, please first install simple-romp following [the guidance](../simple_romp/README.md).
6 | To prepare the evaluation datasets, please refer to [dataset.md](../docs/dataset.md).
7 | In each evaluation code, please set the dataset path and `output_save_dir` for saving the predictions.
8 |
9 | ### Relative Human
10 |
11 | Please properly set the path in `simple_romp/evaluation/eval_Relative_Human.py` and then run
12 | ```
13 | cd simple_romp/evaluation
14 | python eval_Relative_Human.py
15 | ```
16 | You can also download the [predictions](https://github.com/Arthur151/Relative_Human/releases/download/Predictions/all_results.zip).
17 |
18 | ### AGORA
19 |
20 | Please properly set the path in `simple_romp/evaluation/eval_AGORA.py` and then run
21 | ```
22 | cd simple_romp/evaluation
23 | python eval_AGORA.py
24 | ```
25 |
26 | ### CMU Panoptic
27 |
28 | Please properly set the path in `simple_romp/evaluation/eval_cmu_panoptic.py` and then run
29 | ```
30 | cd simple_romp/evaluation
31 | python eval_cmu_panoptic.py
32 | ```
33 | You can also download the [predictions](https://github.com/Arthur151/ROMP/releases/download/V2.1/cmu_panoptic_predictions.npz) and then set the results path in the `evaluation_results` function to get the metric numbers.
34 |
--------------------------------------------------------------------------------
/docs/bugs.md:
--------------------------------------------------------------------------------
1 | ## Bugs
2 |
3 | ### libstdc++.so.6: version 'GLIBCXX_3.4.21' not found
4 | ```bash
5 | sudo find / -name libstdc++.so.6*
6 | # find the correct lib in the search results,
7 | # e.g. /export/home/suny/anaconda3/lib/libstdc++.so.6.0.26 in my system
8 | # set the link to this lib
9 | sudo rm -rf /usr/lib64/libstdc++.so.6
10 | sudo ln -s /export/home/suny/anaconda3/lib/libstdc++.so.6.0.26 /usr/lib64/libstdc++.so.6
11 | ```
12 |
13 | ### Visualization bug: OpenGL.error.GLError: GLError(err = 12289,baseOperation = eglMakeCurrent ....
14 |
15 | The bug of pyrender is really a pain in the ass...
16 | ```bash
17 | # for centos
18 | sudo yum install libXext libSM libXrender freeglut-devel
19 | # for ubuntu: use pyrender in OSMesa mode
20 | sudo apt update
21 | sudo apt-get install libsm6 libxrender1 libfontconfig1 freeglut3-dev
22 | sudo apt --fix-broken install
23 | sudo wget https://github.com/mmatl/travis_debs/raw/master/xenial/mesa_18.3.3-0.deb
24 | sudo dpkg -i ./mesa_18.3.3-0.deb || true
25 | sudo apt install -f
26 | git clone https://github.com/mmatl/pyopengl.git
27 | pip install ./pyopengl
28 | ```
--------------------------------------------------------------------------------
/docs/contributor.md:
--------------------------------------------------------------------------------
1 | ## Contributors
2 |
3 | This repository is currently maintained by [Yu Sun](https://github.com/Arthur151).
4 |
5 | We thank [Peng Cheng](https://github.com/CPFLAME) for his constructive comments on Center map training.
6 |
7 | ROMP has also benefited from many developers, including
8 | - [Marco Musy](https://github.com/marcomusy) : help with [the textured SMPL visualization](https://github.com/marcomusy/vedo/issues/371).
9 | - [Gavin Gray](https://github.com/gngdb) : adding support for an elegant context manager to run code in a notebook.
10 | - [VLT Media](https://github.com/vltmedia) and [Vivien Richter](https://github.com/vivi90) : adding support for running on Windows & batch_videos.py.
11 | - [Chuanhang Yan](https://github.com/yanch2116) : developing an [addon for driving characters in Blender](https://github.com/yanch2116/Blender-addons-for-SMPL).
12 | - [Tian Jin](https://github.com/jinfagang): help with the simplified smpl and fast rendering ([realrender](https://pypi.org/project/realrender/)).
13 | - [ZhengdiYu](https://github.com/ZhengdiYu) : helpful discussion on optimizing the implementation details.
14 | - [Ali Yaghoubian](https://github.com/AliYqb) : adding a Dockerfile for simple-romp.
15 |
16 |
17 | ## Related repos
18 |
19 | Here are some great resources we benefit from:
20 |
21 | - SMPL models and layers are borrowed from the MPII [SMPL-X model](https://github.com/vchoutas/smplx).
22 | - Some functions are borrowed from [HMR-pytorch](https://github.com/MandyMo/pytorch_HMR) and [SPIN](https://github.com/nkolot/SPIN).
23 | - The evaluation code and GT annotations of the 3DPW dataset are taken from [3dpw-eval](https://github.com/aymenmir1/3dpw-eval) and [VIBE](https://github.com/mkocabas/VIBE).
24 | - 3D mesh visualization is supported by [vedo](https://github.com/marcomusy/vedo), [EasyMocap](https://github.com/zju3dv/EasyMocap), [minimal-hand](https://github.com/CalciferZh/minimal-hand), [Open3D](https://github.com/intel-isl/Open3D), and [Pyrender](https://github.com/mmatl/pyrender).
25 |
--------------------------------------------------------------------------------
/docs/docker.md:
--------------------------------------------------------------------------------
1 | ## Docker usage
2 |
3 | ```
4 | # Build
5 | docker build --rm -t romp .
6 | # Inference
7 | docker run --privileged --rm -it --gpus 0 --ipc=host -p 8888:8888 -v /tmp/.X11-unix:/tmp/.X11-unix -v $(pwd):/workspace/results --device /dev/video0 -e DISPLAY=$DISPLAY romp --mode=webcam
8 | ```
--------------------------------------------------------------------------------
/docs/export.md:
--------------------------------------------------------------------------------
1 | ## Export
2 |
3 | Currently, this function only supports the single-person video case. Therefore, please test it with `demo/videos/sample_video2_results/sample_video2.mp4`, whose results will be saved to `demo/videos/sample_video2_results`.
4 |
5 | #### Blender Addons
6 | [Chuanhang Yan](https://github.com/yanch2116) : developing an [addon for driving character in Blender](https://github.com/yanch2116/Blender-addons-for-SMPL).
7 | [VLT Media](https://github.com/vltmedia) creates a [QuickMocap-BlenderAddon](https://github.com/vltmedia/QuickMocap-BlenderAddon) to read the .npz file created by ROMP. Clean & smooth the resulting keyframes.
8 |
9 | ### Blender character animation
10 |
11 | 1. Download the [BlenderAddon](https://github.com/yanch2116/LiveMocap-BlenderAddon) and install [Blender](https://www.blender.org/).
12 | 2. Install the Addon in Blender:
13 | Edit -> Preferences -> Add-ons -> install -> select ROMP/romp/exports/blender_mocap.py
14 | Click to activate the 'Real Time Mocap' add-on.
15 | 3. Run the ROMP webcam demo code:
16 | ```
17 | cd ROMP
18 | sh scripts/webcam_blender.sh
19 | ```
20 |
21 | ### Export the results to fbx
22 |
23 |
24 |
25 |
26 |
27 | Currently, this function can only export the motion of a single person at a time. Therefore, please test it with `demo/videos/sample_video2_results/sample_video2.mp4`, whose results will be saved to `demo/videos/sample_video2_results`.
28 |
29 | 1. Prepare the data:
30 |
31 | Please register at [this link](https://smpl.is.tue.mpg.de/) and download the SMPL_unity_v.1.0.0.zip from SMPL for Unity.
32 |
33 | Then set the paths of the downloaded files in [convert_fbx.py](../export/convert_fbx.py). For example,
34 |
35 | ```
36 | male_model_path = '/home/yusun/Desktop/unity/SMPL_unity_v.1.0.0/smpl/Models/SMPL_m_unityDoubleBlends_lbs_10_scale5_207_v1.0.0.fbx'
37 | female_model_path = '/home/yusun/Desktop/unity/SMPL_unity_v.1.0.0/smpl/Models/SMPL_f_unityDoubleBlends_lbs_10_scale5_207_v1.0.0.fbx'
38 | ```
39 | You can also choose the gender of the animated model by setting `gender=male` or `gender=female`.
40 |
41 | 2. Install Blender:
42 |
43 | Generally, Blender 2.91 can be installed following [these instructions](https://github.com/TylerGubala/blenderpy) via:
44 | ```pip install bpy && bpy_post_install```
45 |
46 | If you use python 3.7, bpy can be easily installed via
47 | ```
48 | pip install https://github.com/TylerGubala/blenderpy/releases/download/v2.91a0/bpy-2.91a0-cp37-cp37m-manylinux2014_x86_64.whl && bpy_post_install
49 | ```
50 |
51 | 3. Run the code:
52 |
53 | ```
54 | cd ROMP/
55 | # on Linux
56 | python export/convert_fbx.py --input=demo/videos/sample_video2_results/sample_video2_results.npz --output=demo/videos/sample_video2.fbx --gender=female
57 | # on Windows
58 | python export\convert_fbx.py --input=demo\videos\sample_video2_results\sample_video2_results.npz --output=demo\videos\sample_video2.fbx --gender=female
59 | ```
60 |
61 | 4. Open the fbx animation in Blender:
62 |
63 | File -> Import -> FBX(.fbx)
64 |
65 | Now, you can display the estimated animation in Blender by pressing the play button at the bottom.
66 |
67 |
--------------------------------------------------------------------------------
/docs/features.md:
--------------------------------------------------------------------------------
1 | ## Features
2 | - Running the examples on [Google Colab](https://colab.research.google.com/drive/1oz9E6uIbj4udOPZvA1Zi9pFx0SWH_UXg).
3 | - Real-time online multi-person webcam demo for driving a textured SMPL model. We also provide a wardrobe for changing clothes.
4 | - Batch processing images/videos via the command line / jupyter notebook / calling ROMP as a python lib (see the sketch below).
5 | - Exporting the captured single-person motion to an FBX file for Blender/Unity usage.
6 | - Training and evaluation for re-implementing the results presented in the paper.
7 | - Convenient API for 2D / 3D visualization and parsed datasets.
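8 |
9 | For the "calling ROMP as a python lib" workflow, a rough sketch of library usage is given below. It assumes the `simple-romp` package is installed; the entry points (`romp.main.default_settings`, `romp.ROMP`) follow the simple_romp README and should be verified against the installed version.
10 |
11 | ```python
12 | import cv2
13 | import romp  # provided by `pip install simple-romp`
14 |
15 | # Sketch only: start from the default settings Namespace and adjust as needed.
16 | settings = romp.main.default_settings
17 | settings.mode = 'image'        # 'image', 'video', or 'webcam'
18 | settings.render_mesh = True    # overlay the estimated meshes on the input
19 |
20 | model = romp.ROMP(settings)
21 | outputs = model(cv2.imread('demo/images/Leo_Messi.jpg'))  # BGR image, as read by cv2
22 | print(outputs.keys())          # SMPL parameters, joints, camera, etc.
23 | ```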
--------------------------------------------------------------------------------
/docs/trace_dataset.md:
--------------------------------------------------------------------------------
1 | # Datasets for TRACE's evaluation & training
2 |
3 | To facilitate the evaluation and training of TRACE, this document describes how to prepare the related datasets.
4 |
5 | ## Evaluation datasets
6 |
7 | We employ 4 main datasets () and 2 additional datasets for benchmark evaluation.
8 |
--------------------------------------------------------------------------------
/docs/updates.md:
--------------------------------------------------------------------------------
1 | ## Update logs
2 | *2022/03/27: [Relative Human dataset](https://github.com/Arthur151/Relative_Human) has been released.*
3 | *2022/03/18: Simple version of ROMP for all platforms. See the [guidance](https://github.com/Arthur151/ROMP/blob/master/simple_romp/README.md) for details.*
4 | *2022/02/11: Support running on Windows.*
5 | *2021/12/23: Add [Training guidance](https://github.com/Arthur151/ROMP/blob/master/docs/train.md#training-guidance).*
6 | *2021/12/20: Upgrade the Pytorch to 1.10.0, Pytorch3D to 0.6.1.*
7 | *2021/12/2: Add optional renderers (pyrender or pytorch3D). Fix some bugs reported in issues.*
8 | ✨✨*2021/10/10: V1.1 released, including multi-person webcam, webcam temporal optimization, live blender character animation, interactive visualization.* Let's try!
9 | *2021/7/15: Adding support for an elegant context manager to run code in a notebook.* See [Colab demo](https://colab.research.google.com/drive/1oz9E6uIbj4udOPZvA1Zi9pFx0SWH_UXg) for the details.
10 | *2021/4/19: Adding support for textured SMPL mesh using [vedo](https://github.com/marcomusy/vedo).* See [visualization.md](docs/visualization.md) for the details.
11 | *2021/3/30: 1.0 version.* Rebuilding the code. Release the ResNet-50 version and evaluation on 3DPW.
12 | *2020/11/26: Optimization for person-person occlusion.* Small changes for video support.
13 | *2020/9/11: Real-time webcam demo using local/remote server.*
14 | *2020/9/4: Google Colab demo.* Saving an npy file per image.
15 | *2021/9/13: Low FPS / args parsing bugs are fixed. Support calling as a python lib.*
16 | *2021/9/10: Training code release. API optimization.*
17 |
--------------------------------------------------------------------------------
/docs/visualization.md:
--------------------------------------------------------------------------------
1 | ### Textured SMPL visualization
2 |
3 | 1. Prepare the data:
4 |
5 | Please make sure you have downloaded the latest released ROMP data for textured SMPL visualization.
6 |
7 | The layout of smpl data is:
8 | - model_data
9 | - parameters
10 | - wardrobe
11 |
12 | 2. Change the config:
13 |
14 | Edit the config file, i.e. [configs/webcam.yml](../configs/webcam.yml).
15 |
16 | ```
17 | # for picking up a specific cloth from the wardrobe in model_data, please refer to romp/lib/constants.py
18 | # 'ghostwhite'/'LightCyan' for using the single color texture.
19 | mesh_cloth: '031' # '031' # 'random'
20 | ```
21 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | setuptools==59.5.*
2 | opencv-python
3 | opencv-contrib-python
4 | prettytable
5 | tqdm
6 | yacs
7 | torchfile
8 | plyfile
9 | munkres
10 | chumpy
11 | trimesh
12 | shyaml
13 | PyYAML==5.4.1
14 | numpy==1.22.0
15 | numpy-quaternion
16 | pygame
17 | keyboard
18 | transforms3d
19 | bvhtoolbox
20 | vedo
21 | imgaug==0.4.0
22 | lap
23 | tensorboard
24 | smplx
25 | pycocotools
26 | plotly
27 | open3d
28 | addict
29 | loguru
30 | norfair
31 | h5py
32 | imageio-ffmpeg
33 | pyrender
34 | joblib
35 | pandas
36 |
--------------------------------------------------------------------------------
/romp/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Arthur151/ROMP/a8558aed480af850756f84e2a7c787e359bddbd0/romp/.DS_Store
--------------------------------------------------------------------------------
/romp/__init__.py:
--------------------------------------------------------------------------------
1 | from . import _init_paths_
--------------------------------------------------------------------------------
/romp/_init_paths_.py:
--------------------------------------------------------------------------------
1 | import os.path as osp
2 | import sys
3 |
4 | def add_path(path):
5 | if path not in sys.path:
6 | sys.path.insert(0, path)
7 |
8 | this_dir = osp.dirname(__file__)
9 |
10 | # Add lib to PYTHONPATH
11 | lib_path = osp.join(this_dir, 'lib')
12 | pred_path = osp.join(this_dir, 'predict')
13 | add_path(lib_path)
14 | add_path(pred_path)
--------------------------------------------------------------------------------
/romp/lib/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Arthur151/ROMP/a8558aed480af850756f84e2a7c787e359bddbd0/romp/lib/.DS_Store
--------------------------------------------------------------------------------
/romp/lib/dataset/__init__.py:
--------------------------------------------------------------------------------
1 | import sys, os
2 | lib_dir = os.path.dirname(__file__)
3 | root_dir = os.path.join(lib_dir.replace(os.path.basename(lib_dir),''))
4 | if root_dir not in sys.path:
5 | sys.path.insert(0, root_dir)
--------------------------------------------------------------------------------
/romp/lib/dataset/base.py:
--------------------------------------------------------------------------------
1 | from dataset.image_base import Image_base, test_image_dataset
2 | from dataset.image_base_relative import Image_base_relative, test_image_relative_dataset
3 | from config import args
4 |
5 | Base_Classes = {'image': Image_base, 'image_relative': Image_base_relative}
6 |
7 | Test_Funcs = {'image': test_image_dataset, 'image_relative': test_image_relative_dataset}
--------------------------------------------------------------------------------
/romp/lib/dataset/mpi_inf_3dhp_validation.py:
--------------------------------------------------------------------------------
1 | from dataset.mpi_inf_3dhp import MPI_INF_3DHP
2 | from dataset.image_base import *
3 | from dataset.base import Base_Classes, Test_Funcs
4 |
5 | default_mode = args().image_loading_mode
6 |
7 | def MPI_INF_3DHP_VALIDATION(base_class=default_mode):
8 | class MPI_INF_3DHP_VALIDATION(MPI_INF_3DHP(Base_Classes[base_class])):
9 | def __init__(self,train_flag=False, validation=True, **kwargs):
10 | super(MPI_INF_3DHP_VALIDATION,self).__init__(train_flag=train_flag, validation=validation)
11 | return MPI_INF_3DHP_VALIDATION
12 | if __name__ == '__main__':
13 | dataset=MPI_INF_3DHP_VALIDATION(base_class=default_mode)()
14 | Test_Funcs[default_mode](dataset,with_smpl=True)
15 | print('Done')
16 |
--------------------------------------------------------------------------------
/romp/lib/dataset/preprocess/h36m_extract_frames.py:
--------------------------------------------------------------------------------
1 | import h5py
2 | import sys
3 | import os
4 | import cv2
5 | import numpy as np
6 | import glob
7 | import pickle
8 | import sys
9 |
10 | subject_list = ['S1', 'S5', 'S6', 'S7', 'S8', 'S9', 'S11']
11 | cam_dict = {'54138969': 0, '55011271': 1, '58860488': 2, '60457274': 3}
12 |
13 | def extract_imgs(subject_id, src_folder, out_folder):
14 | video_files = glob.glob(os.path.join(src_folder, subject_id, 'Videos', '*.mp4'))
15 | for video_file in video_files:
16 | if "_ALL" in video_file:
17 | continue
18 | print("video_file", video_file)
19 | video_name = os.path.basename(video_file)
20 | action_name, cam_str, _ = video_name.split('.')
21 | cam_id = cam_dict[cam_str]
22 | target_name = os.path.join(out_folder,'{}_{}_{}'.format(subject_id, action_name, cam_id))
23 | print("target_name ", target_name)
24 | print("video_file", video_file)
25 | cap = cv2.VideoCapture(video_file)
26 | frame_dex = -1
27 | dex = 0
28 | frame_num = 0 #
29 | while (1):
30 | frame_dex = frame_dex + 1
31 | ret, frame = cap.read()
32 | if frame_dex % 5 != 0:
33 | continue
34 | if frame_dex == 0:
35 | continue
36 | if ret:
37 | cv2.imwrite(target_name + '_' + str(dex) + '.jpg', frame)
38 | print("target_name ", target_name + '_' + str(dex) + '.jpg')
39 | dex = dex + 1
40 | if dex > 20:
41 | break
42 | else:
43 | print("video_file end", video_file)
44 | break
45 | cap.release()
46 | return 1
47 |
48 |
49 | def main():
50 |     assert len(sys.argv) == 3, 'please run: python h36m_extract_frames.py h36m_video_path image_save_path'
51 |     # set the paths to the Human3.6M video folder (archives) and the output image folder here
52 |     src_folder = sys.argv[1] #"archives" # archives/S1/Videos/Directions 1.54138969.mp4 .....
53 |     out_folder = sys.argv[2] #"images"
54 | os.makedirs(out_folder, exist_ok=True)
55 | for subject_id in subject_list:
56 | print('Processing {}'.format(subject_id))
57 | extract_imgs(subject_id, src_folder, out_folder)
58 |
59 |
60 | if __name__ == '__main__':
61 | main()
62 |
63 |
--------------------------------------------------------------------------------
/romp/lib/evaluation/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Arthur151/ROMP/a8558aed480af850756f84e2a7c787e359bddbd0/romp/lib/evaluation/.DS_Store
--------------------------------------------------------------------------------
/romp/lib/evaluation/__init__.py:
--------------------------------------------------------------------------------
1 | import sys, os
2 | lib_dir = os.path.dirname(__file__)
3 | root_dir = os.path.join(lib_dir.replace(os.path.basename(lib_dir),''))
4 | if root_dir not in sys.path:
5 | sys.path.insert(0, root_dir)
6 | from .evaluation_matrix import compute_error_verts, compute_similarity_transform, compute_similarity_transform_torch, \
7 | batch_compute_similarity_transform_torch, compute_mpjpe
8 | #from evaluation.eval_pckh import eval_pck, eval_pckh
9 | #from evaluation.pw3d_eval import *
10 | from .eval_ds_utils import h36m_evaluation_act_wise, cmup_evaluation_act_wise, pp_evaluation_cam_wise, determ_worst_best, reorganize_vis_info
--------------------------------------------------------------------------------
/romp/lib/evaluation/crowdpose-api/PythonAPI/crowdposetools/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'tylin'
2 |
--------------------------------------------------------------------------------
/romp/lib/evaluation/crowdpose-api/PythonAPI/demo.py:
--------------------------------------------------------------------------------
1 | from crowdposetools.coco import COCO
2 | from crowdposetools.cocoeval import COCOeval
3 |
4 | set_name = 'val' #'test' #
5 | gt_file = '../annotations/crowdpose_{}.json'.format(set_name)
6 | preds = '../annotations/V1_crowdpose_{}_preds_CAR0.2_ct0.2_mp64.json'.format(set_name)
7 |
8 | cocoGt = COCO(gt_file)
9 | cocoDt = cocoGt.loadRes(preds)
10 | cocoEval = COCOeval(cocoGt, cocoDt, 'keypoints')
11 | cocoEval.evaluate()
12 | cocoEval.accumulate()
13 | cocoEval.summarize()
14 |
--------------------------------------------------------------------------------
/romp/lib/evaluation/crowdpose-api/PythonAPI/setup.py:
--------------------------------------------------------------------------------
1 | from distutils.core import setup
2 | from Cython.Build import cythonize
3 | from distutils.extension import Extension
4 | import numpy as np
5 |
6 | # To compile and install locally run "python setup.py build_ext --inplace"
7 | # To install library to Python site-packages run "python setup.py build_ext install"
8 |
9 | ext_modules = [
10 | Extension(
11 | 'crowdposetools._mask',
12 | sources=['../common/maskApi.c', 'crowdposetools/_mask.pyx'],
13 | include_dirs=[np.get_include(), '../common'],
14 | extra_compile_args=['-Wno-cpp', '-Wno-unused-function', '-std=c99'],
15 | )
16 | ]
17 |
18 | setup(name='crowdposetools',
19 | packages=['crowdposetools'],
20 | package_dir={'crowdposetools': 'crowdposetools'},
21 | version='2.0',
22 | ext_modules=cythonize(ext_modules)
23 | )
24 |
--------------------------------------------------------------------------------
/romp/lib/evaluation/crowdpose-api/common/maskApi.h:
--------------------------------------------------------------------------------
1 | /**************************************************************************
2 | * Microsoft COCO Toolbox. version 2.0
3 | * Data, paper, and tutorials available at: http://mscoco.org/
4 | * Code written by Piotr Dollar and Tsung-Yi Lin, 2015.
5 | * Licensed under the Simplified BSD License [see coco/license.txt]
6 | **************************************************************************/
7 | #pragma once
8 |
9 | typedef unsigned int uint;
10 | typedef unsigned long siz;
11 | typedef unsigned char byte;
12 | typedef double* BB;
13 | typedef struct { siz h, w, m; uint *cnts; } RLE;
14 |
15 | /* Initialize/destroy RLE. */
16 | void rleInit( RLE *R, siz h, siz w, siz m, uint *cnts );
17 | void rleFree( RLE *R );
18 |
19 | /* Initialize/destroy RLE array. */
20 | void rlesInit( RLE **R, siz n );
21 | void rlesFree( RLE **R, siz n );
22 |
23 | /* Encode binary masks using RLE. */
24 | void rleEncode( RLE *R, const byte *mask, siz h, siz w, siz n );
25 |
26 | /* Decode binary masks encoded via RLE. */
27 | void rleDecode( const RLE *R, byte *mask, siz n );
28 |
29 | /* Compute union or intersection of encoded masks. */
30 | void rleMerge( const RLE *R, RLE *M, siz n, int intersect );
31 |
32 | /* Compute area of encoded masks. */
33 | void rleArea( const RLE *R, siz n, uint *a );
34 |
35 | /* Compute intersection over union between masks. */
36 | void rleIou( RLE *dt, RLE *gt, siz m, siz n, byte *iscrowd, double *o );
37 |
38 | /* Compute non-maximum suppression between bounding masks */
39 | void rleNms( RLE *dt, siz n, uint *keep, double thr );
40 |
41 | /* Compute intersection over union between bounding boxes. */
42 | void bbIou( BB dt, BB gt, siz m, siz n, byte *iscrowd, double *o );
43 |
44 | /* Compute non-maximum suppression between bounding boxes */
45 | void bbNms( BB dt, siz n, uint *keep, double thr );
46 |
47 | /* Get bounding boxes surrounding encoded masks. */
48 | void rleToBbox( const RLE *R, BB bb, siz n );
49 |
50 | /* Convert bounding boxes to encoded masks. */
51 | void rleFrBbox( RLE *R, const BB bb, siz h, siz w, siz n );
52 |
53 | /* Convert polygon to encoded mask. */
54 | void rleFrPoly( RLE *R, const double *xy, siz k, siz h, siz w );
55 |
56 | /* Get compressed string representation of encoded mask. */
57 | char* rleToString( const RLE *R );
58 |
59 | /* Convert from compressed string representation of encoded mask. */
60 | void rleFrString( RLE *R, char *s, siz h, siz w );
61 |
--------------------------------------------------------------------------------
/romp/lib/evaluation/pw3d_eval/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Arthur151/ROMP/a8558aed480af850756f84e2a7c787e359bddbd0/romp/lib/evaluation/pw3d_eval/__init__.py
--------------------------------------------------------------------------------
/romp/lib/evaluation/pw3d_eval/scripts/install_prep.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | export CONDA_ENV_NAME=3dpw_eval
4 | echo $CONDA_ENV_NAME
5 |
6 | conda create -n $CONDA_ENV_NAME python=3.7.7
7 |
8 | eval "$(conda shell.bash hook)"
9 | conda activate $CONDA_ENV_NAME
10 |
11 | conda install numpy
12 | pip install opencv-python
13 | pip install numpy-quaternion
14 | pip install scipy
15 | pip install chumpy
16 | # 4 pip install
17 | # 5 pip install scipy
18 |
19 | wget https://virtualhumans.mpi-inf.mpg.de/3DPW/sequenceFiles.zip
20 | unzip sequenceFiles.zip
21 | rm sequenceFiles.zip
22 | mkdir input_dir
23 | mkdir ./input_dir/ref
24 | mv ./sequenceFiles/test ./sequenceFiles/train ./sequenceFiles/validation ./input_dir/ref
25 |
--------------------------------------------------------------------------------
/romp/lib/evaluation/pw3d_eval/utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import quaternion
3 |
4 |
5 | def rot_matrix_to_axan(data):
6 | """
7 | Converts rotation matrices to axis angles
8 | :param data: Rotation matrices. Shape: (Persons, Seq, 24, 3, 3)
 9 |     :return: Axis angle representation of input matrices. Shape: (Persons, Seq, 24, 3)
10 | """
11 | aa = quaternion.as_rotation_vector(quaternion.from_rotation_matrix(data))
12 | return aa
13 |
14 |
15 | def axan_to_rot_matrix(data):
16 | """
17 | Converts the axis angle representation to a rotation matrix.
18 | :param data: Axis angle. Shape (batch, 24*3).
19 |     :return: an array of shape (batch, 24*3*3), i.e. flattened rotation matrices.
20 | """
21 | # reshape to have sensor values explicit
22 | data_c = np.array(data, copy=True)
23 | # n = 24
24 | batch, n = data_c.shape[0], int(data_c.shape[1] / 3)
25 | data_r = np.reshape(data_c, [batch, n, 3])
26 |
27 | qs = quaternion.from_rotation_vector(data_r)
28 | rot = np.array(quaternion.as_rotation_matrix(qs))
29 | # size Batch x 24 x 3 x 3
30 |
31 | # check this
32 | rot = np.resize(rot, (batch, 24 * 3 * 3))
33 | # finally we get Batch X 24*3*3
34 | return rot
35 |
36 |
37 | def with_zeros(data):
38 | """
39 | Appends a [0, 0, 0, 1] vector to all the 3 X 4 matrices in the batch
40 | Args:
41 | data: matrix shape Batch X 3 X 4
42 | Returns: matrix shape Batch X 4 X 4
43 |
44 | """
45 | batch_size = data.shape[0]
46 | padding = np.array([0.0, 0.0, 0.0, 1.0])
47 |
48 | # This creates a list of data and a padding array with size Batch X 1 X 4
49 |
50 | concat_list = [data, np.tile(np.resize(padding, (1, 1, 4)), (batch_size, 1, 1))]
51 | cat_res = np.concatenate(concat_list, axis=1)
52 | return cat_res
53 |
54 |
55 | def pack(data):
56 | """
57 | changes a matrix of size B x 4 x 1 to matrix of size B x 4 x 4 where all the additional values are zero
58 | This is useful for multiplying the global transform with the inverse of the pose transform
59 | Args:
60 | data: BATCH x 4 x 1
61 | Returns:
62 |
63 | """
64 | batch_size = data.shape[0]
65 | padding = np.zeros((batch_size, 4, 3))
66 | pack_list = [padding, data]
67 | pack_res = np.concatenate(pack_list, 2)
68 | return pack_res
69 |
70 |
71 | def subtract_flat_id(rot_mats):
72 | """
73 | does R(\theta) - R(\theta*)
74 |     R(\theta*) is a concatenation of identity matrices
75 | Args:
76 | rot_mats: shape: BATCH X 207
77 | Returns:
78 |
79 | """
80 | # Subtracts identity as a flattened tensor
81 | id_flat = np.eye(3, dtype=rot_mats.dtype)
82 | id_flat = np.resize(id_flat, (1, 9))
83 | id_flat = np.tile(id_flat, (rot_mats.shape[0], 23))
84 | results = rot_mats - id_flat
85 | return results
86 |
--------------------------------------------------------------------------------
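A quick sanity check for the two conversion helpers in pw3d_eval/utils.py above, assuming the file is importable as `utils` (e.g. when run from romp/lib/evaluation/pw3d_eval/ with numpy-quaternion installed). For rotations with angle below pi, the round trip should recover the original axis-angle vectors:
```
import numpy as np
from utils import axan_to_rot_matrix, rot_matrix_to_axan  # helpers defined above

aa = np.random.uniform(-1.0, 1.0, size=(2, 24 * 3))          # 2 poses, 24 joints x 3, |angle| < pi
rots = axan_to_rot_matrix(aa)                                # -> (2, 24*3*3), flattened rotation matrices
aa_back = rot_matrix_to_axan(rots.reshape(2, 24, 3, 3))      # -> (2, 24, 3)
print(np.allclose(aa.reshape(2, 24, 3), aa_back, atol=1e-6))  # expected: True
```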
/romp/lib/loss_funcs/__init__.py:
--------------------------------------------------------------------------------
1 | import sys, os
2 | lib_dir = os.path.dirname(__file__)
3 | root_dir = os.path.join(lib_dir.replace(os.path.basename(lib_dir),''))
4 | if root_dir not in sys.path:
5 | sys.path.insert(0, root_dir)
6 | from .calc_loss import Loss
7 | from .learnable_loss import Learnable_Loss
8 | from .params_loss import batch_l2_loss, _calc_MPJAE
9 | from .keypoints_loss import batch_kp_2d_l2_loss, calc_mpjpe, calc_pampjpe, _calc_pck_loss, calc_pck, align_by_parts
10 | from .maps_loss import focal_loss, Heatmap_AE_loss, JointsMSELoss
11 | from .prior_loss import create_prior, MaxMixturePrior, L2Prior, SMPLifyAnglePrior, angle_prior
--------------------------------------------------------------------------------
/romp/lib/maps_utils/__init__.py:
--------------------------------------------------------------------------------
1 | import sys, os
2 | lib_dir = os.path.dirname(__file__)
3 | root_dir = os.path.join(lib_dir.replace(os.path.basename(lib_dir),''))
4 | if root_dir not in sys.path:
5 | sys.path.insert(0, root_dir)
6 | from .kp_group import HeatmapParser
7 | from .target_generators import HeatmapGenerator,JointsGenerator
8 | from .centermap import CenterMap
--------------------------------------------------------------------------------
/romp/lib/maps_utils/debug_utils.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 |
4 | def print_dict(td):
5 | keys = collect_keyname(td)
6 | print(keys)
7 |
8 | def get_size(item):
9 | if isinstance(item, list) or isinstance(item, tuple):
10 | return len(item)
11 | elif isinstance(item, torch.Tensor):
12 | return (item.shape, item.device)
13 |     elif isinstance(item, np.ndarray):
14 | return item.shape
15 | else:
16 | return item
17 |
18 | def collect_keyname(td):
19 | keys = []
20 | for key in td:
21 | if isinstance(td[key], dict):
22 | keys.append([key, collect_keyname(td[key])])
23 | else:
24 | keys.append([key, get_size(td[key])])
25 | return keys
--------------------------------------------------------------------------------
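A tiny usage sketch for `print_dict` from debug_utils.py above (hypothetical data, assuming torch is installed and the script runs next to debug_utils.py): it reports tensor shapes/devices, list lengths, and raw values for everything else, recursing into nested dicts:
```
import torch
from debug_utils import print_dict

batch = {
    'image': torch.zeros(2, 512, 512, 3),
    'meta': {'imgpath': ['a.jpg', 'b.jpg'], 'offsets': torch.zeros(2, 10)},
    'subject_num': 2,
}
print_dict(batch)
# [['image', (torch.Size([2, 512, 512, 3]), device(type='cpu'))],
#  ['meta', [['imgpath', 2], ['offsets', (torch.Size([2, 10]), device(type='cpu'))]]],
#  ['subject_num', 2]]
```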
/romp/lib/models/__init__.py:
--------------------------------------------------------------------------------
1 | import sys, os
2 | lib_dir = os.path.dirname(__file__)
3 | root_dir = os.path.join(lib_dir.replace(os.path.basename(lib_dir),''))
4 | if root_dir not in sys.path:
5 | sys.path.insert(0, root_dir)
6 | from .build import build_model
7 |
--------------------------------------------------------------------------------
/romp/lib/models/build.py:
--------------------------------------------------------------------------------
1 | import sys, os
2 | import torch
3 | import torch.nn as nn
4 | from config import args
5 | from models.hrnet_32 import HigherResolutionNet
6 | from models.resnet_50 import ResNet_50
7 | from models.romp_model import ROMP
8 | from models.bev_model import BEV
9 |
10 | Backbones = {'hrnet': HigherResolutionNet, 'resnet': ResNet_50}
11 | Heads = {1: ROMP, 6:BEV}
12 |
13 | def build_model():
14 | if args().backbone in Backbones:
15 | backbone = Backbones[args().backbone]()
16 | else:
17 | raise NotImplementedError("Backbone is not recognized")
18 | if args().model_version in Heads:
19 | head = Heads[args().model_version]
20 | else:
21 | raise NotImplementedError("Head is not recognized")
22 | model = head(backbone=backbone)
23 | return model
24 |
25 | if __name__ == '__main__':
26 | net = build_model()
27 | nx = torch.rand(4,512,512,3).float().cuda()
28 | y = net(nx)
29 |
30 | for idx, item in enumerate(y):
31 | if isinstance(item,dict):
32 | for key, it in item.items():
33 | print(key,it.shape)
34 | else:
35 | print(idx,item.shape)
36 |
--------------------------------------------------------------------------------
/romp/lib/smpl_family/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Arthur151/ROMP/a8558aed480af850756f84e2a7c787e359bddbd0/romp/lib/smpl_family/__init__.py
--------------------------------------------------------------------------------
/romp/lib/smpl_family/smpl_regressor.py:
--------------------------------------------------------------------------------
1 | import sys,os
2 | import torch
3 | import torch.nn as nn
4 | import config
5 | import numpy as np
6 | from .smpl import SMPL
7 | from config import args
8 |
9 | class SMPLR(nn.Module):
10 | def __init__(self, use_gender=False):
11 | super(SMPLR, self).__init__()
12 | model_path = os.path.join(config.model_dir,'parameters','smpl')
13 | self.smpls = {}
14 | self.smpls['n'] = SMPL(args().smpl_model_path, model_type='smpl')
15 | if use_gender:
16 | self.smpls['f'] = SMPL(os.path.join(config.smpl_model_dir,'SMPL_FEMALE.pth'))
17 | self.smpls['m'] = SMPL(os.path.join(config.smpl_model_dir,'SMPL_MALE.pth'))
18 |
19 | def forward(self, pose, betas, gender='n'):
20 | if isinstance(pose, np.ndarray):
21 | pose, betas = torch.from_numpy(pose).float(),torch.from_numpy(betas).float()
22 | if len(pose.shape)==1:
23 | pose, betas = pose.unsqueeze(0), betas.unsqueeze(0)
24 | verts, joints54_17 = self.smpls[gender](poses=pose, betas=betas)
25 |
26 | return verts.numpy(), joints54_17[:,:54].numpy()
--------------------------------------------------------------------------------
/romp/lib/smpl_family/smpl_wrapper.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import numpy as np
4 |
5 | import sys, os
6 | import config
7 | from config import args
8 | import constants
9 | from smpl_family.smpl import SMPL
10 | from utils.projection import vertices_kp3d_projection
11 | from utils.rot_6D import rot6D_to_angular
12 |
13 | class SMPLWrapper(nn.Module):
14 | def __init__(self):
15 | super(SMPLWrapper,self).__init__()
16 | #self.smpl_model = smpl_model.create(args().smpl_model_path, J_reg_extra9_path=args().smpl_J_reg_extra_path, J_reg_h36m17_path=args().smpl_J_reg_h37m_path, \
17 | # batch_size=args().batch_size,model_type='smpl', gender='neutral', use_face_contour=False, ext='npz',flat_hand_mean=True, use_pca=False).cuda()
18 | self.smpl_model = SMPL(args().smpl_model_path, model_type='smpl')
19 | self.part_name = ['cam', 'global_orient', 'body_pose', 'betas']
20 | self.part_idx = [args().cam_dim, args().rot_dim, (args().smpl_joint_num-1)*args().rot_dim, 10]
21 |
22 | self.unused_part_name = ['left_hand_pose', 'right_hand_pose', 'jaw_pose', 'leye_pose', 'reye_pose', 'expression']
23 | self.unused_part_idx = [ 15, 15, 3, 3, 3, 10]
24 |
25 | self.kps_num = 25 # + 21*2
26 | self.params_num = np.array(self.part_idx).sum()
27 | self.global_orient_nocam = torch.from_numpy(constants.global_orient_nocam).unsqueeze(0)
28 | self.joint_mapper_op25 = torch.from_numpy(constants.joint_mapping(constants.SMPL_ALL_54, constants.OpenPose_25)).long()
29 | self.joint_mapper_op25 = torch.from_numpy(constants.joint_mapping(constants.SMPL_ALL_54, constants.OpenPose_25)).long()
30 |
31 | def forward(self, outputs, meta_data):
32 | idx_list, params_dict = [0], {}
33 | for i, (idx, name) in enumerate(zip(self.part_idx,self.part_name)):
34 | idx_list.append(idx_list[i] + idx)
35 | params_dict[name] = outputs['params_pred'][:, idx_list[i]: idx_list[i+1]].contiguous()
36 |
37 | if args().Rot_type=='6D':
38 | params_dict['body_pose'] = rot6D_to_angular(params_dict['body_pose'])
39 | params_dict['global_orient'] = rot6D_to_angular(params_dict['global_orient'])
40 | N = params_dict['body_pose'].shape[0]
41 | params_dict['body_pose'] = torch.cat([params_dict['body_pose'], torch.zeros(N,6).to(params_dict['body_pose'].device)],1)
42 | params_dict['poses'] = torch.cat([params_dict['global_orient'], params_dict['body_pose']], 1)
43 |
44 | vertices, joints54_17 = self.smpl_model(betas=params_dict['betas'], poses=params_dict['poses'], root_align=args().smpl_mesh_root_align)
45 |
46 | outputs.update({'params': params_dict, 'verts': vertices, 'j3d':joints54_17[:,:54], 'joints_h36m17':joints54_17[:,54:]})
47 |
48 | outputs.update(vertices_kp3d_projection(outputs['j3d'], outputs['params']['cam'], joints_h36m17_preds=outputs['joints_h36m17'], \
49 | vertices=outputs['verts'], input2orgimg_offsets=meta_data['offsets'], presp=args().perspective_proj))
50 |
51 | return outputs
--------------------------------------------------------------------------------
/romp/lib/smpl_family/smpla.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from smpl_family.smpl import SMPL
3 | import torch.nn as nn
4 |
5 | class SMPLA_parser(nn.Module):
6 | def __init__(self, smpla_path, smil_path, baby_thresh=0.8):
7 | super(SMPLA_parser, self).__init__()
8 | self.smil_model = SMPL(smil_path, model_type='smpl')
9 | self.smpla_model = SMPL(smpla_path, model_type='smpla')
10 | self.baby_thresh = baby_thresh
11 |
12 | def forward(self, betas=None, poses=None, root_align=True):
13 | baby_mask = betas[:,10] > self.baby_thresh
14 | if baby_mask.sum()>0:
15 | adult_mask = ~baby_mask
16 | verts, joints = torch.zeros(len(poses), 6890, 3, device=poses.device, dtype=poses.dtype), torch.zeros(len(poses), 54+17, 3, device=poses.device, dtype=poses.dtype)
17 |
18 | # SMIL beta - 10 dims, only need the estimated betas, kid_offsets are not used
19 | verts[baby_mask], joints[baby_mask] = self.smil_model(betas=betas[baby_mask,:10], poses=poses[baby_mask], root_align=root_align)
20 |
21 | # SMPLA beta - 11 dims, the estimated betas (10) + kid_offsets (1)
22 | if adult_mask.sum()>0:
23 | verts[adult_mask], joints[adult_mask] = self.smpla_model(betas=betas[adult_mask,:11], poses=poses[adult_mask], root_align=root_align)
24 | else:
25 | verts, joints = self.smpla_model(betas=betas[:,:11], poses=poses, root_align=root_align)
26 |
27 | return verts, joints
--------------------------------------------------------------------------------
/romp/lib/tracking/__init__.py:
--------------------------------------------------------------------------------
1 | import sys, os
2 | lib_dir = os.path.dirname(__file__)
3 | root_dir = os.path.join(lib_dir.replace(os.path.basename(lib_dir),''))
4 | if root_dir not in sys.path:
5 | sys.path.insert(0, root_dir)
--------------------------------------------------------------------------------
/romp/lib/tracking/basetrack.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from collections import OrderedDict
3 |
4 |
5 | class TrackState(object):
6 | New = 0
7 | Tracked = 1
8 | Lost = 2
9 | Removed = 3
10 |
11 |
12 | class BaseTrack(object):
13 | _count = 0
14 |
15 | track_id = 0
16 | is_activated = False
17 | state = TrackState.New
18 |
19 | history = OrderedDict()
20 | features = []
21 | curr_feature = None
22 | score = 0
23 | start_frame = 0
24 | frame_id = 0
25 | time_since_update = 0
26 |
27 | # multi-camera
28 | location = (np.inf, np.inf)
29 |
30 | @property
31 | def end_frame(self):
32 | return self.frame_id
33 |
34 | @staticmethod
35 | def next_id():
36 | BaseTrack._count += 1
37 | return BaseTrack._count
38 |
39 | def activate(self, *args):
40 | raise NotImplementedError
41 |
42 | def predict(self):
43 | raise NotImplementedError
44 |
45 | def update(self, *args, **kwargs):
46 | raise NotImplementedError
47 |
48 | def mark_lost(self):
49 | self.state = TrackState.Lost
50 |
51 | def mark_removed(self):
52 | self.state = TrackState.Removed
--------------------------------------------------------------------------------
/romp/lib/tracking/tracking_utils/log.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 |
4 | def get_logger(name='root'):
5 | formatter = logging.Formatter(
6 | # fmt='%(asctime)s [%(levelname)s]: %(filename)s(%(funcName)s:%(lineno)s) >> %(message)s')
7 | fmt='%(asctime)s [%(levelname)s]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
8 |
9 | handler = logging.StreamHandler()
10 | handler.setFormatter(formatter)
11 |
12 | logger = logging.getLogger(name)
13 | logger.setLevel(logging.DEBUG)
14 | logger.addHandler(handler)
15 | return logger
16 |
17 |
18 | logger = get_logger('root')
19 |
--------------------------------------------------------------------------------
/romp/lib/tracking/tracking_utils/nms.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2 | # from ._utils import _C
3 | from tracking_utils import _C
4 |
5 | nms = _C.nms
6 | # nms.__doc__ = """
7 | # This function performs Non-maximum suppresion"""
8 |
--------------------------------------------------------------------------------
/romp/lib/tracking/tracking_utils/parse_config.py:
--------------------------------------------------------------------------------
1 | def parse_model_cfg(path):
2 | """Parses the yolo-v3 layer configuration file and returns module definitions"""
3 | file = open(path, 'r')
4 | lines = file.read().split('\n')
5 | lines = [x for x in lines if x and not x.startswith('#')]
6 | lines = [x.rstrip().lstrip() for x in lines] # get rid of fringe whitespaces
7 | module_defs = []
8 | for line in lines:
9 | if line.startswith('['): # This marks the start of a new block
10 | module_defs.append({})
11 | module_defs[-1]['type'] = line[1:-1].rstrip()
12 | if module_defs[-1]['type'] == 'convolutional':
13 | module_defs[-1]['batch_normalize'] = 0
14 | else:
15 | key, value = line.split("=")
16 | value = value.strip()
17 | module_defs[-1][key.rstrip()] = value.strip()
18 |
19 | return module_defs
20 |
21 |
22 | def parse_data_cfg(path):
23 | """Parses the data configuration file"""
24 | options = dict()
25 | options['gpu'] = '0'
26 | options['num_workers'] = '10'
27 | with open(path, 'r') as fp:
28 | lines = fp.readlines()
29 | for line in lines:
30 | line = line.strip()
31 | if line == '' or line.startswith('#'):
32 | continue
33 | key, value = line.split('=')
34 | options[key.strip()] = value.strip()
35 | return options
36 |
--------------------------------------------------------------------------------
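A minimal sketch of `parse_data_cfg` from parse_config.py above, run against a throwaway config file (the keys here are hypothetical, just to show the key=value parsing and the built-in `gpu`/`num_workers` defaults; assumes the script runs next to parse_config.py):
```
import os, tempfile
from parse_config import parse_data_cfg

cfg_text = "classes=1\ntrain=data/train.txt\n# comment lines are skipped\nnames=data/mot.names\n"
with tempfile.NamedTemporaryFile('w', suffix='.data', delete=False) as f:
    f.write(cfg_text)
print(parse_data_cfg(f.name))
# {'gpu': '0', 'num_workers': '10', 'classes': '1', 'train': 'data/train.txt', 'names': 'data/mot.names'}
os.remove(f.name)
```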
/romp/lib/tracking/tracking_utils/timer.py:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # Fast R-CNN
3 | # Copyright (c) 2015 Microsoft
4 | # Licensed under The MIT License [see LICENSE for details]
5 | # Written by Ross Girshick
6 | # --------------------------------------------------------
7 |
8 | import time
9 |
10 |
11 | class Timer(object):
12 | """A simple timer."""
13 | def __init__(self):
14 | self.total_time = 0.
15 | self.calls = 0
16 | self.start_time = 0.
17 | self.diff = 0.
18 | self.average_time = 0.
19 |
20 | self.duration = 0.
21 |
22 | def tic(self):
23 |         # using time.time instead of time.clock because time.clock
24 |         # does not normalize for multithreading
25 | self.start_time = time.time()
26 |
27 | def toc(self, average=True):
28 | self.diff = time.time() - self.start_time
29 | self.total_time += self.diff
30 | self.calls += 1
31 | self.average_time = self.total_time / self.calls
32 | if average:
33 | self.duration = self.average_time
34 | else:
35 | self.duration = self.diff
36 | return self.duration
37 |
38 | def clear(self):
39 | self.total_time = 0.
40 | self.calls = 0
41 | self.start_time = 0.
42 | self.diff = 0.
43 | self.average_time = 0.
44 | self.duration = 0.
45 |
46 |
--------------------------------------------------------------------------------
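A short usage sketch for the `Timer` above (assuming it is imported from timer.py): `tic()` marks a start point and `toc()` accumulates the elapsed time, returning the running average by default:
```
import time
from timer import Timer

timer = Timer()
for _ in range(3):
    timer.tic()
    time.sleep(0.01)       # stand-in for the call being profiled
    avg = timer.toc()      # running average duration in seconds
print(timer.calls, round(timer.average_time, 3))
```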
/romp/lib/utils/__init__.py:
--------------------------------------------------------------------------------
1 | import sys, os
2 | lib_dir = os.path.dirname(__file__)
3 | root_dir = os.path.join(lib_dir.replace(os.path.basename(lib_dir),''))
4 | if root_dir not in sys.path:
5 | sys.path.insert(0, root_dir)
6 |
7 | from .rot_6D import rot6D_to_angular
8 | from .projection import filter_out_incorrect_trans
9 | from .util import AverageMeter,normalize_kps, BHWC_to_BCHW, rotation_matrix_to_angle_axis,\
10 | batch_rodrigues, AverageMeter_Dict, transform_rot_representation, save_obj, save_yaml, save_json
11 | from .augments import img_kp_rotate, random_erase, RGB_mix, Synthetic_occlusion, calc_aabb, flip_kps, rot_imgplane, pose_processing, process_image
12 | from .train_utils import load_model, process_idx, copy_state_dict, save_model, write2log, exclude_params, train_entire_model, \
13 | print_dict, get_remove_keys, reorganize_items, init_seeds, fix_backbone
14 | from .center_utils import process_gt_center
15 | from .cam_utils import normalize_trans_to_cam_params, denormalize_cam_params_to_trans, estimate_translation
--------------------------------------------------------------------------------
/romp/lib/utils/center_utils.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import constants
3 | from config import args
4 | import numpy as np
5 | from .cam_utils import convert_cam_params_to_centermap_coords
6 |
7 | def denormalize_center(center, size=args().centermap_size):
8 | center = (center+1)/2*size
9 |
10 | center[center<1] = 1
11 | center[center>size - 1] = size - 1
12 | if isinstance(center, np.ndarray):
13 | center = center.astype(np.int32)
14 | elif isinstance(center, torch.Tensor):
15 | center = center.long()
16 | return center
17 |
18 | def process_gt_center(center_normed):
19 | valid_mask = center_normed[:,:,0]>-1
20 | valid_inds = torch.where(valid_mask)
21 | valid_batch_inds, valid_person_ids = valid_inds[0], valid_inds[1]
22 | center_gt = ((center_normed+1)/2*args().centermap_size).long()
23 | center_gt_valid = center_gt[valid_mask]
24 | return (valid_batch_inds, valid_person_ids, center_gt_valid)
25 |
26 |
27 | def parse_gt_center3d(cam_mask, cams, size=args().centermap_size):
28 | batch_ids, person_ids = torch.where(cam_mask)
29 | cam_params = cams[batch_ids, person_ids]
30 | centermap_coords = convert_cam_params_to_centermap_coords(cam_params)
31 | czyxs = denormalize_center(centermap_coords, size=size)
32 | #sample_view_ids = determine_sample_view(batch_ids,czyxs)
33 | return batch_ids, person_ids, czyxs
--------------------------------------------------------------------------------
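For reference, a small sketch of what `denormalize_center` above does when given an explicit map size: it maps normalized coordinates in [-1, 1] to integer cell indices and clamps them to [1, size-1]. This assumes the ROMP lib (config, constants) is on the path, since the module reads its default size from `args()`:
```
import torch
from utils.center_utils import denormalize_center  # with romp/lib on sys.path

centers = torch.tensor([[-1.0, -1.0], [0.0, 0.0], [1.0, 1.0]])
print(denormalize_center(centers.clone(), size=64))
# tensor([[ 1,  1],
#         [32, 32],
#         [63, 63]])
```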
/romp/lib/visualization/__init__.py:
--------------------------------------------------------------------------------
1 | import sys, os
2 | lib_dir = os.path.dirname(__file__)
3 | root_dir = os.path.join(lib_dir.replace(os.path.basename(lib_dir),''))
4 | if root_dir not in sys.path:
5 | sys.path.insert(0, root_dir)
--------------------------------------------------------------------------------
/romp/lib/visualization/vis_cfgs/o3d_scene_o3d13.yml:
--------------------------------------------------------------------------------
1 | host: '127.0.0.1'
2 | port: 9999
3 |
4 | width: 1920
5 | height: 1080
6 |
7 | max_human: 10
8 | track: True
9 | filter: True
10 | block: True # whether to block the visualization: True to visualize each frame, False for real-time applications
11 | rotate: False
12 | debug: False
13 | write: False
14 | out: 'none'
15 |
16 | body_model:
17 | module: "models.smpl.SMPL"
18 |
19 | camera:
20 | phi: 0
21 | theta: -10
22 | cx: 0.
23 | cy: 0.2
24 | cz: 6.
25 |
26 | scene:
27 | # "visualization.vis_utils.create_coord":
28 | # camera: [0, 0, 0]
29 | # radius: 1.
30 | # scale: 1.
31 | # "visualization.vis_utils.create_bbox":
32 | # min_bound: [-3, -3, 0]
33 | # max_bound: [3, 3, 2]
34 | # flip: False
35 | "visualization.vis_utils_o3d13.create_ground":
36 | center: [0, 0, -1.6]
37 | xdir: [1, 0, 0]
38 | ydir: [0, 1, 0]
39 | step: 1
40 | xrange: 6
41 | yrange: 6
42 | white: [1., 1., 1.]
43 | black: [0.,0.,0.]
44 | two_sides: True
45 |
46 | range:
47 | minr: [-100, -100, -100]
48 | maxr: [ 100, 100, 100]
49 | rate_inlier: 0.8
50 | min_conf: 0.1
51 |
52 |
--------------------------------------------------------------------------------
/romp/lib/visualization/vis_cfgs/o3d_scene_py36_o3d9.yml:
--------------------------------------------------------------------------------
1 | host: '127.0.0.1'
2 | port: 9999
3 |
4 | width: 1920
5 | height: 1080
6 |
7 | max_human: 6
8 | track: True
9 | filter: True
10 | block: True # whether to block the visualization: True to visualize each frame, False for real-time applications
11 | rotate: False
12 | debug: False
13 | write: False
14 | out: 'none'
15 |
16 | body_model:
17 | module: "models.smpl.SMPL"
18 |
19 | camera:
20 | phi: 0
21 | theta: -10
22 | cx: 0.
23 | cy: 0.2
24 | cz: 6.
25 |
26 | scene:
27 | # "visualization.vis_utils.create_coord":
28 | # camera: [0, 0, 0]
29 | # radius: 1.
30 | # scale: 1.
31 | # "visualization.vis_utils.create_bbox":
32 | # min_bound: [-3, -3, 0]
33 | # max_bound: [3, 3, 2]
34 | # flip: False
35 | "visualization.vis_utils_py36_o3d9.create_ground":
36 | center: [0, 0, -1.6]
37 | xdir: [1, 0, 0]
38 | ydir: [0, 1, 0]
39 | step: 1
40 | xrange: 6
41 | yrange: 6
42 | white: [1., 1., 1.]
43 | black: [0.,0.,0.]
44 | two_sides: True
45 |
46 | range:
47 | minr: [-100, -100, -100]
48 | maxr: [ 100, 100, 100]
49 | rate_inlier: 0.8
50 | min_conf: 0.1
51 |
52 |
--------------------------------------------------------------------------------
/romp/lib/visualization/vis_cfgs/render_option.json:
--------------------------------------------------------------------------------
1 | {
2 | "background_color" : [ 1.0, 1.0, 1.0 ],
3 | "class_name" : "RenderOption",
4 | "default_mesh_color" : [ 0.9, 0.9, 0.9 ],
5 | "image_max_depth" : 3000,
6 | "image_stretch_option" : 1,
7 | "interpolation_option" : 0,
8 |
9 | "light0_color" : [ 1.0, 1.0, 1.0 ],
10 | "light0_diffuse_power" : 0.7,
11 | "light0_position" : [ 0.0, 0.0, 2.0 ],
12 | "light0_specular_power" : 0.7,
13 | "light0_specular_shininess" : 70.0,
14 |
15 | "light1_color" : [ 0.0, 0.0, 0.0 ],
16 | "light1_diffuse_power" : 0.7,
17 | "light1_position" : [ 0.0, 2.0, 0 ],
18 | "light1_specular_power" : 0.1,
19 | "light1_specular_shininess" : 70.0,
20 |
21 | "light2_color" : [ 0.0, 0.0, 0.0 ],
22 | "light2_diffuse_power" : 0.7,
23 | "light2_position" : [ 2.0, -2.0, 0.0 ],
24 | "light2_specular_power" : 0.1,
25 | "light2_specular_shininess" : 70.0,
26 |
27 | "light3_color" : [ 0.0, 0.0, 0.0 ],
28 | "light3_diffuse_power" : 0.7,
29 | "light3_position" : [ -2.0, -2.0, 0.0 ],
30 | "light3_specular_power" : 0.1,
31 | "light3_specular_shininess" : 70.0,
32 |
33 | "light_ambient_color" : [ 0.3, 0.3, 0.3 ],
34 |
35 | "light_on" : true,
36 | "line_width" : 1.0,
37 | "mesh_color_option" : 1,
38 | "mesh_shade_option" : 1,
39 | "mesh_show_back_face" : true,
40 | "mesh_show_wireframe" : false,
41 | "point_color_option" : 0,
42 | "point_show_normal" : false,
43 | "point_size" : 5.0,
44 | "show_coordinate_frame" : false,
45 | "version_major" : 1,
46 | "version_minor" : 0
47 | }
--------------------------------------------------------------------------------
/romp/lib/visualization/vis_platform/vis_client.py:
--------------------------------------------------------------------------------
1 | import sys, os
2 | root_dir = os.path.join(os.path.dirname(__file__),'..')
3 | if root_dir not in sys.path:
4 | sys.path.insert(0, root_dir)
5 | import socket
6 | import time
7 | from visualization.socket_utils import BaseSocketClient
8 |
9 | def send_rand(client):
10 | import numpy as np
11 | N_person = 4
12 | datas = []
13 | for i in range(N_person):
14 | transl = (np.random.rand(1, 3) - 0.5) * 3
15 | kpts = np.random.rand(25, 4)
16 | kpts[:, :3] += transl
17 | pose = np.random.rand(1,72)
18 | beta = np.random.rand(1,10)
19 | data = {
20 | 'id': i,
21 | 'keypoints3d': kpts,
22 | 'poses': pose,
23 | 'betas': beta
24 | }
25 | datas.append(data)
26 | for _ in range(1):
27 | for i in range(N_person):
28 | move = (np.random.rand(1, 3) - 0.5) * 0.1
29 | datas[i]['keypoints3d'][:, :3] += move
30 | client.send_smpl(datas)
31 | time.sleep(0.01)
32 | client.close()
33 |
34 | def send_dir(client, path, step):
35 | from os.path import join
36 | from glob import glob
37 | from tqdm import tqdm
38 | from easymocap.mytools.reader import read_keypoints3d
39 | results = sorted(glob(join(path, '*.json')))
40 | for result in tqdm(results[::step]):
41 | if args.smpl:
42 | data = read_smpl(result)
43 | client.send_smpl(data)
44 | else:
45 | data = read_keypoints3d(result)
46 | client.send(data)
47 | time.sleep(0.005)
48 |
49 | if __name__ == '__main__':
50 | import argparse
51 | parser = argparse.ArgumentParser()
52 | parser.add_argument('--host', type=str, default='127.0.0.1')
53 | parser.add_argument('--port', type=int, default=9999)
54 | parser.add_argument('--step', type=int, default=1)
55 | parser.add_argument('--path', type=str, default=None)
56 | parser.add_argument('--smpl', action='store_true')
57 | parser.add_argument('--debug', action='store_true')
58 | args = parser.parse_args()
59 | if args.host == 'auto':
60 | args.host = socket.gethostname()
61 | for i in range(1000):
62 | client = BaseSocketClient(args.host, args.port)
63 | send_rand(client)
--------------------------------------------------------------------------------
/romp/lib/visualization/vis_platform/vis_server.py:
--------------------------------------------------------------------------------
1 | import open3d as o3d
2 | version = int(o3d.__version__.split('.')[1])
3 | if version==9:
4 | print('using open3d 0.9.0, importing functions from vis_server_py36_o3d9.')
5 | from .vis_server_py36_o3d9 import *
6 | config_file = 'romp/lib/visualization/vis_cfgs/o3d_scene_py36_o3d9.yml'
7 | elif version >=11:
 8 |     print('using open3d >= 0.11, importing functions from vis_server_o3d13.')
9 | from .vis_server_o3d13 import *
10 | config_file = 'romp/lib/visualization/vis_cfgs/o3d_scene_o3d13.yml'
11 | else:
12 | print('Error: the open3d version may not be supported.')
13 |
14 | if __name__ == '__main__':
15 | cfg = Config().load(config_file)
16 | server = VisOpen3DSocket(cfg.host, cfg.port, cfg)
17 | while True:
18 | server.update()
--------------------------------------------------------------------------------
/romp/lib/visualization/web_vis.py:
--------------------------------------------------------------------------------
1 | import plotly
2 | import plotly.graph_objects as go
3 | from plotly.graph_objs.scatter import Line
4 | from plotly.subplots import make_subplots
5 | import plotly.express as px
6 | import ipywidgets
7 | from ipywidgets.widgets import Layout, HBox, VBox
8 | from ipywidgets.embed import embed_minimal_html
9 | import pandas as pd
10 | import sys, os
11 | import constants
12 | import config
13 | from config import args
14 |
15 | def convert_3dpose_to_line_figs(poses, bones, pred_color='goldenrod', gt_color='red'):
16 | figs = []
17 | items_name = ["x","y","z",'class','joint_name']
18 | if bones.max()==13:
19 | joint_names = constants.LSP_14_names
20 | elif bones.max()==23:
21 | joint_names = constants.SMPL_24_names
22 | for batch_inds, (pred, real) in enumerate(zip(*poses)):
23 | pose_dict, color_maps = {}, {}
24 | for bone_inds in bones:
25 | si, ei = bone_inds
26 | bone_name = '{}-{}'.format(joint_names[si], joint_names[ei])
27 | pose_dict['p_'+bone_name+'_s'] = [*pred[si],'p_'+bone_name, joint_names[si]]
28 | pose_dict['p_'+bone_name+'_e'] = [*pred[ei],'p_'+bone_name, joint_names[ei]]
29 | color_maps['p_'+bone_name] = pred_color
30 | pose_dict['r_'+bone_name+'_s'] = [*real[si],'r_'+bone_name, joint_names[si]]
31 | pose_dict['r_'+bone_name+'_e'] = [*real[ei],'r_'+bone_name, joint_names[ei]]
32 | color_maps['r_'+bone_name] = gt_color
33 | pred_real_pose_df = pd.DataFrame.from_dict(pose_dict,orient='index',columns=items_name)
34 | pose3d_fig = px.line_3d(pred_real_pose_df, x="x", y="y", z="z", color='class', color_discrete_map=color_maps)#, text='joint_name'
35 | figs.append(pose3d_fig)
36 | return figs
37 |
38 | def write_to_html(img_names, plot_dict, vis_cfg):
39 | containers = []
40 | raw_layout = Layout(overflow_x='scroll',border='2px solid black',width='1900px',height='',
41 | flex_direction='row',display='flex')
42 | for inds, img_name in enumerate(img_names):
43 | Hboxes = []
44 | for item in list(plot_dict.keys()):
45 | if inds >= len(plot_dict[item]['figs']):
46 | continue
47 | fig = plot_dict[item]['figs'][inds]
48 | fig['layout'] = {"title":{"text":img_name.replace(args().dataset_rootdir, '')}}
49 | Hboxes.append(go.FigureWidget(fig))
50 | containers.append(HBox(Hboxes,layout=raw_layout))
51 | all_figs = VBox(containers)
52 | save_name = os.path.join(vis_cfg['save_dir'],vis_cfg['save_name']+'.html')
53 | embed_minimal_html(save_name, views=[all_figs], title=vis_cfg['save_name'], drop_defaults=True)
54 | ipywidgets.Widget.close_all()
55 | del all_figs, containers, Hboxes
56 |
57 | def convert_image_list(images):
58 | figs = []
59 | for img in images:
60 | figs.append(px.imshow(img))
61 | return figs
62 |
63 | if __name__ == '__main__':
64 | import numpy as np
65 | convert_3dpose_to_line_figs([np.random.rand(18).reshape((2,3,3)),np.random.rand(18).reshape((2,3,3))],np.array([[0,1],[1,2]]))
--------------------------------------------------------------------------------
/romp/predict/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Arthur151/ROMP/a8558aed480af850756f84e2a7c787e359bddbd0/romp/predict/__init__.py
--------------------------------------------------------------------------------
/scripts/V1_hrnet_3dpwft.sh:
--------------------------------------------------------------------------------
1 | TRAIN_CONFIGS='configs/v1_hrnet_3dpw_ft.yml'
2 |
3 | GPUS=$(cat $TRAIN_CONFIGS | shyaml get-value ARGS.gpu)
4 | DATASET=$(cat $TRAIN_CONFIGS | shyaml get-value ARGS.dataset)
5 | TAB=$(cat $TRAIN_CONFIGS | shyaml get-value ARGS.tab)
6 |
7 | CUDA_VISIBLE_DEVICES=${GPUS} nohup python -u -m romp.train --configs_yml=${TRAIN_CONFIGS} > 'log/'${TAB}'_'${DATASET}'_g'${GPUS}.log 2>&1 &
--------------------------------------------------------------------------------
/scripts/V1_resnet_3dpwft.sh:
--------------------------------------------------------------------------------
1 | TRAIN_CONFIGS='configs/v1_resnet_3dpw_ft.yml'
2 |
3 | GPUS=$(cat $TRAIN_CONFIGS | shyaml get-value ARGS.gpu)
4 | DATASET=$(cat $TRAIN_CONFIGS | shyaml get-value ARGS.dataset)
5 | TAB=$(cat $TRAIN_CONFIGS | shyaml get-value ARGS.tab)
6 |
7 | CUDA_VISIBLE_DEVICES=${GPUS} nohup python -u -m romp.train --configs_yml=${TRAIN_CONFIGS} > 'log/'${TAB}'_'${DATASET}'_g'${GPUS}.log 2>&1 &
--------------------------------------------------------------------------------
/scripts/V1_train.sh:
--------------------------------------------------------------------------------
1 | TRAIN_CONFIGS='configs/v1.yml'
2 |
3 | GPUS=$(cat $TRAIN_CONFIGS | shyaml get-value ARGS.gpu)
4 | DATASET=$(cat $TRAIN_CONFIGS | shyaml get-value ARGS.dataset)
5 | TAB=$(cat $TRAIN_CONFIGS | shyaml get-value ARGS.tab)
6 |
7 | CUDA_VISIBLE_DEVICES=${GPUS} python -u -m romp.train --configs_yml=${TRAIN_CONFIGS}
8 | #CUDA_VISIBLE_DEVICES=${GPUS} nohup python -u -m romp.train --configs_yml=${TRAIN_CONFIGS} > 'log/'${TAB}'_'${DATASET}'_g'${GPUS}.log 2>&1 &
--------------------------------------------------------------------------------
/scripts/V1_train_resnet.sh:
--------------------------------------------------------------------------------
1 | TRAIN_CONFIGS='configs/v1_resnet.yml'
2 |
3 | GPUS=$(cat $TRAIN_CONFIGS | shyaml get-value ARGS.gpu)
4 | DATASET=$(cat $TRAIN_CONFIGS | shyaml get-value ARGS.dataset)
5 | TAB=$(cat $TRAIN_CONFIGS | shyaml get-value ARGS.tab)
6 |
7 | CUDA_VISIBLE_DEVICES=${GPUS} python -u -m romp.train --configs_yml=${TRAIN_CONFIGS}
8 | #CUDA_VISIBLE_DEVICES=${GPUS} nohup python -u -m romp.train --configs_yml=${TRAIN_CONFIGS} > 'log/'${TAB}'_'${DATASET}'_g'${GPUS}.log 2>&1 &
--------------------------------------------------------------------------------
/scripts/V6_ft.sh:
--------------------------------------------------------------------------------
1 | TRAIN_CONFIGS='configs/v6_ft.yml'
2 |
3 | GPUS=$(cat $TRAIN_CONFIGS | shyaml get-value ARGS.gpu)
4 | DATASET=$(cat $TRAIN_CONFIGS | shyaml get-value ARGS.dataset)
5 | TAB=$(cat $TRAIN_CONFIGS | shyaml get-value ARGS.tab)
6 |
7 | CUDA_VISIBLE_DEVICES=${GPUS} python -u -m romp.train --gpu=${GPUS} --configs_yml=${TRAIN_CONFIGS}
8 | #CUDA_VISIBLE_DEVICES=${GPUS} nohup python -u -m romp.train --gpu=${GPUS} --configs_yml=${TRAIN_CONFIGS} > 'log/'${TAB}'_'${DATASET}'_g'${GPUS}.log 2>&1 &
--------------------------------------------------------------------------------
/scripts/V6_train.sh:
--------------------------------------------------------------------------------
1 | TRAIN_CONFIGS='configs/v6_train.yml'
2 |
3 | GPUS=$(cat $TRAIN_CONFIGS | shyaml get-value ARGS.gpu)
4 | DATASET=$(cat $TRAIN_CONFIGS | shyaml get-value ARGS.dataset)
5 | TAB=$(cat $TRAIN_CONFIGS | shyaml get-value ARGS.tab)
6 |
7 | CUDA_VISIBLE_DEVICES=${GPUS} python -u -m romp.train --configs_yml=${TRAIN_CONFIGS}
8 | #CUDA_VISIBLE_DEVICES=${GPUS} nohup python -u -m romp.train --configs_yml=${TRAIN_CONFIGS} > 'log/'${TAB}'_'${DATASET}'_g'${GPUS}.log 2>&1 &
--------------------------------------------------------------------------------
/scripts/export_fbx.sh:
--------------------------------------------------------------------------------
1 | python romp/exports/convert_fbx.py --input=demo/videos/action_frames/1_results/1_ts_results.npz --output=demo/videos/a1.fbx --gender=female
--------------------------------------------------------------------------------
/scripts/image.sh:
--------------------------------------------------------------------------------
1 | CUDA_VISIBLE_DEVICES=0 python -u -m romp.predict.image --configs_yml='configs/image.yml'
2 |
--------------------------------------------------------------------------------
/scripts/launch_vis_server.sh:
--------------------------------------------------------------------------------
1 | WEBCAM_CONFIGS='configs/webcam.yml'
2 |
3 | python -u -m romp.lib.visualization.vis_server --configs_yml=${WEBCAM_CONFIGS}
--------------------------------------------------------------------------------
/scripts/pretrain.sh:
--------------------------------------------------------------------------------
1 |
2 | TRAIN_CONFIGS='configs/pretrain.yml'
3 |
4 | GPUS=$(cat $TRAIN_CONFIGS | shyaml get-value ARGS.gpu)
5 | DATASET=$(cat $TRAIN_CONFIGS | shyaml get-value ARGS.dataset)
6 | TAB=$(cat $TRAIN_CONFIGS | shyaml get-value ARGS.tab)
7 |
8 | CUDA_VISIBLE_DEVICES=${GPUS} nohup python -u -m romp.pretrain --configs_yml=${TRAIN_CONFIGS} > 'log/'${TAB}'_'${DATASET}'_g'${GPUS}.log 2>&1 &
--------------------------------------------------------------------------------
/scripts/run.sh:
--------------------------------------------------------------------------------
1 |
2 | IMAGE_MODE=1
3 | VIDEO_MODE=0
4 | WEBCAM_MODE=0
5 | EVALUATION_MODE=0
6 |
7 | IMAGE_CONFIGS='configs/image.yml'
8 | VIDEO_CONFIGS='configs/video.yml'
9 | WEBCAM_CONFIGS='configs/webcam.yml'
10 | EVALUATION_CONFIGS='configs/eval_3dpw.yml'
11 |
12 | if [ "$IMAGE_MODE" = 1 ]
13 | then
14 | GPUS=$(cat $IMAGE_CONFIGS | shyaml get-value ARGS.gpu)
15 | CUDA_VISIBLE_DEVICES=${GPUS} python -u -m romp.predict.image --configs_yml=${IMAGE_CONFIGS}
16 | elif [ "$VIDEO_MODE" = 1 ]
17 | then
18 | GPUS=$(cat $VIDEO_CONFIGS | shyaml get-value ARGS.gpu)
19 | CUDA_VISIBLE_DEVICES=${GPUS} python -u -m romp.predict.video --configs_yml=${VIDEO_CONFIGS}
20 | elif [ "$WEBCAM_MODE" = 1 ]
21 | then
22 | GPUS=$(cat $WEBCAM_CONFIGS | shyaml get-value ARGS.gpu)
23 | CUDA_VISIBLE_DEVICES=${GPUS} python -u -m romp.predict.webcam --configs_yml=${WEBCAM_CONFIGS}
24 | elif [ "$EVALUATION_MODE" = 1 ]
25 | then
26 | GPUS=$(cat $EVALUATION_CONFIGS | shyaml get-value ARGS.gpu)
27 | CUDA_VISIBLE_DEVICES=${GPUS} python -u -m romp.benchmarks_evaluation --configs_yml=${EVALUATION_CONFIGS}
28 | fi
--------------------------------------------------------------------------------
/scripts/setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | pip install -r requirements.txt
3 |
4 | pip uninstall smplx
5 | cd ../models/smplx
6 | python setup.py install
7 |
8 | cd ../manopth
9 | python setup.py install
10 |
11 | cd ../../tools/
12 | git clone https://github.com/liruilong940607/OCHumanApi
13 | cd OCHumanApi
14 | make install
15 | git clone https://github.com/Jeff-sjtu/CrowdPose
16 | cd CrowdPose/crowdpose-api/PythonAPI
17 | sh install.sh
18 | cd ../../
19 |
20 | cd ..
21 | # for centos
22 | #sudo yum install libXext libSM libXrender freeglut-devel
23 |
24 | #for ubuntu: use pyrender in OSMesa mode
25 | sudo apt update
26 | sudo apt-get install libsm6 libxrender1 libfontconfig1 freeglut3-dev
27 | sudo apt --fix-broken install
28 | sudo wget https://github.com/mmatl/travis_debs/raw/master/xenial/mesa_18.3.3-0.deb
29 | sudo dpkg -i ./mesa_18.3.3-0.deb || true
30 | sudo apt install -f
31 |
32 | git clone https://github.com/mmatl/pyopengl.git
33 | pip install ./pyopengl
34 |
35 | cd ../src
36 |
--------------------------------------------------------------------------------
/scripts/test.sh:
--------------------------------------------------------------------------------
1 |
2 | TEST_CONFIGS='configs/test.yml'
3 |
4 | GPUS=$(cat $TEST_CONFIGS | shyaml get-value ARGS.gpu)
5 | CUDA_VISIBLE_DEVICES=${GPUS} python -m romp.test --configs_yml=${TEST_CONFIGS}
--------------------------------------------------------------------------------
/scripts/test_3dpwchallenge.sh:
--------------------------------------------------------------------------------
1 |
2 | TEST_CONFIGS='configs/test.yml'
3 |
4 | GPUS=$(cat $TEST_CONFIGS | shyaml get-value ARGS.gpu)
5 | CUDA_VISIBLE_DEVICES=${GPUS} python -u lib/evaluation/collect_3DPW_results.py --configs_yml=${TEST_CONFIGS}
--------------------------------------------------------------------------------
/scripts/train_distributed.sh:
--------------------------------------------------------------------------------
1 | # kernprof -v -l --persp
2 | EVAL_MODE=0
3 | TEST_MODE=0
4 | SUBMIT_MODE=0
5 |
6 | TRAIN_CONFIGS='configs/v7.yml'
7 |
8 | GPUS=$(cat $TRAIN_CONFIGS | shyaml get-value ARGS.gpu)
9 | DATASET=$(cat $TRAIN_CONFIGS | shyaml get-value ARGS.dataset)
10 | TAB=$(cat $TRAIN_CONFIGS | shyaml get-value ARGS.tab)
11 | #CUDA_VISIBLE_DEVICES=${GPUS} torchrun --nproc_per_node=4 -m romp.train --configs_yml=${TRAIN_CONFIGS} --distributed_training=1
12 | CUDA_VISIBLE_DEVICES=${GPUS} nohup torchrun --nproc_per_node=4 -m romp.train --configs_yml=${TRAIN_CONFIGS} --distributed_training=1 > '../log/'${TAB}'_'${DATASET}'_g'${GPUS}.log 2>&1 &
13 |
--------------------------------------------------------------------------------
/scripts/video.sh:
--------------------------------------------------------------------------------
1 | CUDA_VISIBLE_DEVICES=0 python -u -m romp.predict.video --configs_yml='configs/video.yml'
2 |
--------------------------------------------------------------------------------
/scripts/webcam.sh:
--------------------------------------------------------------------------------
1 | CUDA_VISIBLE_DEVICES=0 python -u -m romp.predict.webcam --configs_yml='configs/webcam.yml'
2 |
--------------------------------------------------------------------------------
/scripts/webcam_blender.sh:
--------------------------------------------------------------------------------
1 | python -u -m romp.predict.webcam --configs_yml='configs/webcam_blender.yml'
--------------------------------------------------------------------------------
/simple_romp/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | This code base itself is MIT, but please follow the license for SMPL, MoSh data,
4 | and the respective dataset.
5 |
6 | Copyright (c) 2022 Yu Sun
7 |
8 | Permission is hereby granted, free of charge, to any person obtaining a copy
9 | of this software and associated documentation files (the "Software"), to deal
10 | in the Software without restriction, including without limitation the rights
11 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 | copies of the Software, and to permit persons to whom the Software is
13 | furnished to do so, subject to the following conditions:
14 |
15 | The above copyright notice and this permission notice shall be included in all
16 | copies or substantial portions of the Software.
17 |
18 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 | SOFTWARE.
25 |
26 | Publications using the Model & Software
27 | You acknowledge that the Model & Software is a valuable scientific resource and agree to appropriately reference
28 | the following paper in any publication making use of the Model & Software.
29 |
30 | Citation:
31 |
32 | @InProceedings{ROMP,
33 | author = {Sun, Yu and Bao, Qian and Liu, Wu and Fu, Yili and Michael J., Black and Mei, Tao},
34 | title = {Monocular, One-stage, Regression of Multiple 3D People},
35 | booktitle = {ICCV},
36 | month = {October},
37 | year = {2021}
38 | }
39 |
--------------------------------------------------------------------------------
/simple_romp/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include LICENSE
2 | include README.md
3 | global-include *.pyx
4 | global-include *.pxd
5 | global-include *.cpp
6 | global-include *.h
7 |
--------------------------------------------------------------------------------
/simple_romp/__init__.py:
--------------------------------------------------------------------------------
1 | from romp.main import ROMP, romp_settings
2 | from bev.main import BEV, bev_settings
3 | from romp_visualizer.sim3drender import Sim3DR
4 | from romp.utils import collect_frame_path, WebcamVideoStream, save_results
--------------------------------------------------------------------------------
/simple_romp/bev/__init__.py:
--------------------------------------------------------------------------------
1 | from .main import BEV, bev_settings
--------------------------------------------------------------------------------
/simple_romp/bev/split2process.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2
3 | import torch
4 | from .post_parser import remove_subjects
5 |
6 | def padding_image_overlap(image, overlap_ratio=0.46):
7 | h, w = image.shape[:2]
8 | pad_length = int(h* overlap_ratio)
9 | pad_w = w+2*pad_length
10 | pad_image = np.zeros((h, pad_w, 3), dtype=np.uint8)
11 | top, left = 0, pad_length
12 | bottom, right = h, w+pad_length
13 | pad_image[top:bottom, left:right] = image
14 |
15 |     # BEV takes a square input, so convert top/bottom as if the image had been padded to a square
16 | pad_height = (w - h)//2
17 | top = pad_height
18 | bottom = w - top
19 | left = 0
20 | right = w
21 | image_pad_info = torch.Tensor([top, bottom, left, right, h, w])
22 | return pad_image, image_pad_info, pad_length
23 |
24 | def get_image_split_plan(image, overlap_ratio=0.46):
25 | h, w = image.shape[:2]
26 | aspect_ratio = w / h
27 | slide_time = int(np.ceil((aspect_ratio - 1) / (1 - overlap_ratio))) + 1
28 |
29 | crop_box = [] # left, right, top, bottom
30 | move_step = (1 - overlap_ratio) * h
31 | for ind in range(slide_time):
32 | if ind == (slide_time-1):
33 | left = w-h
34 | else:
35 | left = move_step * ind
36 | right = left+h
37 | crop_box.append([left, right, 0, h])
38 |
39 | return np.array(crop_box).astype(np.int32)
40 |
41 | def exclude_boudary_subjects(outputs, drop_boundary_ratio, ptype='left', torlerance=0.05):
42 | if ptype=='left':
43 | drop_mask = outputs['cam'][:, 2] > (1 - drop_boundary_ratio + torlerance)
44 | elif ptype=='right':
45 | drop_mask = outputs['cam'][:, 2] < (drop_boundary_ratio - 1 - torlerance)
46 | remove_subjects(outputs, torch.where(drop_mask)[0])
47 |
48 | def convert_crop_cam_params2full_image(cam_params, crop_bbox, image_shape):
49 | h, w = image_shape
50 | # adjust scale, cam 3: depth, y, x
51 | scale_adjust = (crop_bbox[[1,3]]-crop_bbox[[0,2]]).max() / max(h, w)
52 | cam_params *= scale_adjust
53 |
54 | # adjust x
55 | # crop_bbox[:2] -= pad_length
56 | bbox_mean_x = crop_bbox[:2].mean()
57 | cam_params[:,2] += bbox_mean_x / (w /2) - 1
58 | return cam_params
59 |
60 | def collect_outputs(outputs, all_outputs):
61 | keys = list(outputs.keys())
62 | for key in keys:
63 | if key not in all_outputs:
64 | all_outputs[key] = outputs[key]
65 | else:
66 | if key in ['smpl_face']:
67 | continue
68 | if key in ['center_map']:
69 | all_outputs[key] = torch.cat([all_outputs[key], outputs[key]],3)
70 | continue
71 | if key in ['center_map_3d']:
72 | all_outputs[key] = torch.cat([all_outputs[key], outputs[key]],2)
73 | continue
74 | all_outputs[key] = torch.cat([all_outputs[key], outputs[key]],0)
75 |
--------------------------------------------------------------------------------
/simple_romp/build.sh:
--------------------------------------------------------------------------------
1 | python setup.py sdist bdist_wheel --plat-name=manylinux1_x86_64
2 | #python setup_trace.py sdist bdist_wheel --plat-name=manylinux1_x86_64
3 |
--------------------------------------------------------------------------------
/simple_romp/doc/export.md:
--------------------------------------------------------------------------------
1 | ## Export
2 |
3 | In this part, we introduce how to export .fbx / .glb / .bvh animation files from simple-romp predictions.
4 |
5 | ### Installation
6 |
7 | The Blender Python API (bpy) requires a Python 3.7 environment, so we first create one and install the prerequisites via
8 | ```
9 | conda create -n romp_export python==3.7
10 | conda activate romp_export
11 | pip install future-fstrings mathutils==2.81.2
12 | ```
13 | Then, please follow the instructions at https://github.com/TylerGubala/blenderpy/releases to install bpy.
14 | For example, Ubuntu users can first download the [bpy .whl package](https://github.com/TylerGubala/blenderpy/releases/download/v2.91a0/bpy-2.91a0-cp37-cp37m-manylinux2014_x86_64.whl) and then install it via
15 | ```
16 | pip install /path/to/downloaded/bpy-2.91a0-cp37-cp37m-manylinux2014_x86_64.whl && bpy_post_install
17 | ```
18 |
19 | ### Usage
20 |
21 | Please change the input / output paths in simple_romp/export.sh, for instance:
22 | ```
23 | python tools/convert2fbx.py --input=/home/yusun/BEV_results/video_results.npz --output=/home/yusun/BEV_results/dance.fbx --gender=female
24 | ```
25 | You can also specify the subject ID of the person whose motion you want to export, so that other interacting subjects are excluded, via
26 | ```
27 | python tools/convert2fbx.py --input=/home/yusun/BEV_results/video_results.npz --output=/home/yusun/BEV_results/dance.fbx --gender=female --subject_id=1
28 | ```
29 |
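Note: if you are not sure which subject IDs exist in a result file, you can inspect the saved .npz before exporting. The snippet below is only a rough sketch; it assumes the predictions are stored as a pickled per-frame dictionary whose entries contain a ```track_ids``` field, which may differ between simple-romp versions, so please adapt it to the file you actually have.
```
import numpy as np

# Illustrative only: the path and the 'results' / 'track_ids' layout are assumptions.
data = np.load('/home/yusun/BEV_results/video_results.npz', allow_pickle=True)
key = 'results' if 'results' in data.files else data.files[0]
results = data[key][()]

track_ids = set()
for frame_name, frame_result in results.items():
    # collect every track id that appears in any frame
    if isinstance(frame_result, dict) and 'track_ids' in frame_result:
        track_ids.update(np.array(frame_result['track_ids']).reshape(-1).tolist())
print('frames:', len(results), 'subject ids found:', sorted(track_ids))
```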
--------------------------------------------------------------------------------
/simple_romp/evaluation/RH_evaluation/__init__.py:
--------------------------------------------------------------------------------
1 | from .evaluation import RH_Evaluation
--------------------------------------------------------------------------------
/simple_romp/evaluation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Arthur151/ROMP/a8558aed480af850756f84e2a7c787e359bddbd0/simple_romp/evaluation/__init__.py
--------------------------------------------------------------------------------
/simple_romp/export.sh:
--------------------------------------------------------------------------------
1 | python tools/convert2fbx.py --input=/home/yusun/BEV_results/video_results.npz --output=/home/yusun/BEV_results/dance.bvh --gender=female
--------------------------------------------------------------------------------
/simple_romp/reinstall.sh:
--------------------------------------------------------------------------------
1 | pip uninstall simple-romp
2 | python setup.py install
3 |
--------------------------------------------------------------------------------
/simple_romp/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | opencv-python
3 | cython
4 | torch
5 | lapx
--------------------------------------------------------------------------------
/simple_romp/romp/__init__.py:
--------------------------------------------------------------------------------
1 | from .main import ROMP, romp_settings
2 | from .utils import WebcamVideoStream, ResultSaver
3 |
--------------------------------------------------------------------------------
/simple_romp/run.sh:
--------------------------------------------------------------------------------
1 | romp --mode=webcam --show
--------------------------------------------------------------------------------
/simple_romp/setup.py:
--------------------------------------------------------------------------------
1 | import setuptools
2 | from distutils.core import setup, Extension
3 | from Cython.Build import cythonize
4 | import numpy
5 |
6 | with open("README.md", "r", encoding="utf-8") as fh:
7 | long_description = fh.read()
8 |
9 | requireds = ["opencv-python","torch",
10 | 'setuptools>=18.0.0',
11 | 'cython',
12 | 'numpy>=1.21.0',
13 | 'typing-extensions>=4.1',
14 | 'scipy',
15 | 'lapx']
16 |
17 | setuptools.setup(
18 | name='simple_romp',
19 | version='1.1.4',
20 | author="Yu Sun",
21 | author_email="yusunhit@gmail.com",
22 | setup_requires=[
23 | # Setuptools 18.0 properly handles Cython extensions.
24 | 'setuptools>=18.0.0',
25 | 'cython',
26 | 'numpy>=1.21.0',
27 | 'typing-extensions>=4.1',
28 | 'scipy',
29 | 'lapx'],
30 | install_requires=requireds,
31 | description="ROMP [ICCV21], BEV [CVPR22], TRACE [CVPR23]",
32 | long_description=long_description,
33 | long_description_content_type="text/markdown",
34 | url="https://github.com/Arthur151/ROMP",
35 | packages=[
36 | 'romp',
37 | 'vis_human',
38 | 'vis_human.sim3drender',
39 | 'vis_human.sim3drender.lib',
40 | 'bev',
41 | 'trace2',
42 | 'trace2.tracker',
43 | 'trace2.models',
44 | 'trace2.models.raft',
45 | 'trace2.models.raft.utils',
46 | 'trace2.models.deform_conv',
47 | 'trace2.models.deform_conv.functions',
48 | 'trace2.results_parser',
49 | 'trace2.evaluation',
50 | 'trace2.evaluation.dynacam_evaluation',
51 | 'trace2.evaluation.TrackEval',
52 | 'trace2.evaluation.TrackEval.trackeval',
53 | 'trace2.evaluation.TrackEval.trackeval.metrics',
54 | 'trace2.evaluation.TrackEval.trackeval.datasets',
55 | 'trace2.evaluation.TrackEval.trackeval.baselines',
56 | 'trace2.utils',
57 | 'tracker'],
58 | ext_modules=cythonize([Extension("Sim3DR_Cython",
59 | sources=["vis_human/sim3drender/lib/rasterize.pyx",
60 | "vis_human/sim3drender/lib/rasterize_kernel.cpp"],
61 | language='c++',
62 | include_dirs=[numpy.get_include()],
63 | extra_compile_args=["-std=c++11"])]),
64 | include_package_data=True,
65 | classifiers=[
66 | "Programming Language :: Python :: 3",
67 | "License :: Other/Proprietary License",
68 | "Operating System :: OS Independent",
69 | ],
70 | project_urls={
71 | "Bug Tracker": "https://github.com/Arthur151/ROMP/issues",
72 | },
73 | entry_points={
74 | "console_scripts": [
75 | "romp=romp.main:main",
76 | "bev=bev.main:main",
77 | "trace2=trace2.main:main",
78 | "romp.prepare_smpl=romp.pack_smpl_info:main",
79 | "bev.prepare_smil=bev.pack_smil_info:main",
80 | ],
81 | },
82 | )
83 |
--------------------------------------------------------------------------------
/simple_romp/tools/convert_checkpoints.py:
--------------------------------------------------------------------------------
1 | from os import remove
2 |
3 | import torch
4 | import sys
5 |
6 | def remove_prefix(state_dict, prefix='module.', remove_keys=['_result_parser', '_calc_loss']):
7 | keys = list(state_dict.keys())
8 | print('original keys:', keys)
9 | for key in keys:
10 | exist_flag = True
11 | for rkey in remove_keys:
12 | if rkey in key:
13 | del state_dict[key]
14 | exist_flag = False
15 | if not exist_flag:
16 | continue
17 | if prefix in key:
18 | state_dict[key.replace(prefix, '')] = state_dict[key]
19 | del state_dict[key]
20 |
21 | keys = list(state_dict.keys())
22 | print('new keys:', keys)
23 | return state_dict
24 |
25 | if __name__ == '__main__':
26 | model_path = sys.argv[1]
27 | save_path = sys.argv[2]
28 | state_dict = remove_prefix(torch.load(model_path), prefix='module.')
29 | torch.save(state_dict, save_path)
30 |
--------------------------------------------------------------------------------
/simple_romp/trace2/__init__.py:
--------------------------------------------------------------------------------
1 | #from .main import TRACE, trace_settings
2 |
--------------------------------------------------------------------------------
/simple_romp/trace2/eval.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 |
4 | from .main import TRACE
5 | from .utils.eval_utils import update_eval_seq_cfgs, get_evaluation_sequence_dict
6 | from .utils.infer_settings import trace_settings
7 | from .utils.utils import preds_save_paths
8 |
9 | from .evaluation.eval_dynacam import evaluate_panorama, evaluate_translation
10 | from .evaluation.evaluate_tracking import evaluate_trackers_mupots, evaluate_trackers_dyna3dpw
11 | from .evaluation.eval_3DPW import evaluate_3dpw_results
12 |
13 | datasets_dir = {
14 | 'DynaCam-Panorama': '/home/yusun/DataCenter/my_datasets/DynaCam',
15 | 'DynaCam-Translation': '/home/yusun/DataCenter/my_datasets/DynaCam',
16 | 'mupots': '/home/yusun/DataCenter/datasets/MultiPersonTestSet',
17 | 'Dyna3DPW': '/home/yusun/DataCenter/datasets/Dyna3DPW',
18 | '3DPW': '/home/yusun/DataCenter/datasets/3DPW',}
19 |
20 | eval_functions = {
21 | 'DynaCam-Panorama': evaluate_panorama, 'DynaCam-Translation': evaluate_translation, \
22 | 'mupots': evaluate_trackers_mupots, 'Dyna3DPW': evaluate_trackers_dyna3dpw, '3DPW': evaluate_3dpw_results}
23 |
24 | class Evaluator(TRACE):
25 | def __init__(self, args):
26 | super(Evaluator, self).__init__(args)
27 |
28 | def update_sequence_cfs(self, seq_name):
29 | return update_eval_seq_cfgs(seq_name, self.default_seq_cfgs, ds_name=self.eval_dataset)
30 |
31 | def check_load_previous_results(self, seq_name):
32 | save_paths = preds_save_paths(self.results_save_dir, prefix=seq_name)
33 | return os.path.exists(save_paths.seq_results_save_path)
34 |
35 | def main():
36 | args = trace_settings()
37 | args.results_save_dir += f'-{args.eval_dataset}'
38 | args.save_video=False
39 | evaluator = Evaluator(args)
40 |
41 | sequence_dict = get_evaluation_sequence_dict(datasets=args.eval_dataset, dataset_dir=datasets_dir[args.eval_dataset])
42 | for seq_name, frame_paths in sequence_dict.items():
43 | if evaluator.check_load_previous_results(os.path.basename(seq_name)): #and os.path.basename(seq_name) not in ['TS16']:
44 | continue
45 | outputs, tracking_results, kp3d_results, imgpaths = evaluator({seq_name: frame_paths})
46 | evaluator.save_results(outputs, tracking_results, kp3d_results, imgpaths)
47 |
48 | eval_functions[args.eval_dataset](args.results_save_dir, datasets_dir[args.eval_dataset], vis=False)
49 |
50 | if __name__ == '__main__':
51 | main()
--------------------------------------------------------------------------------
/simple_romp/trace2/evaluation/TrackEval/.gitignore:
--------------------------------------------------------------------------------
1 | gt_data/*
2 | !gt_data/Readme.md
3 | tracker_output/*
4 | !tracker_output/Readme.md
5 | output/*
6 | data/*
7 | !output/Readme.md
8 | **/__pycache__
9 | .idea
10 | error_log.txt
--------------------------------------------------------------------------------
/simple_romp/trace2/evaluation/TrackEval/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 Jonathon Luiten
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/simple_romp/trace2/evaluation/TrackEval/docs/How_To/Add_a_new_metric.md:
--------------------------------------------------------------------------------
1 | # How to add a new or custom family of evaluation metrics to TrackEval
2 |
3 | - Create your metric's code in a new file under ```trackeval/metrics/``` (e.g. ```trackeval/metrics/<your_metric>.py```).
4 | - It's probably easiest to start by copying an existing metric and editing it; ```trackeval/metrics/identity.py``` is probably the simplest.
5 | - Your metric should be a class that inherits from the ```trackeval.metrics._base_metric._BaseMetric``` class.
6 | - Define an ```__init__``` function that declares the different ```fields``` (values) that your metric will calculate. See ```trackeval/metrics/_base_metric.py``` for a list of currently used field types. Feel free to add new types.
7 | - Put the code that actually calculates your metric for a single sequence and a single class in a function called ```eval_sequence```, which takes a data dictionary as input and returns a results dictionary as output.
8 | - Define functions for how to combine your metric field values a) over sequences (```combine_sequences```), b) over classes (```combine_classes_class_averaged```), and c) over classes weighted by the number of detections (```combine_classes_det_averaged```).
9 | - We find a helper such as the ```_compute_final_fields``` function used by the current metrics convenient, because it is typically needed both for the per-sequence calculation and for the different combination functions; however, this is not required.
10 | - Register your new metric by adding it to ```trackeval/metrics/__init__.py```.
11 | - Your new metric can then be used by adding the metric class to the list of metrics which is passed to the evaluator (see files in ```scripts/*```); a minimal sketch of such a metric is given below.
12 |
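To make the steps above concrete, here is a minimal sketch of a custom metric, modelled on the structure of ```trackeval/metrics/count.py```. The file name ```mean_dets.py```, the class name ```MeanDets```, its single field, and the use of ```float_fields``` are illustrative assumptions rather than part of TrackEval.
```
# A hypothetical trackeval/metrics/mean_dets.py, modelled on count.py.
from ._base_metric import _BaseMetric
from .. import _timing


class MeanDets(_BaseMetric):
    """Toy metric: average number of tracker detections per timestep."""

    def __init__(self, config=None):
        super().__init__()
        # Declare the fields this metric calculates (assumed to use the float field type).
        self.float_fields = ['MeanDets']
        self.fields = self.float_fields
        self.summary_fields = self.fields

    @_timing.time
    def eval_sequence(self, data):
        # Calculate the metric for a single sequence and a single class.
        return {'MeanDets': data['num_tracker_dets'] / max(data['num_timesteps'], 1)}

    def combine_sequences(self, all_res):
        # Combine per-sequence values; here a plain average over sequences.
        return {'MeanDets': sum(r['MeanDets'] for r in all_res.values()) / max(len(all_res), 1)}

    def combine_classes_class_averaged(self, all_res, ignore_empty_classes=None):
        return self.combine_sequences(all_res)

    def combine_classes_det_averaged(self, all_res):
        return self.combine_sequences(all_res)
```
After registering it in ```trackeval/metrics/__init__.py```, it can be added to the ```metrics_list``` passed to the evaluator in the scripts, e.g. ```metrics_list = [trackeval.metrics.HOTA(), MeanDets()]```.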
--------------------------------------------------------------------------------
/simple_romp/trace2/evaluation/TrackEval/minimum_requirements.txt:
--------------------------------------------------------------------------------
1 | scipy==1.4.1
2 | numpy==1.18.1
3 |
--------------------------------------------------------------------------------
/simple_romp/trace2/evaluation/TrackEval/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = [
3 | "setuptools>=42",
4 | "wheel"
5 | ]
6 | build-backend = "setuptools.build_meta"
7 |
--------------------------------------------------------------------------------
/simple_romp/trace2/evaluation/TrackEval/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy==1.18.1
2 | scipy==1.4.1
3 | pycocotools==2.0.2
4 | matplotlib==3.2.1
5 | opencv_python==4.4.0.46
6 | scikit_image==0.16.2
7 | pytest==6.0.1
8 | Pillow==8.1.2
9 |
--------------------------------------------------------------------------------
/simple_romp/trace2/evaluation/TrackEval/scripts/comparison_plots.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 |
4 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
5 | import trackeval # noqa: E402
6 |
7 | plots_folder = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'data', 'plots'))
8 | tracker_folder = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'data', 'trackers'))
9 |
10 | # dataset = os.path.join('kitti', 'kitti_2d_box_train')
11 | # classes = ['cars', 'pedestrian']
12 |
13 | dataset = os.path.join('mot_challenge', 'MOT17-train')
14 | classes = ['pedestrian']
15 |
16 | data_fol = os.path.join(tracker_folder, dataset)
17 | trackers = os.listdir(data_fol)
18 | out_loc = os.path.join(plots_folder, dataset)
19 | for cls in classes:
20 | trackeval.plotting.plot_compare_trackers(data_fol, trackers, cls, out_loc)
21 |
--------------------------------------------------------------------------------
/simple_romp/trace2/evaluation/TrackEval/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | name = trackeval
3 | version = 1.0.dev1
4 | author = Jonathon Luiten, Arne Hoffhues
5 | author_email = jonoluiten@gmail.com
6 | description = Code for evaluating object tracking
7 | long_description = file: Readme.md
8 | long_description_content_type = text/markdown
9 | url = https://github.com/JonathonLuiten/TrackEval
10 | project_urls =
11 | Bug Tracker = https://github.com/JonathonLuiten/TrackEval/issues
12 | classifiers =
13 | Programming Language :: Python :: 3
14 | Programming Language :: Python :: 3 :: Only
15 | License :: OSI Approved :: MIT License
16 | Operating System :: OS Independent
17 | Topic :: Scientific/Engineering
18 | license_files = LICENSE
19 |
20 | [options]
21 | install_requires =
22 | numpy
23 | scipy
24 | packages = find:
25 |
26 | [options.packages.find]
27 | include = trackeval*
28 |
--------------------------------------------------------------------------------
/simple_romp/trace2/evaluation/TrackEval/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 |
3 | setup()
4 |
--------------------------------------------------------------------------------
/simple_romp/trace2/evaluation/TrackEval/tests/test_davis.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 | import numpy as np
4 | from multiprocessing import freeze_support
5 |
6 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
7 | import trackeval # noqa: E402
8 |
9 | # Fixes multiprocessing on windows, does nothing otherwise
10 | if __name__ == '__main__':
11 | freeze_support()
12 |
13 |
14 | eval_config = {'USE_PARALLEL': False,
15 | 'NUM_PARALLEL_CORES': 8,
16 | 'PRINT_RESULTS': False,
17 | 'PRINT_CONFIG': True,
18 | 'TIME_PROGRESS': True,
19 | 'DISPLAY_LESS_PROGRESS': True,
20 | 'OUTPUT_SUMMARY': False,
21 | 'OUTPUT_EMPTY_CLASSES': False,
22 | 'OUTPUT_DETAILED': False,
23 | 'PLOT_CURVES': False,
24 | }
25 | evaluator = trackeval.Evaluator(eval_config)
26 | metrics_list = [trackeval.metrics.HOTA(), trackeval.metrics.CLEAR(), trackeval.metrics.Identity(),
27 | trackeval.metrics.JAndF()]
28 |
29 | tests = [
30 | {'SPLIT_TO_EVAL': 'val', 'TRACKERS_TO_EVAL': ['ags']},
31 | ]
32 |
33 | for dataset_config in tests:
34 |
35 | dataset_list = [trackeval.datasets.DAVIS(dataset_config)]
36 | file_loc = os.path.join('davis', 'davis_unsupervised_' + dataset_config['SPLIT_TO_EVAL'])
37 |
38 | raw_results, messages = evaluator.evaluate(dataset_list, metrics_list)
39 |
40 | classes = dataset_list[0].config['CLASSES_TO_EVAL']
41 | tracker = dataset_config['TRACKERS_TO_EVAL'][0]
42 | test_data_loc = os.path.join(os.path.dirname(__file__), '..', 'data', 'tests', file_loc)
43 |
44 | for cls in classes:
45 | results = {seq: raw_results['DAVIS'][tracker][seq][cls] for seq in raw_results['DAVIS'][tracker].keys()}
46 | current_metrics_list = metrics_list + [trackeval.metrics.Count()]
47 | metric_names = trackeval.utils.validate_metrics_list(current_metrics_list)
48 |
49 | # Load expected results:
50 | test_data = trackeval.utils.load_detail(os.path.join(test_data_loc, tracker, cls + '_detailed.csv'))
51 |
52 | # Do checks
53 | for seq in test_data.keys():
54 | assert len(test_data[seq].keys()) > 250, len(test_data[seq].keys())
55 |
56 | details = []
57 | for metric, metric_name in zip(current_metrics_list, metric_names):
58 | table_res = {seq_key: seq_value[metric_name] for seq_key, seq_value in results.items()}
59 | details.append(metric.detailed_results(table_res))
60 | res_fields = sum([list(s['COMBINED_SEQ'].keys()) for s in details], [])
61 | res_values = sum([list(s[seq].values()) for s in details], [])
62 | res_dict = dict(zip(res_fields, res_values))
63 |
64 | for field in test_data[seq].keys():
65 | assert np.isclose(res_dict[field], test_data[seq][field]), seq + ': ' + cls + ': ' + field
66 |
67 | print('Tracker %s tests passed' % tracker)
68 | print('All tests passed')
--------------------------------------------------------------------------------
/simple_romp/trace2/evaluation/TrackEval/tests/test_mot17.py:
--------------------------------------------------------------------------------
1 | """ Test to ensure that the code is working correctly.
2 | Runs all metrics on 14 trackers for the MOT Challenge MOT17 benchmark.
3 | """
4 |
5 |
6 | import sys
7 | import os
8 | import numpy as np
9 | from multiprocessing import freeze_support
10 |
11 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
12 | import trackeval # noqa: E402
13 |
14 | # Fixes multiprocessing on windows, does nothing otherwise
15 | if __name__ == '__main__':
16 | freeze_support()
17 |
18 | eval_config = {'USE_PARALLEL': False,
19 | 'NUM_PARALLEL_CORES': 8,
20 | }
21 | evaluator = trackeval.Evaluator(eval_config)
22 | metrics_list = [trackeval.metrics.HOTA(), trackeval.metrics.CLEAR(), trackeval.metrics.Identity()]
23 | test_data_loc = os.path.join(os.path.dirname(__file__), '..', 'data', 'tests', 'mot_challenge', 'MOT17-train')
24 | trackers = [
25 | 'DPMOT',
26 | 'GNNMatch',
27 | 'IA',
28 | 'ISE_MOT17R',
29 | 'Lif_T',
30 | 'Lif_TsimInt',
31 | 'LPC_MOT',
32 | 'MAT',
33 | 'MIFTv2',
34 | 'MPNTrack',
35 | 'SSAT',
36 | 'TracktorCorr',
37 | 'Tracktorv2',
38 | 'UnsupTrack',
39 | ]
40 |
41 | for tracker in trackers:
42 | # Run code on tracker
43 | dataset_config = {'TRACKERS_TO_EVAL': [tracker],
44 | 'BENCHMARK': 'MOT17'}
45 | dataset_list = [trackeval.datasets.MotChallenge2DBox(dataset_config)]
46 | raw_results, messages = evaluator.evaluate(dataset_list, metrics_list)
47 |
48 | results = {seq: raw_results['MotChallenge2DBox'][tracker][seq]['pedestrian'] for seq in
49 | raw_results['MotChallenge2DBox'][tracker].keys()}
50 | current_metrics_list = metrics_list + [trackeval.metrics.Count()]
51 | metric_names = trackeval.utils.validate_metrics_list(current_metrics_list)
52 |
53 | # Load expected results:
54 | test_data = trackeval.utils.load_detail(os.path.join(test_data_loc, tracker, 'pedestrian_detailed.csv'))
55 | assert len(test_data.keys()) == 22, len(test_data.keys())
56 |
57 | # Do checks
58 | for seq in test_data.keys():
59 | assert len(test_data[seq].keys()) > 250, len(test_data[seq].keys())
60 |
61 | details = []
62 | for metric, metric_name in zip(current_metrics_list, metric_names):
63 | table_res = {seq_key: seq_value[metric_name] for seq_key, seq_value in results.items()}
64 | details.append(metric.detailed_results(table_res))
65 | res_fields = sum([list(s['COMBINED_SEQ'].keys()) for s in details], [])
66 | res_values = sum([list(s[seq].values()) for s in details], [])
67 | res_dict = dict(zip(res_fields, res_values))
68 |
69 | for field in test_data[seq].keys():
70 | if not np.isclose(res_dict[field], test_data[seq][field]):
71 | print(tracker, seq, res_dict[field], test_data[seq][field], field)
72 | raise AssertionError
73 |
74 | print('Tracker %s tests passed' % tracker)
75 | print('All tests passed')
76 |
77 |
--------------------------------------------------------------------------------
/simple_romp/trace2/evaluation/TrackEval/tests/test_mots.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 | import numpy as np
4 | from multiprocessing import freeze_support
5 |
6 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
7 | import trackeval # noqa: E402
8 |
9 | # Fixes multiprocessing on windows, does nothing otherwise
10 | if __name__ == '__main__':
11 | freeze_support()
12 |
13 | eval_config = {'USE_PARALLEL': False,
14 | 'NUM_PARALLEL_CORES': 8,
15 | }
16 | evaluator = trackeval.Evaluator(eval_config)
17 | metrics_list = [trackeval.metrics.HOTA(), trackeval.metrics.CLEAR(), trackeval.metrics.Identity()]
18 |
19 | tests = [
20 | {'DATASET': 'KittiMOTS', 'SPLIT_TO_EVAL': 'val', 'TRACKERS_TO_EVAL': ['trackrcnn']},
21 | {'DATASET': 'MOTSChallenge', 'SPLIT_TO_EVAL': 'train', 'TRACKERS_TO_EVAL': ['TrackRCNN']}
22 | ]
23 |
24 | for dataset_config in tests:
25 |
26 | dataset_name = dataset_config.pop('DATASET')
27 | if dataset_name == 'MOTSChallenge':
28 | dataset_list = [trackeval.datasets.MOTSChallenge(dataset_config)]
29 | file_loc = os.path.join('mot_challenge', 'MOTS-' + dataset_config['SPLIT_TO_EVAL'])
30 | elif dataset_name == 'KittiMOTS':
31 | dataset_list = [trackeval.datasets.KittiMOTS(dataset_config)]
32 | file_loc = os.path.join('kitti', 'kitti_mots_val')
33 | else:
34 | raise Exception('Dataset %s does not exist.' % dataset_name)
35 |
36 | raw_results, messages = evaluator.evaluate(dataset_list, metrics_list)
37 |
38 | classes = dataset_list[0].config['CLASSES_TO_EVAL']
39 | tracker = dataset_config['TRACKERS_TO_EVAL'][0]
40 | test_data_loc = os.path.join(os.path.dirname(__file__), '..', 'data', 'tests', file_loc)
41 |
42 | for cls in classes:
43 | results = {seq: raw_results[dataset_name][tracker][seq][cls] for seq in raw_results[dataset_name][tracker].keys()}
44 | current_metrics_list = metrics_list + [trackeval.metrics.Count()]
45 | metric_names = trackeval.utils.validate_metrics_list(current_metrics_list)
46 |
47 | # Load expected results:
48 | test_data = trackeval.utils.load_detail(os.path.join(test_data_loc, tracker, cls + '_detailed.csv'))
49 |
50 | # Do checks
51 | for seq in test_data.keys():
52 | assert len(test_data[seq].keys()) > 250, len(test_data[seq].keys())
53 |
54 | details = []
55 | for metric, metric_name in zip(current_metrics_list, metric_names):
56 | table_res = {seq_key: seq_value[metric_name] for seq_key, seq_value in results.items()}
57 | details.append(metric.detailed_results(table_res))
58 | res_fields = sum([list(s['COMBINED_SEQ'].keys()) for s in details], [])
59 | res_values = sum([list(s[seq].values()) for s in details], [])
60 | res_dict = dict(zip(res_fields, res_values))
61 |
62 | for field in test_data[seq].keys():
63 | assert np.isclose(res_dict[field], test_data[seq][field]), seq + ': ' + cls + ': ' + field
64 |
65 | print('Tracker %s tests passed' % tracker)
66 | print('All tests passed')
--------------------------------------------------------------------------------
/simple_romp/trace2/evaluation/TrackEval/trackeval/__init__.py:
--------------------------------------------------------------------------------
1 | from .eval import Evaluator
2 | from . import datasets
3 | from . import metrics
4 | from . import plotting
5 | from . import utils
6 |
--------------------------------------------------------------------------------
/simple_romp/trace2/evaluation/TrackEval/trackeval/_timing.py:
--------------------------------------------------------------------------------
1 | from functools import wraps
2 | from time import perf_counter
3 | import inspect
4 |
5 | DO_TIMING = False
6 | DISPLAY_LESS_PROGRESS = False
7 | timer_dict = {}
8 | counter = 0
9 |
10 |
11 | def time(f):
12 | @wraps(f)
13 | def wrap(*args, **kw):
14 | if DO_TIMING:
15 | # Run function with timing
16 | ts = perf_counter()
17 | result = f(*args, **kw)
18 | te = perf_counter()
19 | tt = te-ts
20 |
21 | # Get function name
22 | arg_names = inspect.getfullargspec(f)[0]
23 | if arg_names[0] == 'self' and DISPLAY_LESS_PROGRESS:
24 | return result
25 | elif arg_names[0] == 'self':
26 | method_name = type(args[0]).__name__ + '.' + f.__name__
27 | else:
28 | method_name = f.__name__
29 |
30 | # Record accumulative time in each function for analysis
31 | if method_name in timer_dict.keys():
32 | timer_dict[method_name] += tt
33 | else:
34 | timer_dict[method_name] = tt
35 |
36 | # If code is finished, display timing summary
37 | if method_name == "Evaluator.evaluate":
38 | print("")
39 | print("Timing analysis:")
40 | for key, value in timer_dict.items():
41 | print('%-70s %2.4f sec' % (key, value))
42 | else:
43 | # Get function argument values for printing special arguments of interest
44 | arg_titles = ['tracker', 'seq', 'cls']
45 | arg_vals = []
46 | for i, a in enumerate(arg_names):
47 | if a in arg_titles:
48 | arg_vals.append(args[i])
49 | arg_text = '(' + ', '.join(arg_vals) + ')'
50 |
51 | # Display methods and functions with different indentation.
52 | if arg_names[0] == 'self':
53 | print('%-74s %2.4f sec' % (' '*4 + method_name + arg_text, tt))
54 | elif arg_names[0] == 'test':
55 | pass
56 | else:
57 | global counter
58 | counter += 1
59 | print('%i %-70s %2.4f sec' % (counter, method_name + arg_text, tt))
60 |
61 | return result
62 | else:
63 | # If config["TIME_PROGRESS"] is false, or config["USE_PARALLEL"] is true, run functions normally without timing.
64 | return f(*args, **kw)
65 | return wrap
66 |
--------------------------------------------------------------------------------
/simple_romp/trace2/evaluation/TrackEval/trackeval/baselines/__init__.py:
--------------------------------------------------------------------------------
1 | import baseline_utils
2 | import stp
3 | import non_overlap
4 | import pascal_colormap
5 | import thresholder
6 | import vizualize
--------------------------------------------------------------------------------
/simple_romp/trace2/evaluation/TrackEval/trackeval/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | from .kitti_2d_box import Kitti2DBox
2 | from .kitti_mots import KittiMOTS
3 | from .mot_challenge_2d_box import MotChallenge2DBox
4 | from .mots_challenge import MOTSChallenge
5 | from .bdd100k import BDD100K
6 | from .davis import DAVIS
7 | from .tao import TAO
8 | from .youtube_vis import YouTubeVIS
9 | from .head_tracking_challenge import HeadTrackingChallenge
10 | from .rob_mots import RobMOTS
11 |
--------------------------------------------------------------------------------
/simple_romp/trace2/evaluation/TrackEval/trackeval/datasets/rob_mots_classmap.py:
--------------------------------------------------------------------------------
1 | cls_id_to_name = {
2 | 1: 'person',
3 | 2: 'bicycle',
4 | 3: 'car',
5 | 4: 'motorcycle',
6 | 5: 'airplane',
7 | 6: 'bus',
8 | 7: 'train',
9 | 8: 'truck',
10 | 9: 'boat',
11 | 10: 'traffic light',
12 | 11: 'fire hydrant',
13 | 12: 'stop sign',
14 | 13: 'parking meter',
15 | 14: 'bench',
16 | 15: 'bird',
17 | 16: 'cat',
18 | 17: 'dog',
19 | 18: 'horse',
20 | 19: 'sheep',
21 | 20: 'cow',
22 | 21: 'elephant',
23 | 22: 'bear',
24 | 23: 'zebra',
25 | 24: 'giraffe',
26 | 25: 'backpack',
27 | 26: 'umbrella',
28 | 27: 'handbag',
29 | 28: 'tie',
30 | 29: 'suitcase',
31 | 30: 'frisbee',
32 | 31: 'skis',
33 | 32: 'snowboard',
34 | 33: 'sports ball',
35 | 34: 'kite',
36 | 35: 'baseball bat',
37 | 36: 'baseball glove',
38 | 37: 'skateboard',
39 | 38: 'surfboard',
40 | 39: 'tennis racket',
41 | 40: 'bottle',
42 | 41: 'wine glass',
43 | 42: 'cup',
44 | 43: 'fork',
45 | 44: 'knife',
46 | 45: 'spoon',
47 | 46: 'bowl',
48 | 47: 'banana',
49 | 48: 'apple',
50 | 49: 'sandwich',
51 | 50: 'orange',
52 | 51: 'broccoli',
53 | 52: 'carrot',
54 | 53: 'hot dog',
55 | 54: 'pizza',
56 | 55: 'donut',
57 | 56: 'cake',
58 | 57: 'chair',
59 | 58: 'couch',
60 | 59: 'potted plant',
61 | 60: 'bed',
62 | 61: 'dining table',
63 | 62: 'toilet',
64 | 63: 'tv',
65 | 64: 'laptop',
66 | 65: 'mouse',
67 | 66: 'remote',
68 | 67: 'keyboard',
69 | 68: 'cell phone',
70 | 69: 'microwave',
71 | 70: 'oven',
72 | 71: 'toaster',
73 | 72: 'sink',
74 | 73: 'refrigerator',
75 | 74: 'book',
76 | 75: 'clock',
77 | 76: 'vase',
78 | 77: 'scissors',
79 | 78: 'teddy bear',
80 | 79: 'hair drier',
81 | 80: 'toothbrush'}
--------------------------------------------------------------------------------
/simple_romp/trace2/evaluation/TrackEval/trackeval/metrics/__init__.py:
--------------------------------------------------------------------------------
1 | from .hota import HOTA
2 | from .clear import CLEAR
3 | from .identity import Identity
4 | from .count import Count
5 | from .j_and_f import JAndF
6 | from .track_map import TrackMAP
7 | from .vace import VACE
8 | from .ideucl import IDEucl
--------------------------------------------------------------------------------
/simple_romp/trace2/evaluation/TrackEval/trackeval/metrics/count.py:
--------------------------------------------------------------------------------
1 |
2 | from ._base_metric import _BaseMetric
3 | from .. import _timing
4 |
5 |
6 | class Count(_BaseMetric):
7 | """Class which simply counts the number of tracker and gt detections and ids."""
8 | def __init__(self, config=None):
9 | super().__init__()
10 | self.integer_fields = ['Dets', 'GT_Dets', 'IDs', 'GT_IDs']
11 | self.fields = self.integer_fields
12 | self.summary_fields = self.fields
13 |
14 | @_timing.time
15 | def eval_sequence(self, data):
16 | """Returns counts for one sequence"""
17 | # Get results
18 | res = {'Dets': data['num_tracker_dets'],
19 | 'GT_Dets': data['num_gt_dets'],
20 | 'IDs': data['num_tracker_ids'],
21 | 'GT_IDs': data['num_gt_ids'],
22 | 'Frames': data['num_timesteps']}
23 | return res
24 |
25 | def combine_sequences(self, all_res):
26 | """Combines metrics across all sequences"""
27 | res = {}
28 | for field in self.integer_fields:
29 | res[field] = self._combine_sum(all_res, field)
30 | return res
31 |
32 | def combine_classes_class_averaged(self, all_res, ignore_empty_classes=None):
33 | """Combines metrics across all classes by averaging over the class values"""
34 | res = {}
35 | for field in self.integer_fields:
36 | res[field] = self._combine_sum(all_res, field)
37 | return res
38 |
39 | def combine_classes_det_averaged(self, all_res):
40 | """Combines metrics across all classes by averaging over the detection values"""
41 | res = {}
42 | for field in self.integer_fields:
43 | res[field] = self._combine_sum(all_res, field)
44 | return res
45 |
--------------------------------------------------------------------------------
/simple_romp/trace2/evaluation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Arthur151/ROMP/a8558aed480af850756f84e2a7c787e359bddbd0/simple_romp/trace2/evaluation/__init__.py
--------------------------------------------------------------------------------
/simple_romp/trace2/evaluation/dynacam_evaluation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Arthur151/ROMP/a8558aed480af850756f84e2a7c787e359bddbd0/simple_romp/trace2/evaluation/dynacam_evaluation/__init__.py
--------------------------------------------------------------------------------
/simple_romp/trace2/evaluation/dynacam_evaluation/utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2
3 | import os,sys
4 | import torch
5 | import quaternion
6 |
7 | def joint_mapping(source_format, target_format):
8 | mapping = np.ones(len(target_format),dtype=np.int32)*-1
9 | for joint_name in target_format:
10 | if joint_name in source_format:
11 | mapping[target_format[joint_name]] = source_format[joint_name]
12 | return np.array(mapping)
13 |
14 | SMPL_24 = {
15 | 'Pelvis_SMPL':0, 'L_Hip_SMPL':1, 'R_Hip_SMPL':2, 'Spine_SMPL': 3, 'L_Knee':4, 'R_Knee':5, 'Thorax_SMPL': 6, 'L_Ankle':7, 'R_Ankle':8,'Thorax_up_SMPL':9, \
16 | 'L_Toe_SMPL':10, 'R_Toe_SMPL':11, 'Neck': 12, 'L_Collar':13, 'R_Collar':14, 'SMPL_Head':15, 'L_Shoulder':16, 'R_Shoulder':17,\
17 | 'L_Elbow':18, 'R_Elbow':19, 'L_Wrist': 20, 'R_Wrist': 21, 'L_Hand':22, 'R_Hand':23}
18 | SMPL_Face_Foot_11 = {
19 | 'Nose':24, 'R_Eye':25, 'L_Eye':26, 'R_Ear': 27, 'L_Ear':28, \
20 | 'L_BigToe':29, 'L_SmallToe': 30, 'L_Heel':31, 'R_BigToe':32,'R_SmallToe':33, 'R_Heel':34}
21 | SMPL_EXTRA_9 = {
22 | 'R_Hip': 35, 'L_Hip':36, 'Neck_LSP':37, 'Head_top':38, 'Pelvis':39, 'Thorax_MPII':40, \
23 | 'Spine_H36M':41, 'Jaw_H36M':42, 'Head':43}
24 | SMPL_ALL_44 = {**SMPL_24, **SMPL_Face_Foot_11, **SMPL_EXTRA_9}
25 |
26 | COCO_17 = {
27 | 'Nose':0, 'L_Eye':1, 'R_Eye':2, 'L_Ear':3, 'R_Ear':4, 'L_Shoulder':5, 'R_Shoulder':6, 'L_Elbow':7, 'R_Elbow':8, \
28 | 'L_Wrist': 9, 'R_Wrist':10, 'L_Hip':11, 'R_Hip':12, 'L_Knee':13, 'R_Knee':14, 'L_Ankle':15, 'R_Ankle':16}
29 | GLAMR_26 = {
30 | 'L_Hip': 1, 'R_Hip':2, 'L_Knee':4, 'R_Knee':5, 'L_Ankle':7, 'R_Ankle':8, 'L_Shoulder':20, 'R_Shoulder':21,'L_Elbow':22, 'R_Elbow':23}
31 | glamr_mapping2D = joint_mapping(GLAMR_26, SMPL_ALL_44)
32 |
33 | def rotation_matrix_to_angle_axis(rotmats):
34 | rotmats = rotmats.numpy()
35 | aas = np.array([cv2.Rodrigues(rotmat)[0][:,0] for rotmat in rotmats])
36 | print(aas.shape)
37 | return torch.from_numpy(aas).float()
38 |
39 | def angle_axis_to_rotation_matrix(aas):
40 | aas = aas.numpy()
41 | rotmats = np.array([cv2.Rodrigues(aa)[0] for aa in aas])
42 | print(rotmats.shape)
43 | return torch.from_numpy(rotmats).float()
44 |
45 | def angle2mat(angle):
46 | return quaternion.as_rotation_matrix(quaternion.from_rotation_vector(angle))
47 | def mat2angle(mat):
48 | return quaternion.as_rotation_vector(quaternion.from_rotation_matrix(mat))
49 | def angle2quaternion(angle):
50 | return quaternion.as_float_array(quaternion.from_rotation_vector(angle))
51 |
52 | def search_valid_frame(frame2ind, frame_id):
53 | start_id = sorted(list(frame2ind.keys()))[0]
54 | if frame_id < start_id:
55 | #print('smaller than start_id', start_id)
56 | while 1:
57 | frame_id = frame_id+1
58 | if frame_id in frame2ind:
59 | break
60 | else:
61 | while frame_id > 0:
62 | frame_id = frame_id-1
63 | if frame_id in frame2ind:
64 | break
65 | return frame_id
--------------------------------------------------------------------------------
/simple_romp/trace2/evaluation/evaluation.py:
--------------------------------------------------------------------------------
1 | from .evaluate_tracking import evaluate_trackers
2 | from .eval_kp3ds import eval_kp3ds
3 | from ..utils.eval_utils import adjust_tracking_results
4 |
5 | def evaluate_predictions(ds_name, kp3d_results, tracking_results, tracking_matrix_save_path, eval_hard_seq=False):
6 | eval_results = {}
7 | if ds_name in ['mupots', 'pw3d', 'cmup']:
8 | eval_results.update(eval_kp3ds(kp3d_results, dataset=ds_name, eval_hard_seq=eval_hard_seq))
9 | if ds_name in ['posetrack', 'mupots', 'Dyna3DPW', 'cmup']:
10 | tracking_results = adjust_tracking_results(tracking_results)
11 | eval_results.update(evaluate_trackers(tracking_results, tracking_matrix_save_path, dataset=ds_name, eval_hard_seq=eval_hard_seq))
12 | return eval_results
--------------------------------------------------------------------------------
/simple_romp/trace2/install.sh:
--------------------------------------------------------------------------------
1 | cd models/deform_conv
2 | python setup.py develop
3 | cd ../..
--------------------------------------------------------------------------------
/simple_romp/trace2/models/deform_conv/__init__.py:
--------------------------------------------------------------------------------
1 | from .functions.deform_conv import deform_conv, modulated_deform_conv
2 | from .functions.deform_pool import deform_roi_pooling
3 | from .modules.deform_conv import (DeformConv, ModulatedDeformConv,
4 | DeformConvPack, ModulatedDeformConvPack)
5 | from .modules.deform_pool import (DeformRoIPooling, DeformRoIPoolingPack,
6 | ModulatedDeformRoIPoolingPack)
7 |
8 | __all__ = [
9 | 'DeformConv', 'DeformConvPack', 'ModulatedDeformConv',
10 | 'ModulatedDeformConvPack', 'DeformRoIPooling', 'DeformRoIPoolingPack',
11 | 'ModulatedDeformRoIPoolingPack', 'deform_conv', 'modulated_deform_conv',
12 | 'deform_roi_pooling'
13 | ]
14 |
--------------------------------------------------------------------------------
/simple_romp/trace2/models/deform_conv/functions/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Arthur151/ROMP/a8558aed480af850756f84e2a7c787e359bddbd0/simple_romp/trace2/models/deform_conv/functions/__init__.py
--------------------------------------------------------------------------------
/simple_romp/trace2/models/deform_conv/functions/deform_pool.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch.autograd import Function
3 |
4 | import deform_pool_cuda
5 |
6 |
7 | class DeformRoIPoolingFunction(Function):
8 |
9 | @staticmethod
10 | def forward(ctx,
11 | data,
12 | rois,
13 | offset,
14 | spatial_scale,
15 | out_size,
16 | out_channels,
17 | no_trans,
18 | group_size=1,
19 | part_size=None,
20 | sample_per_part=4,
21 | trans_std=.0):
22 | ctx.spatial_scale = spatial_scale
23 | ctx.out_size = out_size
24 | ctx.out_channels = out_channels
25 | ctx.no_trans = no_trans
26 | ctx.group_size = group_size
27 | ctx.part_size = out_size if part_size is None else part_size
28 | ctx.sample_per_part = sample_per_part
29 | ctx.trans_std = trans_std
30 |
31 | assert 0.0 <= ctx.trans_std <= 1.0
32 | if not data.is_cuda:
33 | raise NotImplementedError
34 |
35 | n = rois.shape[0]
36 | output = data.new_empty(n, out_channels, out_size, out_size)
37 | output_count = data.new_empty(n, out_channels, out_size, out_size)
38 | deform_pool_cuda.deform_psroi_pooling_cuda_forward(
39 | data, rois, offset, output, output_count, ctx.no_trans,
40 | ctx.spatial_scale, ctx.out_channels, ctx.group_size, ctx.out_size,
41 | ctx.part_size, ctx.sample_per_part, ctx.trans_std)
42 |
43 | if data.requires_grad or rois.requires_grad or offset.requires_grad:
44 | ctx.save_for_backward(data, rois, offset)
45 | ctx.output_count = output_count
46 |
47 | return output
48 |
49 | @staticmethod
50 | def backward(ctx, grad_output):
51 | if not grad_output.is_cuda:
52 | raise NotImplementedError
53 |
54 | data, rois, offset = ctx.saved_tensors
55 | output_count = ctx.output_count
56 | grad_input = torch.zeros_like(data)
57 | grad_rois = None
58 | grad_offset = torch.zeros_like(offset)
59 |
60 | deform_pool_cuda.deform_psroi_pooling_cuda_backward(
61 | grad_output, data, rois, offset, output_count, grad_input,
62 | grad_offset, ctx.no_trans, ctx.spatial_scale, ctx.out_channels,
63 | ctx.group_size, ctx.out_size, ctx.part_size, ctx.sample_per_part,
64 | ctx.trans_std)
65 | return (grad_input, grad_rois, grad_offset, None, None, None, None,
66 | None, None, None, None)
67 |
68 |
69 | deform_roi_pooling = DeformRoIPoolingFunction.apply
70 |
--------------------------------------------------------------------------------
/simple_romp/trace2/models/deform_conv/install.sh:
--------------------------------------------------------------------------------
1 | python setup.py develop
2 |
--------------------------------------------------------------------------------
/simple_romp/trace2/models/deform_conv/modules/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Arthur151/ROMP/a8558aed480af850756f84e2a7c787e359bddbd0/simple_romp/trace2/models/deform_conv/modules/__init__.py
--------------------------------------------------------------------------------
/simple_romp/trace2/models/deform_conv/setup.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import os
16 | import platform
17 | import subprocess
18 | import time
19 | from setuptools import Extension, find_packages, setup
20 |
21 | import numpy as np
22 | from torch.utils.cpp_extension import BuildExtension, CUDAExtension
23 |
24 | def make_cuda_ext(name, sources):
25 | return CUDAExtension(
26 | name=name,
27 | sources=[p for p in sources],
28 | extra_compile_args={
29 | 'cxx': [],
30 | 'nvcc': [
31 | '-D__CUDA_NO_HALF_OPERATORS__',
32 | '-D__CUDA_NO_HALF_CONVERSIONS__',
33 | '-D__CUDA_NO_HALF2_OPERATORS__',
34 | ]
35 | })
36 |
37 |
38 | if __name__ == '__main__':
39 | setup(
40 | name='deform_conv',
41 | ext_modules=[
42 | make_cuda_ext(
43 | name='deform_conv_cuda',
44 | sources=[
45 | 'src/deform_conv_cuda.cpp',
46 | 'src/deform_conv_cuda_kernel.cu'
47 | ]),
48 | make_cuda_ext(
49 | name='deform_pool_cuda',
50 | sources=[
51 | 'src/deform_pool_cuda.cpp',
52 | 'src/deform_pool_cuda_kernel.cu'
53 | ]),
54 | ],
55 | cmdclass={'build_ext': BuildExtension},
56 | zip_safe=False)
57 |
58 |
--------------------------------------------------------------------------------
/simple_romp/trace2/models/raft/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Arthur151/ROMP/a8558aed480af850756f84e2a7c787e359bddbd0/simple_romp/trace2/models/raft/__init__.py
--------------------------------------------------------------------------------
/simple_romp/trace2/models/raft/process.py:
--------------------------------------------------------------------------------
1 |
2 | import cv2
3 | import numpy as np
4 | import torch
5 |
6 | from torch import nn
7 | import torch.nn.functional as F
8 |
9 | from ..raft.raft import RAFT
10 | from ..raft.utils import flow_viz
11 |
12 | class FlowExtract(nn.Module):
13 | def __init__(self, model_path, device='cuda'):
14 | super(FlowExtract, self).__init__()
15 | model = torch.nn.DataParallel(RAFT())
16 | model.load_state_dict(torch.load(model_path))
17 | self.device = device
18 | self.model = model.module.to(self.device).eval()
19 |
20 | @torch.no_grad()
21 | def forward(self, images, source_img_inds, target_img_inds):
22 | input_images = images.permute(0, 3, 1, 2).to(self.device)
23 | # flow in low resolution, flow in input resolution
24 | flows_low, flows_high = self.model(input_images[source_img_inds].contiguous(), input_images[target_img_inds].contiguous(), iters=20, upsample=False, test_mode=True)
25 | flows = F.interpolate(flows_high, size=(128,128), mode='bilinear', align_corners=True) / 8
26 | return flows
27 |
28 | def show_seq_flow(images, flows):
29 | for img, flo in zip(images, flows):
30 | img = img.cpu().numpy()
31 | flo = flo.permute(1,2,0).cpu().numpy()
32 |
33 | # map flow to rgb image
34 | flo = flow_viz.flow_to_image(flo)
35 | flo = cv2.resize(flo, img.shape[:2])
36 | img_flo = np.concatenate([img, flo], axis=1)
37 |
38 | img2show = img_flo[:, :, [2,1,0]]/255.0
39 | h, w = img2show.shape[:2]
40 | #img2show = cv2.resize(img2show, (w//2, h//2))
41 | cv2.imshow('image', img2show)
42 | cv2.waitKey()
43 |
44 |
45 | def load_image(imfile):
46 | img = cv2.imread(imfile)[:,:,[2,1,0]]
47 | img = torch.from_numpy(img).permute(2, 0, 1).float()
48 | return img[None].to('cuda')
49 |
50 |
51 | def show_flow(img, flo):
52 | img = img[0].permute(1,2,0).cpu().numpy()
53 | flo = flo[0].permute(1,2,0).cpu().numpy()
54 |
55 | # map flow to rgb image
56 | flo = flow_viz.flow_to_image(flo)
57 | img_flo = np.concatenate([img, flo], axis=0)
58 |
59 | img2show = img_flo[:, :, [2,1,0]]/255.0
60 | h, w = img2show.shape[:2]
61 | img2show = cv2.resize(img2show, (w//2, h//2))
62 | cv2.imshow('image', img2show)
63 | cv2.waitKey()
64 |
65 | if __name__ == '__main__':
66 | demo('/home/yusun/data_drive3/datasets/DAVIS-data/DAVIS/JPEGImages/480p/motocross-jump')
67 |
--------------------------------------------------------------------------------
/simple_romp/trace2/models/raft/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Arthur151/ROMP/a8558aed480af850756f84e2a7c787e359bddbd0/simple_romp/trace2/models/raft/utils/__init__.py
--------------------------------------------------------------------------------
/simple_romp/trace2/models/raft/utils/utils.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn.functional as F
3 | import numpy as np
4 | from scipy import interpolate
5 |
6 |
7 | class InputPadder:
8 | """ Pads images such that dimensions are divisible by 8 """
9 | def __init__(self, dims, mode='sintel'):
10 | self.ht, self.wd = dims[-2:]
11 | pad_ht = (((self.ht // 8) + 1) * 8 - self.ht) % 8
12 | pad_wd = (((self.wd // 8) + 1) * 8 - self.wd) % 8
13 | if mode == 'sintel':
14 | self._pad = [pad_wd//2, pad_wd - pad_wd//2, pad_ht//2, pad_ht - pad_ht//2]
15 | else:
16 | self._pad = [pad_wd//2, pad_wd - pad_wd//2, 0, pad_ht]
17 |
18 | def pad(self, *inputs):
19 | return [F.pad(x, self._pad, mode='replicate') for x in inputs]
20 |
21 | def unpad(self,x):
22 | ht, wd = x.shape[-2:]
23 | c = [self._pad[2], ht-self._pad[3], self._pad[0], wd-self._pad[1]]
24 | return x[..., c[0]:c[1], c[2]:c[3]]
25 |
26 | def forward_interpolate(flow):
27 | flow = flow.detach().cpu().numpy()
28 | dx, dy = flow[0], flow[1]
29 |
30 | ht, wd = dx.shape
31 | x0, y0 = np.meshgrid(np.arange(wd), np.arange(ht))
32 |
33 | x1 = x0 + dx
34 | y1 = y0 + dy
35 |
36 | x1 = x1.reshape(-1)
37 | y1 = y1.reshape(-1)
38 | dx = dx.reshape(-1)
39 | dy = dy.reshape(-1)
40 |
41 | valid = (x1 > 0) & (x1 < wd) & (y1 > 0) & (y1 < ht)
42 | x1 = x1[valid]
43 | y1 = y1[valid]
44 | dx = dx[valid]
45 | dy = dy[valid]
46 |
47 | flow_x = interpolate.griddata(
48 | (x1, y1), dx, (x0, y0), method='nearest', fill_value=0)
49 |
50 | flow_y = interpolate.griddata(
51 | (x1, y1), dy, (x0, y0), method='nearest', fill_value=0)
52 |
53 | flow = np.stack([flow_x, flow_y], axis=0)
54 | return torch.from_numpy(flow).float()
55 |
56 |
57 | def bilinear_sampler(img, coords, mode='bilinear', mask=False):
58 | """ Wrapper for grid_sample, uses pixel coordinates """
59 | H, W = img.shape[-2:]
60 | xgrid, ygrid = coords.split([1,1], dim=-1)
61 | xgrid = 2*xgrid/(W-1) - 1
62 | ygrid = 2*ygrid/(H-1) - 1
63 |
64 | grid = torch.cat([xgrid, ygrid], dim=-1)
65 | img = F.grid_sample(img, grid, align_corners=True)
66 |
67 | if mask:
68 | mask = (xgrid > -1) & (ygrid > -1) & (xgrid < 1) & (ygrid < 1)
69 | return img, mask.float()
70 |
71 | return img
72 |
73 |
74 | def coords_grid(batch, ht, wd, device):
75 | coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device))
76 | coords = torch.stack(coords[::-1], dim=0).float()
77 | return coords[None].repeat(batch, 1, 1, 1)
78 |
79 |
80 | def upflow8(flow, mode='bilinear'):
81 | new_size = (8 * flow.shape[2], 8 * flow.shape[3])
82 | return 8 * F.interpolate(flow, size=new_size, mode=mode, align_corners=True)
83 |
--------------------------------------------------------------------------------
/simple_romp/trace2/results_parser/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Arthur151/ROMP/a8558aed480af850756f84e2a7c787e359bddbd0/simple_romp/trace2/results_parser/__init__.py
--------------------------------------------------------------------------------
/simple_romp/trace2/tracker/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Arthur151/ROMP/a8558aed480af850756f84e2a7c787e359bddbd0/simple_romp/trace2/tracker/__init__.py
--------------------------------------------------------------------------------
/simple_romp/trace2/tracker/basetrack.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from collections import OrderedDict
3 |
4 |
5 | class TrackState(object):
6 | New = 0
7 | Tracked = 1
8 | Lost = 2
9 | Removed = 3
10 |
11 | class BaseTrack(object):
12 | _count = 0
13 |
14 | track_id = 0
15 | is_activated = False
16 | state = TrackState.New
17 |
18 | history = OrderedDict()
19 | features = []
20 | curr_feature = None
21 | score = 0
22 | start_frame = 0
23 | frame_id = 0
24 | time_since_update = 0
25 |
26 | # multi-camera
27 | location = (np.inf, np.inf)
28 |
29 | @property
30 | def end_frame(self):
31 | return self.frame_id
32 |
33 | @staticmethod
34 | def next_id():
35 | BaseTrack._count += 1
36 | return BaseTrack._count
37 |
38 | @staticmethod
39 | def refresh_id():
40 | BaseTrack._count = 0
41 | return BaseTrack._count
42 |
43 | def activate(self, *args):
44 | raise NotImplementedError
45 |
46 | def predict(self):
47 | raise NotImplementedError
48 |
49 | def update(self, *args, **kwargs):
50 | raise NotImplementedError
51 |
52 | def mark_lost(self):
53 | self.state = TrackState.Lost
54 |
55 | def mark_removed(self):
56 | self.state = TrackState.Removed
--------------------------------------------------------------------------------
/simple_romp/trace2/utils/infer_utils.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import copy
3 | import os
4 |
5 | delete_output_keys = ['params_pred', 'verts', 'verts_camed_org', 'world_verts', 'world_j3d', 'world_verts_camed_org', 'detection_flag']
6 | def remove_large_keys(outputs, del_keys=delete_output_keys):
7 | save_outputs = copy.deepcopy(outputs)
8 | for key in del_keys:
9 | del save_outputs[key]
10 | rest_keys = list(save_outputs.keys())
11 | for key in rest_keys:
12 | if torch.is_tensor(save_outputs[key]):
13 | save_outputs[key] = save_outputs[key].detach().cpu().numpy()
14 |
15 | return save_outputs
16 |
17 | def collect_kp_results(outputs, img_paths):
18 | seq_kp3d_results = {}
19 | for ind, img_path in enumerate(img_paths):
20 | img_name = os.path.basename(img_path)
21 | if img_name not in seq_kp3d_results:
22 | seq_kp3d_results[img_name] = []
23 | subject_results = [outputs['pj2d_org'][ind].cpu().numpy(),outputs['j3d'][ind].cpu().numpy(), outputs['pj2d_org_h36m17'][ind].cpu().numpy(),outputs['joints_h36m17'][ind].cpu().numpy(),\
24 | outputs['smpl_thetas'][ind].cpu().numpy(), outputs['smpl_betas'][ind].cpu().numpy(), outputs['cam_trans'][ind].cpu().numpy()]
25 | seq_kp3d_results[img_name].append(subject_results)
26 | return seq_kp3d_results
27 |
28 | def insert_last_human_state(current, last_state, key, init=None):
29 | if key in last_state:
30 | return torch.cat([last_state[key], current], 0).contiguous()
31 | if key not in last_state:
32 | return torch.cat([current[[0]], current], 0).contiguous()
33 |
34 | def save_last_human_state(cacher, last_state, key):
35 | if key not in cacher:
36 | cacher = {}
37 | cacher[key] = last_state
38 | return cacher
39 |
40 | def merge_item(source, target, key):
41 | if key not in target:
42 | target[key] = source[key].cpu()
43 | else:
44 | target[key] = torch.cat([target[key], source[key].cpu()], 0)
45 |
46 | def merge_output(split_outputs, seq_outputs):
47 | keys = ['params_pred', 'reorganize_idx', 'j3d', 'verts', 'verts_camed_org', \
48 | 'world_cams', 'world_trans', 'world_global_rots', 'world_verts', 'world_j3d', 'world_verts_camed_org',\
49 | 'pj2d_org', 'pj2d','cam_trans','detection_flag', 'pj2d_org_h36m17','joints_h36m17', 'center_confs',\
50 | 'track_ids', 'smpl_thetas', 'smpl_betas']
51 | for key in keys:
52 | if key in split_outputs:
53 | merge_item(split_outputs, seq_outputs, key)
54 | return seq_outputs
--------------------------------------------------------------------------------
/simple_romp/tracker/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Arthur151/ROMP/a8558aed480af850756f84e2a7c787e359bddbd0/simple_romp/tracker/__init__.py
--------------------------------------------------------------------------------
/simple_romp/tracker/basetrack.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from collections import OrderedDict
3 |
4 |
5 | class TrackState(object):
6 | New = 0
7 | Tracked = 1
8 | Lost = 2
9 | Removed = 3
10 |
11 |
12 | class BaseTrack(object):
13 | _count = 0
14 |
15 | track_id = 0
16 | is_activated = False
17 | state = TrackState.New
18 |
19 | history = OrderedDict()
20 | features = []
21 | curr_feature = None
22 | score = 0
23 | start_frame = 0
24 | frame_id = 0
25 | time_since_update = 0
26 |
27 | # multi-camera
28 | location = (np.inf, np.inf)
29 |
30 | @property
31 | def end_frame(self):
32 | return self.frame_id
33 |
34 | @staticmethod
35 | def next_id():
36 | BaseTrack._count += 1
37 | return BaseTrack._count
38 |
39 | def activate(self, *args):
40 | raise NotImplementedError
41 |
42 | def predict(self):
43 | raise NotImplementedError
44 |
45 | def update(self, *args, **kwargs):
46 | raise NotImplementedError
47 |
48 | def mark_lost(self):
49 | self.state = TrackState.Lost
50 |
51 | def mark_removed(self):
52 | self.state = TrackState.Removed
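
The class above is an abstract interface; the concrete trackers in this repo subclass it with far richer state (Kalman filters, appearance features). Purely as a sketch of the expected call pattern, here is a minimal, hypothetical subclass that tracks nothing but a score:

```python
class DummyTrack(BaseTrack):
    """Toy subclass: records a score per frame, no motion model."""

    def __init__(self, score):
        self.score = score

    def activate(self, frame_id):
        # Assign a fresh id and mark the track as started on this frame.
        self.track_id = self.next_id()
        self.state = TrackState.Tracked
        self.is_activated = True
        self.start_frame = self.frame_id = frame_id

    def predict(self):
        # No motion model in this toy example.
        pass

    def update(self, score, frame_id):
        self.score = score
        self.frame_id = frame_id
        self.state = TrackState.Tracked

track = DummyTrack(score=0.9)
track.activate(frame_id=1)
track.update(score=0.8, frame_id=2)
track.mark_lost()
assert track.state == TrackState.Lost and track.end_frame == 2
```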
--------------------------------------------------------------------------------
/simple_romp/upload_pypi.sh:
--------------------------------------------------------------------------------
1 | #twine upload --repository-url https://test.pypi.org/legacy/ dist/simple_romp-1*
2 | twine upload dist/*
3 |
--------------------------------------------------------------------------------
/simple_romp/vis_human/__init__.py:
--------------------------------------------------------------------------------
1 | from .main import setup_renderer, rendering_romp_bev_results
2 | from .vis_utils import mesh_color_left2right
--------------------------------------------------------------------------------
/simple_romp/vis_human/sim3drender/__init__.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 |
3 | from .renderer import Sim3DR
4 |
--------------------------------------------------------------------------------
/trace/__init__.py:
--------------------------------------------------------------------------------
1 | from . import _init_paths_
--------------------------------------------------------------------------------
/trace/_init_paths_.py:
--------------------------------------------------------------------------------
1 | import os.path as osp
2 | import sys
3 |
4 | def add_path(path):
5 | if path not in sys.path:
6 | sys.path.insert(0, path)
7 |
8 | this_dir = osp.dirname(__file__)
9 |
10 | # Add lib to PYTHONPATH
11 | lib_path = osp.join(this_dir, 'lib')
12 | add_path(lib_path)
--------------------------------------------------------------------------------
/trace/install.sh:
--------------------------------------------------------------------------------
1 | # compile deformable convolution
2 | cd lib/models/deform_conv
3 | python setup.py develop
4 | cd ../../..
5 |
6 | # compile bounding box iou
7 | cd lib/tracker/cython_bbox
8 | python setup.py install
9 | cd ../../..
10 |
11 | #pip install -e git+https://github.com/samson-wang/cython_bbox.git#egg=cython-bbox
--------------------------------------------------------------------------------
/trace/lib/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | import sys, os
2 | lib_dir = os.path.dirname(__file__)
3 | root_dir = os.path.join(lib_dir.replace(os.path.basename(lib_dir),''))
4 | if root_dir not in sys.path:
5 | sys.path.insert(0, root_dir)
--------------------------------------------------------------------------------
/trace/lib/datasets/base.py:
--------------------------------------------------------------------------------
1 | from datasets.image_base import Image_base, test_image_dataset
2 | from datasets.image_base_relative import Image_base_relative, test_image_relative_dataset
3 | from datasets.video_base_relative import Video_base_relative, test_video_relative_dataset
4 |
5 | Base_Classes = {'image': Image_base, 'image_relative': Image_base_relative, 'video_relative': Video_base_relative}
6 |
7 | Test_Funcs = {'image': test_image_dataset, 'image_relative': test_image_relative_dataset, 'video_relative': test_video_relative_dataset}
--------------------------------------------------------------------------------
/trace/lib/datasets/mpi_inf_3dhp_validation.py:
--------------------------------------------------------------------------------
1 | from datasets.mpi_inf_3dhp import MPI_INF_3DHP
2 | from datasets.image_base import *
3 | from datasets.base import Base_Classes, Test_Funcs
4 |
5 | default_mode = args().video_loading_mode if args().video else args().image_loading_mode
6 |
7 | def MPI_INF_3DHP_VALIDATION(base_class=default_mode):
8 | class MPI_INF_3DHP_VALIDATION(MPI_INF_3DHP(Base_Classes[base_class])):
9 | def __init__(self,train_flag=False, validation=True, **kwargs):
10 | super(MPI_INF_3DHP_VALIDATION,self).__init__(train_flag=train_flag, validation=validation)
11 | return MPI_INF_3DHP_VALIDATION
12 | if __name__ == '__main__':
13 | datasets=MPI_INF_3DHP_VALIDATION(base_class=default_mode)()
14 | Test_Funcs[default_mode](datasets,with_smpl=True)
15 | print('Done')
16 |
--------------------------------------------------------------------------------
/trace/lib/epropnp/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Copyright (C) 2010-2022 Alibaba Group Holding Limited.
3 | """
4 |
--------------------------------------------------------------------------------
/trace/lib/evaluation/__init__.py:
--------------------------------------------------------------------------------
1 | import sys, os
2 | lib_dir = os.path.dirname(__file__)
3 | root_dir = os.path.join(lib_dir.replace(os.path.basename(lib_dir), ''))
4 | if root_dir not in sys.path:
5 | sys.path.insert(0, root_dir)
6 |
7 | from .evaluation_matrix import compute_error_verts, compute_similarity_transform, compute_similarity_transform_torch, \
8 | batch_compute_similarity_transform_torch, compute_mpjpe
9 | #from evaluation.eval_pckh import eval_pck, eval_pckh
10 | #from evaluation.pw3d_eval import *
11 | from .eval_ds_utils import h36m_evaluation_act_wise, cmup_evaluation_act_wise, pp_evaluation_cam_wise, determ_worst_best, reorganize_vis_info
--------------------------------------------------------------------------------
/trace/lib/evaluation/mupots_util/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Arthur151/ROMP/a8558aed480af850756f84e2a7c787e359bddbd0/trace/lib/evaluation/mupots_util/__init__.py
--------------------------------------------------------------------------------
/trace/lib/evaluation/mupots_util/datautil.py:
--------------------------------------------------------------------------------
1 | import scipy.io as sio
2 |
3 | def load_annot(fname):
4 | def parse_pose(dt):
5 | res = {}
6 | annot2 = dt['annot2'][0,0]
7 | annot3 = dt['annot3'][0,0]
8 | annot3_univ = dt['univ_annot3'][0,0]
9 | is_valid = dt['isValidFrame'][0,0][0,0]
10 | res['annot2'] = annot2
11 | res['annot3'] = annot3
12 | res['annot3_univ'] = annot3_univ
13 | res['is_valid'] = is_valid
14 | return res
15 | data = sio.loadmat(fname)['annotations']
16 | results = []
17 | num_frames, num_inst = data.shape[0], data.shape[1]
18 | for j in range(num_inst):
19 | buff = []
20 | for i in range(num_frames):
21 | buff.append(parse_pose(data[i,j]))
22 | results.append(buff)
23 | return results
24 |
25 | def load_occ(fname):
26 | data = sio.loadmat(fname)['occlusion_labels']
27 | results = []
28 | num_frames, num_inst = data.shape[0], data.shape[1]
29 | for i in range(num_frames):
30 | buff = []
31 | for j in range(num_inst):
32 | buff.append(data[i][j])
33 | results.append(buff)
34 | return results
35 |
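
Usage sketch (editorial addition): a rough idea of how these loaders are called on the MuPoTS-3D release. The per-sequence directory layout and the `annot.mat` / `occlusion.mat` file names are assumptions here, not guaranteed by this file.

```python
import os

mupots_root = '/path/to/MuPoTS-3D'          # assumed dataset root
seq_dir = os.path.join(mupots_root, 'TS1')

annots = load_annot(os.path.join(seq_dir, 'annot.mat'))     # [num_subjects][num_frames] dicts
occs = load_occ(os.path.join(seq_dir, 'occlusion.mat'))     # [num_frames][num_subjects] labels

first = annots[0][0]                                        # subject 0, frame 0
print(first['annot3'].shape, first['is_valid'])             # 3D joint annotation and validity flag
```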
--------------------------------------------------------------------------------
/trace/lib/evaluation/mupots_util/evaluate.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from . import mpii_get_joints
3 |
4 | def mean(l):
5 | return sum(l) / len(l)
6 |
7 | def mpii_compute_3d_pck(seq_err):
8 | pck_curve_array = []
9 | pck_array = []
10 | auc_array = []
11 | thresh = np.arange(0, 200, 5)
12 | pck_thresh = 150
13 | joint_groups, all_joints = mpii_get_joints.mpii_joint_groups()
14 | for seq_idx in range(len(seq_err)):
15 | pck_curve = []
16 | pck_seq = []
17 | auc_seq = []
18 | err = np.array(seq_err[seq_idx]).astype(np.float32)
19 | for j in range(len(joint_groups)):
20 | err_selected = err[:,joint_groups[j][1]]
21 | buff = []
22 | for t in thresh:
23 | pck = np.float32(err_selected < t).sum() / len(joint_groups[j][1]) / len(err)
24 | buff.append(pck) #[Num_thresholds]
25 | pck_curve.append(buff)
26 | auc_seq.append(mean(buff))
27 | pck = np.float32(err_selected < pck_thresh).sum() / len(joint_groups[j][1]) / len(err)
28 | pck_seq.append(pck)
29 |
30 | buff = []
31 | for t in thresh:
32 | pck = np.float32(err[:, all_joints] < t).sum() / len(err) / len(all_joints)
33 | buff.append(pck) #[Num_thresholds]
34 | pck_curve.append(buff)
35 |
36 | pck = np.float32(err[:, all_joints] < pck_thresh).sum() / len(err) / len(all_joints)
37 | pck_seq.append(pck)
38 |
39 |         pck_curve_array.append(pck_curve) # [num_seq: [Num_groups+1: [Num_thresholds]]]
40 |         pck_array.append(pck_seq) # [num_seq: [Num_groups+1]]
41 |         auc_array.append(auc_seq) # [num_seq: [Num_groups]]
42 |
43 | return pck_curve_array, pck_array, auc_array
44 |
45 | def calculate_multiperson_errors(seq_err):
46 | return mpii_compute_3d_pck(seq_err)
47 |
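
Input/output sketch (editorial addition): the evaluator expects one error array per sequence with per-frame, per-joint errors in millimetres, in the joint order used by `mpii_get_joints.mpii_joint_groups`. The random errors below are placeholders.

```python
import numpy as np

# Two dummy sequences, each with [num_frames, 17] per-joint errors in mm.
rng = np.random.default_rng(0)
seq_err = [rng.uniform(0, 250, size=(50, 17)), rng.uniform(0, 250, size=(80, 17))]

pck_curves, pcks, aucs = calculate_multiperson_errors(seq_err)
# pcks[s] holds one PCK@150mm value per joint group plus one entry for all joints;
# aucs[s] holds the area under the PCK curve per joint group.
overall_pck = np.mean([p[-1] for p in pcks])
print('PCK@150mm over all joints, averaged over sequences: %.3f' % overall_pck)
```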
--------------------------------------------------------------------------------
/trace/lib/evaluation/mupots_util/load_dep_pred.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pickle
3 | import os,sys
4 |
5 | file_path = os.path.dirname(__file__)
6 | gt_dict = {}
7 | def get_pred_gt(seq_idx, inst_idx, frame_idx):
8 | if not seq_idx in gt_dict:
9 | gt = pickle.load(open(os.path.join(file_path,'mupots_depths','%02d_%02d.pkl'%(seq_idx, inst_idx)), 'rb'))
10 | gt_dict[(seq_idx, inst_idx)] = np.float32(gt)
11 | gt = gt_dict[(seq_idx, inst_idx)]
12 | return gt[frame_idx]
13 |
--------------------------------------------------------------------------------
/trace/lib/evaluation/mupots_util/mpii_get_joints.py:
--------------------------------------------------------------------------------
1 | def mpii_joint_groups():
2 | joint_groups = [
3 | ['Head', [0]],
4 | ['Neck', [1]],
5 | ['Shou', [2,5]],
6 | ['Elbow', [3,6]],
7 | ['Wrist', [4,7]],
8 | ['Hip', [8,11]],
9 | ['Knee', [9,12]],
10 | ['Ankle', [10,13]],
11 | ]
12 | all_joints = []
13 | for i in joint_groups:
14 | all_joints += i[1]
15 | return joint_groups, all_joints
16 |
17 |
18 | def mpii_get_joints(set_name):
19 | original_joint_names = ['spine3', 'spine4', 'spine2', 'spine1', 'spine',
20 | 'neck', 'head', 'head_top', 'left_shoulder', 'left_arm', 'left_forearm',
21 | 'left_hand', 'left_hand_ee', 'right_shoulder', 'right_arm', 'right_forearm', 'right_hand',
22 | 'right_hand_ee', 'left_leg_up', 'left_leg', 'left_foot', 'left_toe', 'left_ee',
23 | 'right_leg_up' , 'right_leg', 'right_foot', 'right_toe', 'right_ee']
24 |
25 | all_joint_names = ['spine3', 'spine4', 'spine2', 'spine', 'pelvis',
26 | 'neck', 'head', 'head_top', 'left_clavicle', 'left_shoulder', 'left_elbow',
27 | 'left_wrist', 'left_hand', 'right_clavicle', 'right_shoulder', 'right_elbow', 'right_wrist',
28 | 'right_hand', 'left_hip', 'left_knee', 'left_ankle', 'left_foot', 'left_toe',
29 | 'right_hip' , 'right_knee', 'right_ankle', 'right_foot', 'right_toe']
30 |
31 | if set_name=='relavant':
32 | joint_idx = [8, 6, 15, 16, 17, 10, 11, 12, 24, 25, 26, 19, 20, 21, 5, 4, 7]
33 | joint_parents_o1 = [ 2, 16, 2, 3, 4, 2, 6, 7, 15, 9, 10, 15, 12, 13, 15, 15, 2]
34 | joint_parents_o2 = [ 16, 15, 16, 2, 3, 16, 2, 6, 16, 15, 9, 16, 15, 12, 15, 15, 16]
35 | joint_idx = [i-1 for i in joint_idx]
36 | joint_parents_o1 = [i-1 for i in joint_parents_o1]
37 | joint_parents_o2 = [i-1 for i in joint_parents_o2]
38 | joint_names = [all_joint_names[i] for i in joint_idx]
39 | return joint_idx, joint_parents_o1, joint_parents_o2, joint_names
40 | else:
41 | raise NotImplementedError('Not implemented yet.')
42 |
--------------------------------------------------------------------------------
/trace/lib/loss_funcs/__init__.py:
--------------------------------------------------------------------------------
1 | import sys, os
2 | lib_dir = os.path.dirname(__file__)
3 | root_dir = os.path.join(lib_dir.replace(os.path.basename(lib_dir), ''))
4 | if root_dir not in sys.path:
5 | sys.path.insert(0, root_dir)
6 |
7 | from .calc_loss import Loss
8 | from .learnable_loss import Learnable_Loss
9 | from .params_loss import batch_smpl_pose_l2_error, batch_l2_loss
10 | from .keypoints_loss import batch_kp_2d_l2_loss, calc_mpjpe, calc_pampjpe, calc_pck, align_by_parts
11 | from .maps_loss import focal_loss, Heatmap_AE_loss, JointsMSELoss
12 | from .prior_loss import create_prior, MaxMixturePrior, L2Prior, SMPLifyAnglePrior, angle_prior
--------------------------------------------------------------------------------
/trace/lib/loss_funcs/matching.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 | import lap
4 |
5 | def linear_assignment(cost_matrix, thresh=100.):
6 | if cost_matrix.size == 0:
7 |         return np.empty((0, 2), dtype=int)
8 | matches, unmatched_a, unmatched_b = [], [], []
9 | cost, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh)
10 | for ix, mx in enumerate(x):
11 | if mx >= 0:
12 | matches.append([ix, mx])
13 | matches = np.asarray(matches)
14 | #unmatched_a = np.where(x < 0)[0]
15 | #unmatched_b = np.where(y < 0)[0]
16 |
17 | return matches #, unmatched_a, unmatched_b
18 |
19 | def calc_dist_matix(pred_trajs, gt_trajs):
20 | valid_mask = (gt_trajs!=-2).sum(-1)!=0
21 | valid_mask = valid_mask.unsqueeze(1).repeat(1,len(pred_trajs),1).float()
22 | dist_matix = torch.norm(gt_trajs.unsqueeze(1).repeat(1,len(pred_trajs),1,1)-pred_trajs.unsqueeze(0).repeat(len(gt_trajs),1,1,1),p=2, dim=-1)
23 | dist_matix = (dist_matix * valid_mask).sum(-1) / (valid_mask.sum(-1)+1e-4)
24 |
25 | return dist_matix
26 |
27 | def match_traj_to_3D_2D_gts(traj3D_gts, traj2D_gts, Tj_flag, traj_preds, pred_batch_ids):
28 | mc = {key:[] for key in ['batch_ids', 'matched_ids', 'person_ids']}
29 |
30 | unique_batch_ids = torch.unique(pred_batch_ids)
31 | # matching 3D trajectory
32 | for batch_id in unique_batch_ids:
33 | pred_mask = pred_batch_ids == batch_id
34 | batch_ids = pred_batch_ids[pred_mask]
35 | pred_ids = torch.where(pred_mask)[0]
36 | pred_trajs = traj_preds[pred_mask]
37 |
38 | if Tj_flag[batch_id,1]: # have 3D traj gt
39 | gt_trajs = traj3D_gts[batch_id] # max_person_num, args().temp_clip_length, 3
40 | elif Tj_flag[batch_id,0]: # have 2D traj gt
41 | gt_trajs = traj2D_gts[batch_id] # max_person_num, args().temp_clip_length, 2
42 | pred_trajs = pred_trajs[:,:,[2,1]]
43 | else:
44 | continue
45 |
46 | gt_mask = (gt_trajs!=-2).sum(-1).sum(-1)>0
47 | gt_trajs = gt_trajs[gt_mask]
48 | person_ids = torch.where(gt_mask)[0]
49 |
50 | dist_matrix = calc_dist_matix(pred_trajs.detach(), gt_trajs)
51 | matches = linear_assignment(dist_matrix.cpu().numpy())
52 | if len(matches) == 0:
53 | continue
54 | person_ids = person_ids[matches[:,0]]
55 | pred_ids = pred_ids[matches[:,1]]
56 | batch_ids = batch_ids[matches[:,1]]
57 |
58 | mc['batch_ids'].append(batch_ids)
59 | mc['matched_ids'].append(pred_ids)
60 | mc['person_ids'].append(person_ids)
61 |
62 | if len(mc['matched_ids'])==0:
63 | mc.update({'batch_ids':[0], 'matched_ids':[0], 'person_ids':[0]})
64 | keys_list = list(mc.keys())
65 | for key in keys_list:
66 | mc[key] = torch.cat(mc[key], 0).long().to(traj_preds.device)
67 | return mc
68 |
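
Toy example (editorial addition): the matching step on a made-up cost matrix, just to show the shapes and the row/column convention (rows are ground truth, columns are predictions). `lap` must be installed (`pip install lap`); the numbers are placeholders.

```python
import numpy as np
import torch

# 2 ground-truth trajectories vs. 3 predicted trajectories.
cost = np.array([[0.2, 5.0, 3.0],
                 [4.0, 0.1, 2.5]], dtype=np.float64)
matches = linear_assignment(cost, thresh=1.0)
print(matches)  # [[0 0] [1 1]]: GT 0 matches prediction 0, GT 1 matches prediction 1

# calc_dist_matix expects [num_pred, T, D] and [num_gt, T, D] trajectories (invalid GT entries are -2).
pred = torch.randn(3, 8, 3)
gt = torch.randn(2, 8, 3)
print(calc_dist_matix(pred, gt).shape)  # torch.Size([2, 3]): one mean distance per (GT, prediction) pair
```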
--------------------------------------------------------------------------------
/trace/lib/loss_funcs/params_loss.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import torch
6 | import torch.nn as nn
7 | import sys, os
8 | root_dir = os.path.join(os.path.dirname(__file__),'..')
9 | if root_dir not in sys.path:
10 | sys.path.insert(0, root_dir)
11 |
12 | import time
13 | import pickle
14 | import numpy as np
15 |
16 | import config
17 | import constants
18 | from config import args
19 | from utils import batch_rodrigues, rotation_matrix_to_angle_axis
20 |
21 | def batch_l2_loss(real,predict):
22 | if len(real) == 0:
23 | return 0
24 | loss = torch.norm(real-predict, p=2, dim=1)
25 | loss = loss[~torch.isnan(loss)]
26 | if len(loss) == 0:
27 | return 0
28 | return loss#.mean()
29 |
30 | def batch_smpl_pose_l2_error(real,predict):
31 |     # convert to rotation matrices; multiple axis-angle vectors can map to the same rotation, so compare in matrix form
32 | batch_size = real.shape[0]
33 | real = batch_rodrigues(real.reshape(-1,3)).contiguous()#(N*J)*3 -> (N*J)*3*3
34 | predict = batch_rodrigues(predict.reshape(-1,3)).contiguous()#(N*J)*3 -> (N*J)*3*3
35 | loss = torch.norm((real-predict).view(-1,9), p=2, dim=-1)#self.sl1loss(real,predict)#
36 | loss = loss.reshape(batch_size, -1).mean(-1)
37 | return loss
38 |
39 | def trans_relative_rot_to_global_rotmat(params, with_global_rot=False):
40 | '''
41 | calculate absolute rotation matrix in the global coordinate frame of K body parts.
42 | The rotation is the map from the local bone coordinate frame to the global one.
43 | K= 9 parts in the following order:
44 | root (JOINT 0) , left hip (JOINT 1), right hip (JOINT 2), left knee (JOINT 4), right knee (JOINT 5),
45 | left shoulder (JOINT 16), right shoulder (JOINT 17), left elbow (JOINT 18), right elbow (JOINT 19).
46 | parent kinetic tree [-1, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9, 12, 13, 14, 16, 17, 18, 19, 20, 21]
47 | '''
48 | batch_size, param_num = params.shape[0], params.shape[1]//3
49 | pose_rotmat = batch_rodrigues(params.reshape(-1,3)).view(batch_size, param_num, 3, 3).contiguous()
50 | if with_global_rot:
51 | sellect_joints = np.array([0,1,2,4,5,16,17,18,19],dtype=np.int32)
52 | results = [pose_rotmat[:, 0]]
53 | for idx in range(param_num-1):
54 | i_val = int(idx + 1)
55 | joint_rot = pose_rotmat[:, i_val]
56 | parent = constants.kintree_parents[i_val]
57 | glob_transf_mat = torch.matmul(results[parent], joint_rot)
58 | results.append(glob_transf_mat)
59 | else:
60 | sellect_joints = np.array([1,2,4,5,16,17,18,19],dtype=np.int32)-1
61 | results = [torch.eye(3,3)[None].cuda().repeat(batch_size,1,1)]
62 | for i_val in range(param_num-1):
63 | #i_val = int(idx + 1)
64 | joint_rot = pose_rotmat[:, i_val]
65 | parent = constants.kintree_parents[i_val+1]
66 | glob_transf_mat = torch.matmul(results[parent], joint_rot)
67 | results.append(glob_transf_mat)
68 | global_rotmat = torch.stack(results, axis=1)[:, sellect_joints].contiguous()
69 | return global_rotmat
70 |
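
Usage sketch (editorial addition): a minimal call of the two loss helpers above, assuming this module's own imports (`batch_rodrigues` etc.) resolve; the pose values are placeholders.

```python
import torch

# Two batches of SMPL pose parameters in axis-angle format: [N, 24*3].
real = torch.zeros(4, 72)
pred = torch.zeros(4, 72)
pred[:, 3] = 0.1  # perturb the first body joint slightly

per_sample_err = batch_smpl_pose_l2_error(real, pred)  # Frobenius error between 3x3 rotations, shape [N]
print(per_sample_err.shape, per_sample_err.mean())

# batch_l2_loss works on any [N, D] pair and drops NaN rows before returning per-sample norms.
print(batch_l2_loss(torch.randn(4, 3), torch.randn(4, 3)))
```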
--------------------------------------------------------------------------------
/trace/lib/maps_utils/__init__.py:
--------------------------------------------------------------------------------
1 | import sys, os
2 | lib_dir = os.path.dirname(__file__)
3 | root_dir = os.path.join(lib_dir.replace(os.path.basename(lib_dir),''))
4 | if root_dir not in sys.path:
5 | sys.path.insert(0, root_dir)
--------------------------------------------------------------------------------
/trace/lib/maps_utils/debug_utils.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 |
4 | def print_dict(td):
5 | keys = collect_keyname(td)
6 | print(keys)
7 |
8 | def get_size(item):
9 | if isinstance(item, list) or isinstance(item, tuple):
10 | return len(item)
11 | elif isinstance(item, torch.Tensor) or isinstance(item, np.ndarray):
12 | return item.shape
13 | else:
14 | return item
15 |
16 | def collect_keyname(td):
17 | keys = []
18 | for key in td:
19 | if isinstance(td[key], dict):
20 | keys.append([key, collect_keyname(td[key])])
21 | else:
22 | keys.append([key, get_size(td[key])])
23 | return keys
--------------------------------------------------------------------------------
/trace/lib/maps_utils/relative_parser.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import numpy as np
6 | import torch
7 | import torch.nn.functional as F
8 | import sys, os
9 | import constants
10 | from config import args
11 |
12 | def parse_age_cls_results(age_probs):
13 | age_preds = torch.ones_like(age_probs).long()*-1
14 | age_preds[(age_probs<=constants.age_threshold['adult'][2])&(age_probs>constants.age_threshold['adult'][0])] = 0
15 | age_preds[(age_probs<=constants.age_threshold['teen'][2])&(age_probs>constants.age_threshold['teen'][0])] = 1
16 | age_preds[(age_probs<=constants.age_threshold['kid'][2])&(age_probs>constants.age_threshold['kid'][0])] = 2
17 | age_preds[(age_probs<=constants.age_threshold['baby'][2])&(age_probs>constants.age_threshold['baby'][0])] = 3
18 | return age_preds
19 |
20 | def parse_classfication_results(betas_pred, valid_gender_thresh=0.6):
21 | if betas_pred.shape[1]==13:
22 | age_probs = betas_pred[:,10]
23 | elif betas_pred.shape[1]==3:
24 | age_probs = betas_pred[:,0]
25 | age_preds = torch.ones_like(age_probs).long()*-1
26 | age_preds[(age_probs<=constants.age_threshold['adult'][2])&(age_probs>constants.age_threshold['adult'][0])] = 0
27 | age_preds[(age_probs<=constants.age_threshold['teen'][2])&(age_probs>constants.age_threshold['teen'][0])] = 1
28 | age_preds[(age_probs<=constants.age_threshold['kid'][2])&(age_probs>constants.age_threshold['kid'][0])] = 2
29 | age_preds[(age_probs<=constants.age_threshold['baby'][2])&(age_probs>constants.age_threshold['baby'][0])] = 3
30 |
31 | if betas_pred.shape[1]==13:
32 | gender_results = betas_pred[:,11:13].max(1)
33 | elif betas_pred.shape[1]==3:
34 | gender_results = betas_pred[:,1:3].max(1)
35 | gender_preds, gender_probs = gender_results.indices, gender_results.values
36 |     invalid_gender_preds_mask = gender_probs<valid_gender_thresh
37 |     gender_preds[invalid_gender_preds_mask] = -1
16 |
17 | #print(params_preds.shape, pred_batch_ids.shape, pred_czyxs.shape, top_score.shape, similarity.shape)
18 | #print('suppressing_silimar', similarity)
19 |
20 | for s_inds in center_similar_inds:
21 | if rot_dim==6:
22 | pose_angulars = rot6D_to_angular(pose_params_preds[similarity[s_inds]])
23 | pose_angular_base = rot6D_to_angular(pose_params_preds[s_inds].unsqueeze(0)).repeat(len(pose_angulars), 1)
24 | elif rot_dim==3:
25 | pose_angulars = pose_params_preds[similarity[s_inds]]
26 |             pose_angular_base = pose_params_preds[s_inds].unsqueeze(0).repeat(len(pose_angulars), 1)
27 | pose_similarity = batch_smpl_pose_l2_error(pose_angulars,pose_angular_base)
28 | sim_past = similarity[s_inds].clone()
29 |         similarity[s_inds,sim_past] = (pose_similarity
41 |     valid = (x1 > 0) & (x1 < wd) & (y1 > 0) & (y1 < ht)
42 | x1 = x1[valid]
43 | y1 = y1[valid]
44 | dx = dx[valid]
45 | dy = dy[valid]
46 |
47 | flow_x = interpolate.griddata(
48 | (x1, y1), dx, (x0, y0), method='nearest', fill_value=0)
49 |
50 | flow_y = interpolate.griddata(
51 | (x1, y1), dy, (x0, y0), method='nearest', fill_value=0)
52 |
53 | flow = np.stack([flow_x, flow_y], axis=0)
54 | return torch.from_numpy(flow).float()
55 |
56 |
57 | def bilinear_sampler(img, coords, mode='bilinear', mask=False):
58 | """ Wrapper for grid_sample, uses pixel coordinates """
59 | H, W = img.shape[-2:]
60 | xgrid, ygrid = coords.split([1,1], dim=-1)
61 | xgrid = 2*xgrid/(W-1) - 1
62 | ygrid = 2*ygrid/(H-1) - 1
63 |
64 | grid = torch.cat([xgrid, ygrid], dim=-1)
65 | img = F.grid_sample(img, grid, align_corners=True)
66 |
67 | if mask:
68 | mask = (xgrid > -1) & (ygrid > -1) & (xgrid < 1) & (ygrid < 1)
69 | return img, mask.float()
70 |
71 | return img
72 |
73 |
74 | def coords_grid(batch, ht, wd, device):
75 | coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device))
76 | coords = torch.stack(coords[::-1], dim=0).float()
77 | return coords[None].repeat(batch, 1, 1, 1)
78 |
79 |
80 | def upflow8(flow, mode='bilinear'):
81 | new_size = (8 * flow.shape[2], 8 * flow.shape[3])
82 | return 8 * F.interpolate(flow, size=new_size, mode=mode, align_corners=True)
83 |
--------------------------------------------------------------------------------
/trace/lib/smpl_family/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Arthur151/ROMP/a8558aed480af850756f84e2a7c787e359bddbd0/trace/lib/smpl_family/__init__.py
--------------------------------------------------------------------------------
/trace/lib/smpl_family/create_smpl_models.py:
--------------------------------------------------------------------------------
1 | from smpl_family.smpl import SMPL
2 | from smpl_family.smpla import SMPLA_parser
3 | from smpl_family.smplx import SMPLX
4 | from config import args
5 | import os
6 | def create_model(model_type, model_path=None, **kwargs):
7 | if model_type == 'smpl':
8 | model_path = args().smpl_model_path if model_path is None else model_path
9 | return SMPL(model_path, model_type='smpl', **kwargs)
10 | if model_type == 'smpla':
11 | return SMPLA_parser(args().smpla_model_path, args().smil_model_path, baby_thresh=args().baby_threshold, **kwargs)
12 | if model_type == 'smplx':
13 | model_path = os.path.join(args().smplx_model_folder, 'SMPLX_NEUTRAL.pth') if model_path is None else model_path
14 | return SMPLX(model_path, **kwargs)
--------------------------------------------------------------------------------
/trace/lib/smpl_family/mano.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Arthur151/ROMP/a8558aed480af850756f84e2a7c787e359bddbd0/trace/lib/smpl_family/mano.py
--------------------------------------------------------------------------------
/trace/lib/smpl_family/smpl_regressor.py:
--------------------------------------------------------------------------------
1 | import sys,os
2 | import torch
3 | import torch.nn as nn
4 | import config
5 | import numpy as np
6 | from .smpl import SMPL
7 | from config import args
8 |
9 | class SMPLR(nn.Module):
10 | def __init__(self, use_gender=False):
11 | super(SMPLR, self).__init__()
12 | model_path = os.path.join(config.model_dir,'parameters','smpl')
13 | self.smpls = {}
14 | self.smpls['n'] = SMPL(args().smpl_model_path, model_type='smpl')
15 | #SMPL(model_path, J_reg_extra9_path=args().smpl_J_reg_extra_path, J_reg_h36m17_path=args().smpl_J_reg_h37m_path, gender='neutral',create_transl=False)
16 | if use_gender:
17 | self.smpls['f'] = SMPL(args().smpl_model_path.replace('NEUTRAL', 'FEMALE'))
18 | #SMPL(model_path, J_reg_extra9_path=args().smpl_J_reg_extra_path, J_reg_h36m17_path=args().smpl_J_reg_h37m_path, gender='female',create_transl=False)
19 | self.smpls['m'] = SMPL(args().smpl_model_path.replace('NEUTRAL', 'MALE'))
20 | #SMPL(model_path,J_reg_extra9_path=args().smpl_J_reg_extra_path, J_reg_h36m17_path=args().smpl_J_reg_h37m_path, gender='male',create_transl=False)
21 |
22 | def forward(self, pose, betas, gender='n', root_align=True):
23 | if isinstance(pose, np.ndarray):
24 | pose, betas = torch.from_numpy(pose).float(),torch.from_numpy(betas).float()
25 | if len(pose.shape)==1:
26 | pose, betas = pose.unsqueeze(0), betas.unsqueeze(0)
27 | verts, joints44_17 = self.smpls[gender](poses=pose, betas=betas, root_align=root_align)
28 |
29 | return verts.numpy(), joints44_17[:,:args().joint_num].numpy()
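
Call-shape sketch (editorial addition): how the regressor above is typically invoked. This only runs once `args().smpl_model_path` points to a valid SMPL_NEUTRAL model file and the project config is importable, so treat it purely as an illustration of the expected argument shapes.

```python
import numpy as np

# 72-dim axis-angle pose and 10-dim betas for a single person (placeholder values).
smplr = SMPLR(use_gender=False)
pose = np.zeros(72, dtype=np.float32)
betas = np.zeros(10, dtype=np.float32)
verts, joints = smplr(pose, betas, gender='n')
print(verts.shape, joints.shape)  # roughly (1, 6890, 3) and (1, args().joint_num, 3)
```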
--------------------------------------------------------------------------------
/trace/lib/smpl_family/smpl_transfer_config_files/smpl2smpl.yaml:
--------------------------------------------------------------------------------
1 | datasets:
2 | mesh_folder:
3 | data_folder: '/home/yusun/DataCenter2/smpl_models/smpl'
4 | deformation_transfer_path: '/home/yusun/DataCenter2/smpl_models/model_transfer/smpl2smplx_deftrafo_setup.pkl'
5 | mask_ids_fname: None
6 | summary_steps: 100
7 |
8 | edge_fitting:
9 | per_part: False
10 |
11 | optim:
12 | type: 'trust-ncg'
13 | maxiters: 100
14 | gtol: 1e-06
15 |
16 | body_model:
17 | model_type: "smpl"
18 | gender: "neutral"
19 | folder: "/home/yusun/DataCenter2/smpl_models"
20 | use_compressed: False
21 | use_face_contour: True
22 | smpl:
23 | betas:
24 | num: 10
25 |
--------------------------------------------------------------------------------
/trace/lib/smpl_family/smpl_transfer_config_files/smpl2smplh.yaml:
--------------------------------------------------------------------------------
1 | datasets:
2 | mesh_folder:
3 | data_folder: 'transfer_data/meshes/smpl'
4 | deformation_transfer_path: 'transfer_data/smpl2smplh_def_transfer.pkl'
5 | mask_ids_fname: ''
6 | summary_steps: 100
7 |
8 | edge_fitting:
9 | per_part: False
10 |
11 | optim:
12 | type: 'trust-ncg'
13 | maxiters: 100
14 | gtol: 1e-06
15 |
16 | body_model:
17 | model_type: "smplh"
18 | # SMPL+H has no neutral model, so we have to manually select the gender
19 | gender: "female"
20 | # gender: "male"
21 | folder: "transfer_data/body_models"
22 | use_compressed: False
23 | smplh:
24 | betas:
25 | num: 10
26 |
--------------------------------------------------------------------------------
/trace/lib/smpl_family/smpl_transfer_config_files/smpl2smplx.yaml:
--------------------------------------------------------------------------------
1 | datasets:
2 | mesh_folder:
3 | data_folder: '/home/yusun/DataCenter2/smpl_models/smpl'
4 | deformation_transfer_path: '/home/yusun/DataCenter2/smpl_models/model_transfer/smpl2smplx_deftrafo_setup.pkl'
5 | mask_ids_fname: 'smplx_mask_ids.npy'
6 | summary_steps: 100
7 |
8 | edge_fitting:
9 | per_part: False
10 |
11 | optim:
12 | type: 'trust-ncg'
13 | maxiters: 100
14 | gtol: 1e-06
15 |
16 | body_model:
17 | model_type: "smplx"
18 | gender: "neutral"
19 | folder: "/home/yusun/DataCenter2/smpl_models/"
20 | use_compressed: False
21 | use_face_contour: True
22 | smplx:
23 | betas:
24 | num: 10
25 | expression:
26 | num: 10
27 |
--------------------------------------------------------------------------------
/trace/lib/smpl_family/smpl_transfer_config_files/smplh2smpl.yaml:
--------------------------------------------------------------------------------
1 | datasets:
2 | mesh_folder:
3 | data_folder: 'transfer_data/meshes/smplh'
4 | deformation_transfer_path: 'transfer_data/smplh2smpl_def_transfer.pkl'
5 | mask_ids_fname: ''
6 | summary_steps: 100
7 |
8 | edge_fitting:
9 | per_part: False
10 |
11 | optim:
12 | type: 'trust-ncg'
13 | maxiters: 100
14 | gtol: 1e-06
15 |
16 | body_model:
17 | model_type: "smpl"
18 | gender: "neutral"
19 | folder: "transfer_data/body_models"
20 | use_compressed: False
21 | use_face_contour: True
22 | smpl:
23 | betas:
24 | num: 10
25 |
--------------------------------------------------------------------------------
/trace/lib/smpl_family/smpl_transfer_config_files/smplh2smplx.yaml:
--------------------------------------------------------------------------------
1 | datasets:
2 | mesh_folder:
3 | data_folder: 'transfer_data/meshes/smplh'
4 | deformation_transfer_path: 'transfer_data/smplh2smplx_deftrafo_setup.pkl'
5 | mask_ids_fname: 'smplx_mask_ids.npy'
6 | summary_steps: 100
7 |
8 | edge_fitting:
9 | per_part: False
10 |
11 | optim:
12 | type: 'trust-ncg'
13 | maxiters: 100
14 | gtol: 1e-06
15 |
16 | body_model:
17 | model_type: "smplx"
18 | gender: "neutral"
19 | folder: "transfer_data/body_models"
20 | use_compressed: False
21 | use_face_contour: True
22 | smplx:
23 | betas:
24 | num: 10
25 | expression:
26 | num: 10
27 |
--------------------------------------------------------------------------------
/trace/lib/smpl_family/smpl_transfer_config_files/smplh2smplx_as.yaml:
--------------------------------------------------------------------------------
1 | datasets:
2 | mesh_folder:
3 | data_folder: 'transfer_data/meshes/amass_sample'
4 | deformation_transfer_path: 'transfer_data/smplh2smplx_deftrafo_setup.pkl'
5 | mask_ids_fname: 'smplx_mask_ids.npy'
6 | summary_steps: 100
7 |
8 | edge_fitting:
9 | per_part: False
10 |
11 | optim:
12 | type: 'trust-ncg'
13 | maxiters: 100
14 | gtol: 1e-06
15 |
16 | body_model:
17 | model_type: "smplx"
18 | gender: "neutral"
19 | folder: "models"
20 | use_compressed: False
21 | use_face_contour: True
22 | smplx:
23 | betas:
24 | num: 10
25 | expression:
26 | num: 10
27 |
--------------------------------------------------------------------------------
/trace/lib/smpl_family/smpl_transfer_config_files/smplh2smplx_onepose.yaml:
--------------------------------------------------------------------------------
1 | datasets:
2 | mesh_folder:
3 | data_folder: 'transfer_data/meshes/amass_onepose'
4 | deformation_transfer_path: 'transfer_data/smplh2smplx_deftrafo_setup.pkl'
5 | mask_ids_fname: 'smplx_mask_ids.npy'
6 | summary_steps: 100
7 |
8 | edge_fitting:
9 | per_part: False
10 |
11 | optim:
12 | type: 'adam'
13 | lr: 0.1
14 | maxiters: 10000
15 | gtol: 1e-06
16 |
17 | body_model:
18 | model_type: "smplx"
19 | gender: "neutral"
20 | folder: "models"
21 | use_compressed: False
22 | use_face_contour: True
23 | smplx:
24 | betas:
25 | num: 10
26 | expression:
27 | num: 10
28 |
--------------------------------------------------------------------------------
/trace/lib/smpl_family/smpl_transfer_config_files/smplx2smpl.yaml:
--------------------------------------------------------------------------------
1 | datasets:
2 | mesh_folder:
3 | data_folder: 'meshes/smplx'
4 | deformation_transfer_path: 'transfer_data/smplx2smpl_deftrafo_setup.pkl'
5 | mask_ids_fname: ''
6 | summary_steps: 100
7 |
8 | edge_fitting:
9 | per_part: False
10 |
11 | optim:
12 | type: 'lbfgs'
13 | maxiters: 200
14 | gtol: 1e-06
15 |
16 | body_model:
17 | model_type: "smpl"
18 | gender: "neutral"
19 | ext: 'pkl'
20 | folder: "transfer_data/body_models"
21 | use_compressed: False
22 | use_face_contour: True
23 | smpl:
24 | betas:
25 | num: 10
26 |
--------------------------------------------------------------------------------
/trace/lib/smpl_family/smpl_transfer_config_files/smplx2smplh.yaml:
--------------------------------------------------------------------------------
1 | datasets:
2 | mesh_folder:
3 | data_folder: 'meshes/smplx'
4 | deformation_transfer_path: 'transfer_data/smplx2smplh_deftrafo_setup.pkl'
5 | mask_ids_fname: ''
6 | summary_steps: 100
7 |
8 | edge_fitting:
9 | per_part: False
10 |
11 | optim:
12 | type: 'lbfgs'
13 | maxiters: 200
14 | gtol: 1e-06
15 |
16 | body_model:
17 | model_type: "smplh"
18 | # SMPL+H has no neutral model, so we have to manually select the gender
19 | gender: "female"
20 | # gender: "male"
21 | ext: 'pkl'
22 | folder: "transfer_data/body_models"
23 | use_compressed: False
24 | use_face_contour: True
25 | smplh:
26 | betas:
27 | num: 10
28 |
--------------------------------------------------------------------------------
/trace/lib/smpl_family/smpla.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from smpl_family.smpl import SMPL
3 | import torch.nn as nn
4 | from config import args
5 |
6 | class SMPLA_parser(nn.Module):
7 | def __init__(self, smpla_path, smil_path, baby_thresh=0.8):
8 | super(SMPLA_parser, self).__init__()
9 | self.smil_model = SMPL(smil_path, model_type='smpl')
10 | self.smpla_model = SMPL(smpla_path, model_type='smpla')
11 | self.baby_thresh = baby_thresh
12 |
13 | def forward(self, betas=None, poses=None, root_align=True, separate_smil_betas=False):
14 | baby_mask = betas[:,10] > self.baby_thresh
15 | if baby_mask.sum()>0:
16 | adult_mask = ~baby_mask
17 | verts, joints = torch.zeros(len(poses), 6890, 3, device=poses.device, dtype=poses.dtype), torch.zeros(len(poses), args().joint_num+17, 3, device=poses.device, dtype=poses.dtype)
18 |
19 | # SMIL beta - 10 dims, only need the estimated betas, kid_offsets are not used
20 | if separate_smil_betas:
21 | verts[baby_mask], joints[baby_mask] = self.smil_model(betas=betas[baby_mask,11:], poses=poses[baby_mask], root_align=root_align)
22 | else:
23 | verts[baby_mask], joints[baby_mask] = self.smil_model(betas=betas[baby_mask,:10], poses=poses[baby_mask], root_align=root_align)
24 |
25 | # SMPLA beta - 11 dims, the estimated betas (10) + kid_offsets (1)
26 | if adult_mask.sum()>0:
27 | verts[adult_mask], joints[adult_mask] = self.smpla_model(betas=betas[adult_mask,:11], poses=poses[adult_mask], root_align=root_align)
28 | else:
29 | verts, joints = self.smpla_model(betas=betas[:,:11], poses=poses, root_align=root_align)
30 |
31 | return verts, joints
--------------------------------------------------------------------------------
/trace/lib/tracker/basetrack.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from collections import OrderedDict
3 |
4 |
5 | class TrackState(object):
6 | New = 0
7 | Tracked = 1
8 | Lost = 2
9 | Removed = 3
10 |
11 |
12 | class BaseTrack(object):
13 | _count = 0
14 |
15 | track_id = 0
16 | is_activated = False
17 | state = TrackState.New
18 |
19 | history = OrderedDict()
20 | features = []
21 | curr_feature = None
22 | score = 0
23 | start_frame = 0
24 | frame_id = 0
25 | time_since_update = 0
26 |
27 | # multi-camera
28 | location = (np.inf, np.inf)
29 |
30 | @property
31 | def end_frame(self):
32 | return self.frame_id
33 |
34 | @staticmethod
35 | def next_id():
36 | BaseTrack._count += 1
37 | return BaseTrack._count
38 |
39 | @staticmethod
40 | def refresh_id():
41 | BaseTrack._count = 0
42 | return BaseTrack._count
43 |
44 | def activate(self, *args):
45 | raise NotImplementedError
46 |
47 | def predict(self):
48 | raise NotImplementedError
49 |
50 | def update(self, *args, **kwargs):
51 | raise NotImplementedError
52 |
53 | def mark_lost(self):
54 | self.state = TrackState.Lost
55 |
56 | def mark_removed(self):
57 | self.state = TrackState.Removed
--------------------------------------------------------------------------------
/trace/lib/tracker/cython_bbox/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 | .pytest_cache/
49 |
50 | # Translations
51 | *.mo
52 | *.pot
53 |
54 | # Django stuff:
55 | *.log
56 | local_settings.py
57 | db.sqlite3
58 |
59 | # Flask stuff:
60 | instance/
61 | .webassets-cache
62 |
63 | # Scrapy stuff:
64 | .scrapy
65 |
66 | # Sphinx documentation
67 | docs/_build/
68 |
69 | # PyBuilder
70 | target/
71 |
72 | # Jupyter Notebook
73 | .ipynb_checkpoints
74 |
75 | # pyenv
76 | .python-version
77 |
78 | # celery beat schedule file
79 | celerybeat-schedule
80 |
81 | # SageMath parsed files
82 | *.sage.py
83 |
84 | # Environments
85 | .env
86 | .venv
87 | env/
88 | venv/
89 | ENV/
90 | env.bak/
91 | venv.bak/
92 |
93 | # Spyder project settings
94 | .spyderproject
95 | .spyproject
96 |
97 | # Rope project settings
98 | .ropeproject
99 |
100 | # mkdocs documentation
101 | /site
102 |
103 | # mypy
104 | .mypy_cache/
105 |
--------------------------------------------------------------------------------
/trace/lib/tracker/cython_bbox/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include src/*.pyx
2 | include README.md
3 |
--------------------------------------------------------------------------------
/trace/lib/tracker/cython_bbox/README.md:
--------------------------------------------------------------------------------
1 | # cython_bbox
2 |
3 | cython_bbox is widely used in object detection tasks. To the best of my knowledge, it was first implemented in [Faster-RCNN](https://github.com/rbgirshick/py-faster-rcnn). Since then, almost all object detection projects have used the source code directly.
4 |
5 | In order to use it in standalone code snippets or small projects, I made it a PyPI module. The `cython_bbox.pyx` is entirely borrowed from [Faster-RCNN](https://github.com/rbgirshick/py-faster-rcnn). Thanks [RBG](http://www.rossgirshick.info/)!
6 |
7 | ## install
8 |
9 | ```
10 | pip install cython_bbox
11 | ```
12 |
13 | ## usage
14 |
15 |
16 | ```
17 | import numpy as np
18 | from cython_bbox import bbox_overlaps
19 | overlaps = bbox_overlaps(
20 |         np.ascontiguousarray(dt, dtype=np.float32),
21 |         np.ascontiguousarray(gt, dtype=np.float32)
22 |     )
23 | ```
24 |
--------------------------------------------------------------------------------
/trace/lib/tracker/cython_bbox/setup.py:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # Fast R-CNN
3 | # Copyright (c) 2015 Microsoft
4 | # Licensed under The MIT License [see LICENSE for details]
5 | # Written by Samson Wang
6 | # --------------------------------------------------------
7 |
8 | from __future__ import print_function
9 |
10 | from setuptools import Extension
11 | from setuptools import setup
12 | from distutils.command.build import build as _build
13 | import os
14 |
15 | # ref from https://stackoverflow.com/questions/54117786/add-numpy-get-include-argument-to-setuptools-without-preinstalled-numpy
16 | class build(_build):
17 | def finalize_options(self):
18 | super().finalize_options()
19 | import builtins
20 | builtins.__NUMPY_SETUP__ = False
21 | import numpy as np
22 | # Obtain the numpy include directory. This logic works across numpy versions.
23 | extension = next(m for m in self.distribution.ext_modules if m.name=='cython_bbox')
24 | try:
25 | extension.include_dirs.append(np.get_include())
26 | except AttributeError:
27 | extension.include_dirs.append(np.get_numpy_include())
28 |
29 | with open("README.md", "r") as fh:
30 | long_description = fh.read()
31 |
32 | if os.name == 'nt':
33 | compile_args = {'gcc': ['/Qstd=c99']}
34 | else:
35 | compile_args = ['-Wno-cpp']
36 |
37 | ext_modules = [
38 | Extension(
39 | name='cython_bbox',
40 | sources=['src/cython_bbox.pyx'],
41 | extra_compile_args = compile_args,
42 | )
43 | ]
44 |
45 | setup(
46 | name='cython_bbox',
47 | setup_requires=["setuptools>=18.0","Cython","numpy"],
48 | install_requires=["Cython","numpy"],
49 | ext_modules=ext_modules,
50 | cmdclass={'build': build},
51 | version = '0.1.3',
52 | description = 'Standalone cython_bbox',
53 | long_description=long_description,
54 | long_description_content_type="text/markdown",
55 | author = 'Samson Wang',
56 | author_email = 'samson.c.wang@gmail.com',
57 | url = 'https://github.com/samson-wang/cython_bbox.git',
58 | keywords = ['cython_bbox']
59 | )
60 |
61 |
--------------------------------------------------------------------------------
/trace/lib/tracker/cython_bbox/src/cython_bbox.pyx:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # Fast R-CNN
3 | # Copyright (c) 2015 Microsoft
4 | # Licensed under The MIT License [see LICENSE for details]
5 | # Written by Sergey Karayev
6 | # --------------------------------------------------------
7 |
8 | cimport cython
9 | import numpy as np
10 | cimport numpy as np
11 |
12 | DTYPE = np.float32
13 | ctypedef np.float32_t DTYPE_t
14 |
15 | def bbox_overlaps(
16 | np.ndarray[DTYPE_t, ndim=2] boxes,
17 | np.ndarray[DTYPE_t, ndim=2] query_boxes):
18 | """
19 | Parameters
20 | ----------
21 | boxes: (N, 4) ndarray of float
22 | query_boxes: (K, 4) ndarray of float
23 | Returns
24 | -------
25 | overlaps: (N, K) ndarray of overlap between boxes and query_boxes
26 | """
27 | cdef unsigned int N = boxes.shape[0]
28 | cdef unsigned int K = query_boxes.shape[0]
29 | cdef np.ndarray[DTYPE_t, ndim=2] overlaps = np.zeros((N, K), dtype=DTYPE)
30 | cdef DTYPE_t iw, ih, box_area
31 | cdef DTYPE_t ua
32 | cdef unsigned int k, n
33 | for k in range(K):
34 | box_area = (
35 | (query_boxes[k, 2] - query_boxes[k, 0] + 1) *
36 | (query_boxes[k, 3] - query_boxes[k, 1] + 1)
37 | )
38 | for n in range(N):
39 | iw = (
40 | min(boxes[n, 2], query_boxes[k, 2]) -
41 | max(boxes[n, 0], query_boxes[k, 0]) + 1
42 | )
43 | if iw > 0:
44 | ih = (
45 | min(boxes[n, 3], query_boxes[k, 3]) -
46 | max(boxes[n, 1], query_boxes[k, 1]) + 1
47 | )
48 | if ih > 0:
49 | ua = float(
50 | (boxes[n, 2] - boxes[n, 0] + 1) *
51 | (boxes[n, 3] - boxes[n, 1] + 1) +
52 | box_area - iw * ih
53 | )
54 | overlaps[n, k] = iw * ih / ua
55 | return overlaps
56 |
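
To make the inclusive-coordinate (+1) convention above concrete, here is a pure-Python restatement of the same overlap definition with a worked number (editorial addition, independent of the compiled extension):

```python
def iou_inclusive(box, query):
    """Same overlap definition as bbox_overlaps above: coordinates are inclusive, so width = x2 - x1 + 1."""
    iw = min(box[2], query[2]) - max(box[0], query[0]) + 1
    ih = min(box[3], query[3]) - max(box[1], query[1]) + 1
    if iw <= 0 or ih <= 0:
        return 0.0
    inter = iw * ih
    area = lambda b: (b[2] - b[0] + 1) * (b[3] - b[1] + 1)
    return inter / float(area(box) + area(query) - inter)

# Two 10x10 boxes offset by (5, 5): intersection 25, union 175.
print(iou_inclusive([0, 0, 9, 9], [5, 5, 14, 14]))  # 25 / 175 = 0.142857...
```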
--------------------------------------------------------------------------------
/trace/lib/utils/__init__.py:
--------------------------------------------------------------------------------
1 | import sys, os
2 | lib_dir = os.path.dirname(__file__)
3 | root_dir = os.path.join(lib_dir.replace(os.path.basename(lib_dir),''))
4 | if root_dir not in sys.path:
5 | sys.path.insert(0, root_dir)
6 |
7 | from .rot_6D import rot6D_to_angular
8 | from .projection import filter_out_incorrect_trans
9 | from .util import AverageMeter,normalize_kps, BHWC_to_BCHW, rotation_matrix_to_angle_axis,\
10 | batch_rodrigues, AverageMeter_Dict, transform_rot_representation, save_obj, save_yaml, save_json
11 | from .augments import img_kp_rotate, random_erase, RGB_mix, Synthetic_occlusion, calc_aabb, flip_kps, rot_imgplane, pose_processing, process_image
12 | from .train_utils import load_model, process_idx, copy_state_dict, save_model, write2log, exclude_params, train_entire_model, \
13 | print_dict, get_remove_keys, reorganize_items, init_seeds, fix_backbone
14 | from .cam_utils import normalize_trans_to_cam_params, denormalize_cam_params_to_trans, estimate_translation
15 | from .center_utils import process_gt_center, parse_gt_center3d, determine_sample_view
--------------------------------------------------------------------------------
/trace/lib/utils/center_utils.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import constants
3 | from config import args
4 | import numpy as np
5 | from .cam_utils import convert_cam_params_to_centermap_coords
6 |
7 | def denormalize_center(center, size=args().centermap_size):
8 | center = (center+1)/2*size
9 |
10 | center[center<1] = 1
11 | center[center>size - 1] = size - 1
12 | if isinstance(center, np.ndarray):
13 | center = center.astype(np.int32)
14 | elif isinstance(center, torch.Tensor):
15 | center = center.long()
16 | return center
17 |
18 | def process_gt_center(center_normed):
19 | valid_mask = center_normed[:,:,0]>-1
20 | valid_inds = torch.where(valid_mask)
21 | valid_batch_inds, valid_person_ids = valid_inds[0], valid_inds[1]
22 | # restrain the normalized center >= -1, so that the denormalized center > 0
23 | center_normed[valid_inds] = torch.max(center_normed[valid_inds], torch.ones_like(center_normed[valid_inds])*-1)
24 | center_gt = ((center_normed+1)/2*(args().centermap_size-1)).long()
25 | # restrain the denormalized center <= centermap_size-1
26 | center_gt = torch.min(center_gt, torch.ones_like(center_gt)*(args().centermap_size-1))
27 | center_gt_valid = center_gt[valid_mask]
28 | return (valid_batch_inds, valid_person_ids, center_gt_valid)
29 |
30 |
31 | def parse_gt_center3d(cam_mask, cams, size=args().centermap_size):
32 | batch_ids, person_ids = torch.where(cam_mask)
33 | cam_params = cams[batch_ids, person_ids]
34 | centermap_coords = convert_cam_params_to_centermap_coords(cam_params)
35 | czyxs = denormalize_center(centermap_coords, size=size)
36 | #sample_view_ids = determine_sample_view(batch_ids,czyxs)
37 | return batch_ids, person_ids, czyxs
38 |
39 |
40 | def determine_sample_view(batch_ids,czyxs,thresh=3.):
41 | batch_ids_unique = torch.unique(batch_ids)
42 | sample_view_ids = torch.zeros_like(batch_ids).long()
43 | for batch_id in batch_ids_unique:
44 | person_mask = batch_ids == batch_id
45 | if person_mask.sum()==1:
46 | continue
47 | sample_czyxs = czyxs[person_mask]
48 | sample_view_id = torch.zeros(len(sample_czyxs)).to(czyxs.device)
49 | for inds, czyx in enumerate(sample_czyxs):
50 | dist = torch.norm(sample_czyxs[:,1:] - czyx[1:][None].float(), dim=-1, p=2)
51 |             sample_view_id[inds] = (dist<thresh).sum()>0
52 | sample_view_ids[person_mask] = sample_view_id.long()
53 | return sample_view_ids
54 |
55 | if __name__ == '__main__':
56 | test_projection_depth()
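
Usage sketch (editorial addition): `process_gt_center` turns normalized ground-truth centers into integer CenterMap coordinates. This assumes the project config is importable so `args().centermap_size` resolves (typically 64); the centers below are placeholders and -2 marks missing people.

```python
import torch

# Two images, up to three people each; -2.0 marks an empty person slot.
center_normed = torch.tensor([[[ 0.0,  0.0], [ 0.5, -0.5], [-2.0, -2.0]],
                              [[-0.9,  0.9], [-2.0, -2.0], [-2.0, -2.0]]])
batch_ids, person_ids, centers = process_gt_center(center_normed)
print(batch_ids)   # tensor([0, 0, 1]): image index of each valid person
print(person_ids)  # tensor([0, 1, 0]): person slot within the image
print(centers)     # integer grid coordinates in [0, centermap_size - 1]
```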
--------------------------------------------------------------------------------
/trace/lib/utils/fetch_remote_file.py:
--------------------------------------------------------------------------------
1 | import os
2 | import paramiko
3 |
4 | local_cache_folder = '/home/yusun/Downloads/server_cacher'
5 | server_url = {18:'10.207.174.18'}
6 | def fetch_remote_file(remote_path, server_id=18):
7 | transport = paramiko.Transport((server_url[server_id], 22))
8 | transport.connect(username='suny', password='199497-')
9 | sftp = paramiko.SFTPClient.from_transport(transport)
10 | local_save_path = os.path.join(local_cache_folder, os.path.basename(remote_path))
11 | sftp.get(remote_path,local_save_path)
12 | return local_save_path
13 |
14 | class Remote_server_fetcher(object):
15 | def __init__(self, server_id=18) -> None:
16 | super().__init__()
17 | transport = paramiko.Transport((server_url[server_id], 22))
18 | transport.connect(username='suny', password='199497-')
19 | self.sftp = paramiko.SFTPClient.from_transport(transport)
20 | self.local_cache_folder = '/home/yusun/Downloads/server_cacher'
21 |
22 | def fetch(self, remote_path):
23 | remote_path = '/home/sunyu15/datasets/3DPW/imageFiles/courtyard_arguing_00/image_00000.jpg'
24 | local_save_path = os.path.join(self.local_cache_folder, os.path.basename(remote_path))
25 | self.sftp.get(remote_path,local_save_path)
26 | return local_save_path
27 |
28 | if __name__ == '__main__':
29 | RF = Remote_server_fetcher()
30 | RF.fetch('1')
--------------------------------------------------------------------------------
/trace/lib/utils/gpu_memory_log.py:
--------------------------------------------------------------------------------
1 | import gc
2 | import datetime
3 | import pynvml
4 |
5 | import torch
6 | import numpy as np
7 | import sys
8 |
9 | def _get_tensors():
10 | for obj in gc.get_objects():
11 | if torch.is_tensor(obj):
12 | tensor = obj
13 | else:
14 | continue
15 | if tensor.is_cuda:
16 | yield tensor
17 |
18 | def _write_log(f, write_str):
19 | print(write_str)
20 | f.write("%s\n" % write_str)
21 |
22 | def gpu_memory_log(gpu_log_file="gpu_mem.log", device=0):
23 | stack_layer = 1
24 | func_name = sys._getframe(stack_layer).f_code.co_name
25 | file_name = sys._getframe(stack_layer).f_code.co_filename
26 | line = sys._getframe(stack_layer).f_lineno
27 | now_time = datetime.datetime.now()
28 | log_format = 'LINE:%s, FUNC:%s, FILE:%s, TIME:%s, CONTENT:%s'
29 |
30 | pynvml.nvmlInit()
31 | handle = pynvml.nvmlDeviceGetHandleByIndex(device)
32 | meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
33 |
34 | with open(gpu_log_file, 'a+') as f:
35 | write_str = log_format % (line, func_name, file_name, now_time, "")
36 | _write_log(f, write_str)
37 |
38 | ts_list = [tensor.size() for tensor in _get_tensors()]
39 | new_tensor_sizes = {(type(x),
40 | tuple(x.size()),
41 | ts_list.count(x.size()),
42 | np.prod(np.array(x.size()))*4/1024**2)
43 | for x in _get_tensors()}
44 | for t, s, n, m in new_tensor_sizes:
45 | write_str = '[tensor: %s * Size:%s | Memory: %s M | %s]' %(str(n), str(s), str(m*n)[:6], str(t))
46 | _write_log(f, write_str)
47 |
48 | write_str = "memory_allocated:%f Mb" % float(torch.cuda.memory_allocated()/1024**2)
49 | _write_log(f, write_str)
50 | write_str = "max_memory_allocated:%f Mb" % float(torch.cuda.max_memory_allocated()/1024**2)
51 | _write_log(f, write_str)
52 | write_str = "memory_cached:%f Mb" % float(torch.cuda.memory_cached()/1024**2)
53 | _write_log(f, write_str)
54 | write_str = "max_memory_cached:%f Mb" % float(torch.cuda.max_memory_cached()/1024**2)
55 | _write_log(f, write_str)
56 | write_str = "Used Memory:%f Mb" % float(meminfo.used/1024**2)
57 | _write_log(f, write_str)
58 | write_str = "Free Memory:%f Mb" % float(meminfo.free/1024**2)
59 | _write_log(f, write_str)
60 | write_str = "Total Memory:%f Mb" % float(meminfo.total/1024**2)
61 | _write_log(f, write_str)
62 |
63 | pynvml.nvmlShutdown()
--------------------------------------------------------------------------------
/trace/lib/visualization/__init__.py:
--------------------------------------------------------------------------------
1 | import sys, os
2 | lib_dir = os.path.dirname(__file__)
3 | root_dir = os.path.join(lib_dir.replace(os.path.basename(lib_dir),''))
4 | if root_dir not in sys.path:
5 | sys.path.insert(0, root_dir)
--------------------------------------------------------------------------------
/trace/train.sh:
--------------------------------------------------------------------------------
1 | TRAIN_CONFIGS='configs/trace.yml'
2 |
3 | GPUS=$(cat $TRAIN_CONFIGS | shyaml get-value ARGS.GPUS)
4 | DATASET=$(cat $TRAIN_CONFIGS | shyaml get-value ARGS.datasets)
5 | TAB=$(cat $TRAIN_CONFIGS | shyaml get-value ARGS.tab)
6 |
7 | CUDA_VISIBLE_DEVICES=${GPUS} python -u -m train_video --gpu=${GPUS} --configs_yml=${TRAIN_CONFIGS}
8 | #CUDA_VISIBLE_DEVICES=${GPUS} nohup python -u -m train_video --gpu=${GPUS} --configs_yml=${TRAIN_CONFIGS} > '../project_data/trace_data/log/'${TAB}'_'${DATASET}'_g'${GPUS}.log 2>&1 &
9 |
--------------------------------------------------------------------------------