├── support_data
│   ├── smplx_APose.npz
│   ├── conf
│   │   ├── telegram.yaml
│   │   ├── parallel_conf
│   │   │   ├── moshpp_parallel.yaml
│   │   │   ├── blender_parallel.yaml
│   │   │   ├── soma_run_parallel.yaml
│   │   │   ├── soma_train_parallel.yaml
│   │   │   ├── eval_label_parallel.yaml
│   │   │   └── eval_v2v_parallel.yaml
│   │   ├── eval_v2v.yaml
│   │   ├── eval_label.yaml
│   │   ├── soma_run_conf.yaml
│   │   ├── render_conf.yaml
│   │   └── soma_train_conf.yaml
│   └── tests
│       ├── mosh_stageii.pkl
│       ├── 0006_normal_walk2.pkl
│       └── marker_layout.json
├── MANIFEST.in
├── requirements.txt
├── src
│   ├── __init__.py
│   ├── soma
│   │   ├── __init__.py
│   │   ├── amass
│   │   │   ├── __init__.py
│   │   │   ├── amass_stats.py
│   │   │   ├── prepare_amass_npz.py
│   │   │   ├── copy_into_release_folders.py
│   │   │   ├── mosh_manual.py
│   │   │   ├── prepare_amass_smplx.py
│   │   │   └── amass_info.py
│   │   ├── data
│   │   │   ├── __init__.py
│   │   │   ├── sample_hand_sequences.py
│   │   │   ├── prepare_ghorbani_permutation_data.py
│   │   │   ├── mocap_noise_tools.py
│   │   │   ├── body_synthesizer.py
│   │   │   ├── synthetic_body_dataset.py
│   │   │   └── amass_marker_noise_model.py
│   │   ├── models
│   │   │   ├── __init__.py
│   │   │   ├── optimal_transport.py
│   │   │   ├── transformer.py
│   │   │   ├── model_components.py
│   │   │   └── soma_model.py
│   │   ├── render
│   │   │   ├── __init__.py
│   │   │   ├── blender_tools.py
│   │   │   ├── mesh_to_video_standard.py
│   │   │   └── parameters_to_mesh.py
│   │   ├── tools
│   │   │   ├── __init__.py
│   │   │   ├── permutation_tools.py
│   │   │   ├── parallel_tools.py
│   │   │   └── eval_tools.py
│   │   ├── train
│   │   │   ├── __init__.py
│   │   │   └── train_soma_multiple.py
│   │   └── run_soma
│   │       ├── __init__.py
│   │       └── paper_plots
│   │           ├── __init__.py
│   │           └── mosh_soma_dataset.py
│   └── tutorials
│       └── README.md
├── .gitignore
├── setup.py
├── README.md
└── LICENSE
/support_data/smplx_APose.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nghorbani/soma/HEAD/support_data/smplx_APose.npz
--------------------------------------------------------------------------------
/support_data/conf/telegram.yaml:
--------------------------------------------------------------------------------
1 | token:
2 | chat_id:
3 | silent_completion: false
4 | proxy_url: https://proxy:8080
5 |
6 |
--------------------------------------------------------------------------------
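This file parameterizes job-completion notifications; the fields match the Telegram provider of the notifiers package listed in requirements.txt. A minimal sketch of consuming it, assuming token and chat_id have been filled in (the actual call site is not part of this listing):

from notifiers import get_notifier
from omegaconf import OmegaConf

cfg = OmegaConf.load('support_data/conf/telegram.yaml')
telegram = get_notifier('telegram')
telegram.notify(message='SOMA job finished',
                token=cfg.token,
                chat_id=cfg.chat_id,
                disable_notification=cfg.silent_completion)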
/support_data/tests/mosh_stageii.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nghorbani/soma/HEAD/support_data/tests/mosh_stageii.pkl
--------------------------------------------------------------------------------
/support_data/tests/0006_normal_walk2.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nghorbani/soma/HEAD/support_data/tests/0006_normal_walk2.pkl
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | recursive-include support_data *.npz
2 | recursive-include support_data *.ply
3 | recursive-include support_data/conf *.yaml
4 | # added by check-manifest
5 | include *.txt
6 | recursive-include src *.cpp
7 | recursive-include src *.h
8 | recursive-include src *.md
9 | recursive-include src *.py
10 | recursive-include src *.pyx
11 | recursive-include src *.txt
12 | recursive-include src *.whl
13 | recursive-include src Makefile
14 |
--------------------------------------------------------------------------------
/support_data/conf/parallel_conf/moshpp_parallel.yaml:
--------------------------------------------------------------------------------
1 | pool_size: 1
2 | max_num_jobs:
3 | randomly_run_jobs: False
4 | max_mem: 16
5 | jobs_per_instance: 1
6 | bid_amount: 1
7 | gpu_memory:
8 | avoid_nodes:
9 | use_highend_gpus: False
10 | cpu_count: 1
11 | gpu_count: 0
12 | concurrency_limits: 0
13 | username: nghorbani
14 | log_dir: /is/cluster/scratch/nghorbani/.cluster_log/moshpp_smplx
15 | jobs_unique_name:
16 | max_run_time:
17 | max_auto_retries:
--------------------------------------------------------------------------------
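The files in parallel_conf/ share one schema and are consumed by run_parallel_jobs in src/soma/tools/parallel_tools.py. A minimal sketch of the load-and-merge it performs, using this file as the base config:

from omegaconf import OmegaConf

base_parallel_cfg = OmegaConf.load('support_data/conf/parallel_conf/moshpp_parallel.yaml')
overrides = OmegaConf.create({'pool_size': 4})  # per-call overrides win over the base file
parallel_cfg = OmegaConf.merge(base_parallel_cfg, overrides)
print(parallel_cfg.pool_size)  # 4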
/support_data/conf/parallel_conf/blender_parallel.yaml:
--------------------------------------------------------------------------------
1 | pool_size: 1
2 | max_num_jobs:
3 | randomly_run_jobs: False
4 | max_mem: 32
5 | jobs_per_instance: 1
6 | bid_amount: 5
7 | gpu_memory: 8
8 | avoid_nodes:
9 | use_highend_gpus: False
10 | cpu_count: 1
11 | gpu_count: 1
12 | concurrency_limits: 0
13 | username: nghorbani
14 | log_dir: /is/cluster/scratch/nghorbani/.cluster_log/blender_render
15 | jobs_unique_name:
16 | max_run_time: 8
17 | max_auto_retries: 2
--------------------------------------------------------------------------------
/support_data/conf/parallel_conf/soma_run_parallel.yaml:
--------------------------------------------------------------------------------
1 | pool_size: 1
2 | max_num_jobs:
3 | randomly_run_jobs: False
4 | max_mem: 128
5 | jobs_per_instance: 10
6 | bid_amount: 5
7 | gpu_memory: 12
8 | avoid_nodes:
9 | use_highend_gpus: False
10 | cpu_count: 2
11 | gpu_count: 1
12 | concurrency_limits: 0
13 | username: nghorbani
14 | log_dir: /is/cluster/scratch/nghorbani/.cluster_log/soma_run
15 | jobs_unique_name:
16 | max_run_time: 1
17 | max_auto_retries: 10
--------------------------------------------------------------------------------
/support_data/conf/parallel_conf/soma_train_parallel.yaml:
--------------------------------------------------------------------------------
1 | pool_size: 1
2 | max_num_jobs: -1
3 | randomly_run_jobs: False
4 | max_mem: 128
5 | jobs_per_instance: 1
6 | bid_amount: 5
7 | gpu_memory: 12
8 | avoid_nodes:
9 | use_highend_gpus: True
10 | cpu_count: 6
11 | gpu_count: 4
12 | concurrency_limits: 0
13 | username: nghorbani
14 | log_dir: /is/cluster/scratch/nghorbani/.cluster_log/soma_train
15 | jobs_unique_name:
16 | max_run_time:
17 | max_auto_retries:
--------------------------------------------------------------------------------
/support_data/conf/parallel_conf/eval_label_parallel.yaml:
--------------------------------------------------------------------------------
1 | pool_size: 1
2 | max_num_jobs:
3 | randomly_run_jobs: False
4 | max_mem: 128
5 | jobs_per_instance: 10
6 | bid_amount: 1
7 | gpu_memory:
8 | avoid_nodes:
9 | use_highend_gpus: False
10 | cpu_count: 1
11 | gpu_count: 0
12 | concurrency_limits: 0
13 | username: nghorbani
14 | log_dir: /is/cluster/scratch/nghorbani/.cluster_log/soma_eval_label
15 | jobs_unique_name:
16 | max_run_time: 1
17 | max_auto_retries: 10
--------------------------------------------------------------------------------
/support_data/conf/parallel_conf/eval_v2v_parallel.yaml:
--------------------------------------------------------------------------------
1 | pool_size: 1
2 | max_num_jobs:
3 | randomly_run_jobs: False
4 | max_mem: 128
5 | jobs_per_instance: 1
6 | bid_amount: 1
7 | gpu_memory: 12
8 | avoid_nodes:
9 | use_highend_gpus: False
10 | cpu_count: 1
11 | gpu_count: 1
12 | concurrency_limits: 0
13 | username: nghorbani
14 | log_dir: /is/cluster/scratch/nghorbani/.cluster_log/soma_eval_v2v
15 | jobs_unique_name:
16 | max_run_time: 1
17 | max_auto_retries: 10
--------------------------------------------------------------------------------
/support_data/conf/eval_v2v.yaml:
--------------------------------------------------------------------------------
1 | mosh_gt:
2 |   stageii_fname: ???
3 |   ds_name: ${resolve_mocap_ds_name:${mosh_gt.stageii_fname}}
4 |   subject_name: ${resolve_mocap_subject:${mosh_gt.stageii_fname}}
5 |   basename: ${resolve_mosh_basename:${mosh_gt.stageii_fname}}
6 |
7 | mosh_rec:
8 |   stageii_fname: ???
9 |   ds_name: ${resolve_mocap_ds_name:${mosh_rec.stageii_fname}}
10 |   subject_name: ${resolve_mocap_subject:${mosh_rec.stageii_fname}}
11 |   basename: ${resolve_mosh_basename:${mosh_rec.stageii_fname}}
12 |
13 | dirs:
14 |   support_base_dir: ???
15 |   work_base_dir: ???
16 |   eval_pkl_out_fname: ${dirs.work_base_dir}/${mosh_rec.ds_name}/${mosh_rec.subject_name}/${mosh_rec.basename}_v2v.pkl
17 |
--------------------------------------------------------------------------------
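The ${resolve_*: ...} entries are custom OmegaConf resolvers shipped with SOMA's support code, and ??? marks mandatory values that must be supplied at runtime. A sketch with a hypothetical stand-in resolver showing how the interpolation fires once the mandatory field is set:

from pathlib import Path
from omegaconf import OmegaConf

# hypothetical stand-in for SOMA's resolver: dataset name = third-from-last path part
OmegaConf.register_new_resolver('resolve_mocap_ds_name', lambda fname: Path(fname).parts[-3])

cfg = OmegaConf.load('support_data/conf/eval_v2v.yaml')
cfg.mosh_gt.stageii_fname = '/mocaps/SOMA/soma_subject1/run_001_stageii.pkl'  # hypothetical path
print(cfg.mosh_gt.ds_name)  # SOMA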
/requirements.txt:
--------------------------------------------------------------------------------
1 | git+https://github.com/nghorbani/human_body_prior.git@SOMA
2 | git+https://github.com/nghorbani/body_visualizer
3 | git+https://github.com/nghorbani/configer.git
4 | imageio
5 | mkl
6 | mkl-service
7 | mkl_fft
8 | mkl_random
9 | numpy
10 | pyOpenSSL
11 | pillow
12 | scikit-image
13 | scipy
14 | setuptools
15 | loguru
16 | omegaconf
17 | six
18 | tk
19 | toolz
20 | wheel
21 | pytorch3d
22 | notifiers
23 | tqdm
24 | opencv-python
25 | c3d
26 | chumpy
27 | ipython
28 | markdown
29 | pandas
30 | pycodestyle
31 | pytorch-lightning
32 | scikit-learn
33 | tensorboard
34 | threadpoolctl
35 | trimesh
36 | colour
37 | seaborn
38 | tables
39 | transforms3d
40 | xlsxwriter
41 | jupyterlab
--------------------------------------------------------------------------------
/support_data/conf/eval_label.yaml:
--------------------------------------------------------------------------------
1 | mocap_gt:
2 |   fname: ??? # should be provided at runtime
3 |   ds_name: ${resolve_mocap_ds_name:${mocap_gt.fname}} # dataset name
4 |   subject_name: ${resolve_mocap_subject:${mocap_gt.fname}}
5 |   basename: ${resolve_mocap_basename:${mocap_gt.fname}}
6 |   unit: mm
7 |   rotate:
8 |   ds_rate: 1
9 |   set_labels_to_nan:
10 |
11 | mocap_rec:
12 |   fname: ???
13 |   ds_name: ${resolve_mocap_ds_name:${mocap_rec.fname}}
14 |   subject_name: ${resolve_mocap_subject:${mocap_rec.fname}}
15 |   basename: ${resolve_mocap_basename:${mocap_rec.fname}}
16 |   unit: m # the mocap unit for soma pkl is meters
17 |   rotate:
18 |   ds_rate: 1
19 |
20 | dirs:
21 |   support_base_dir: ???
22 |   work_base_dir: ???
23 |   eval_pkl_out_fname: ${dirs.work_base_dir}/${mocap_rec.ds_name}/${mocap_rec.subject_name}/${mocap_rec.basename}_labeling.pkl
24 |   superset_fname: ???
25 |
--------------------------------------------------------------------------------
/support_data/conf/soma_run_conf.yaml:
--------------------------------------------------------------------------------
1 | mocap:
2 |   fname: ???
3 |   ds_name: ${resolve_mocap_ds_name:${mocap.fname}}
4 |   subject_name: ${resolve_mocap_subject:${mocap.fname}}
5 |   basename: ${resolve_mocap_basename:${mocap.fname}}
6 |   unit: mm
7 |   rotate:
8 |   start_fidx: 0
9 |   end_fidx: -1
10 |   ds_rate: 1
11 |
12 | dirs:
13 |   work_base_dir: ???
14 |   support_base_dir: ???
15 |   work_dir: ${soma.expr_dir}/evaluations/${resolve_soma_runtime_work_dir:${soma.tracklet_labeling.enable}}
16 |   mocap_out_fname: ${dirs.work_dir}/${mocap.ds_name}/${mocap.subject_name}/${mocap.basename}.pkl
17 |   log_fname: ${dirs.work_dir}/${mocap.ds_name}/${mocap.subject_name}/${mocap.basename}.log
18 |
19 | soma:
20 |   expr_id: ???
21 |   data_id: OC_05_G_03_real_000_synt_100
22 |   expr_dir: ${dirs.work_base_dir}/training_experiments/${soma.expr_id}/${soma.data_id}
23 |   tracklet_labeling:
24 |     enable: False
25 |
26 | soma_train:
27 |
28 |
29 | keep_nan_points: True # required for labeling evaluation
30 | save_c3d: True # save a c3d file from the autolabeled mocap data
31 | retain_model_debug_details: False
32 | batch_size: 512
33 | verbosity: 1
34 | remove_zero_trajectories: False # required for labeling evaluation
--------------------------------------------------------------------------------
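A sketch of supplying the mandatory ??? fields from a dotlist before the config is consumed; the experiment id and paths here are hypothetical:

from omegaconf import OmegaConf

cfg = OmegaConf.load('support_data/conf/soma_run_conf.yaml')
cli = OmegaConf.from_dotlist([
    'mocap.fname=/mocaps/SOMA/soma_subject1/run_001.c3d',
    'soma.expr_id=V48_02_SOMA',
    'dirs.work_base_dir=/tmp/soma_work',
    'dirs.support_base_dir=/tmp/soma_support',
])
cfg = OmegaConf.merge(cfg, cli)
print(cfg.soma.expr_dir)  # /tmp/soma_work/training_experiments/V48_02_SOMA/OC_05_G_03_real_000_synt_100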
/src/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
--------------------------------------------------------------------------------
/src/soma/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
--------------------------------------------------------------------------------
/src/soma/amass/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
--------------------------------------------------------------------------------
/src/soma/data/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
--------------------------------------------------------------------------------
/src/soma/models/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
--------------------------------------------------------------------------------
/src/soma/render/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
--------------------------------------------------------------------------------
/src/soma/tools/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
--------------------------------------------------------------------------------
/src/soma/train/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
--------------------------------------------------------------------------------
/src/soma/run_soma/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
--------------------------------------------------------------------------------
/src/soma/run_soma/paper_plots/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
--------------------------------------------------------------------------------
/support_data/conf/render_conf.yaml:
--------------------------------------------------------------------------------
1 | dirs:
2 |   support_base_dir: ???
3 |   temp_base_dir: ???
4 |   work_base_dir: ???
5 |
6 |   png_out_dir: ${dirs.temp_base_dir}/${out.ds_name}/png_files/${render.blend_file_basename}/${out.subject_action_name}_${out.basename}
7 |   mesh_out_dir: ${dirs.temp_base_dir}/${out.ds_name}/mesh_files/${out.subject_action_name}_${out.basename}
8 |   mp4_out_fname: ${dirs.work_base_dir}/${out.ds_name}/${out.subject_action_name}/${out.basename}.mp4
9 |
10 | out:
11 |   basename: ${resolve_out_basename:${mesh.mosh_stageii_pkl_fnames}}
12 |   ds_name: ${resolve_out_ds_name:${mesh.mosh_stageii_pkl_fnames}}
13 |   subject_action_name: ${resolve_subject_action_name:${mesh.mosh_stageii_pkl_fnames}}
14 |
15 | mesh:
16 |   mosh_stageii_pkl_fnames: ???
17 |   enable_dmpl: True
18 |   ds_rate: 10
19 |
20 | colors:
21 |   default: [ 0.345, 0.580, 0.713 ]
22 |
23 | marker_radius:
24 |   default: 0.009
25 |   body: 0.009
26 |   face: 0.004
27 |   finger_left: 0.005
28 |   finger_right: 0.005
29 | marker_color:
30 |   default: [ .7, .7, .7 ]
31 |   style: superset # correctness/superset/black
32 |
33 | render:
34 |   blend_file_basename: soma_standard
35 |
36 |   show_markers: False
37 |   show_body: True
38 |   render_only_one_image: False
39 |   video_fps: 15
40 |
41 |   resolution:
42 |     change_from_blend: False
43 |     default: [ 800, 1024 ] # [x_res, y_res]
44 |
45 |   floor:
46 |     enable: True # use floor plane
47 |     plane_location: [ 0.0, 0.0, -0.01 ] # bf/lr/ud
48 |
49 |   blender_fname: ${dirs.support_base_dir}/blender/blend_files/${render.blend_file_basename}.blend
50 |
51 |   save_final_blend_file: False
52 |
53 |   render_engine: cycles # cycles/eevee; as of 2.83 eevee doesn't work in headless mode
54 |   camera_tracking_mode: body # use tracking camera
55 |
56 |   rotate_body_object_z: # this should be replaced by camera rotation
57 |
58 |   num_samples:
59 |     cycles: 128
60 |     eevee: 64
--------------------------------------------------------------------------------
/support_data/tests/marker_layout.json:
--------------------------------------------------------------------------------
1 | {
2 |   "markersets": [
3 |     {
4 |       "distance_from_skin": 0.0095,
5 |       "indices": {
6 |         "C7": 3470,
7 |         "CLAV": 3171,
8 |         "LANK": 3327,
9 |         "LFWT": 2927,
10 |         "LBAK": 2885,
11 |         "LUPA": 1505,
12 |         "LBHD": 395,
13 |         "LSCAP": 2937,
14 |         "LBUM": 3116,
15 |         "LBUST": 3040,
16 |         "LCHEECK": 239,
17 |         "LELB": 1666,
18 |         "LELBIN": 1725,
19 |         "LFHD": 0,
20 |         "LFIN": 2174,
21 |         "LFRM": 1570,
22 |         "LFTHI": 1500,
23 |         "LFTHIIN": 1365,
24 |         "LHEE": 3392,
25 |         "LWRA": 2112,
26 |         "LKNE": 1053,
27 |         "LKNEIN": 1058,
28 |         "LMT1": 3336,
29 |         "LMT5": 3346,
30 |         "LNWST": 1323,
31 |         "LWRB": 2108,
32 |         "LBWT": 3122,
33 |         "LRSTBEEF": 3314,
34 |         "LSHO": 1861,
35 |         "LTHI": 1454,
36 |         "LTHMB": 2224,
37 |         "LSHIN": 1097,
38 |         "LTOE": 3232,
39 |         "MBLLY": 3504,
40 |         "RANK": 6607,
41 |         "RFWT": 6385,
42 |         "RBAK": 6344,
43 |         "RUPA": 4978,
44 |         "RBHD": 3897,
45 |         "RSCAP": 6392,
46 |         "RBUM": 6540,
47 |         "RBUST": 6488,
48 |         "RCHEECK": 3749,
49 |         "RELB": 5135,
50 |         "RELBIN": 5194,
51 |         "RFHD": 3512,
52 |         "RFIN": 5635,
53 |         "RFRM": 5067,
54 |         "RWRA": 5570,
55 |         "RFTHI": 4972,
56 |         "RFTHIIN": 4838,
57 |         "RHEE": 6792,
58 |         "RKNE": 4538,
59 |         "RKNEIN": 4544,
60 |         "RMT1": 6736,
61 |         "RMT5": 6747,
62 |         "RNWST": 4804,
63 |         "RWRB": 5568,
64 |         "RBWT": 6544,
65 |         "RRSTBEEF": 6682,
66 |         "RSHO": 5322,
67 |         "RTHI": 4927,
68 |         "RTHMB": 5686,
69 |         "RSHIN": 4581,
70 |         "RTOE": 6633,
71 |         "STRN": 3506,
72 |         "T10": 3505
73 |       },
74 |       "marker_radius": 0.0095,
75 |       "template_fname": "/ps/body/projects/smpl/character_compiler/models_nm/template_female_softnormals.obj",
76 |       "type": "ball"
77 |     }
78 |   ]
79 | }
--------------------------------------------------------------------------------
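A minimal sketch of reading this test layout; each entry of indices maps a marker label to a vertex id on the layout's template mesh:

import json

with open('support_data/tests/marker_layout.json') as f:
    layout = json.load(f)

markerset = layout['markersets'][0]
print(markerset['type'], markerset['marker_radius'])  # ball 0.0095
print(len(markerset['indices']))                      # 67 marker labels
print(markerset['indices']['C7'])                     # 3470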
/src/soma/tools/permutation_tools.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
31 | import numpy as np
32 | import torch
33 |
34 |
35 | def batch_shuffle(points):
36 |     """Randomly permute the marker dimension of a batch of point sets; also return the inverse permutation."""
37 |     n_batch = points.shape[0]
38 |     n_marker = points.shape[-2]
39 |
40 |     rand = torch.rand(n_batch, n_marker, device=points.device)
41 |     batch_rand_perm = rand.argsort(dim=1)
42 |
43 |     permuted_points = batched_index_select(points, 1, batch_rand_perm)
44 |     inv_ids = torch.argsort(batch_rand_perm)
45 |
46 |     assert (torch.unique(batch_rand_perm, return_counts=True)[1] == n_batch).sum() == n_marker
47 |
48 |     return permuted_points, inv_ids
49 |
50 |
51 | def batched_index_select(input, dim, index):
52 |     """Gather elements of `input` along `dim` using a per-batch `index`."""
53 |     views = [input.shape[0]] + [1 if i != dim else -1 for i in np.arange(1, len(input.shape))]
54 |     expanse = list(input.shape)
55 |     expanse[0] = -1
56 |     expanse[dim] = -1
57 |     index = index.view(views).expand(expanse)
58 |     return torch.gather(input, dim, index)
--------------------------------------------------------------------------------
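A quick usage sketch (assuming the package is importable as soma): batch_shuffle returns the permuted points together with the inverse permutation, so gathering with inv_ids restores the original marker order:

import torch
from soma.tools.permutation_tools import batch_shuffle, batched_index_select

points = torch.randn(4, 53, 3)  # 4 frames, 53 markers, xyz
permuted_points, inv_ids = batch_shuffle(points)
restored = batched_index_select(permuted_points, 1, inv_ids)
assert torch.allclose(restored, points)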
/src/soma/data/sample_hand_sequences.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
31 |
32 | import numpy as np
33 | import torch
34 |
35 |
36 | class MANO():
37 |     def __init__(self):
38 |         raise NotImplementedError('This functionality is not released for current SOMA.')
39 |
40 |
41 | class MANO_Torch():
42 |     def __init__(self):
43 |         raise NotImplementedError('This functionality is not released for current SOMA.')
44 |
45 |
46 | class VPoser():
47 |     def __init__(self):
48 |         raise NotImplementedError('This functionality is not released for current SOMA.')
49 |
50 |
51 | def right2left_aangle(right_aangle):
52 |     raise NotImplementedError('This functionality is not released for current SOMA.')
53 |
54 |
55 | def fullrightpose2leftpose(rightpose):
56 |     raise NotImplementedError('This functionality is not released for current SOMA.')
57 |
58 |
59 | def hand_pose_sequence_generator(handL_frames, handR_frames, hand_prior_type='mano'):
60 |     raise NotImplementedError('This functionality is not released for current SOMA.')
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 | .idea/
131 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
31 | from setuptools import setup, find_packages
32 | from glob import glob
33 |
34 | setup(name='soma',
35 |       version='5.0.0',
36 |       packages=find_packages('src'),
37 |       package_dir={'': 'src'},
38 |       include_package_data=True,
39 |       data_files=[
40 |           ('soma/support_data', glob('support_data/*.*')),
41 |           ('soma/support_data/conf/parallel_conf', glob('support_data/conf/parallel_conf/*.*')),
42 |           ('soma/support_data/github_files', glob('support_data/github_files/*.*')),
43 |           ('soma/support_data/tests', glob('support_data/tests/*.*')),
44 |           ('soma/support_data/conf', glob('support_data/conf/*.*'))
45 |       ],
46 |
47 |       author='Nima Ghorbani',
48 |       author_email='nghorbani@tue.mpg.de',
49 |       maintainer='Nima Ghorbani',
50 |       maintainer_email='nghorbani@tue.mpg.de',
51 |       url='https://github.com/nghorbani/soma',
52 |       description='Solving Optical Marker-Based Motion Capture Automatically',
53 |       license='See LICENSE.txt',
54 |       long_description=open("README.md").read(),
55 |       long_description_content_type="text/markdown",
56 |       install_requires=[],
57 |       dependency_links=[],
58 |       classifiers=[
59 |           "Intended Audience :: Science/Research",
60 |           "Natural Language :: English",
61 |           "Operating System :: POSIX",
62 |           "Operating System :: POSIX :: BSD",
63 |           "Operating System :: POSIX :: Linux",
64 |           "Programming Language :: Python",
65 |           "Programming Language :: Python :: 3",
66 |           "Programming Language :: Python :: 3.7",
67 |       ])
68 |
--------------------------------------------------------------------------------
/src/soma/tools/parallel_tools.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
31 | from pathlib import Path
32 | from typing import List
33 | from typing import Union
34 |
35 | from loguru import logger
36 | from omegaconf import DictConfig
37 | from omegaconf import OmegaConf
38 |
39 |
40 | def run_parallel_jobs(func, jobs: List[DictConfig], parallel_cfg: DictConfig = None,
41 |                       base_parallel_cfg: Union[DictConfig, Union[Path, str]] = None) -> None:
42 |     if parallel_cfg is None:
43 |         parallel_cfg = {}  # todo update parallel cfg in case it is provided
44 |
45 |     if base_parallel_cfg is None:
46 |         base_parallel_cfg = {}
47 |     elif not isinstance(base_parallel_cfg, DictConfig):
48 |         base_parallel_cfg = OmegaConf.load(base_parallel_cfg)
49 |
50 |     parallel_cfg = OmegaConf.merge(base_parallel_cfg, OmegaConf.create(parallel_cfg))
51 |
52 |     pool_size = parallel_cfg.pool_size
53 |     logger.info(f'#Job(s) submitted: {len(jobs)}')
54 |     max_num_jobs = parallel_cfg.get('max_num_jobs', -1)
55 |     if max_num_jobs and max_num_jobs > 0:
56 |         jobs = jobs[:max_num_jobs]
57 |         logger.info(f'max_num_jobs is set to {max_num_jobs}. choosing the first #Job(s): {len(jobs)}')
58 |
59 |     if parallel_cfg.randomly_run_jobs:
60 |         from random import shuffle
61 |         shuffle(jobs)
62 |         logger.info('Will run the jobs in random order.')
63 |
64 |     if len(jobs) == 0: return
65 |
66 |     if pool_size == 0:  # cluster submission path
67 |         raise NotImplementedError('This functionality is not released for current SOMA.')
68 |     elif pool_size < 0:
69 |         return
70 |     else:  # run in-process, sequentially
71 |         for job in jobs:
72 |             func(job)
--------------------------------------------------------------------------------
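A minimal local usage sketch: with pool_size >= 1 run_parallel_jobs simply executes the jobs in-process, one after another (the cluster path behind pool_size == 0 is not released):

from omegaconf import OmegaConf
from soma.tools.parallel_tools import run_parallel_jobs

def describe(job):
    print(job.name)

jobs = [OmegaConf.create({'name': f'job_{i}'}) for i in range(3)]
parallel_cfg = OmegaConf.create({'pool_size': 1, 'max_num_jobs': -1, 'randomly_run_jobs': False})
run_parallel_jobs(describe, jobs, parallel_cfg=parallel_cfg)  # prints job_0, job_1, job_2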
/src/soma/models/optimal_transport.py:
--------------------------------------------------------------------------------
1 | # %BANNER_BEGIN%
2 | # ---------------------------------------------------------------------
3 | # %COPYRIGHT_BEGIN%
4 | #
5 | # Magic Leap, Inc. ("COMPANY") CONFIDENTIAL
6 | #
7 | # Unpublished Copyright (c) 2020
8 | # Magic Leap, Inc., All Rights Reserved.
9 | #
10 | # NOTICE: All information contained herein is, and remains the property
11 | # of COMPANY. The intellectual and technical concepts contained herein
12 | # are proprietary to COMPANY and may be covered by U.S. and Foreign
13 | # Patents, patents in process, and are protected by trade secret or
14 | # copyright law. Dissemination of this information or reproduction of
15 | # this material is strictly forbidden unless prior written permission is
16 | # obtained from COMPANY. Access to the source code contained herein is
17 | # hereby forbidden to anyone except current COMPANY employees, managers
18 | # or contractors who have executed Confidentiality and Non-disclosure
19 | # agreements explicitly covering such access.
20 | #
21 | # The copyright notice above does not evidence any actual or intended
22 | # publication or disclosure of this source code, which includes
23 | # information that is confidential and/or proprietary, and is a trade
24 | # secret, of COMPANY. ANY REPRODUCTION, MODIFICATION, DISTRIBUTION,
25 | # PUBLIC PERFORMANCE, OR PUBLIC DISPLAY OF OR THROUGH USE OF THIS
26 | # SOURCE CODE WITHOUT THE EXPRESS WRITTEN CONSENT OF COMPANY IS
27 | # STRICTLY PROHIBITED, AND IN VIOLATION OF APPLICABLE LAWS AND
28 | # INTERNATIONAL TREATIES. THE RECEIPT OR POSSESSION OF THIS SOURCE
29 | # CODE AND/OR RELATED INFORMATION DOES NOT CONVEY OR IMPLY ANY RIGHTS
30 | # TO REPRODUCE, DISCLOSE OR DISTRIBUTE ITS CONTENTS, OR TO MANUFACTURE,
31 | # USE, OR SELL ANYTHING THAT IT MAY DESCRIBE, IN WHOLE OR IN PART.
32 | #
33 | # %COPYRIGHT_END%
34 | # ----------------------------------------------------------------------
35 | # %AUTHORS_BEGIN%
36 | #
37 | # Originating Authors: Paul-Edouard Sarlin
38 | #
39 | # %AUTHORS_END%
40 | # --------------------------------------------------------------------*/
41 | # %BANNER_END%
42 |
43 | import torch
44 |
45 |
46 | def log_sinkhorn_iterations(Z, log_mu, log_nu, iters: int):
47 |     """Perform Sinkhorn normalization in log-space for stability."""
48 |     u, v = torch.zeros_like(log_mu), torch.zeros_like(log_nu)
49 |     for _ in range(iters):
50 |         u = log_mu - torch.logsumexp(Z + v.unsqueeze(1), dim=2)
51 |         v = log_nu - torch.logsumexp(Z + u.unsqueeze(2), dim=1)
52 |     return Z + u.unsqueeze(2) + v.unsqueeze(1)
53 |
54 |
55 | def log_optimal_transport(scores, alpha, iters: int):
56 |     """
57 |     Perform differentiable optimal transport in log-space for stability.
58 |     scores: (b, m, n) batch of pairwise matching scores
59 |     alpha: dustbin (bin) score, appended as an extra row and column
60 |     """
61 |     b, m, n = scores.shape
62 |     one = scores.new_tensor(1)
63 |     ms, ns = (m * one).to(scores), (n * one).to(scores)
64 |
65 |     bins0 = alpha.expand(b, m, 1)
66 |     bins1 = alpha.expand(b, 1, n)
67 |     alpha = alpha.expand(b, 1, 1)
68 |
69 |     couplings = torch.cat([torch.cat([scores, bins0], -1),
70 |                            torch.cat([bins1, alpha], -1)], 1)
71 |
72 |     norm = - (ms + ns).log()
73 |     log_mu = torch.cat([norm.expand(m), ns.log()[None] + norm])
74 |     log_nu = torch.cat([norm.expand(n), ms.log()[None] + norm])
75 |     log_mu, log_nu = log_mu[None].expand(b, -1), log_nu[None].expand(b, -1)
76 |
77 |     Z = log_sinkhorn_iterations(couplings, log_mu.contiguous(), log_nu.contiguous(), iters)
78 |     Z = Z - norm  # multiply probabilities by M+N
79 |     return Z
81 |
--------------------------------------------------------------------------------
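A sanity-check sketch: Z is the log transport plan over the score matrix augmented with one dustbin row and column, and after the final renormalization each of the m real rows of exp(Z) carries unit mass:

import torch
from soma.models.optimal_transport import log_optimal_transport

scores = torch.randn(2, 5, 7)  # (b, m, n) matching scores
alpha = torch.tensor(1.)       # dustbin score; in the full model this is typically learned
Z = log_optimal_transport(scores, alpha, iters=50)
print(Z.shape)                 # torch.Size([2, 6, 8]): dustbin row/column appended
print(Z.exp()[:, :5].sum(-1))  # ~1.0 for every real row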
/src/soma/train/train_soma_multiple.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
31 | from os import path as osp
32 |
33 | from human_body_prior.tools.omni_tools import get_support_data_dir
34 | from typing import List
35 |
36 | from soma.tools.parallel_tools import run_parallel_jobs
37 | from soma.train.soma_trainer import SOMATrainer
38 | from soma.train.soma_trainer import train_soma_once
39 |
40 |
41 | def train_multiple_soma(soma_data_settings: List[tuple] = None, soma_train_cfg: dict = None, parallel_cfg: dict = None):
42 |     '''
43 |     Train multiple SOMA models with various settings.
44 |
45 |     Args:
46 |         soma_data_settings: list of tuples with format
47 |             (max number of occlusions, max number of ghost points, percentage of real data, percentage of synthetic data);
48 |             a data-type percentage takes effect only if the corresponding data type actually exists.
49 |         soma_train_cfg: a dictionary with dot-formatted hierarchical keys and the desired values
50 |         parallel_cfg: relevant only when running on the IS-Condor cluster
51 |     '''
52 |     if soma_train_cfg is None: soma_train_cfg = {}
53 |     if parallel_cfg is None: parallel_cfg = {}
54 |     if soma_data_settings is None: soma_data_settings = [(5, 3, 0.0, 1.0)]
55 |
56 |     app_support_dir = get_support_data_dir(__file__)
57 |     base_parallel_cfg_fname = osp.join(app_support_dir, 'conf/parallel_conf/soma_train_parallel.yaml')
58 |
59 |     for num_occ_max, num_ghost_max, limit_real_data, limit_synt_data in soma_data_settings:
60 |         job = {
61 |             'data_parms.mocap_dataset.num_occ_max': num_occ_max,
62 |             'data_parms.mocap_dataset.num_ghost_max': num_ghost_max,
63 |             'data_parms.mocap_dataset.limit_real_data': limit_real_data,
64 |             'data_parms.mocap_dataset.limit_synt_data': limit_synt_data,
65 |         }
66 |         job.update(soma_train_cfg)
67 |         cur_soma_cfg = SOMATrainer.prepare_cfg(**job)
68 |         parallel_cfg['jobs_unique_name'] = f'{cur_soma_cfg.soma.expr_id}_{cur_soma_cfg.soma.data_id}'
69 |         run_parallel_jobs(train_soma_once, [job], parallel_cfg=parallel_cfg, base_parallel_cfg=base_parallel_cfg_fname)
--------------------------------------------------------------------------------
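A hedged invocation sketch; it relies on soma.train.soma_trainer, which is not part of this listing, and the experiment id, dot-keys, and paths below are hypothetical placeholders:

from soma.train.train_soma_multiple import train_multiple_soma

train_multiple_soma(
    soma_data_settings=[(5, 3, 0.0, 1.0)],  # 5 occlusions, 3 ghost points, 100% synthetic data
    soma_train_cfg={'soma.expr_id': 'V48_02_SOMA', 'dirs.work_base_dir': '/tmp/soma_work'},
    parallel_cfg={'pool_size': 1},
)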
/src/soma/amass/amass_stats.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
31 | import os.path as osp
32 | from collections import OrderedDict
33 | from glob import glob
34 |
35 | import numpy as np
36 | import pandas as pd
37 |
38 | from soma.tools.eval_tools import save_xlsx
39 |
40 | # only_datasets = sorted(['HumanEva', 'ACCAD', 'PosePrior'])
41 | # only_datasets = sorted(['HumanEva', 'ACCAD', 'TotalCapture', 'CMU', 'Transitions', 'PosePrior'])
42 | only_datasets = None
43 | # amass_npz_base_dir = '/ps/project/soma/training_experiments/V48/V48_02_DanceDB/OC_05_G_03_real_000_synt_100/evaluations/mosh_results_tracklet_npz'
44 | # amass_npz_base_dir = '/ps/project/soma/training_experiments/V48/V48_02_Mixamo/OC_05_G_03_real_000_synt_100/evaluations/mosh_results_tracklet_npz'
45 | amass_npz_base_dir = '/ps/project/soma/training_experiments/V48/V48_02_SOMA/OC_05_G_03_real_000_synt_100/evaluations/mosh_results_tracklet_npz'
46 |
47 | # amass_npz_base_dir = '/is/cluster/scratch/nghorbani/amass/mosh_results/20210726/amass_neutral'
48 |
49 | # mosh_stageii_npz_fnames = glob(osp.join(amass_npz_base_dir, '*/*stageii.npz'))
50 | mosh_stageii_npz_fnames = glob(osp.join(amass_npz_base_dir, '*/*/*stageii.npz'))
51 |
52 | amass_stats = OrderedDict()
53 | for stageii_npz_fname in sorted(mosh_stageii_npz_fnames):
54 |     ds_name, subject_name, npz_basename = stageii_npz_fname.split('/')[-3:]
55 |     if (only_datasets is not None) and (ds_name not in only_datasets): continue
56 |
57 |     mosh_data = np.load(stageii_npz_fname)
58 |     if ds_name not in amass_stats:
59 |         amass_stats[ds_name] = OrderedDict({'markers': [], 'subjects': [], 'motions': 0, 'minutes': 0})
60 |
61 |     amass_stats[ds_name]['markers'].append(len(mosh_data['markers_latent']))
62 |     amass_stats[ds_name]['subjects'].append(subject_name)
63 |     amass_stats[ds_name]['motions'] += 1
64 |     amass_stats[ds_name]['minutes'] += mosh_data['mocap_time_length'] / 60.
65 |
66 | for ds_name in amass_stats:
67 |     amass_stats[ds_name]['markers'] = np.median(amass_stats[ds_name]['markers'])
68 |     amass_stats[ds_name]['subjects'] = len(np.unique(amass_stats[ds_name]['subjects']))
69 |
70 | amass_data_pd = pd.DataFrame(amass_stats).transpose()
71 | xlsx_data = {'amass': amass_data_pd}
72 |
73 | save_xlsx(xlsx_data, xlsx_fname=osp.join(amass_npz_base_dir, 'amass_stats.xlsx'))
74 | print(amass_data_pd)
75 | # mosh_npz_fnames = glob(osp.join(amass_pkl_dir, '*/*/*.npz'))
76 | # for mosh_npz_fname in mosh_npz_fnames:
77 | #     os.remove(mosh_npz_fname)
--------------------------------------------------------------------------------
/src/soma/run_soma/paper_plots/mosh_soma_dataset.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
31 | import os.path as osp
32 | from glob import glob
33 |
34 | import numpy as np
35 | from loguru import logger
36 |
37 | from soma.amass.mosh_manual import mosh_manual
38 |
39 |
40 | def gen_stagei_mocap_fnames(mocap_base_dir, subject_name, ext='.c3d'):
41 | stagei_mocap_fnames = [osp.join(mocap_base_dir, subject_name, frame) for frame in
42 | {
43 | 'soma_subject1': [
44 | f'run_002{ext}_001091',
45 | f'jump_001{ext}_000137',
46 | f'run_001{ext}_001366',
47 | f'jump_001{ext}_000509',
48 | f'throw_001{ext}_000596',
49 | f'dance_003{ext}_001488',
50 | f'jump_001{ext}_000588',
51 | f'squat_002{ext}_001134',
52 | f'jump_002{ext}_000471',
53 | f'run_001{ext}_000032',
54 | f'dance_001{ext}_001042',
55 | f'dance_001{ext}_000289'
56 | ],
57 | 'soma_subject2': [
58 | f'dance_005{ext}_001289',
59 | f'random_004{ext}_000166',
60 | f'run_001{ext}_000826',
61 | f'random_004{ext}_000001',
62 | f'jump_001{ext}_000871',
63 | f'squat_003{ext}_000543',
64 | f'squat_003{ext}_000696',
65 | f'squat_003{ext}_001769',
66 | f'dance_003{ext}_001207',
67 | f'jump_001{ext}_000550',
68 | f'run_001{ext}_000865',
69 | f'throw_001{ext}_000069'
70 | ]
71 | }[subject_name]]
72 |
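    # Entries above follow the convention '<basename><ext>_<frame_id>', e.g. 'run_002.c3d_001091';
    # stripping the trailing '_<frame_id>' recovers the actual mocap file to check on disk.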
73 |     stagei_mocap_exists = [osp.exists('_'.join(f.split('_')[:-1])) for f in stagei_mocap_fnames]
74 |     assert all(stagei_mocap_exists), \
75 |         FileNotFoundError(np.array(stagei_mocap_fnames)[np.logical_not(stagei_mocap_exists)])
76 |
77 | return stagei_mocap_fnames
--------------------------------------------------------------------------------
/src/soma/amass/prepare_amass_npz.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
31 | import os
32 | import os.path as osp
33 | from glob import glob
34 |
35 | from loguru import logger
36 |
37 | from moshpp.mosh_head import MoSh
38 |
39 | # amass_pkl_dir = '/is/cluster/scratch/nghorbani/amass/mosh_results/20210726/amass_neutral'
40 | # amass_npz_base_dir = '/is/cluster/scratch/nghorbani/amass/mosh_results/20210726/amass_neutral'
41 | # mosh_stageii_pkl_fnames = glob(osp.join(amass_pkl_dir, '*/*/*stageii.pkl'))
42 | import pickle
43 | from moshpp.tools.run_tools import setup_mosh_omegaconf_resolvers
44 | setup_mosh_omegaconf_resolvers()
45 |
46 | amass_pkl_dir = '/ps/project/soma/support_files/release_soma/SOMA_dataset/renamed_subjects/mosh_results'
47 | amass_npz_base_dir = '/ps/project/soma/support_files/release_soma/SOMA_dataset/renamed_subjects/mosh_results_npz'
48 |
49 | # amass_pkl_dir = '/ps/project/soma/training_experiments/V48/V48_02_Mixamo/OC_05_G_03_real_000_synt_100/evaluations/mosh_results_tracklet'
50 | # amass_npz_base_dir = '/ps/project/soma/training_experiments/V48/V48_02_Mixamo/OC_05_G_03_real_000_synt_100/evaluations/mosh_results_tracklet_npz'
51 |
52 | # amass_pkl_dir = '/ps/project/soma/training_experiments/V48/V48_02_DanceDB/OC_05_G_03_real_000_synt_100/evaluations/mosh_results_tracklet'
53 | # amass_npz_base_dir = '/ps/project/soma/training_experiments/V48/V48_02_DanceDB/OC_05_G_03_real_000_synt_100/evaluations/mosh_results_tracklet_npz'
54 |
55 | # amass_pkl_dir = '/ps/project/soma/training_experiments/V48/V48_02_CMUII/OC_05_G_03_real_000_synt_100/evaluations/mosh_results_tracklet'
56 | # amass_npz_base_dir = '/ps/project/soma/training_experiments/V48/V48_02_CMUII/OC_05_G_03_real_000_synt_100/evaluations/mosh_results_tracklet_npz'
57 |
58 |
59 | mosh_stageii_pkl_fnames = glob(osp.join(amass_pkl_dir, '*/*/*stageii.pkl'))
60 |
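# Convert every stage-ii pkl to an AMASS-style npz, mirroring the ds_name/subject_name
# folder structure and skipping files that have already been converted.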
61 | for mosh_stageii_pkl_fname in sorted(mosh_stageii_pkl_fnames):
62 | ds_name, subject_name, pkl_basename = mosh_stageii_pkl_fname.split('/')[-3:]
63 |
64 | stageii_npz_fname = osp.join(amass_npz_base_dir, ds_name, subject_name, pkl_basename.replace('.pkl', '.npz'))
65 |
66 | if osp.exists(stageii_npz_fname):
67 | continue
68 |
69 | stageii_pkl_data = pickle.load(open(mosh_stageii_pkl_fname, 'rb'))
70 |
71 | # stageii_pkl_data['stageii_debug_details']['cfg']['surface_model']['gender'] = f"{stageii_pkl_data['stageii_debug_details']['cfg']['surface_model']['gender']}"
72 | # pickle.dump(stageii_pkl_data, open(mosh_stageii_pkl_fname, 'wb'))
73 | # try:
74 | MoSh.load_as_amass_npz(mosh_stageii_pkl_fname,
75 | stageii_npz_fname=stageii_npz_fname,
76 | include_markers=True,
77 | )
78 | # except Exception as e:
79 | # logger.error(mosh_stageii_pkl_fname)
80 | # os.remove(mosh_stageii_pkl_fname)
81 |
82 | # mosh_npz_fnames = glob(osp.join(amass_pkl_dir, '*/*/*.npz'))
83 | # for mosh_npz_fname in mosh_npz_fnames:
84 | # os.remove(mosh_npz_fname)
85 |
--------------------------------------------------------------------------------
/src/soma/data/prepare_ghorbani_permutation_data.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
31 |
32 | import fnmatch
33 | import os.path as osp
34 | import shutil
35 | from glob import glob
36 |
37 | import numpy as np
38 | from human_body_prior.tools.omni_tools import flatten_list, makepath
39 |
40 |
41 | def pick_bmlrub_ghorbani(outdir, amass_dir, fname_filter):
42 | mosh_stageii_npz_fnames = glob(osp.join(amass_dir, 'BMLrub', '*/*_stageii.npz'))
43 |
44 | for mosh_stageii_npz_fname in mosh_stageii_npz_fnames:
45 | subject_name, npz_basename = mosh_stageii_npz_fname.split('/')[-2:]
46 |
47 | mosh_stagei_npz_fname = glob(osp.join(osp.dirname(mosh_stageii_npz_fname), '*_stagei.npz'))[0]
48 | mosh_stagei_pkl_fname = mosh_stagei_npz_fname.replace('.npz', '.pkl')
49 | mosh_stageii_pkl_fname = mosh_stageii_npz_fname.replace('.npz', '.pkl')
50 |
51 | assert osp.exists(mosh_stagei_npz_fname)
52 | assert osp.exists(mosh_stagei_pkl_fname)
53 | assert osp.exists(mosh_stageii_pkl_fname)
54 |
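        # fname_filter entries are fnmatch patterns, e.g. '*001/*walk*' (hypothetical)
        # matches any walk sequence of subject 001; a file is copied only if it matches
        # at least one pattern.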
55 | if not np.any([fnmatch.fnmatch(mosh_stageii_npz_fname, a) for a in fname_filter]):
56 | continue
57 |
58 | dst_mosh_stageii_npz_fname = osp.join(outdir, subject_name, osp.basename(mosh_stageii_npz_fname))
59 | if not osp.exists(dst_mosh_stageii_npz_fname):
60 | shutil.copy2(mosh_stageii_npz_fname, makepath(dst_mosh_stageii_npz_fname, isfile=True))
61 |
62 | dst_mosh_stagei_npz_fname = osp.join(outdir, subject_name, osp.basename(mosh_stagei_npz_fname))
63 | if not osp.exists(dst_mosh_stagei_npz_fname):
64 | shutil.copy2(mosh_stagei_npz_fname, makepath(dst_mosh_stagei_npz_fname, isfile=True))
65 |
66 | dst_mosh_stagei_pkl_fname = osp.join(outdir, subject_name, osp.basename(mosh_stagei_pkl_fname))
67 | if not osp.exists(dst_mosh_stagei_pkl_fname):
68 | shutil.copy2(mosh_stagei_pkl_fname, makepath(dst_mosh_stagei_pkl_fname, isfile=True))
69 |
70 | dst_mosh_stageii_pkl_fname = osp.join(outdir, subject_name, osp.basename(mosh_stageii_pkl_fname))
71 | if not osp.exists(dst_mosh_stageii_pkl_fname):
72 | shutil.copy2(mosh_stageii_pkl_fname, makepath(dst_mosh_stageii_pkl_fname, isfile=True))
73 |
74 |
75 | def main():
76 | amass_dir = '/ps/project/soma/support_files/release_soma/smplx/amass_neutral'
77 | outdir_train = osp.join(amass_dir, 'BMLrub_train_ghorbani_permutation_2019')
78 | train_fname_filter = flatten_list(
79 | [[f'*{subject_id:03d}/*{action_name}*' for subject_id in range(1, 68)] for action_name in
80 | ['sit', 'jump', 'walk', 'jog']])
81 | pick_bmlrub_ghorbani(outdir_train, amass_dir, train_fname_filter)
82 |
83 | outdir_vald = osp.join(amass_dir, 'BMLrub_vald_ghorbani_permutation_2019')
84 | vald_fname_filter = flatten_list(
85 | [[f'*{subject_id:03d}/*{action_name}*' for subject_id in range(92, 115)] for action_name in
86 | ['sit', 'jump', 'walk', 'jog']])
87 | pick_bmlrub_ghorbani(outdir_vald, amass_dir, vald_fname_filter)
88 |
89 |
90 | if __name__ == '__main__':
91 | main()
92 |
--------------------------------------------------------------------------------
/src/soma/models/transformer.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
31 | import torch
32 | from torch import nn
33 |
34 |
35 | def scaled_dot_product_attention(query, key, value):
36 | dim = query.shape[1]
37 | scores = torch.einsum('bdhn,bdhm->bhnm', query, key) / dim ** .5
38 | attention_weight = torch.nn.functional.softmax(scores, dim=-1)
39 | return torch.einsum('bhnm,bdhm->bdhn', attention_weight, value), attention_weight
40 |
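# A minimal shape check (hypothetical sizes): for query = key = value with shape
# (batch=2, dim_per_head=16, heads=4, points=30),
#   out, attn = scaled_dot_product_attention(query, key, value)
# gives out.shape == (2, 16, 4, 30) and attn.shape == (2, 4, 30, 30).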
41 |
42 | class MultiHeadedAttention(nn.Module):
43 | def __init__(self, num_heads: int, num_total_attention_feat: int):
44 | super(MultiHeadedAttention, self).__init__()
45 |
46 | assert num_total_attention_feat % num_heads == 0, ValueError(
47 | f"num_total_attention_feat ({num_total_attention_feat}) % num_heads ({num_heads}) is not 0 ({num_total_attention_feat % num_heads})")
48 | self.dim = num_total_attention_feat // num_heads
49 | self.num_heads = num_heads
50 |
51 | self.proj = nn.ModuleList(
52 | [nn.Conv1d(num_total_attention_feat, num_total_attention_feat, kernel_size=1) for _ in range(3)])
53 |
54 | self.merge = nn.Conv1d(num_total_attention_feat, num_total_attention_feat, kernel_size=1)
55 |
56 | self.post_merge = nn.Sequential(
57 | nn.Conv1d(2 * num_total_attention_feat, 2 * num_total_attention_feat, kernel_size=1),
58 | nn.BatchNorm1d(2 * num_total_attention_feat),
59 | nn.ReLU(),
60 | nn.Conv1d(2 * num_total_attention_feat, num_total_attention_feat, kernel_size=1),
61 | )
62 |
63 | nn.init.constant_(self.post_merge[-1].bias, 0.0)
64 |
65 | def forward(self, init_query, key, value):
66 | batch_dim = init_query.size(0)
67 | query, key, value = [l(x).view(batch_dim, self.dim, self.num_heads, -1) for l, x in
68 | zip(self.proj, (init_query, key, value))]
69 |
70 | x, attention_weight = scaled_dot_product_attention(query, key, value)
71 |
72 | x = self.merge(x.contiguous().view(batch_dim, self.dim * self.num_heads, -1))
73 | x = self.post_merge(torch.cat([x, init_query], dim=1))
74 |
75 | return x, attention_weight
76 |
77 |
78 | class LayeredSelfAttention(nn.Module):
79 | def __init__(self, feature_dim: int, num_layers: int, num_attention_heads: int = 4, return_attention_weights=True):
80 | super(LayeredSelfAttention, self).__init__()
81 | self.self_attention_layers = nn.ModuleList([
82 | MultiHeadedAttention(num_attention_heads, feature_dim)
83 | for _ in range(num_layers)])
84 | self.return_attention_weights = return_attention_weights
85 |
86 | def forward(self, point_feats):
87 |         """Apply a stack of residual self-attention layers to point features.
88 | 
89 |         Parameters
90 |         ----------
91 |         point_feats: num_batch x num_feat x num_points (channels-first, Conv1d layout)
92 | 
93 |         Returns
94 |         -------
95 |         updated point_feats; attention weights stacked on dim 1 are also returned when return_attention_weights is set
96 |         """
97 | all_attention_weights = []
98 | for self_attention_layer in self.self_attention_layers:
99 | new_point_feats, attention_weight = self_attention_layer(point_feats, point_feats, point_feats)
100 | point_feats = point_feats + new_point_feats
101 | all_attention_weights.append(attention_weight)
102 |
103 | if self.return_attention_weights:
104 |             return point_feats, torch.stack(all_attention_weights, dim=1)
105 | else:
106 | return point_feats
107 |
--------------------------------------------------------------------------------
/src/soma/amass/copy_into_release_folders.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
31 | import os
32 | import os.path as osp
33 | import shutil
34 | from collections import OrderedDict
35 | from glob import glob
36 |
37 | import numpy as np
38 | from human_body_prior.tools.omni_tools import makepath
39 | from loguru import logger
40 | from tqdm import tqdm
41 |
42 | # only_datasets = sorted(['HumanEva', 'ACCAD', 'PosePrior'])
43 | # only_datasets = sorted(['HumanEva', 'ACCAD', 'TotalCapture', 'CMU', 'Transitions', 'PosePrior'])
44 | only_datasets = None
45 | release_base_dir = '/ps/project/datasets/AMASS/smplx/neutral'
46 | amass_npz_base_dir = '/is/cluster/scratch/nghorbani/amass/mosh_results/20210726/amass_neutral'
47 | amass_mp4_base_dir = '/is/cluster/scratch/nghorbani/amass/mp4_renders/20210726/amass_neutral'
48 | license_fname = '/ps/project/datasets/AMASS/LICENSE.txt'
49 | assert osp.exists(license_fname)
50 | mosh_npz_fnames = glob(osp.join(amass_npz_base_dir, '*/*/*.npz'))
51 |
52 | amass_stats = OrderedDict()
53 | for npz_fname in tqdm(sorted(mosh_npz_fnames)):
54 | ds_name, subject_name, npz_basename = npz_fname.split('/')[-3:]
55 | if (only_datasets is not None) and (ds_name not in only_datasets): continue
56 |
57 | if npz_basename.endswith('_stageii.npz'):
58 | render_fname = osp.join(amass_mp4_base_dir, ds_name, subject_name, npz_basename.replace('_stageii.npz', '.mp4'))
59 |
60 | new_npz_fname = osp.join(release_base_dir, 'mosh_results', ds_name, subject_name, npz_basename)
61 | new_render_fname = osp.join(release_base_dir, 'mp4_renders', ds_name, subject_name,
62 | npz_basename.replace('_stageii.npz', '.mp4'))
63 | new_license_mosh_fname = osp.join(release_base_dir, 'mosh_results', ds_name, 'LICENSE.txt')
64 | new_license_mp4_fname = osp.join(release_base_dir, 'mp4_renders', ds_name, 'LICENSE.txt')
65 |
66 | if not osp.exists(new_npz_fname):
67 | mosh_data = np.load(npz_fname) # see if it is a valid npz
68 | shutil.copy2(npz_fname, makepath(new_npz_fname, isfile=True))
69 |
70 | if not osp.exists(new_render_fname):
71 | if osp.exists(render_fname):
72 | shutil.copy2(render_fname, makepath(new_render_fname, isfile=True))
73 | else:
74 | logger.error(f'render_fname does not exist: {render_fname}')
75 |
76 | if not osp.exists(new_license_mosh_fname):
77 | shutil.copy2(license_fname, makepath(new_license_mosh_fname, isfile=True))
78 |
79 | if not osp.exists(new_license_mp4_fname):
80 | shutil.copy2(license_fname, makepath(new_license_mp4_fname, isfile=True))
81 |
82 | else: # stagei
83 | new_npz_fname = osp.join(release_base_dir, 'mosh_results', ds_name, subject_name, npz_basename)
84 | if not osp.exists(new_npz_fname):
85 | shutil.copy2(npz_fname, makepath(new_npz_fname, isfile=True))
86 |
87 |
88 | def compress_folder(directory):
89 | dir_basename = osp.basename(directory)
90 | root_dir = osp.dirname(directory)
91 | if not osp.exists(f'{directory}.tar.bz2'):
92 | os.system(f'cd {root_dir}; tar cjvf {dir_basename}.tar.bz2 {dir_basename}')
93 |
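# e.g. compress_folder('/release/mosh_results/ACCAD') (hypothetical path) would run:
#   cd /release/mosh_results; tar cjvf ACCAD.tar.bz2 ACCAD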
94 |
95 | for directory in glob(osp.join(release_base_dir, 'mosh_results/*')):
96 | if osp.isdir(directory):
97 | compress_folder(directory)
98 |
99 | for directory in glob(osp.join(release_base_dir, 'mp4_renders/*')):
100 | if osp.isdir(directory):
101 | compress_folder(directory)
102 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## SOMA: Solving Optical Marker-Based MoCap Automatically, ICCV'21
2 |
3 | This repository contains the official PyTorch implementation of:
4 |
5 | SOMA: Solving Optical Marker-Based MoCap Automatically\
6 | Nima Ghorbani and Michael J. Black\
7 | [Paper](https://download.is.tue.mpg.de/soma/SOMA_ICCV21.pdf) | [Supp.Mat.](https://download.is.tue.mpg.de/soma/SOMA_Suppmat.pdf) | [Video](https://www.youtube.com/watch?v=BEFCqIefLA8&t=1s&ab_channel=MichaelBlack) | [Project website](https://soma.is.tue.mpg.de/) | [Poster](https://download.is.tue.mpg.de/soma/SOMA_Poster.pdf)
8 |
9 |  turned into labeled markers (colored dots)")
10 |
11 | SOMA **automatically transforms raw marker-based mocap point clouds** (black dots in the back) into **solved SMPL-X bodies** and **labeled markers** (colored dots).
12 |
13 | ## Installation
14 |
15 | SOMA was originally developed with Python 3.7 and PyTorch 1.8.2 LTS on Ubuntu 20.04.2 LTS.
16 | Below we prepare the Python environment using [Anaconda](https://www.anaconda.com/products/individual);
17 | however, we use the plain pip package manager for installing most dependencies.
18 |
19 | ````
20 | sudo apt install libatlas-base-dev
21 | sudo apt install libpython3.7
22 | sudo apt install libtbb2
23 |
24 | conda create -n soma python=3.7
25 | conda install -c conda-forge ezc3d
26 |
27 | pip3 install torch==1.8.2+cu102 torchvision==0.9.2+cu102 torchaudio==0.8.2 -f https://download.pytorch.org/whl/lts/1.8/torch_lts.html
28 |
29 | ````
30 | ezc3d currently cannot be installed via pip, hence the conda-forge channel above.
31 |
32 | Assuming that you have already cloned this repository to your local drive,
33 | go to the root directory of the SOMA code and run
34 | ````
35 | pip install -r requirements.txt
36 | python setup.py develop
37 | ````
38 | Copy the precompiled
39 | [smpl-fast-derivatives](https://download.is.tue.mpg.de/download.php?domain=soma&sfile=smpl-fast-derivatives.tar.bz2)
40 | into your python site-packages folder, i.e. ````anaconda3/envs/soma/lib/python3.7/site-packages````.
41 | The final directory should look like ````anaconda3/envs/soma/lib/python3.7/site-packages/psbody/smpl````.
42 |
43 | Install the psbody.mesh library following the instructions in [https://github.com/MPI-IS/mesh](https://github.com/MPI-IS/mesh).
44 | Hint: clone the mesh repository and run the following from the anaconda environment: ````python setup.py install````.
45 |
46 | To use the rendering capabilities, first install Blender 2.83 LTS on your machine.
47 | Afterward, uncompress the contents of the precompiled
48 | [bpy-2.83](https://download.is.tue.mpg.de/download.php?domain=soma&sfile=blender/bpy-2.83-20200908.tar.bz2)
49 | into your python site-packages folder, i.e. ````anaconda3/envs/soma/lib/python3.7/site-packages````.
50 |
51 | Last but not least, the current SOMA code relies on the [MoSh++](https://github.com/nghorbani/moshpp) mocap solver.
52 | Please install MoSh++ following the guidelines in its repository.
53 |
54 |
55 | ## Using SOMA
56 | The main parts of the codebase are explained in the [Tutorials](src/tutorials); a minimal code sketch follows the list:
57 | - [Run SOMA On MoCap Point Cloud Data](src/tutorials/run_soma_on_soma_dataset.ipynb)
58 | - [Label Priming an Unknown Marker Layout](src/tutorials/label_priming.ipynb)
59 | - [SOMA Ablative Studies](src/tutorials/ablation_study.ipynb)
60 | - [Solve Already Labeled MoCaps With MoSh++](src/tutorials/solve_labeled_mocap.ipynb)
61 |
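As a minimal sketch of the last workflow above (all paths are placeholders), an already labeled mocap set can be handed to MoSh++ through the `mosh_manual` helper in `src/soma/amass/mosh_manual.py`:

````
from glob import glob

from soma.amass.mosh_manual import mosh_manual

# hypothetical location of your labeled c3d files
mocap_fnames = glob('/path/to/labeled_mocaps/*/*.c3d')

mosh_manual(
    mocap_fnames,
    mosh_cfg={
        'mocap.unit': 'mm',  # dot-list overrides merged into the MoSh++ configuration
    },
    parallel_cfg={'pool_size': 1},  # see support_data/conf/parallel_conf/moshpp_parallel.yaml
    run_tasks=['mosh'],  # add 'render' to also produce mp4 renders of the solved bodies
)
````

See the tutorial notebooks above for the full, tested configurations.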
62 | ## Citation
63 |
64 | Please cite the following paper if you use this code directly or indirectly in your research/projects:
65 |
66 | ```
67 | @inproceedings{SOMA:ICCV:2021,
68 | title = {{SOMA}: Solving Optical Marker-Based MoCap Automatically},
69 | author = {Ghorbani, Nima and Black, Michael J.},
70 | booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
71 | month = oct,
72 | year = {2021},
73 | doi = {},
74 | month_numeric = {10}}
75 | ```
76 |
77 | ## License
78 |
79 | Software Copyright License for **non-commercial scientific research purposes**. Please read carefully
80 | the [terms and conditions](./LICENSE) and any accompanying documentation before you download and/or
81 | use the SOMA data and software (the "Data & Software"), including software, scripts, and animations.
82 | By downloading and/or using the Data & Software (including downloading, cloning, installing, and any other use of this repository),
83 | you acknowledge that you have read these terms
84 | and conditions, understand them, and agree to be bound by them. If you do not agree with these terms and conditions, you
85 | must not download and/or use the Data & Software.
86 | Any infringement of the terms of this agreement will automatically terminate
87 | your rights under this [License](./LICENSE).
88 |
89 | ## Contact
90 |
91 | The code in this repository is developed by [Nima Ghorbani](https://nghorbani.github.io/)
92 | while at [Max-Planck Institute for Intelligent Systems, Tübingen, Germany](https://is.mpg.de/person/nghorbani).
93 |
94 | If you have any questions, you can contact us at [soma@tuebingen.mpg.de](mailto:soma@tuebingen.mpg.de).
95 |
96 | For commercial licensing, contact [ps-licensing@tue.mpg.de](mailto:ps-licensing@tue.mpg.de).
97 |
--------------------------------------------------------------------------------
/support_data/conf/soma_train_conf.yaml:
--------------------------------------------------------------------------------
1 | surface_model:
2 | num_betas: 10 #number of betas for generating body dataset
3 | num_expressions: 80 # number of expressions for body dataset
4 |   gender: neutral # SOMA is trained gender-neutral; for later MoSh runs use a gender-specific model
5 | type: smplx # model type
6 | fname: ${dirs.support_base_dir}/${surface_model.type}/${surface_model.gender}/model.npz
7 | soma:
8 | expr_id: ???
9 | data_id: ${resolve_soma_data_id:${data_parms.mocap_dataset.num_occ_max},${data_parms.mocap_dataset.num_ghost_max},${data_parms.mocap_dataset.limit_real_data},${data_parms.mocap_dataset.limit_synt_data}}
10 | dirs:
11 | work_base_dir: ??? #This is the main directory for newly produced data by SOMA
12 | support_base_dir: ??? # This directory holds the body model, layouts, etc
13 | work_dir: ${dirs.work_base_dir}/training_experiments/${soma.expr_id}/${soma.data_id}
14 | log_dir: ${dirs.work_dir}/logs
15 | log_fname: ${dirs.log_dir}/${soma.expr_id}_${soma.data_id}.log
16 | cfg_fname: ${dirs.work_dir}/${soma.expr_id}_${soma.data_id}.yaml
17 | dataset_dir: ${dirs.work_base_dir}/data/${soma.expr_id} # 30 FPS
18 | body_dataset_dir: ${dirs.dataset_dir}/body_dataset # could be shared for different experiments
19 | marker_dataset_dir: ${dirs.dataset_dir}/marker_dataset # specific to a dataset and set of marker layouts used
20 | amass_dir: ${dirs.support_base_dir}/${surface_model.type}/amass_neutral # amass pkl/npz dir
21 | amass_marker_noise_dir: ${dirs.dataset_dir}/amass_marker_noise # this is to skip amass noise creation later
22 | train_parms:
23 | batch_size: ???
24 | num_workers: 5 # Used for dataloader
25 | loss_weights:
26 | labeling: 1.
27 | body_part_mask: 0.1
28 | gen_optimizer:
29 | type: Adam
30 | args:
31 | lr: 0.001
32 | weight_decay: 0.00005
33 | betas: [ .9, .999 ]
34 | lr_scheduler:
35 | type: ReduceLROnPlateau
36 | args:
37 | # metrics: val_loss
38 | verbose: true
39 | patience: 3
40 | early_stopping:
41 | monitor: val_loss
42 | min_delta: 0.0
43 | patience: 8
44 | verbose: true
45 | mode: min
46 | data_parms:
47 | amass_splits:
48 | vald: # HumanEva [40], ACCAD [4], and TotalCapture [25]
49 | - HumanEva
50 | - ACCAD
51 | - TotalCapture
52 | train: # CMU [9], Transitions [23] and Pose Prior [5]
53 | - CMU
54 | - Transitions
55 | - PosePrior
56 | # - CAESAR_SMPLx_betas # CAESAR
57 | num_timeseq_frames: 1
58 | num_frames_overlap: 0
59 | unified_frame_rate: 30
60 | body_dataset:
61 | rnd_zrot: true
62 | animate_face: false
63 | animate_hand: false
64 | num_hand_var_perseq: 15
65 |     augment_by_temporal_inversion: false # use the time-inverted window of data as an augmentation method
66 |   marker_dataset: # these values are used only to prepare the marker dataset; at train time control comes from mocap_dataset
67 | superset_fname: ${dirs.marker_dataset_dir}/superset.json
68 | use_real_data_from:
69 | use_real_data_for:
70 | # - train
71 | # - vald
72 | use_synt_data_for:
73 | - train
74 | - vald
75 | wrist_markers_on_stick: false
76 |     num_random_vid_ring: 1 # to turn off random marker placement set this to 0
77 | num_marker_layout_augmentation: 1
78 | enable_rnd_vid_on_face_hands: false
79 | props:
80 | enable: false
81 | num_prop_marker_max: # to be automatically filled
82 | static:
83 | unit: mm
84 | rotate: [ 90,0,0 ]
85 | mocap_dataset:
86 | # marker layouts for training will be merged to create superset
87 | marker_layout_fnames: ???
88 | ghost_distribution: spherical_gaussian # spherical_gaussian/ uniform/ skewed_gaussian
89 | num_occ_max: 5
90 | num_ghost_max: 3
91 | limit_real_data: 0.0
92 | limit_synt_data: 1.0
93 | marker_noise_var: 0
94 | amass_marker_noise_model:
95 | enable: true
96 | amass_splits:
97 | vald:
98 | - HumanEva
99 | - ACCAD
100 | - TotalCapture
101 | train:
102 | - CMU
103 | - Transitions
104 | - PosePrior
105 | model_parms:
106 | labeler:
107 | enable_transformer: true
108 | enable_sinkhorn: true
109 | num_sinkhorn_iters: 35
110 | num_attention_heads: 5
111 | num_attention_layers: 8
112 | num_pooler_feat: 256
113 |       num_total_attention_feat: 125 # should be divisible by num_attention_heads
114 | trainer:
115 | num_gpus: 4
116 | max_epochs: 50
117 | fast_dev_run: false
118 | distributed_backend: dp
119 | resume_training_if_possible: false
120 | resume_checkpoint_fname:
121 | finetune_checkpoint_fname:
122 | weights_summary: top
123 | profiler:
124 | num_sanity_val_steps: 2
125 | deterministic: true
126 | limit_train_batches: 1.0
127 | limit_val_batches: 1.0
128 | overfit_batches: 0
129 | rnd_seed: 100
130 | moshpp_cfg_override:
131 | mocap:
132 | unit: mm
133 | surface_model:
134 |     gender: ${surface_model.gender} # the default gender for the training model is neutral; keep it that way
135 | type: ${surface_model.type} # default smplx
136 | opt_settings:
137 | weights_type: ${surface_model.type}
138 | moshpp:
139 | verbosity: 1
140 | separate_types:
141 | - body
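# Note: values set to ??? (soma.expr_id, dirs.work_base_dir, dirs.support_base_dir,
# train_parms.batch_size, data_parms.mocap_dataset.marker_layout_fnames) are OmegaConf
# mandatory-missing fields; they must be supplied as overrides when this config is loaded.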
--------------------------------------------------------------------------------
/src/soma/amass/mosh_manual.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
31 | import os.path as osp
32 |
33 | from human_body_prior.tools.omni_tools import get_support_data_dir
34 | from loguru import logger
35 | from omegaconf import OmegaConf
36 |
37 | from moshpp.mosh_head import MoSh
38 | from moshpp.mosh_head import run_moshpp_once
39 | from soma.render.blender_tools import prepare_render_cfg
40 | from soma.render.blender_tools import render_mosh_once
41 | from soma.tools.parallel_tools import run_parallel_jobs
42 |
43 |
44 | def mosh_manual(
45 | mocap_fnames: list,
46 | mosh_cfg: dict = None,
47 | render_cfg: dict = None,
48 | parallel_cfg: dict = None,
49 | **kwargs):
50 | if parallel_cfg is None: parallel_cfg = {}
51 | if mosh_cfg is None: mosh_cfg = {}
52 | if render_cfg is None: render_cfg = {}
53 |
54 | run_tasks = kwargs.get('run_tasks', ['mosh', 'render'])
55 |
56 | only_stagei = kwargs.get('only_stagei', False)
57 | fast_dev_run = kwargs.get('fast_dev_run', False)
58 | determine_shape_for_each_seq = kwargs.get('determine_shape_for_each_seq', False)
59 |
60 | app_support_dir = get_support_data_dir(__file__)
61 |
62 | fname_filter = kwargs.get('fname_filter', None)
63 |
64 | mosh_jobs = []
65 | render_jobs = []
66 | exclude_mosh_job_keys = []
67 |
68 | if fast_dev_run: mocap_fnames = mocap_fnames[:3]
69 |
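    # Queue one MoSh++ job per sequence. When a subject's stage-i (shape) result does not
    # exist yet, only the first encountered sequence of that subject is queued in this pass
    # (unless determine_shape_for_each_seq is set), since stage-i is shared per subject.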
70 | for mocap_fname in mocap_fnames:
71 |
72 |         if fname_filter:
73 |             if not any(i in mocap_fname for i in fname_filter): continue
74 | mocap_key = '_'.join(mocap_fname.split('/')[-3:-1])
75 |
76 | persubject_marker_layout = kwargs.get('persubject_marker_layout', False)
77 | mosh_job = mosh_cfg.copy()
78 | mosh_job.update({
79 | 'mocap.fname': mocap_fname,
80 | })
81 | if persubject_marker_layout:
82 | # todo: do we need to pick the mocaps to produce the layout here?
83 | mosh_job.update({
84 | 'dirs.marker_layout_fname': '${dirs.work_base_dir}/${mocap.ds_name}/${mocap.ds_name}_${mocap.subject_name}_${surface_model.type}.json',
85 | })
86 |
87 | cur_mosh_cfg = MoSh.prepare_cfg(**mosh_job.copy())
88 |
89 | if only_stagei and osp.exists(cur_mosh_cfg.dirs.stagei_fname): continue
90 |
91 | if mocap_key not in exclude_mosh_job_keys and not osp.exists(cur_mosh_cfg.dirs.stageii_fname):
92 | mosh_jobs.append(mosh_job.copy())
93 | if not osp.exists(cur_mosh_cfg.dirs.stagei_fname) and not determine_shape_for_each_seq:
94 | exclude_mosh_job_keys.append(mocap_key)
95 | continue
96 |
97 | if osp.exists(cur_mosh_cfg.dirs.stageii_fname):
98 | render_job = render_cfg.copy()
99 | render_job.update({
100 | 'mesh.mosh_stageii_pkl_fnames': [cur_mosh_cfg.dirs.stageii_fname],
101 | })
102 | cur_render_cfg = prepare_render_cfg(**render_job)
103 | if not osp.exists(cur_render_cfg.dirs.mp4_out_fname):
104 | render_jobs.append(render_job)
105 |
106 | if 'mosh' in run_tasks:
107 | logger.info('Submitting MoSh++ jobs.')
108 |
109 | base_parallel_cfg = OmegaConf.load(osp.join(app_support_dir, 'conf/parallel_conf/moshpp_parallel.yaml'))
110 | moshpp_parallel_cfg = OmegaConf.merge(base_parallel_cfg, OmegaConf.create(parallel_cfg))
111 | run_parallel_jobs(func=run_moshpp_once, jobs=mosh_jobs, parallel_cfg=moshpp_parallel_cfg)
112 |
113 | if 'render' in run_tasks:
114 | logger.info('Submitting render jobs.')
115 |
116 | base_parallel_cfg = OmegaConf.load(osp.join(app_support_dir, 'conf/parallel_conf/blender_parallel.yaml'))
117 | render_parallel_cfg = OmegaConf.merge(base_parallel_cfg, OmegaConf.create(parallel_cfg))
118 | run_parallel_jobs(func=render_mosh_once, jobs=render_jobs, parallel_cfg=render_parallel_cfg)
119 |
--------------------------------------------------------------------------------
/src/soma/render/blender_tools.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
31 | import os
32 | import sys
33 | from os import path as osp
34 |
35 | import bpy
36 | from human_body_prior.tools.omni_tools import get_support_data_dir
37 | from loguru import logger
38 | from omegaconf import OmegaConf, DictConfig
39 |
40 | from moshpp.tools.run_tools import setup_mosh_omegaconf_resolvers
41 | from soma.tools.parallel_tools import run_parallel_jobs
42 |
43 |
44 | def render_mosh_once(render_cfg):
45 | from soma.render.parameters_to_mesh import convert_to_mesh_once
46 | from soma.render.mesh_to_video_standard import create_video_from_mesh_dir
47 | convert_to_mesh_once(render_cfg)
48 | create_video_from_mesh_dir(render_cfg)
49 |
50 |
51 | def prepare_render_cfg(*args, **kwargs) -> DictConfig:
52 | setup_mosh_omegaconf_resolvers()
53 |
54 | if not OmegaConf.has_resolver('resolve_out_basename'):
55 | OmegaConf.register_new_resolver('resolve_out_basename',
56 | lambda mosh_stageii_pkl_fnames:
57 | mosh_stageii_pkl_fnames[0].split('/')[-1].replace('_stageii.pkl', ''))
58 |
59 | if not OmegaConf.has_resolver('resolve_subject_action_name'):
60 | OmegaConf.register_new_resolver('resolve_subject_action_name',
61 | lambda mosh_stageii_pkl_fnames: mosh_stageii_pkl_fnames[0].split('/')[-2])
62 |
63 | if not OmegaConf.has_resolver('resolve_out_ds_name'):
64 | OmegaConf.register_new_resolver('resolve_out_ds_name',
65 | lambda mosh_stageii_pkl_fnames: mosh_stageii_pkl_fnames[0].split('/')[-3])
66 |
67 | app_support_dir = get_support_data_dir(__file__)
68 | base_cfg = OmegaConf.load(osp.join(app_support_dir, 'conf/render_conf.yaml'))
69 |
70 | override_cfg_dotlist = [f'{k}={v}' for k, v in kwargs.items()]
71 | override_cfg = OmegaConf.from_dotlist(override_cfg_dotlist)
72 |
73 | return OmegaConf.merge(base_cfg, override_cfg)
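# Example with a hypothetical path:
#   prepare_render_cfg(**{'mesh.mosh_stageii_pkl_fnames': ['/SOMA/soma_subject1/walk_stageii.pkl']})
# merges the dotted override into conf/render_conf.yaml; the resolvers above then yield
# the output basename 'walk', subject folder 'soma_subject1', and dataset name 'SOMA'.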
74 |
75 |
76 | def setup_scene(cfg):
77 | assert osp.exists(cfg.render.blender_fname), FileNotFoundError(cfg.render.blender_fname)
78 | logger.info(f'Opening scene from provided blend file {cfg.render.blender_fname}')
79 | bpy.ops.wm.open_mainfile(filepath=cfg.render.blender_fname)
80 |
81 | for scene_name, scene in bpy.data.scenes.items():
82 | logger.info(f'Setting up scene: {scene_name}')
83 | # scene = bpy.data.scenes['Scene']
84 |
85 | if cfg.render.render_engine.lower() == 'eevee':
86 | scene.render.engine = 'BLENDER_EEVEE'
87 | scene.eevee.taa_render_samples = cfg.render.num_samples.eevee
88 | else:
89 | scene.render.engine = 'CYCLES'
90 | scene.cycles.samples = cfg.render.num_samples.cycles
91 |
92 | if cfg.render.resolution.change_from_blend:
93 | scene_resolution = cfg.render.resolution.get(scene_name, cfg.render.resolution.default)
94 | scene.render.resolution_x = scene_resolution[0]
95 | scene.render.resolution_y = scene_resolution[1]
96 |
97 | scene.render.image_settings.color_mode = 'RGBA'
98 | plane = [obj for collect in bpy.data.collections for obj in collect.all_objects if obj.name in ['Plane', ]][0]
99 | if not cfg.render.floor.enable:
100 | bpy.ops.object.delete({"selected_objects": [plane]})
101 | else:
102 | plane.location = cfg.render.floor.plane_location
103 |
104 |
105 | def make_blender_silent():
106 | # Silence console output of bpy.ops.render.render by redirecting stdout to /dev/null
107 | sys.stdout.flush()
108 | old = os.dup(1)
109 | os.close(1)
110 | os.open(os.devnull, os.O_WRONLY)
111 |
112 |
113 | def render_mosh_stageii(mosh_stageii_pkl_fnames, render_cfg=None, parallel_cfg=None, **kwargs):
114 | if parallel_cfg is None: parallel_cfg = {}
115 | if render_cfg is None: render_cfg = {}
116 |
117 | fname_filter = kwargs.get('fname_filter', None)
118 |
119 | app_support_dir = get_support_data_dir(__file__)
120 | base_parallel_cfg = OmegaConf.load(osp.join(app_support_dir, 'conf/parallel_conf/blender_parallel.yaml'))
121 |
122 | total_jobs = []
123 | for mosh_stageii_pkl_fname in mosh_stageii_pkl_fnames:
124 | assert mosh_stageii_pkl_fname.endswith('_stageii.pkl')
125 |         if fname_filter:
126 |             if not any(i in mosh_stageii_pkl_fname for i in fname_filter): continue
127 | job = render_cfg.copy()
128 | job.update({
129 | 'mesh.mosh_stageii_pkl_fnames': [mosh_stageii_pkl_fname],
130 | })
131 |
132 |         cur_render_cfg = prepare_render_cfg(**job)  # do not rebind render_cfg; it holds the shared base overrides
133 | 
134 |         if osp.exists(cur_render_cfg.dirs.mp4_out_fname): continue
135 | total_jobs.append(job.copy())
136 |
137 | parallel_cfg = OmegaConf.merge(base_parallel_cfg, OmegaConf.create(parallel_cfg))
138 |
139 | run_parallel_jobs(func=render_mosh_once, jobs=total_jobs, parallel_cfg=parallel_cfg)
140 |
--------------------------------------------------------------------------------
/src/soma/models/model_components.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
31 | from torch import nn
32 |
33 |
34 | def conv1d_layered(channels: list):
35 | n = len(channels)
36 | layers = []
37 | for i in range(1, n):
38 | layers.append(
39 | nn.Conv1d(channels[i - 1], channels[i], kernel_size=1, bias=True))
40 | if i < (n - 1):
41 | layers.append(nn.BatchNorm1d(channels[i]))
42 | layers.append(nn.ReLU())
43 |
44 | return nn.Sequential(*layers)
45 |
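# Example (hypothetical channel sizes): conv1d_layered([64, 128, 256]) builds
#   Conv1d(64->128, kernel_size=1) -> BatchNorm1d(128) -> ReLU -> Conv1d(128->256, kernel_size=1),
# i.e. BatchNorm/ReLU follow every convolution except the last.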
46 |
47 | class ResDenseBlock(nn.Module):
48 |
49 | def __init__(self, num_feat_in, num_feat_out, num_h=256):
50 | super(ResDenseBlock, self).__init__()
51 |
52 | self.res_dense = nn.Sequential(
53 | nn.Linear(num_feat_in, num_h),
54 | nn.BatchNorm1d(num_h),
55 | nn.ReLU(),
56 | nn.Linear(num_h, num_feat_out),
57 | nn.BatchNorm1d(num_feat_out)
58 | )
59 |
60 | self.res_dense_short = nn.Sequential(
61 | *([nn.Linear(num_feat_in, num_feat_out), nn.BatchNorm1d(num_feat_out)] if num_feat_in != num_feat_out else [
62 | nn.Identity()])
63 | )
64 |
65 | def forward(self, x):
66 | return self.res_dense(x) + self.res_dense_short(x)
67 |
68 |
69 | class ResConv1DBlock(nn.Module):
70 | """ a series of 1D convolutions with residuals"""
71 |
72 | def __init__(self, num_feat_in, num_feat_out, num_h=256):
73 | super(ResConv1DBlock, self).__init__()
74 |
75 | self.res_conv1d = nn.Sequential(
76 | nn.Conv1d(num_feat_in, num_h, 1, 1),
77 | nn.BatchNorm1d(num_h),
78 | nn.ReLU(),
79 | nn.Conv1d(num_h, num_feat_out, 1, 1),
80 | nn.BatchNorm1d(num_feat_out)
81 | )
82 |
83 | self.res_conv1d_short = nn.Sequential(
84 | *([nn.Conv1d(num_feat_in, num_feat_out, 1, 1), nn.BatchNorm1d(num_feat_out)]
85 | if num_feat_in != num_feat_out else [nn.Identity()])
86 | )
87 |
88 | def forward(self, x):
89 | return self.res_conv1d(x) + self.res_conv1d_short(x)
90 |
91 |
92 | class LayeredResConv1d(nn.Module):
93 |
94 | def __init__(self, num_feat_in, num_feat_out, num_layers, num_h=256, ):
95 | super(LayeredResConv1d, self).__init__()
96 |
97 | # self.res_conv1d_blocks = nn.ModuleList([
98 | # nn.Sequential(ResConv1DBlock(num_feat_in, num_feat_out * 3, num_h=num_h),
99 | # nn.ReLU(),
100 | # nn.Conv1d(num_feat_out * 3, num_feat_out, kernel_size=1)
101 | # ) for _ in range(num_layers)])
102 | self.res_conv1d_blocks = nn.ModuleList([
103 | nn.Sequential(ResConv1DBlock(num_feat_in, num_feat_out * 3),
104 | nn.ReLU(),
105 | nn.Conv1d(num_feat_out * 3, num_feat_out, kernel_size=1),
106 | nn.BatchNorm1d(num_feat_out),
107 | nn.ReLU(),
108 | nn.Conv1d(num_feat_out, num_feat_out, kernel_size=1),
109 | ) for _ in range(num_layers)])
110 |
111 | def forward(self, point_feats):
112 |         """Apply the residual conv1d blocks sequentially, with an outer residual connection.
113 | 
114 |         Parameters
115 |         ----------
116 |         point_feats: num_batch x num_feat x num_points (channels-first, Conv1d layout)
117 | 
118 |         Returns
119 |         -------
120 |         point features of the same shape
121 |         """
122 | for res_conv1d_block in self.res_conv1d_blocks:
123 | new_point_feats = res_conv1d_block(point_feats)
124 | point_feats = point_feats + new_point_feats
125 |
126 | return point_feats
127 |
128 |
129 | class Contiguous(nn.Module):
130 | def __init__(self):
131 | super(Contiguous, self).__init__()
132 | self._name = 'contiguous'
133 |
134 | def forward(self, x):
135 | return x.contiguous()
136 |
137 |
138 | class Permute(nn.Module):
139 | def __init__(self, *args):
140 | super(Permute, self).__init__()
141 | self.shape = args
142 | self._name = 'permute'
143 |
144 | def forward(self, x):
145 | return x.permute(self.shape)
146 |
147 |
148 | class Transpose(nn.Module):
149 | def __init__(self, *args):
150 | super(Transpose, self).__init__()
151 | self.shape = args
152 | self._name = 'transpose'
153 |
154 | def forward(self, x):
155 | return x.transpose(*self.shape)
156 |
157 |
158 | class SDivide(nn.Module):
159 | def __init__(self, scale):
160 | super(SDivide, self).__init__()
161 | self.scale = scale
162 | self._name = 'scalar_divide'
163 |
164 | def forward(self, x):
165 | return x / self.scale
166 |
167 |
168 | class SelectItem(nn.Module):
169 | # https://stackoverflow.com/a/54660829
170 | def __init__(self, item_index):
171 | super(SelectItem, self).__init__()
172 | self._name = 'selectitem'
173 | self.item_index = item_index
174 |
175 | def forward(self, inputs):
176 | return inputs[self.item_index]
177 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | License
2 |
3 | Software Copyright License for non-commercial scientific research purposes
4 |
5 | Please read carefully the following terms and conditions and any accompanying documentation before you download and/or
6 | use the SOMA data and software, (the "Data & Software"),
7 | including 3D meshes, images, videos, textures, software, scripts, and animations.
8 | By downloading and/or using the Data & Software
9 | (including downloading, cloning, installing, and any other use of the corresponding github repository),
10 | you acknowledge that you have read these terms and conditions, understand them, and agree to be bound by them.
11 | If you do not agree with these terms and conditions, you must not download and/or use the Data & Software.
12 | Any infringement of the terms of this agreement will automatically terminate your rights under this License.
13 |
14 | Ownership / Licensees
15 | The Data & Software and the associated materials have been developed at the
16 | Max Planck Institute for Intelligent Systems (hereinafter “MPI”).
17 | Any copyright or patent right is owned by and proprietary material of the
18 | Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (hereinafter “MPG”; MPI and MPG hereinafter collectively “Max-Planck” or “Licensor”).
19 |
20 | License Grant
21 | Licensor grants you (Licensee) personally a single-user, non-exclusive, non-transferable, free of charge right:
22 | To install the Data & Software on computers owned, leased or otherwise controlled by you and/or your organization;
23 | To use the Data & Software for the sole purpose of performing non-commercial scientific research, non-commercial education, or non-commercial artistic projects;
24 | Any other use, in particular any use for commercial, pornographic, military, or surveillance purposes is prohibited.
25 | This includes, without limitation, incorporation in a commercial product, use in a commercial service, or production of
26 | other artefacts for commercial purposes. The Data & Software may not be used to create fake, libelous, misleading, or
27 | defamatory content of any kind excluding analyses in peer-reviewed scientific research. The Data & Software may not be
28 | reproduced, modified and/or made available in any form to any third party without Max-Planck’s prior written permission.
29 |
30 | The Data & Software may not be used for pornographic purposes or to generate pornographic material whether commercial or not.
31 | This license also prohibits the use of the Data & Software to train methods/algorithms/neural networks/etc.
32 | for commercial, pornographic, military, surveillance, or defamatory use of any kind.
33 | By downloading the Data & Software, you agree not to reverse engineer it.
34 |
35 | No Distribution
36 | The Data & Software and the license herein granted shall not be copied, shared, distributed, re-sold, offered for re-sale, transferred or sub-licensed in whole or in part except that you may make one copy for archive purposes only.
37 |
38 | Disclaimer of Representations and Warranties
39 | You expressly acknowledge and agree that the Data & Software results from basic research, is provided “AS IS”, may contain errors, and that any use of the Data & Software is at your sole risk.
40 | LICENSOR MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE DATA & SOFTWARE, NEITHER EXPRESS NOR IMPLIED, AND THE ABSENCE OF ANY LEGAL OR ACTUAL DEFECTS, WHETHER DISCOVERABLE OR NOT.
41 | Specifically, and not to limit the foregoing, licensor makes no representations or warranties
42 | (i) regarding the merchantability or fitness for a particular purpose of the Data & Software,
43 | (ii) that the use of the Data & Software will not infringe any patents, copyrights or other intellectual property rights of a third party, and
44 | (iii) that the use of the Data & Software will not cause any damage of any kind to you or a third party.
45 |
46 | Limitation of Liability
47 | Because this Data & Software License Agreement qualifies as a donation, according to Section 521 of the
48 | German Civil Code (Bürgerliches Gesetzbuch – BGB) Licensor as a donor is liable for intent and gross negligence only.
49 | If the Licensor fraudulently conceals a legal or material defect, they are obliged to compensate the Licensee for the resulting damage.
50 | Licensor shall be liable for loss of data only up to the amount of typical recovery costs which would have arisen had proper
51 | and regular data backup measures been taken. For the avoidance of doubt Licensor shall be liable in accordance with the
52 | German Product Liability Act in the event of product liability. The foregoing applies also to Licensor’s legal representatives
53 | or assistants in performance. Any further liability shall be excluded. Patent claims generated through the usage of the
54 | Data & Software cannot be directed towards the copyright holders. The Data & Software is provided in the state of development
55 | the licensor defines. If modified or extended by Licensee, the Licensor makes no claims about the fitness of the Data & Software and is not responsible for any problems such modifications cause.
56 |
57 | No Maintenance Services
58 | You understand and agree that Licensor is under no obligation to provide either maintenance services, update services, notices of latent defects, or corrections of defects with regard to the Data & Software. Licensor nevertheless reserves the right to update, modify, or discontinue the Data & Software at any time. Defects of the Data & Software must be notified in writing to the Licensor with a comprehensible description of the error symptoms. The notification of the defect should enable the reproduction of the error. The Licensee is encouraged to communicate any use, results, modification or publication.
59 |
60 | Subjects' Consent
61 | All subjects gave informed written consent to share their data for research purposes.
62 | You further agree to delete data or change their use, in case a subject changes or withdraws their consent.
63 |
64 | Publications using the Data & Software
65 | You acknowledge that the Data & Software is a valuable scientific resource and agree to appropriately reference the following paper in any publication making use of the Data & Software:
66 |
67 | @inproceedings{SOMA:ICCV:2021,
68 | title = {{SOMA}: Solving Optical Marker-Based MoCap Automatically},
69 | author = {Ghorbani, Nima and Black, Michael J.},
70 | booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
71 | month = oct,
72 | year = {2021},
73 | doi = {},
74 | month_numeric = {10}}
75 |
76 | Commercial licensing opportunities
77 | For commercial uses of the Data & Software, please send email to ps-license@tue.mpg.de
78 |
79 | This Agreement shall be governed by the laws of the Federal Republic of Germany except for the UN Sales Convention.
--------------------------------------------------------------------------------
/src/soma/render/mesh_to_video_standard.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
31 |
32 | import glob
33 | import os
34 | import os.path as osp
35 | from math import radians
36 |
37 | import bpy
38 | from body_visualizer.tools.render_tools import pngs2mp4
39 | from human_body_prior.tools.omni_tools import makepath
40 | from loguru import logger
41 |
42 | from soma.render.blender_tools import make_blender_silent
43 | from soma.render.blender_tools import prepare_render_cfg
44 | from soma.render.blender_tools import setup_scene
45 |
46 |
47 | def run_blender_once(cfg, body_mesh_fname, marker_mesh_fname, png_out_fname):
48 | make_blender_silent()
49 |
50 | bpy.ops.object.delete({"selected_objects": [obj for colec in bpy.data.collections for obj in colec.all_objects if
51 | obj.name in ['Body', 'Object']]})
52 |
53 | if cfg.render.show_body:
54 | bpy.ops.import_scene.obj(filepath=body_mesh_fname)
55 |
56 | body = bpy.context.selected_objects[0]
57 |
58 | body.name = 'Body'
59 |
60 | if cfg.render.rotate_body_object_z:
61 | body.rotation_euler[2] = radians(cfg.render.rotate_body_object_z)
62 |
63 | assert "Body" in bpy.data.materials
64 | body.active_material = bpy.data.materials['Body']
65 |
66 | # enable quads
67 | bpy.context.view_layer.objects.active = body
68 | bpy.ops.object.mode_set(mode='EDIT')
69 | bpy.ops.mesh.tris_convert_to_quads()
70 | bpy.ops.object.mode_set(mode='OBJECT')
71 |
72 | # make the surface smooth
73 | bpy.ops.object.shade_smooth()
74 |
75 | # enable wireframe
76 | bpy.context.view_layer.objects.active = body
77 | bpy.ops.object.mode_set(mode='EDIT')
78 | bpy.ops.mesh.mark_freestyle_edge(clear=False)
79 | bpy.ops.object.mode_set(mode='OBJECT')
80 |
81 | if cfg.render.show_markers:
82 | bpy.ops.import_mesh.ply(filepath=marker_mesh_fname)
83 | marker_mesh = bpy.context.object
84 | marker_mesh.name = 'Object'
85 | # Note: Blender ply importer does not select imported object
86 | # object = bpy.context.selected_objects[0]
87 | marker_mesh.rotation_euler = (radians(90.0), 0.0, 0.0)
88 | if cfg.render.rotate_body_object_z:
89 | marker_mesh.rotation_euler[2] = radians(cfg.render.rotate_body_object_z)
90 |
91 | assert "Object" in bpy.data.materials
92 | marker_mesh.active_material = bpy.data.materials['Object']
93 |
94 | # else:
95 | # setup_vertex_color_material(marker_mesh)
96 |
97 | if cfg.render.camera_tracking_mode:
98 |         # Create a vertex group so that the camera can track the mesh vertices instead of the pivot
99 | body_cam = bpy.data.scenes['Scene'].camera
100 |
101 | # logger.info('creating constrain on vertex_group {} for camera {}'.format(body_part_name, body_cam.name))
102 | for constraint in body_cam.constraints:
103 | if cfg.render.camera_tracking_mode == 'body' and cfg.render.show_body:
104 | constraint.target = body
105 | body_vertex_group = body.vertex_groups.new(name='body')
106 | body_vertex_group.add([v.index for v in body.data.vertices], 1.0, 'ADD')
107 | constraint.subtarget = 'body'
108 | else:
109 | constraint.target = marker_mesh
110 | object_vertex_group = marker_mesh.vertex_groups.new(name='object')
111 | object_vertex_group.add([v.index for v in marker_mesh.data.vertices], 1.0, 'ADD')
112 | constraint.subtarget = 'object'
113 |
114 | # Render
115 | bpy.context.scene.render.filepath = png_out_fname
116 |
117 | # Render
118 | bpy.ops.render.render(write_still=True)
119 | if cfg.render.save_final_blend_file:
120 | bpy.ops.wm.save_as_mainfile(filepath=png_out_fname.replace('.png', '.blend'))
121 |
122 | bpy.ops.object.delete({"selected_objects": [obj for colec in bpy.data.collections for obj in colec.all_objects if
123 | obj.name in ['Body', 'Object']]})
124 |
125 | # # Delete last selected object from scene
126 | # if ps.show_body:
127 | # body.select_set(True)
128 | # if ps.show_object:
129 | # if not ps.show_body:
130 | # bpy.ops.import_scene.obj(filepath=body_mesh_fname)
131 | # marker_mesh.select_set(True)
132 | #
133 | # bpy.ops.object.delete()
134 |
135 | logger.success(f'created {png_out_fname}')
136 |
137 | return
138 |
139 |
140 | def create_video_from_mesh_dir(cfg):
141 | cfg = prepare_render_cfg(**cfg)
142 |
143 | makepath(cfg.dirs.png_out_dir)
144 |
145 | setup_scene(cfg)
146 |
147 | logger.debug(f'input mesh dir: {cfg.dirs.mesh_out_dir}')
148 | logger.debug(f'png_out_dir: {cfg.dirs.png_out_dir}')
149 |
150 | body_mesh_fnames = sorted(glob.glob(os.path.join(cfg.dirs.mesh_out_dir, 'body_mesh', '*.obj')))
151 | assert len(body_mesh_fnames)
152 |
153 | for body_mesh_fname in body_mesh_fnames:
154 | png_out_fname = os.path.join(cfg.dirs.png_out_dir, os.path.basename(body_mesh_fname).replace('.obj', '.png'))
155 | if os.path.exists(png_out_fname):
156 | # logger.debug(f'already exists: {png_out_fname}')
157 | continue
158 |
159 | marker_mesh_fname = body_mesh_fname.replace('/body_mesh/', '/marker_mesh/')
160 | marker_mesh_fname = marker_mesh_fname.replace('.obj', '.ply')
161 |
162 | if cfg.render.show_markers: assert osp.exists(marker_mesh_fname)
163 |
164 | run_blender_once(cfg, body_mesh_fname, marker_mesh_fname, png_out_fname)
165 |
166 | if cfg.render.render_only_one_image: break
167 |
168 | # if os.path.exists(ps.output_mp4path): return
169 | if not cfg.render.render_only_one_image:
170 | if len(glob.glob(os.path.join(cfg.dirs.png_out_dir, '*.png'))) == 0:
171 | logger.error(f'No images were present at {cfg.dirs.png_out_dir}')
172 | return
173 | png_path_pattern = os.path.join(cfg.dirs.png_out_dir, '%*.png')
174 | pngs2mp4(png_path_pattern, cfg.dirs.mp4_out_fname, fps=cfg.render.video_fps)
175 |
176 | # pngs = sorted(glob.glob(os.path.join(ps.png_outdir, '*.png')), key=os.path.getmtime)
177 | # pngs2gif(pngs, ps.output_mp4path.replace('.mp4', '.gif'))
178 |
179 | # shutil.rmtree(ps.png_outdir)
180 | # shutil.rmtree(ps.mesh_dir)
181 |
--------------------------------------------------------------------------------
/src/tutorials/README.md:
--------------------------------------------------------------------------------
1 | ## SOMA Configuration
2 | SOMA code uses [OmegaConf](https://omegaconf.readthedocs.io/en/2.1_branch/) to control different settings
3 | while separating code from configuration files.
4 | Furthermore, OmegaConf YAML files are "smart" in that they support dynamic value resolvers.
5 | This means that the value of a config key can depend on other keys that are *lazily* resolved at runtime.
6 | Refer to the OmegaConf tutorials for a full overview of its capabilities.
7 | This technique is used throughout this project, including SOMA, MoSh++, the evaluation code, and the Blender rendering capabilities.
8 | You can find the configuration YAML files at
9 | ```` soma/support_data/conf ````
10 | MoSh++ code has a similar configuration in its _support_data_ directory.
11 | The entries in these files are default values that can be changed at runtime.
12 | The code in this repository does not provide a command-line interface.
13 | Instead, one can use Jupyter notebooks or plain Python scripts.
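Below is a minimal sketch of this behavior (the keys are illustrative and not taken from the actual SOMA YAML files): runtime overrides merge over the defaults, and interpolated values resolve lazily when accessed.

```python
from omegaconf import OmegaConf

defaults = OmegaConf.create("""
dirs:
  work_base_dir: /tmp/soma
  log_dir: ${dirs.work_base_dir}/logs  # resolved lazily from work_base_dir
""")

# override default values at runtime, e.g. from a notebook or a plain script
overrides = OmegaConf.create({'dirs': {'work_base_dir': '/data/soma'}})
cfg = OmegaConf.merge(defaults, overrides)

print(cfg.dirs.log_dir)  # /data/soma/logs
```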
14 |
15 | ## Definitions
16 | In an optical marker-based mocap system we place light-reflecting or -emitting markers on the subject's body.
17 | If the markers reflect light then the system is called a **passive mocap system**, e.g. VICON.
18 | If the markers emit light then it is called an **active mocap system**, e.g. PhaseSpace.
19 |
20 | MoCap systems use a multi-camera setup to reconstruct **sparse 3D mocap point clouds** from the light received from the markers.
21 | The correspondence of the 3D points and the markers on the body is called a **label**.
22 | Labeling the sparse mocap point cloud data is a bottleneck in capturing high-quality mocap data and SOMA addresses this issue.
23 |
24 | A mocap point cloud carries extra information compared to a usual point cloud in that it usually also contains short **tracklets**.
25 | That is, the mocap hardware outputs groups of points that correspond to the same marker through time.
26 | SOMA assigns the most frequent predicted label to each such group of points, which we call **tracklet labeling**.
27 | Tracklet labeling enhances the stability of the labels.
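
As a minimal, self-contained sketch (the label names are made up), tracklet labeling boils down to a majority vote over the per-frame predictions of a tracklet:

```python
import numpy as np

def tracklet_label(per_frame_labels: np.ndarray) -> str:
    """Assign the most frequent per-frame predicted label to the whole tracklet."""
    labels, counts = np.unique(per_frame_labels, return_counts=True)
    return labels[np.argmax(counts)]

print(tracklet_label(np.array(['RKNE', 'RKNE', 'LKNE', 'RKNE'])))  # -> RKNE
```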
28 |
29 | Active mocap systems provide the full trajectory of each point, since the emitted light has a unique coding frequency for each marker.
30 | However, in archival data markers are often placed at arbitrary, undocumented locations,
31 | hence the correspondence of points to locations on the body, i.e. labeling, is still required.
32 |
33 | **Ghost points** frequently occur due to mocap hardware hallucinations. Note that there are no ghost markers, only ghost points.
34 |
35 | **Marker occlusions** happen when not enough cameras see a marker in the scene
36 | and no point is reconstructed by mocap hardware for that marker.
37 |
38 | **Markerlayout** defines a markerset and a specific placement of it on the body.
39 |
40 | **Markerset** is the set of markers used for a capture session. A markerset carries no information about marker placement.
41 |
42 | **Marker placement** is the actual realization of a markerset on the subject's body during the capture session.
43 |
44 | Both the markerset and the marker placement are subject to change during a capture session.
45 | When the trial coordinator chooses to remove a marker or place a new one, the markerset is altered.
46 | In the worst-case scenario, markers drop off the subject's body due to sweating, rapid motion, or physical shock, and this also changes the markerset.
47 | Trial coordinators place the markers by hand on the subject's body, so the marker placement is always subject to change.
48 | This can even happen during the capture, when the mocap suit simply stretches or folds.
49 |
50 | During SOMA training, we provide only the significant variations of the markerlayout;
51 | one model is then capable of handling markerlayout variations throughout the dataset and even during a capture session.
52 |
53 | In this repository we use MoSh and MoSh++ interchangeably, always meaning MoSh++ unless explicitly stated otherwise.
54 |
55 | ## Folder Structure
56 | In all the tutorials we assume you have prepared the following directory structure.
57 | Apart from the directory for the code, please create a root directory for holding training data, experiments,
58 | runtime results, and support data. This directory will have the following sub-folders:
59 |
60 | 
61 |
62 | For your convenience we have prepared a
63 | [template folder structure for SOMA](https://download.is.tue.mpg.de/soma/tutorials/SOMA_FOLDER_TEMPLATE.tar.bz2).
64 |
65 | This structure is an example case for a model trained on the [SOMA dataset](https://soma.is.tue.mpg.de/download.php); i.e. V48_02_SOMA.
66 | Note that multiple experiments can share one data ID, which is why the data ID, V48_01_SOMA, differs from the experiment ID.
67 |
68 | Please obtain the SMPL-X body model with a locked head for SOMA from [this link](https://smpl-x.is.tue.mpg.de/download.php) and the
69 | [extra smplx data](https://download.is.tue.mpg.de/download.php?domain=soma&sfile=smplx/extra_smplx_data.tar.bz2)
70 | and place them in the smplx folder as shown in the above image.
71 |
72 |
73 | We assume a mocap session has three identifiers: a project (i.e. dataset) name, a subject name, and sequence names.
74 | SOMA expects to receive mocap sequences in this folder structure: dataset>subject>sequence.c3d.
75 | This is also the case for MoSh++. Additionally, MoSh++ expects a settings.json file inside the subject's folder
76 | that holds the subject's gender information.
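
As a minimal sketch of this layout (all paths are illustrative, and since the exact settings.json schema is defined by MoSh++, treat the `gender` key below as an assumption), sequences can be discovered like this:

```python
import glob
import json
import os.path as osp

dataset_dir = '/path/to/mocaps/MyDataset'  # hypothetical dataset root

# dataset > subject > sequence.c3d
for c3d_fname in sorted(glob.glob(osp.join(dataset_dir, '*', '*.c3d'))):
    subject_dir = osp.dirname(c3d_fname)
    settings_fname = osp.join(subject_dir, 'settings.json')  # read by MoSh++
    if osp.exists(settings_fname):
        with open(settings_fname) as f:
            settings = json.load(f)  # e.g. {"gender": "female"}
    print(osp.basename(subject_dir), osp.basename(c3d_fname))
```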
77 |
78 |
79 | ## Training SOMA
80 |
81 | SOMA uses synthetic data for training. The data generation pipeline is built on the AMASS bodies in
82 | [SMPL-X gender neutral format](https://amass.is.tue.mpg.de/download.php).
83 | Originally, SOMA was trained using body parameters of the
84 | ACCAD, CMU, HumanEva, PosePrior, TotalCapture, and Transitions datasets.
85 | Download **SMPL-X gender neutral** _body data_ of these datasets and place them under
86 | ```` support_files/smplx/amass_neutral ````.
87 |
88 | In addition, we use 3764 [CAESAR](https://www.humanics-es.com/CAESARvol1.pdf) SMPL-X parameters for more subject variations.
89 | Due to licensing restrictions, we cannot release beta parameters for CAESAR subjects.
90 | You can download the already prepared
91 | [body parameters without CAESAR subjects](https://download.is.tue.mpg.de/download.php?domain=soma&sfile=smplx/data/V48_01_HDM05_NoCAESAR.tar.bz2).
92 |
93 | SOMA places virtual markers on these bodies following a set of given markerlayouts.
94 | A markerlayout can be either a labeled mocap frame given as a c3d file, or a json file.
95 | If a c3d file is given, SOMA will automatically run MoSh++ to obtain the markerset and marker placement for the markerlayout.
96 | You can obtain the markerlayouts for the experiments in the paper from [here](https://soma.is.tue.mpg.de/download.php).
97 |
98 | SOMA uses the AMASS marker noise model to help generalize to mocap hardware differences.
99 | This model copies the noise for each label from the real AMASS mocap markers.
100 | The noise is the difference between the MoSh++ simulated markers and the real markers of the mocap dataset.
101 | Due to license restrictions, AMASS does not release the real marker data of its subset datasets;
102 | hence, to use this noise model you need either a given AMASS noise model or the original mocap markers of the mentioned datasets.
103 | We release the AMASS noise model for the experiments in the paper.
104 | You can find the web addresses for these datasets on the [AMASS download page](https://amass.is.tue.mpg.de/download.php).
105 | In case you have downloaded the original marker data, you can run the script at
106 | ```` src/soma/amass/prepare_amass_smplx.py ````
107 | to produce the MoSh simulated markers necessary for the AMASS marker noise model.
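
The following is only a conceptual sketch of this noise model (file names, keys, and array layouts are assumptions, not the actual SOMA data format):

```python
import numpy as np

# hypothetical aligned arrays for one sequence: T frames x M markers x 3
real_markers = np.load('real_markers.npz')['markers']
simulated_markers = np.load('simulated_markers.npz')['markers']

# per-label noise: difference between MoSh++ simulated and real marker positions
marker_noise = simulated_markers - real_markers  # T x M x 3

# during synthetic data generation, sampled noise vectors can perturb the
# virtual markers so the network sees hardware-like, label-specific noise
t = np.random.randint(len(marker_noise))
noisy_virtual_markers = simulated_markers + marker_noise[t]
```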
108 |
109 | SOMA is implemented in PyTorch and the training code benefits from the
110 | [PyTorch Lightning](https://www.pytorchlightning.ai/) (PTL) framework.
111 | PTL standardizes the training procedure and enables easy multi-GPU training.
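
As a rough illustration of the PTL style (this toy module is not the SOMA model; the actual entry point is `src/soma/train/train_soma_multiple.py` with its own configuration):

```python
import torch
import pytorch_lightning as pl

class TinyLabeler(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.net = torch.nn.Linear(3, 10)  # toy stand-in for the SOMA network

    def training_step(self, batch, batch_idx):
        points, label_ids = batch  # points: N x 3, label_ids: N
        return torch.nn.functional.cross_entropy(self.net(points), label_ids)

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters())

# trainer = pl.Trainer(max_epochs=1)  # PTL handles devices, logging, checkpoints
# trainer.fit(TinyLabeler(), train_dataloader)
```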
112 |
113 | We provide Jupyter notebooks to demonstrate use cases of SOMA with hands-on examples:
114 | - [Run SOMA On MoCap Point Cloud Data](run_soma_on_soma_dataset.ipynb)
115 | - [Label Priming an Unknown Marker Layout](label_priming.ipynb)
116 | - [SOMA Ablative Studies](ablation_study.ipynb)
117 | - [Solve Already Labeled MoCaps With MoSh++](solve_labeled_mocap.ipynb)
118 |
--------------------------------------------------------------------------------
/src/soma/amass/prepare_amass_smplx.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
31 |
32 | """
33 | This script runs MoSh on the subset datasets of AMASS to prepare SOMA's train and validation body parameters and simulated markers.
34 | The simulated markers can be used to prepare the AMASS marker noise dataset.
35 | If you don't want to run this script you can acquire only the AMASS SMPL-X gender neutral body parameters from amass.is.tue.mpg.de.
36 | We release the AMASS marker noise model for the pretrained SOMA models.
37 | """
38 |
39 | import os.path as osp
40 |
41 | from human_body_prior.tools.omni_tools import get_support_data_dir
42 | from loguru import logger
43 | from omegaconf import OmegaConf
44 |
45 | from moshpp.mosh_head import MoSh
46 | from moshpp.mosh_head import run_moshpp_once
47 | from soma.amass.amass_info import amass_datasets
48 | from soma.render.blender_tools import prepare_render_cfg
49 | from soma.render.blender_tools import render_mosh_once
50 | from soma.tools.parallel_tools import run_parallel_jobs
51 |
52 |
53 | def prepare_amass_smplx(mosh_cfg: dict = None,
54 | render_cfg: dict = None,
55 | parallel_cfg: dict = None,
56 | **kwargs):
57 | if parallel_cfg is None: parallel_cfg = {}
58 | if mosh_cfg is None: mosh_cfg = {}
59 | if render_cfg is None: render_cfg = {}
60 |
61 | run_tasks = kwargs.get('run_tasks', ['mosh', 'render'])
62 |
63 | only_stagei = kwargs.get('only_stagei', False)
64 | only_datasets = kwargs.get('only_datasets', None)
65 | fast_dev_run = kwargs.get('fast_dev_run', False)
66 | determine_shape_for_each_seq = kwargs.get('determine_shape_for_each_seq', False)
67 |
68 | app_support_dir = get_support_data_dir(__file__)
69 |
70 | fname_filter = kwargs.get('fname_filter', None)
71 |
72 | mosh_jobs = []
73 | render_jobs = []
74 | exclude_mosh_job_keys = []
75 |
76 | for ds_name, ds_cfg in amass_datasets.items():
77 | if (only_datasets is not None) and (ds_name not in only_datasets): continue
78 |
79 | assert len(ds_cfg['mocap_fnames']) > 0, ValueError(f'Found no mocap for {ds_name}')
80 | logger.info(f"Found #{len(ds_cfg['mocap_fnames'])} mocaps for {ds_name}")
81 |
82 | if fast_dev_run: ds_cfg['mocap_fnames'] = ds_cfg['mocap_fnames'][:3]
83 |
84 | for mocap_fname in ds_cfg['mocap_fnames']:
85 |
86 | if fname_filter:
87 | if not sum([i in mocap_fname for i in fname_filter]): continue
88 | mocap_key = '_'.join(mocap_fname.split('/')[-3:-1])
89 |
90 | persubject_marker_layout = ds_cfg.get('persubject_marker_layout', False)
91 | mosh_job = mosh_cfg.copy()
92 | mosh_job.update({
93 | 'mocap.fname': mocap_fname,
94 | 'mocap.ds_name': ds_name,
95 | **ds_cfg['mosh_cfg_override']
96 | })
97 | if persubject_marker_layout:
98 | # todo: do we need to pick the mocaps to produce the layout here?
99 | mosh_job.update({
100 | 'dirs.marker_layout_fname': '${dirs.work_base_dir}/${mocap.ds_name}/${mocap.ds_name}_${mocap.subject_name}_${surface_model.type}.json',
101 | })
102 | if 'subject_specific_settings' in ds_cfg:
103 | subject_name = mocap_fname.split('/')[-2]
104 | if subject_name in ds_cfg['subject_specific_settings']:
105 | mosh_job.update(**ds_cfg['subject_specific_settings'][subject_name])
106 |
107 | cur_mosh_cfg = MoSh.prepare_cfg(**mosh_job.copy())
108 |
109 | if only_stagei and osp.exists(cur_mosh_cfg.dirs.stagei_fname): continue
110 |
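            # while a subject's shared stage-i (shape) result is missing, submit only the first
            # sequence of that dataset/subject pair, unless the shape is estimated per sequence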
111 | if mocap_key not in exclude_mosh_job_keys and not osp.exists(cur_mosh_cfg.dirs.stageii_fname):
112 | if not osp.exists(cur_mosh_cfg.dirs.stagei_fname) and not determine_shape_for_each_seq: exclude_mosh_job_keys.append(mocap_key)
113 | mosh_jobs.append(mosh_job.copy())
114 | continue
115 |
116 | if osp.exists(cur_mosh_cfg.dirs.stageii_fname):
117 | render_job = render_cfg.copy()
118 | render_job.update({
119 | 'mesh.mosh_stageii_pkl_fnames': [cur_mosh_cfg.dirs.stageii_fname],
120 | **ds_cfg.get('render_cfg_override', {})
121 | })
122 | cur_render_cfg = prepare_render_cfg(**render_job)
123 | if not osp.exists(cur_render_cfg.dirs.mp4_out_fname):
124 | render_jobs.append(render_job)
125 |
126 | if 'mosh' in run_tasks:
127 | logger.info('Submitting MoSh++ jobs.')
128 |
129 | base_parallel_cfg = OmegaConf.load(osp.join(app_support_dir, 'conf/parallel_conf/moshpp_parallel.yaml'))
130 | moshpp_parallel_cfg = OmegaConf.merge(base_parallel_cfg, OmegaConf.create(parallel_cfg))
131 | run_parallel_jobs(func=run_moshpp_once, jobs=mosh_jobs, parallel_cfg=moshpp_parallel_cfg)
132 |
133 | if 'render' in run_tasks:
134 | logger.info('Submitting render jobs.')
135 |
136 | base_parallel_cfg = OmegaConf.load(osp.join(app_support_dir, 'conf/parallel_conf/blender_parallel.yaml'))
137 | render_parallel_cfg = OmegaConf.merge(base_parallel_cfg, OmegaConf.create(parallel_cfg))
138 | run_parallel_jobs(func=render_mosh_once, jobs=render_jobs, parallel_cfg=render_parallel_cfg)
139 |
140 |
141 | if __name__ == '__main__':
142 | # only_datasets = ['HumanEva', 'ACCAD', 'TotalCapture', 'CMU', 'Transitions', 'PosePrior']
143 | # only_datasets = ['SSM']
144 | # only_datasets = list(set(amass_datasets.keys()).difference(set(only_datasets)))
145 | prepare_amass_smplx(
146 | mosh_cfg={
147 | 'moshpp.verbosity': 1,
148 | 'surface_model.gender': 'neutral',
149 | 'dirs.work_base_dir': '/is/cluster/scratch/nghorbani/amass/mosh_results/20210726/amass_neutral',
150 | # 'dirs.work_base_dir': '/is/cluster/scratch/nghorbani/amass/mosh_results/20210726/amass_gender_specific',
151 | },
152 | render_cfg={
153 | 'dirs.work_base_dir': '/is/cluster/scratch/nghorbani/amass/mp4_renders/20210726/amass_neutral',
154 | # 'dirs.work_base_dir': '/is/cluster/scratch/nghorbani/amass/mp4_renders/20210726/amass_gender_specific',
155 | 'render.render_engine': 'cycles', # eevee / cycles,
156 | # 'render.render_engine': 'cycles', # eevee / cycles,
157 | # 'render.save_final_blend_file': True
158 | 'render.floor.enable': False,
159 | },
160 | parallel_cfg={
161 | 'pool_size': 0,
162 | 'max_num_jobs': -1,
163 | 'randomly_run_jobs': True,
164 | },
165 | run_tasks=[
166 | 'mosh',
167 | 'render',
168 | ],
169 | # fast_dev_run=True,
170 | # only_datasets=[
171 | # 'WEIZMANN',
172 | # ],
173 | # only_datasets=only_datasets,
174 | # only_datasets=['SSM', 'HumanEva', 'ACCAD', 'TotalCapture', 'CMU', 'Transitions', 'PosePrior', 'HDM05'],
175 | # fname_filter=['29/29_12', '86/86_05', '86/86_04'], # ['SSM_synced/resynced/20160330_03333'],
176 | )
177 |
--------------------------------------------------------------------------------
/src/soma/tools/eval_tools.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
31 | import numpy as np
32 | import pandas as pd
33 | from human_body_prior.tools.omni_tools import flatten_list
34 | from loguru import logger
35 | from pandas import ExcelWriter
36 | from sklearn.metrics import classification_report, confusion_matrix
37 |
38 | from moshpp.tools.mocap_interface import MocapSession
39 |
40 |
41 | def find_corresponding_labels(markers_gt: np.ndarray,
42 | labels_gt: np.ndarray,
43 | markers_rec: np.ndarray,
44 | labels_rec: np.ndarray,
45 | flatten_output=True,
46 | rtol=1e-3, atol=1e-8) -> dict:
47 | assert len(markers_gt) == len(markers_rec)
48 |
49 | labels_gt = np.array(labels_gt)
50 | labels_rec = np.array(labels_rec)
51 |
52 | gt_nonan_mask = MocapSession.marker_availability_mask(markers_gt)
53 | rec_nonan_mask = MocapSession.marker_availability_mask(markers_rec)
54 |
55 | mocap_length = len(markers_gt)
56 |
57 | labels_gt_aligned = []
58 | rec_labels_aligned = []
59 | markers_aligned = []
60 |
61 | for t in range(mocap_length):
62 |
63 | # rec (soma) markers must be subset of gt markers. find subset of gt markers assignable to soma markers
64 | # per frame labels are repeats of trajectory labels. so this will not hold
65 | # assert (labels_rec[t, rec_nan_mask[t]] == 'nan').sum() == rec_nan_mask[t].sum()
66 |
67 | if not rec_nonan_mask[t].sum() <= gt_nonan_mask[t].sum():
68 | missing_gt_markers = list(set(labels_rec[t].tolist()).difference(set(labels_gt[t].tolist())))
69 | if not (len(missing_gt_markers) == 1 and missing_gt_markers[0] == 'nan'):
70 |                     logger.info(f'Frame {t}: There are more soma markers than gt markers: '
71 |                                 f'{rec_nonan_mask[t].sum()} vs. '
72 |                                 f'{gt_nonan_mask[t].sum()}. '
73 |                                 f'Probably the following markers are not labeled in the gt mocap: {missing_gt_markers}')
74 |
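        # per-coordinate absolute differences between every gt and rec point in frame t;
        # a rec point is matched to a gt point when all three coordinates are close to zero (within rtol/atol)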
75 | gt2rec_dist = np.sqrt(
76 | np.power(markers_rec[t, rec_nonan_mask[t]][None] - markers_gt[t, gt_nonan_mask[t]][:, None], 2))
77 | gt2rec_best = np.isclose(gt2rec_dist, 0, rtol=rtol, atol=atol).sum(-1) == 3
78 |
79 | if gt2rec_best.sum() == 0: continue
80 | # assert gt2rec_best.sum(), ValueError('Not a single soma point could be assigned to a gt point. This cannot be true.')
81 | # print(gt2rec_best.sum(), gt_nonan_mask[t].sum())
82 | if gt2rec_best.sum() > gt_nonan_mask[t].sum():
83 |             logger.error(f'Frame {t}: There are overlapping soma-to-gt assignments:'
84 | f' {gt2rec_best.sum()} vs. possible {gt_nonan_mask[t].sum()}')
85 | logger.error('Try reducing either rtol or atol')
86 |
87 | gt_single_assigned = gt2rec_best.sum(-1) == 1
88 | gt2rec_ids = gt2rec_best.argmax(-1)[gt_single_assigned]
89 |
90 | assert (np.unique(gt2rec_ids, return_counts=True)[1] > 1).sum() == 0, ValueError(
91 | 'Multiple gt labels could be assigned to a rec label.')
92 |
93 | # count_nans = np.logical_not(gt_nonan_mask[t]).sum() + np.logical_not(rec_nonan_mask[t]).sum()
94 | # rec_labels_aligned.append(labels_rec[t, rec_nonan_mask[t]][gt2rec_ids].tolist() + ['nan' for _ in range(count_nans)])
95 | # labels_gt_aligned.append(labels_gt[t,gt_nonan_mask[t]][gt_single_assigned].tolist() + ['nan' for _ in range(count_nans)])
96 |
97 | rec_labels_aligned.append(labels_rec[t, rec_nonan_mask[t]][gt2rec_ids].tolist())
98 | labels_gt_aligned.append(labels_gt[t, gt_nonan_mask[t]][gt_single_assigned].tolist())
99 | markers_aligned.append(markers_gt[t, gt_nonan_mask[t]][gt_single_assigned].tolist())
100 |
101 | if flatten_output:
102 | labels_gt_aligned = flatten_list(labels_gt_aligned)
103 | rec_labels_aligned = flatten_list(rec_labels_aligned)
104 | markers_aligned = flatten_list(markers_aligned)
105 |
106 | return {'labels_gt': labels_gt_aligned,
107 | 'labels_rec': rec_labels_aligned,
108 | 'markers': markers_aligned}
109 |
110 |
111 | def compute_labeling_metrics(labels_gt, labels_rec, create_excel_dfs=True, out_fname=None):
112 | # assert avg_mode in ['micro', 'macro', 'weighted']
113 | if len(labels_rec) == 0:
114 |         logger.error('No label was ever detected by SOMA. Have you run the soma_processor?')
115 | return {'f1': 0,
116 | 'acc': 0,
117 | 'prec': 0,
118 | 'recall': 0}
119 |
120 | # superset_labels = sorted(list(set(labels_gt)))
121 | superset_labels = sorted(list(set(labels_rec + labels_gt)))
122 | # if 'nan' not in superset_labels:
123 | # superset_labels += ['nan']
124 | # else:
125 | # superset_labels.pop(superset_labels.index('nan'))
126 | # superset_labels += ['nan']
127 |
128 | all_label_map = {k: superset_labels.index(k) for k in superset_labels}
129 | assert len(all_label_map) == len(set(all_label_map.keys())) # keys should be unique
130 | #
131 | label_ids_gt = np.array([all_label_map[k] for k in labels_gt])
132 | label_ids_rec = np.array([all_label_map[k] for k in labels_rec])
133 |
134 | avg_mode = 'macro'
135 |
136 |     # The support is the number of occurrences of each class in y_true,
137 |     # so if a label is not present in labels_gt but present in labels_rec it will get a 0 percent.
138 |     # This could happen when a marker layout is changed for a capture and soma still assigns a nearby label.
139 | labeling_report = classification_report(label_ids_gt, label_ids_rec,
140 | output_dict=True, labels=np.arange(len(superset_labels)),
141 | target_names=superset_labels, zero_division=0)
142 |
143 | # accuracy = accuracy_score(label_ids_gt, label_ids_rec)
144 | # accuracy = jaccard_score(label_ids_gt, label_ids_rec, labels=np.arange(len(superset_labels)), average='macro')
145 | #
146 | f1_score = labeling_report[f'{avg_mode} avg']['f1-score']
147 | precision = labeling_report[f'{avg_mode} avg']['precision']
148 | recall = labeling_report[f'{avg_mode} avg']['recall']
149 | accuracy = labeling_report['accuracy']
150 |
151 | results = {'f1': f1_score,
152 | 'acc': accuracy,
153 | 'prec': precision,
154 | 'recall': recall}
155 |
156 | if create_excel_dfs:
157 | cm = confusion_matrix(label_ids_gt, label_ids_rec, labels=range(len(superset_labels)))
158 |
159 | # per_class_acc = cm.diagonal()/cm.sum(axis=1)
160 | # for k, v in zip(superset_labels, per_class_acc):
161 | # labeling_report[k].update({'acc':v})
162 |
163 | df_cm = pd.DataFrame(cm, index=superset_labels, columns=superset_labels)
164 |
165 | labeling_report = pd.DataFrame(labeling_report).transpose()
166 |
167 | excel_dfs = {'labeling_report': labeling_report,
168 | 'confusion_matrix': df_cm}
169 | results.update(excel_dfs)
170 |
171 | if out_fname:
172 | assert out_fname.endswith('.xlsx')
173 | save_xlsx(excel_dfs, xlsx_fname=out_fname)
174 |
175 | return results
176 |
177 |
178 | def save_xlsx(dicts_dfs, xlsx_fname):
179 | with ExcelWriter(xlsx_fname, engine='xlsxwriter') as writer:
180 | for name, df in dicts_dfs.items():
181 | df.to_excel(writer, sheet_name=name)
182 |
--------------------------------------------------------------------------------
/src/soma/models/soma_model.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
31 |
32 | from collections import OrderedDict
33 |
34 | import numpy as np
35 | import torch
36 | from omegaconf import DictConfig
37 | from torch import nn
38 |
39 | from moshpp.marker_layout.edit_tools import marker_layout_load
40 | from moshpp.marker_layout.labels_map import general_labels_map
41 | from soma.models.model_components import LayeredResConv1d
42 | from soma.models.model_components import Transpose, ResConv1DBlock, SDivide
43 | from soma.models.optimal_transport import log_optimal_transport
44 | from soma.models.transformer import LayeredSelfAttention
45 |
46 |
47 | def masked_mean(tensor, mask, dim, keepdim=False):
48 | masked = torch.mul(tensor, mask) # Apply the mask using an element-wise multiply
49 | return masked.sum(dim=dim, keepdim=keepdim) / mask.sum(dim=dim, keepdim=keepdim) # Find the average!
50 |
51 |
52 | class ScorePredictor(nn.Module):
53 | def __init__(self, num_labels: int, enable_transformer: bool = True, enable_sinkhorn: bool = False,
54 | num_total_attention_feat: int = 125, num_attention_layers: int = 8, num_attention_heads: int = 5):
55 | super(ScorePredictor, self).__init__()
56 |
57 | self.enable_transformer = enable_transformer
58 |
59 |         # the multiplication with num_total_attention_feat is to increase the number of parameters when the transformer is disabled
60 | self.score_predictor_b1 = nn.Sequential( # per body part
61 | Transpose(-2, -1),
62 | ResConv1DBlock(3, num_total_attention_feat),
63 | # if enable_transformer else ResConv1DBlock(3, 4 * num_total_attention_feat),
64 | nn.ReLU(), )
65 |
66 | if enable_transformer:
67 | self.point_attention = LayeredSelfAttention(num_total_attention_feat,
68 | num_attention_layers,
69 | num_attention_heads)
70 | else:
71 | self.conv1d_block = LayeredResConv1d(num_total_attention_feat,
72 | num_total_attention_feat,
73 |                                                  num_layers=num_attention_layers)  # the factor is to increase the count of model parameters
74 |
75 | self.score_predictor_b2 = nn.Sequential(nn.ReLU(),
76 | ResConv1DBlock(num_total_attention_feat, num_total_attention_feat * 2),
77 | nn.ReLU(),
78 | SDivide(num_total_attention_feat ** .5),
79 | nn.Conv1d(num_total_attention_feat * 2,
80 | num_labels if enable_sinkhorn else num_labels + 1, 1, 1),
81 | Transpose(-2, -1),
82 | )
83 |
84 | def forward(self, pts_centered):
85 | output_dict = {}
86 |
87 | b1_res = self.score_predictor_b1(pts_centered)
88 |
89 | if self.enable_transformer:
90 | att_res, att_weights = self.point_attention(b1_res)
91 | output_dict['scores'] = self.score_predictor_b2(att_res)
92 |
93 | output_dict['attention_weights'] = att_weights
94 | else:
95 | conv_res = self.conv1d_block(b1_res)
96 | output_dict['scores'] = self.score_predictor_b2(conv_res)
97 |
98 | return output_dict
99 |
100 |
101 | class SOMA(nn.Module):
102 |
103 | def __init__(self, cfg: DictConfig):
104 | super(SOMA, self).__init__()
105 |
106 | superset_fname = cfg.data_parms.marker_dataset.superset_fname
107 |
108 | superset_meta = marker_layout_load(superset_fname, labels_map=general_labels_map)
109 | num_labels = OrderedDict({k: np.sum(v) for k, v in superset_meta['marker_type_mask'].items()})
110 |
111 | num_total_attention_feat = cfg.model_parms.labeler.num_total_attention_feat
112 | num_attention_layers = cfg.model_parms.labeler.num_attention_layers
113 | num_attention_heads = cfg.model_parms.labeler.num_attention_heads
114 |
115 | self.has_multiple_body_parts = len(num_labels) > 1 # num_labels is a dictionary
116 | self.num_labels = num_labels
117 | self.enable_transformer = cfg.model_parms.labeler.enable_transformer
118 | self.enable_sinkhorn = cfg.model_parms.labeler.enable_sinkhorn
119 |
120 | if self.has_multiple_body_parts:
121 | raise NotImplementedError('This functionality is not released for current SOMA.')
122 |
123 | else:
124 | num_part_labels = num_labels[list(num_labels.keys())[0]]
125 | self.score_predictor = ScorePredictor(num_labels=num_part_labels,
126 | enable_transformer=self.enable_transformer,
127 | enable_sinkhorn=self.enable_sinkhorn,
128 | num_total_attention_feat=num_total_attention_feat,
129 | num_attention_layers=num_attention_layers,
130 | num_attention_heads=num_attention_heads)
131 |
132 | if self.enable_sinkhorn:
133 | bin_score = torch.nn.Parameter(torch.tensor(1.))
134 | self.register_parameter('bin_score', bin_score)
135 | self.num_sinkhorn_iters = cfg.model_parms.labeler.num_sinkhorn_iters
136 | else:
137 | self.log_softmax = nn.LogSoftmax(dim=-1)
138 |
139 | def forward(self, pts):
140 | """
141 |
142 | Args:
143 | pts: bs x n_pts x 3
144 |
145 | Returns:
146 |
147 | """
148 | output_dict = {}
149 |
150 | pts_offset = SOMA.compute_offsets(pts)
151 | pts_centered = pts - pts_offset
152 |
153 | if self.has_multiple_body_parts:
154 | raise NotImplementedError('This functionality is not released for current SOMA.')
155 |
156 | else:
157 | score_predictor_res = self.score_predictor(pts_centered)
158 | scores = score_predictor_res['scores']
159 | if 'attention_weights' in score_predictor_res:
160 | output_dict.update({'attention_weights': score_predictor_res['attention_weights']})
161 |
162 | if self.enable_sinkhorn:
163 |
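                # log_optimal_transport augments the score matrix with a learned dustbin (bin_score)
                # so that unmatchable points, e.g. ghost points, can be absorbed; the extra dustbin
                # row is stripped below when extracting per-point labels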
164 | aug_asmat = log_optimal_transport(scores, self.bin_score, iters=self.num_sinkhorn_iters)
165 |
166 | output_dict.update({
167 | 'label_ids': aug_asmat[:, :-1].argmax(-1),
168 | 'label_confidence': aug_asmat[:, :-1].exp(),
169 | 'aug_asmat': aug_asmat,
170 | })
171 | else:
172 | asmat = self.log_softmax(scores)
173 | output_dict.update({
174 | 'label_ids': asmat.argmax(-1),
175 | 'label_confidence': asmat.exp(),
176 | 'aug_asmat': asmat,
177 | })
178 |
179 | return output_dict
180 |
181 | @staticmethod
182 | def compute_offsets(points):
183 | """
184 |         given a batch of sequences of points, compute the center of the points at the first time frame;
185 |         this is basically the bos offset
186 | Args:
187 | points: Nxnum_pointsx3
188 |
189 | Returns:
190 | Nx1x3
191 |
192 | """
193 | bs, num_markers, _ = points.shape
194 |
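        # a point whose three coordinates are all exactly zero encodes an occluded/absent marker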
195 | nonzero_mask = ((points == 0.0).sum(-1) != 3)
196 | offsets = []
197 | for i in range(bs):
198 | if nonzero_mask[i].sum() == 0:
199 | offsets.append(points.new(np.zeros([1,3])))
200 | continue
201 | offsets.append(torch.median(points[i, nonzero_mask[i]], dim=0, keepdim=True).values)
202 | return torch.cat(offsets, dim=0).view(bs, 1, 3)
203 |
--------------------------------------------------------------------------------
/src/soma/data/mocap_noise_tools.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
31 |
32 | import numpy as np
33 | import torch
34 |
35 |
36 | # def make_ghost_points(markers, num_ghost_max, use_exact_num_ghost=False):
37 | # '''
38 | #
39 | # Args:
40 | # markers: Txnum_pointsx3
41 | # num_ghost_max:
42 | #
43 | # Returns:
44 | #
45 | # '''
46 | # if num_ghost_max == 0: return None
47 | #
48 | # T, num_markers = markers.shape[:-1]
49 | #
50 | # mrk_median, mrk_std = torch.median(markers, 1).values, markers.std(1)
51 | # mrk_std_batched = torch.eye(3).reshape((1, 3, 3)).repeat(T, 1, 1) * mrk_std.reshape((-1, 1, 3))
52 | # ghost_mrk_generator = torch.distributions.MultivariateNormal(mrk_median, scale_tril=mrk_std_batched)
53 | #
54 | # ghost_markers = torch.cat([ghost_mrk_generator.sample()[:, None] for _ in range(num_ghost_max)], dim=1)
55 | #
56 | # if use_exact_num_ghost:
57 | #
58 | # # Number of ghost markers should vary across frames. to do this we append a dummy marker
59 | # # and sample from a random generator that produces X times more than the number of ghost markers
60 | # # any number in the mask bigger than the actual number of ghosts is set to the dummy marker
61 | # ghost_markers = torch.cat([ghost_markers, torch.zeros(T, 1, 3)], dim=1)
62 | # ghost_reject_mask = np.random.randint(3 * num_ghost_max, size=(T, num_ghost_max))
63 | # ghost_reject_mask[ghost_reject_mask >= num_ghost_max] = ghost_markers.shape[1] - 1
64 | # ghost_reject_mask = np.repeat(ghost_reject_mask[:, :, None], 3, axis=-1)
65 | # np.put_along_axis(ghost_markers, ghost_reject_mask, 0, axis=1)
66 | # ghost_markers = ghost_markers[:, :-1]
67 | #
68 | #
69 | # return ghost_markers
70 |
71 | def make_ghost_points(markers, num_ghost_max, ghost_distribution='spherical_gaussian', use_upto_num_ghost=False):
72 | # todo: do you really need use_exact_num_ghost.
73 | # todo: is use_exact_num_ghost doing what it is intended for?
74 |
75 | assert ghost_distribution in ['spherical_gaussian', 'uniform', 'skewed_gaussian']
76 |
77 | if num_ghost_max == 0: return None
78 |
79 | time_length, num_markers = markers.shape[:-1]
80 |
81 | mrk_median, mrk_std = torch.median(markers, 1).values, markers.std(1)
82 | mrk_std_batched = torch.eye(3).reshape((1, 3, 3)).repeat(time_length, 1, 1) * mrk_std.reshape((-1, 1, 3))
83 |
84 | if ghost_distribution == 'spherical_gaussian':
85 | ghost_mrk_generator = torch.distributions.MultivariateNormal(mrk_median, scale_tril=mrk_std_batched)
86 |
87 | ghost_markers = torch.cat([ghost_mrk_generator.sample()[:, None] for _ in range(num_ghost_max)], dim=1)
88 |
89 | elif ghost_distribution == 'uniform':
90 | assert time_length == 1
91 | uniform_dist = torch.distributions.uniform.Uniform(low=-2, high=2)
92 | ghost_markers = \
93 | torch.stack([torch.stack([uniform_dist.sample() for _ in range(3)]) for _ in range(num_ghost_max)], dim=0)[
94 | None]
95 |
96 | elif ghost_distribution == 'skewed_gaussian':
97 | assert time_length == 1
98 |
99 | from scipy.stats import random_correlation
100 | random_eigens = np.random.uniform(size=3)
101 | random_eigens = (random_eigens / random_eigens.sum()) * 3
102 | random_cov = random_correlation.rvs(random_eigens)
103 | # random_cov = random_cov.dot(random_cov.time_length)
104 | random_mean = np.random.uniform(low=-2, high=2, size=3)
105 |
106 | ghost_mrk_generator = torch.distributions.MultivariateNormal(mrk_median.new(random_mean[None]),
107 | covariance_matrix=mrk_std_batched.new(
108 | random_cov[None]))
109 |
110 | ghost_markers = torch.cat([ghost_mrk_generator.sample()[:, None] for _ in range(num_ghost_max)], dim=1)
111 |
112 | else:
113 | raise NotImplementedError
114 |
115 | if use_upto_num_ghost:
116 | # Number of ghost markers should vary across frames. to do this we append a dummy marker
117 | # and sample from a random generator that produces X times more than the number of ghost markers
118 | # any number in the mask bigger than the actual number of ghosts is set to the dummy marker
119 | ghost_markers = torch.cat([ghost_markers, torch.zeros(time_length, 1, 3)], dim=1)
120 | ghost_reject_mask = np.random.randint(3 * num_ghost_max, size=(time_length, num_ghost_max))
121 | ghost_reject_mask[ghost_reject_mask >= num_ghost_max] = ghost_markers.shape[1] - 1
122 | ghost_reject_mask = np.repeat(ghost_reject_mask[:, :, None], 3, axis=-1)
123 | np.put_along_axis(ghost_markers, ghost_reject_mask, 0, axis=1)
124 | ghost_markers = ghost_markers[:, :-1]
125 |
126 | return ghost_markers
127 |
128 |
129 | # def occlude_points(markers, num_occ_max, use_exact_num_oc=False):
130 | # '''
131 | #
132 | # Args:
133 | # markers: Txnum_pointsx3
134 | # num_occ_max:
135 | #
136 | # Returns:
137 | #
138 | # '''
139 | # T, num_markers = markers.shape[:-1]
140 | #
141 | # markers = torch.cat([markers, torch.zeros(T, 1, 3)], dim=1)
142 | # if use_exact_num_oc:
143 | # occ_mask = []
144 | # for t in range(T):
145 | # occ_mask.append(np.random.choice(num_markers, size=num_occ_max, replace=False))
146 | # occ_mask = np.stack(occ_mask)
147 | # else:
148 | # occ_mask = np.random.randint(3 * num_markers, size=(T, num_occ_max))
149 | # occ_mask[occ_mask >= num_markers] = markers.shape[1] - 1
150 | #
151 | # occ_mask = np.repeat(occ_mask[:, :, None], 3, axis=-1)#so that all x,y,z channels are flattened
152 | #
153 | #
154 | # np.put_along_axis(markers, occ_mask, 0, axis=1)
155 | # markers = markers[:, :-1]
156 | # return markers
157 |
158 | def occlude_markers(markers, num_occ):
159 | '''
160 |
161 | Args:
162 | markers: num_markers x 3
163 | num_occ:
164 |
165 | Returns:
166 |
167 | '''
168 | if num_occ == 0: return markers
169 | num_markers = markers.shape[0]
170 |
171 | occ_mask = np.random.choice(num_markers, size=num_occ, replace=False)
172 |
173 | occ_mask = np.repeat(occ_mask[:, None], 3, axis=-1) # to effect xyz
174 |
175 | np.put_along_axis(markers, occ_mask, 0, axis=0)
176 | return markers
177 |
178 |
179 | def break_trajectories(markers, label_ids, nan_class_id, num_btraj_max):
180 | '''
181 |
182 | Args:
183 | markers: Txnum_pointsx3
184 | label_ids:
185 | nan_class_id:
186 | num_btraj_max:
187 |
188 | Returns:
189 |
190 | '''
191 | T, num_markers = markers.shape[:-1]
192 |
193 | selection_ids = list(range(label_ids.shape[1]))
194 | add_count = label_ids.shape[1] - 1
195 | for t in sorted(np.random.choice(np.arange(2, T - 1), np.minimum(num_btraj_max, T), replace=False)):
196 | # at each t in t_list a trajectory will be cut and placed at a new index with the same label
197 | # t should monotonically grow
198 | # print(t)
199 | # if np.random.rand() > 0.5: continue
200 | i = np.random.choice(len(selection_ids))
201 | mrk_id = selection_ids.pop(i) # this id will be cut
202 | while label_ids[t, mrk_id] == nan_class_id:
203 | i = np.random.choice(len(selection_ids))
204 | mrk_id = selection_ids.pop(i)
205 |
206 | mrk_data = torch.cat([torch.zeros_like(markers[:t, mrk_id]), markers[t:, mrk_id]])
207 | label_data = torch.cat([torch.tensor([nan_class_id] * t), label_ids[t:, mrk_id]])
208 |
209 | markers[t:, mrk_id] = 0.0
210 | label_ids[t:, mrk_id] = nan_class_id
211 | markers = torch.cat([markers, mrk_data[:, None]], axis=1)
212 | label_ids = torch.cat([label_ids, label_data[:, None]], axis=1)
213 | add_count = add_count + 1
214 | selection_ids.append(add_count) # add the pasted id so that it can be cut again by chance
215 | # mrks_labels2.append(mrks_labels2[mrk_id])
216 | # print('cut mrk_id %d, label %s at time t=%d'%(mrk_id, label_ids[t-1,mrk_id], t))
217 | # continue
218 | return markers, label_ids
219 |
--------------------------------------------------------------------------------
/src/soma/render/parameters_to_mesh.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
31 | import os.path as osp
32 |
33 | import cv2
34 | import numpy as np
35 | import seaborn as sns
36 | import torch
37 | from colour import Color
38 | from human_body_prior.body_model.body_model import BodyModel
39 |
40 | sns.set_theme()
41 |
42 | from human_body_prior.tools.omni_tools import copy2cpu as c2c
43 | from human_body_prior.tools.omni_tools import makepath
44 |
45 | from body_visualizer.mesh.psbody_mesh_sphere import points_to_spheres
46 | from body_visualizer.mesh.psbody_mesh_cube import points_to_cubes
47 |
48 | from psbody.mesh import Mesh
49 | from human_body_prior.tools.rotation_tools import rotate_points_xyz
50 |
51 | from moshpp.mosh_head import MoSh
52 | from loguru import logger
53 | from moshpp.tools.mocap_interface import MocapSession
54 |
55 | from soma.render.blender_tools import prepare_render_cfg
56 |
57 |
58 | def convert_to_mesh_once(cfg):
59 | cfg = prepare_render_cfg(**cfg)
60 |
61 | logger.info(f'Preparing mesh files for: {cfg.mesh.mosh_stageii_pkl_fnames}')
62 | logger.info(f'dirs.mesh_out_dir: {cfg.dirs.mesh_out_dir}')
63 |
64 | datas = {}
65 | selected_frames = None
66 | time_length = None
67 | for mosh_stageii_pkl_fname in cfg.mesh.mosh_stageii_pkl_fnames:
68 | mosh_id = '/'.join(mosh_stageii_pkl_fname.replace('.pkl', '').split('/')[-2:])
69 |
70 | datas[mosh_id] = {}
71 |
72 | mosh_result = MoSh.load_as_amass_npz(mosh_stageii_pkl_fname, include_markers=True)
73 |
74 | # logger.info(mosh_result.keys())
75 |
76 | num_betas = len(mosh_result['betas']) if 'betas' in mosh_result else 10
77 | num_dmpls = None if cfg.mesh.enable_dmpl and 'dmpls' in mosh_result else None
78 | surface_model_type = mosh_result['surface_model_type']
79 | gender = mosh_result['gender']
80 | surface_model_fname = osp.join(cfg.dirs.support_base_dir, surface_model_type, gender, 'model.npz')
81 |         assert osp.exists(surface_model_fname), FileNotFoundError(surface_model_fname)
82 | if num_dmpls:
83 | dmpl_fname = osp.join(cfg.dirs.support_base_dir, surface_model_type, gender, 'dmpl.npz')
84 |             assert osp.exists(dmpl_fname), FileNotFoundError(dmpl_fname)
85 | else:
86 | dmpl_fname = None
87 |
88 | num_expressions = len(mosh_result['expression']) if 'expression' in mosh_result else None
89 |
90 | # Todo add object model here
91 | sm = BodyModel(bm_fname=surface_model_fname,
92 | num_betas=num_betas,
93 | num_expressions=num_expressions,
94 | num_dmpls=num_dmpls,
95 | dmpl_fname=dmpl_fname)
96 |
97 | datas[mosh_id]['faces'] = c2c(sm.f)
98 |
99 | # selected_frames = range(0, 10, step_size)
100 | if selected_frames is None:
101 | time_length = len(mosh_result['trans'])
102 | selected_frames = range(0, time_length, cfg.mesh.ds_rate)
103 |
104 | assert time_length == len(mosh_result['trans']), \
105 | ValueError(
106 | f'All mosh sequences should have same length. {mosh_stageii_pkl_fname} '
107 | f'has {len(mosh_result["trans"])} != {time_length}')
108 |
109 | datas[mosh_id]['markers'] = mosh_result['markers'][selected_frames]
110 | datas[mosh_id]['labels'] = mosh_result['labels']
111 | # todo: add the ability to have a control on marker colors here
112 |
113 | datas[mosh_id]['num_markers'] = mosh_result['markers'].shape[1]
114 |
115 | if 'betas' in mosh_result:
116 | mosh_result['betas'] = np.repeat(mosh_result['betas'][None], repeats=time_length, axis=0)
117 |
118 | body_keys = ['betas', 'trans', 'pose_body', 'root_orient', 'pose_hand']
119 |
120 | if 'v_template' in mosh_result:
121 | mosh_result['v_template'] = np.repeat(mosh_result['v_template'][None], repeats=time_length, axis=0)
122 | body_keys += ['v_template']
123 |         if num_expressions:  # expressions are only present for face-enabled models, e.g. smplx
124 | body_keys += ['expression']
125 | if num_dmpls:
126 | body_keys += ['dmpls']
127 |
128 | surface_parms = {k: torch.Tensor(v[selected_frames]) for k, v in mosh_result.items() if k in body_keys}
129 |
130 | datas[mosh_id]['mosh_bverts'] = c2c(sm(**surface_parms).v)
131 |
132 | if cfg.render.show_markers:
133 | datas[mosh_id]['marker_meta'] = mosh_result['marker_meta']
134 |
135 | num_verts = sm.init_v_template.shape[1]
136 | datas[mosh_id]['body_color_mosh'] = np.ones([num_verts, 3]) * \
137 | (cfg.mesh.colors.get(mosh_id, cfg.mesh.colors.default))
138 |
139 | first_frame_rot = cv2.Rodrigues(mosh_result['root_orient'][0].copy())[0]
140 | datas[mosh_id]['theta_z_mosh'] = np.rad2deg(np.arctan2(first_frame_rot[1, 0], first_frame_rot[0, 0]))
141 |
142 | for t, fId in enumerate(selected_frames):
143 | body_mesh = None
144 | marker_mesh = None
145 |
146 | for mosh_id, data in datas.items():
147 |
148 | cur_body_verts = rotate_points_xyz(data['mosh_bverts'][t][None],
149 | np.array([0, 0, -data['theta_z_mosh']]).reshape(-1, 3))
150 | cur_body_verts = rotate_points_xyz(cur_body_verts, np.array([-90, 0, 0]).reshape(-1, 3))[0]
151 |
152 | cur_body_mesh = Mesh(cur_body_verts, data['faces'], vc=data['body_color_mosh'])
153 | body_mesh = cur_body_mesh if body_mesh is None else body_mesh.concatenate_mesh(cur_body_mesh)
154 |
155 | if cfg.render.show_markers:
156 |
157 | nonan_mask = MocapSession.marker_availability_mask(data['markers'][t:t + 1])[0]
158 | marker_radius = np.array([
159 | cfg.mesh.marker_radius.get(data['marker_meta']['marker_type'][m],
160 | cfg.mesh.marker_radius.default) if m in data['marker_meta'][
161 | 'marker_type']
162 | else cfg.mesh.marker_radius.default
163 | for m in data['labels']])
164 | ghost_mask = np.array([l == 'nan' for l, valid in zip(data['labels'], nonan_mask) if valid],
165 |                                       dtype=bool)  # np.bool was removed in newer NumPy versions
166 | if cfg.mesh.marker_color.style == 'superset':
167 | marker_colors = np.array([data['marker_meta']['marker_colors'][m]
168 | if m in data['marker_meta']['marker_type']
169 | else cfg.mesh.marker_color.default for m in data['labels']])
170 | else:
171 | marker_colors = np.array([Color(cfg.mesh.marker_color.style).rgb for _ in data['labels']])
172 |
173 | cur_marker_verts = rotate_points_xyz(data['markers'][t][None],
174 | np.array([0, 0, -data['theta_z_mosh']]).reshape(-1, 3))
175 |
176 | cur_marker_verts = rotate_points_xyz(cur_marker_verts, np.array([-90, 0, 0]).reshape(-1, 3))[0]
177 | if cfg.mesh.marker_color.style == 'black':
178 | cur_marker_mesh = points_to_spheres(cur_marker_verts[nonan_mask],
179 | radius=marker_radius[nonan_mask],
180 | point_color=marker_colors[nonan_mask])
181 | else:
182 | cur_marker_mesh = points_to_spheres(cur_marker_verts[nonan_mask][~ghost_mask],
183 | radius=marker_radius[nonan_mask][~ghost_mask],
184 | point_color=marker_colors[nonan_mask][~ghost_mask])
185 | if ghost_mask.sum() and cfg.mesh.marker_color.style != 'black':
186 | try:
187 | cur_ghost_mesh = points_to_cubes(cur_marker_verts[nonan_mask][ghost_mask],
188 | radius=marker_radius[nonan_mask][ghost_mask],
189 | point_color=np.ones([ghost_mask.sum(), 3]) * [0.83, 1,
190 | 0]) # yellow cube
191 | cur_marker_mesh = cur_marker_mesh.concatenate_mesh(cur_ghost_mesh)
192 | except Exception:
193 | logger.debug('Could not create ghost marker cubes; skipping them for this frame.')
194 |
195 | marker_mesh = cur_marker_mesh if marker_mesh is None else marker_mesh.concatenate_mesh(cur_marker_mesh)
196 |
197 | body_mesh.write_obj(makepath(cfg.dirs.mesh_out_dir, 'body_mesh', f'{fId:05d}.obj', isfile=True))
198 | if cfg.render.show_markers:
199 | marker_mesh.write_ply(makepath(cfg.dirs.mesh_out_dir, 'marker_mesh', f'{fId:05d}.ply', isfile=True))  # the concatenated markers of all subjects
200 |
201 | if cfg.render.render_only_one_image: break
202 |
203 | logger.info(f'Created {cfg.dirs.mesh_out_dir}')
204 |
205 | return
206 |
--------------------------------------------------------------------------------
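The mesh-writing loop above aligns every sequence to a canonical heading: the first frame's `root_orient` is turned into a rotation matrix with `cv2.Rodrigues`, its yaw about z is extracted with `arctan2`, and all body and marker points are rotated by the negative of that angle (plus a fixed -90 degrees about x for the renderer's coordinate convention). Below is a minimal, self-contained sketch of just the yaw normalization, assuming only NumPy and OpenCV; `rotate_about_z` is our own stand-in for the repo's `rotate_points_xyz` helper, and the arrays are toy data.

```python
import cv2
import numpy as np

def extract_yaw_deg(root_orient_aa: np.ndarray) -> float:
    """Heading (degrees) about +z encoded in an axis-angle root orientation."""
    R = cv2.Rodrigues(root_orient_aa.astype(np.float64))[0]  # 3x3 rotation matrix
    return float(np.rad2deg(np.arctan2(R[1, 0], R[0, 0])))

def rotate_about_z(points: np.ndarray, theta_deg: float) -> np.ndarray:
    """Rotate an (N, 3) point cloud about the z axis by theta_deg degrees."""
    t = np.deg2rad(theta_deg)
    Rz = np.array([[np.cos(t), -np.sin(t), 0.0],
                   [np.sin(t),  np.cos(t), 0.0],
                   [0.0,        0.0,       1.0]])
    return points @ Rz.T

root_orient = np.array([0.0, 0.0, np.pi / 4])  # toy axis-angle pose: 45 degrees about z
points = np.random.rand(10, 3)                 # toy marker cloud
# rotating by the negative yaw gives every sequence the same facing direction
aligned = rotate_about_z(points, -extract_yaw_deg(root_orient))
```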
/src/soma/data/body_synthesizer.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
31 |
32 | import os.path as osp
33 | from datetime import datetime
34 | from glob import glob
35 | from pathlib import Path
36 | from typing import Union
37 |
38 | import numpy as np
39 | import torch
40 | from human_body_prior.tools.omni_tools import create_list_chunks
41 | from human_body_prior.tools.rotation_tools import noisy_zrot
42 | from loguru import logger
43 |
44 | # hand-sequence sampling is not released in the current SOMA;
45 | # see the placeholder MANO class at the bottom of this file
46 |
47 |
48 | class FullBodySynthesizer:
49 | def __init__(self, surface_model_fname: Union[str, Path],
50 | unified_frame_rate: int = 30, num_hand_var_perseq: int = 15,
51 | num_betas: int = 10, num_expressions: int = 80,
52 | augment_by_temporal_inversion: bool = False,
53 | wrist_markers_on_stick: bool = False):
54 |
55 | self.start_time = datetime.now().replace(microsecond=0)
56 | logger.debug('Starting to synthesize body/markers')
57 |
58 | self.surface_model_fname = surface_model_fname
59 | self.num_hand_var_perseq = num_hand_var_perseq
60 | self.num_betas = num_betas
61 | self.num_expressions = num_expressions
62 | self.wrist_markers_on_stick = wrist_markers_on_stick
63 | self.unified_frame_rate = unified_frame_rate
64 | self.augment_by_temporal_inversion = augment_by_temporal_inversion
65 |
66 | self.comp_device = torch.device("cpu")
67 | # mocap sequences are usually very long and might not fit on the GPU. TODO: process in batches?
68 | # self.comp_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
69 |
70 | def _body_sampler(self, npz_fnames):
71 | '''
72 | :param npz_fnames: numpy files holding poses, trans and gender
73 | :return:
74 | '''
75 |
76 | np.random.shuffle(npz_fnames)
77 |
78 | logger.debug(f'Total body parameter sequences {len(npz_fnames):05d} ')
79 |
80 | def get_next_seq():
81 | while get_next_seq.npz_id < len(npz_fnames):
82 | mosh_fname = npz_fnames[get_next_seq.npz_id]
83 | get_next_seq.npz_id += 1
84 | try:
85 | mo = np.load(mosh_fname)
86 | except Exception:
87 | logger.debug(f'A problem occurred while loading {mosh_fname}; skipping it')
88 | continue
89 |
90 | ds_rate = int(mo['mocap_frame_rate'] // self.unified_frame_rate)
91 | # if ds_rate == 0:
92 | # logger.debug('ds_rate == 0 for {}. skipping'.format(mosh_fname))
93 | # continue
94 | if ds_rate == 0: ds_rate = 1
95 |
96 | result = {
97 | 'pose_body': mo['pose_body'][::ds_rate],
98 | 'root_orient': mo['root_orient'][::ds_rate],
99 | 'trans': mo['trans'][::ds_rate],
100 | # 'pose_body_gender': mo['gender'].tolist(),
101 | # 'pose_body_fname': npz_fnames[get_next_seq.npz_id]
102 | }
103 | yield result
104 |
105 | get_next_seq.npz_id = 0
106 |
107 | return get_next_seq
108 |
109 | def _face_sampler(self, npz_fnames):
110 |
111 | raise NotImplementedError('This functionality is not released for current SOMA.')
112 |
113 | def _hand_sampler(self, handR_frames):
114 |
115 | raise NotImplementedError('This functionality is not released for current SOMA.')
116 |
117 | def _betas_sampler(self, betas, single_beta_perseq=True):
118 |
119 | logger.debug(f'Total beta parameters {len(betas):05d} ')
120 |
121 | def gen_betas(T):
122 | if single_beta_perseq:
123 | return {'betas': np.repeat(betas[np.random.choice(len(betas))][None], repeats=T, axis=0)}
124 | return {'betas': betas[np.random.choice(len(betas), size=T, replace=False)]}
125 |
126 | return gen_betas
127 |
128 | def sample_mocap_windowed(self, body_fnames, face_fnames=None, hand_frames=None, betas=None,
129 | num_timeseq_frames=15, num_frames_overlap=8, rnd_zrot=True):
130 |
131 | body_sampler = self._body_sampler(body_fnames)
132 | face_sampler = self._face_sampler(face_fnames) if face_fnames is not None else None
133 | hand_sampler = self._hand_sampler(hand_frames) if hand_frames is not None else None
134 | betas_sampler = self._betas_sampler(betas, single_beta_perseq=True)
135 |
136 | # bm = BodyModel(bm_fname=self.surface_model_fname,
137 | # num_betas=self.num_betas,
138 | # num_expressions=self.num_expressions).to(self.comp_device)
139 |
140 | for body_parms_np in body_sampler():
141 | T = len(body_parms_np['pose_body'])
142 | if T <= self.num_hand_var_perseq: continue
143 | if face_fnames is not None: body_parms_np.update(face_sampler(T))
144 | if hand_frames is not None: body_parms_np.update(hand_sampler(T, self.num_hand_var_perseq))
145 |
146 | for tIds in create_list_chunks(range(T), num_timeseq_frames, num_frames_overlap):
147 |
148 | windowed_body_parms = {k: v[tIds] for k, v in body_parms_np.items() if isinstance(v, np.ndarray)}
149 | windowed_body_parms.update(betas_sampler(len(tIds)))
150 | if rnd_zrot: windowed_body_parms['root_orient'] = noisy_zrot(windowed_body_parms['root_orient'])
151 | # body_parms_torch = {k: torch.from_numpy(v.astype(np.float32)).to(self.comp_device)
152 | # for k, v in windowed_body_parms.items()}
153 | if np.any([np.any(np.isnan(v)) for v in windowed_body_parms.values()]):
154 | logger.error({k: np.any(np.isnan(v)) for k, v in windowed_body_parms.items()})
155 | raise ValueError('detected nan value in sampled body parameters.')
156 |
157 | result = windowed_body_parms.copy()
158 |
159 | # result.update({'body_parms_torch': body_parms_torch,
160 | # 'joints': bm(betas=body_parms_torch['betas']).Jtr})
161 |
162 | yield result
163 |
164 | if self.augment_by_temporal_inversion:
165 | result = {k: v[::-1].copy() for k, v in windowed_body_parms.items()}  # copy keeps the reversed arrays contiguous
166 | # body_parms_rev_torch = {k: v.flip(0) for k, v in body_parms_torch.items()}
167 |
168 | # result.update({'body_parms_torch': body_parms_rev_torch,
169 | # 'joints': bm(betas=body_parms_rev_torch['betas']).Jtr
170 | # })
171 |
172 | yield result  # this yields the time-reversed augmentation; it is different from the one before
173 |
174 |
175 | def body_populate_source(amass_npz_dir, amass_splits, babel=None):
176 | npz_fnames = {k: [] for k in amass_splits.keys()}
177 |
178 | for split_name in amass_splits.keys():
179 | if amass_splits[split_name] is None: continue
180 | for ds_name in amass_splits[split_name]:
181 | ds_npz_contents, used_babel = [], False
182 | if babel and ds_name in babel:
183 | ds_npz_contents = babel[ds_name]
184 | used_babel = len(ds_npz_contents) > 0
185 |
186 | if len(ds_npz_contents) == 0:
187 | subset_dir = osp.join(amass_npz_dir, ds_name)
188 | ds_npz_contents = glob(osp.join(subset_dir, '*/*_stageii.npz'))
189 |
190 | npz_fnames[split_name].extend(ds_npz_contents)
191 | logger.debug(
192 | f'Body: {len(ds_npz_contents):05d} sequences found from AMASS subset {ds_name}. used_babel = {used_babel}')
193 |
194 | return npz_fnames
195 |
196 |
197 | class MANO:
198 | def __init__(self):
199 | raise NotImplementedError('This functionality is not released for current SOMA.')
200 |
201 | def face_populate_source():
202 | raise NotImplementedError('This functionality is not released for current SOMA.')
203 |
204 | def hand_populate_source():
205 | raise NotImplementedError('This functionality is not released for current SOMA.')
206 |
207 |
208 |
209 | def betas_populate_source(amass_npz_dir, amass_splits, gender, num_betas=10):
210 | '''
211 | :param amass_npz_dir: amass directory with npz files per subject of each dataset
212 | :param amass_splits: which datasets to get shapes from, keyed by split name
213 | :param gender: one of male/female/neutral
214 | :param num_betas: number of shape components to keep
215 | :return: dict mapping each split name to stacked betas, or None for empty splits
216 | '''
217 | assert gender in ['male', 'female', 'neutral']
218 |
219 | data_betas = {k: [] for k in amass_splits.keys()}
220 | for split_name in amass_splits.keys():
221 | if amass_splits[split_name] is None: continue
222 | for ds_name in amass_splits[split_name]:
223 |
224 | npz_fnames = glob(osp.join(amass_npz_dir, ds_name, f'*/{gender}*.npz'))
225 |
226 | if len(npz_fnames) == 0:
227 | logger.error(f'no amass betas found at {osp.join(amass_npz_dir, ds_name)}')
228 | cur_beta_count = 0
229 | for npz_fname in npz_fnames:
230 | cdata = np.load(npz_fname, allow_pickle=True)
231 |
232 | if str(cdata['gender'].astype(str)) == gender:
233 | data_betas[split_name].append(cdata['betas'][:num_betas])
234 | cur_beta_count += 1
235 | logger.debug(f'Betas: {cur_beta_count:04d} shapes chosen from {ds_name} for {split_name}')
236 | assert len(data_betas[split_name]) > 0, ValueError(f'no betas collected for split {split_name}')
237 |
238 | return {k: np.stack(data_betas[k]) if data_betas[k] else None for k in amass_splits.keys()}
239 |
--------------------------------------------------------------------------------
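`sample_mocap_windowed` above cuts each downsampled AMASS sequence into fixed-size, overlapping windows via `create_list_chunks` and yields one set of body parameters per window (optionally also its time reversal). The sketch below reimplements only the windowing step so the window/overlap arithmetic is explicit; `window_indices` is our own stand-in, not the `human_body_prior` helper.

```python
from typing import List

def window_indices(T: int, group_size: int, overlap_size: int,
                   drop_last: bool = True) -> List[List[int]]:
    """Cut indices 0..T-1 into windows of group_size frames overlapping by overlap_size."""
    step = group_size - overlap_size
    assert step > 0, 'overlap must be smaller than the window'
    windows = []
    for start in range(0, T, step):
        w = list(range(start, min(start + group_size, T)))
        if drop_last and len(w) < group_size:
            break  # skip the trailing, smaller window
        windows.append(w)
    return windows

# e.g. T=10 frames, windows of 5 frames overlapping by 2:
print(window_indices(10, 5, 2))  # [[0, 1, 2, 3, 4], [3, 4, 5, 6, 7]]; the short tail is dropped
```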
/src/soma/amass/amass_info.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
31 | from glob import glob
32 | from os import path as osp
33 |
34 | amass_mocap_base_dir = '/ps/project/amass/MOCAP'
35 |
36 | amass_datasets = {
37 | 'ACCAD': {
38 | 'mocap_fnames': glob(osp.join(amass_mocap_base_dir, 'ACCAD', '*/*.c3d')),
39 | 'mosh_cfg_override': {
40 | 'mocap.unit': 'mm',
41 | }
42 | },
43 | 'BMLHandball': {
44 | 'mocap_fnames': glob(osp.join(amass_mocap_base_dir, 'BMLhandball/pkl', '*/*.pkl')),
45 | 'mosh_cfg_override': {
46 | 'mocap.unit': 'mm',
47 | },
48 | },
49 | 'BMLmovi': {
50 | 'mocap_fnames': glob(osp.join(amass_mocap_base_dir, 'BMLmovi/2019_09_24', '*/*.pkl')),
51 | 'mosh_cfg_override': {
52 | 'mocap.unit': 'mm',
53 | 'moshpp.optimize_toes': True,
54 | 'mocap.rotate': [0, 0, -90],
55 | }
56 | },
57 | 'BMLrub': {
58 | 'mocap_fnames': glob(osp.join(amass_mocap_base_dir, 'BMLrub/1999_rub/pkl', '*/*.pkl')),
59 | 'mosh_cfg_override': {
60 | 'mocap.unit': 'mm',
61 | },
62 | 'subject_specific_settings': {
63 | 'rub002': {
64 | 'mocap.exclude_markers': ['LFHD']
65 | }
66 | },
67 | },
68 | 'CMU': {
69 | 'mocap_fnames': glob(osp.join(amass_mocap_base_dir, 'CMU/c3d/subjects', '*/*.c3d')),
70 | 'mosh_cfg_override': {
71 | 'mocap.unit': 'mm',
72 | 'moshpp.wrist_markers_on_stick': True
73 | }
74 | },
75 | # 'CMUII': {
76 | # 'mocap_fnames': glob(osp.join(amass_mocap_base_dir, '/CMU_II/SOMA_V48_02/CMUII_unlabeled_mpc', '*/*.c3d')),
77 | # 'mosh_cfg_override': {
78 | # 'mocap.unit': 'mm',
79 | # 'surface_model.gender': '${resolve_gender:${mocap.fname},neutral}'
80 | # },
81 | # },
82 | 'CMU_MS': {
83 | 'mocap_fnames': glob(osp.join(amass_mocap_base_dir, 'CMU/c3d/multisubject', '*/*.npz')),
84 | 'mosh_cfg_override': {
85 | 'mocap.unit': 'mm',
86 | 'moshpp.optimize_dynamics': False
87 | }
88 | },
89 | 'CNRS': {
90 | 'mocap_fnames': glob(osp.join(amass_mocap_base_dir, 'KIT_Whole_Body/CNRS', '*/*.c3d')),
91 | 'mosh_cfg_override': {
92 | 'mocap.unit': 'mm',
93 | }
94 | },
95 | 'DFaust': {
96 | 'mocap_fnames': glob(osp.join(amass_mocap_base_dir, 'DFAUST', '*/*.npz')),
97 | 'mosh_cfg_override': {
98 | 'mocap.unit': 'm',
99 | 'moshpp.optimize_toes': True,
100 | },
101 | 'render_cfg_override': {
102 | 'render.video_fps': 10,
103 | 'mesh.ds_rate': 2,
104 | }
105 | },
106 | 'DanceDB': {
107 | 'mocap_fnames': glob(osp.join(amass_mocap_base_dir, 'DanceDB/SOMA_V48_02/DanceDB_c3d_120hz', '*/*.c3d')),
108 | 'mosh_cfg_override': {
109 | 'mocap.unit': 'mm',
110 | },
111 | },
112 | 'EKUT': {
113 | 'mocap_fnames': glob(osp.join(amass_mocap_base_dir, 'KIT_Whole_Body/EKUT', '*/*.c3d')),
114 | 'mosh_cfg_override': {
115 | 'mocap.unit': 'mm',
116 | }
117 | },
118 | 'Eyes_Japan_Dataset': {
119 | 'mocap_fnames': glob(osp.join(amass_mocap_base_dir, 'Eyes_Japan_Dataset', '*/*.c3d')),
120 | 'mosh_cfg_override': {
121 | 'mocap.unit': 'mm',
122 | 'moshpp.wrist_markers_on_stick': True,
123 | 'moshpp.optimize_dynamics': False
124 | }
125 | },
126 | # 'GRAB': {
127 | # 'mocap_fnames': glob(
128 | # osp.join(amass_mocap_base_dir, 'PS_MoCaps/GRAB/GRAB_manual_labeled_gap_filled', '*/*.c3d')),
129 | # 'mosh_cfg_override': {
130 | # 'mocap.unit': 'mm',
131 | # 'moshpp.optimize_toes': True,
132 | # 'moshpp.optimize_fingers': True,
133 | # 'moshpp.optimize_betas': False,
134 | # 'opt_settings.weights_type': 'smplx_grab_vtemplate',
135 | # 'moshpp.stagei_frame_picker.least_avail_markers': 1.0,
136 | #
137 | # 'moshpp.separate_types': ['body', 'finger', 'face'],
138 | # 'subject_specific_settings': {subject_name:
139 | # {'moshpp.v_template_fname':f'/ps/project/amass/MOCAP/PS_MoCaps/GRAB/subject_meshes/{subject_name}.ply'}
140 | # for subject_name in [f's{d}' for d in range(1,11)]},
141 | # 'dirs.marker_layout_fname': '/ps/project/amass/MOCAP/PS_MoCaps/GRAB/marker_layout/s4/apple_eat_1.c3d',
142 | # }
143 | # },
144 | 'HDM05': {
145 | 'mocap_fnames': glob(osp.join(amass_mocap_base_dir, 'MPI_HDM05', '*/*.c3d')),
146 | 'mosh_cfg_override': {
147 | 'mocap.unit': 'mm',
148 | 'moshpp.optimize_dynamics': False
149 | }
150 | },
151 | 'HUMAN4D': {
152 | 'mocap_fnames': glob(osp.join(amass_mocap_base_dir, 'HUMAN4D/mocap_pkls', '*/*.pkl')),
153 | 'mosh_cfg_override': {
154 | 'mocap.unit': 'm',
155 | 'moshpp.optimize_dynamics': False,
156 | 'moshpp.optimize_toes': False,
157 | }
158 | },
159 | 'HumanEva': {
160 | 'mocap_fnames': glob(osp.join(amass_mocap_base_dir, 'HumanEva/mocap_for_mosh', '*/*.c3d')),
161 | 'mosh_cfg_override': {
162 | 'mocap.unit': 'mm',
163 | 'moshpp.wrist_markers_on_stick': True,
164 | 'moshpp.optimize_dynamics': False
165 | }
166 | },
167 | 'KIT': {
168 | 'mocap_fnames': glob(osp.join(amass_mocap_base_dir, 'KIT_Whole_Body/KIT', '*/*.c3d')),
169 | 'mosh_cfg_override': {
170 | 'mocap.unit': 'mm',
171 | 'moshpp.optimize_dynamics': False
172 | }
173 | },
174 | 'LAFAN1': {
175 | 'mocap_fnames': glob(osp.join(amass_mocap_base_dir, 'LAFAN1/mocap_pkls', '*/*.pkl')),
176 | 'mosh_cfg_override': {
177 | 'mocap.unit': 'm',
178 | 'moshpp.optimize_dynamics': False
179 | }
180 | },
181 | 'SNU': {
182 | 'mocap_fnames': glob(osp.join(amass_mocap_base_dir, 'SNU/initial_website_scrap', '*/*.c3d')),
183 | 'mosh_cfg_override': {
184 | 'mocap.unit': 'mm',
185 | 'moshpp.optimize_dynamics': False,
186 | 'moshpp.optimize_toes': False,
187 | }
188 | },
189 |
190 | 'MoSh': {
191 | 'mocap_fnames': glob(osp.join(amass_mocap_base_dir, 'MPI_mosh/c3d/subjects', '*/*.c3d')),
192 | 'mosh_cfg_override': {
193 | 'mocap.unit': 'mm',
194 | 'moshpp.wrist_markers_on_stick': True,
195 | }
196 | },
197 | 'PosePrior': {
198 | 'mocap_fnames': glob(osp.join(amass_mocap_base_dir, 'MPI_Limits/joint-angle-limits/cleaned_data', '*/*.c3d')),
199 | 'mosh_cfg_override': {
200 | 'mocap.unit': 'mm',
201 | 'mocap.rotate': [90, 0, 0],
202 | 'moshpp.wrist_markers_on_stick': True,
203 | },
204 | # 'subject_specific_settings': {
205 | # '03099': {
206 | # 'moshpp.wrist_markers_on_stick': True
207 | # }
208 | # },
209 | },
210 | 'SFU': {
211 | 'mocap_fnames': glob(osp.join(amass_mocap_base_dir, 'SFU', '*/*.c3d')),
212 | 'mosh_cfg_override': {
213 | 'mocap.unit': 'mm',
214 | 'mocap.rotate': [90, 0, 0],
215 | }
216 | },
217 | 'Rokoko': {
218 | 'mocap_fnames': glob(osp.join(amass_mocap_base_dir, 'Rokoko/c3d', '*/*.c3d')),
219 | 'mosh_cfg_override': {
220 | 'mocap.unit': 'mm',
221 | 'mocap.rotate': [90, 0, 0],
222 | }
223 | },
224 | 'SOMA': {
225 | 'mocap_fnames': glob(osp.join(amass_mocap_base_dir, 'PS_MoCaps/SOMA/SOMA_manual_labeled', '*/*.c3d')),
226 | 'mosh_cfg_override': {
227 | 'mocap.unit': 'mm',
228 | },
229 | },
230 | 'SSM': {
231 | 'mocap_fnames': glob(osp.join(amass_mocap_base_dir, 'SSM_synced/resynced', '*/*.pkl')),
232 | 'persubject_marker_layout': True,
233 | 'render_cfg_override': {
234 | 'mesh.ds_rate': 1,
235 | 'render.video_fps': 60,
236 | },
237 | 'mosh_cfg_override': {
238 | 'mocap.unit': 'm',
239 | 'moshpp.optimize_toes': True,
240 | 'mocap.rotate': [90, 0, -90],
241 | },
242 | 'subject_specific_settings': {
243 | '20160330_03333': {
244 | 'moshpp.wrist_markers_on_stick': True
245 | }
246 | },
247 | },
248 | 'TCDHands': {
249 | 'mocap_fnames': glob(osp.join(amass_mocap_base_dir, 'TCD_handMocap/c3d_fullmkrs', '*/*.c3d')),
250 | 'mosh_cfg_override': {
251 | 'mocap.unit': 'mm',
252 | 'moshpp.optimize_fingers': True,
253 | 'moshpp.optimize_toes': False,
254 | }
255 | },
256 | 'TotalCapture': {
257 | 'mocap_fnames': glob(osp.join(amass_mocap_base_dir, 'TotalCapture/C3D', '*/*.pkl')),
258 | 'mosh_cfg_override': {
259 | 'mocap.unit': 'mm',
260 | 'mocap.rotate': [90, 0, 0],
261 | }
262 | },
263 | 'Transitions': {
264 | 'mocap_fnames': glob(osp.join(amass_mocap_base_dir, 'Transitions_mocap/c3d', '*/*.c3d')),
265 | 'mosh_cfg_override': {
266 | 'mocap.unit': 'mm',
267 | 'mocap.rotate': [90, 0, 180],
268 | }
269 | },
270 | 'WEIZMANN': {
271 | 'mocap_fnames': glob(osp.join(amass_mocap_base_dir, 'KIT_Whole_Body/WEIZMANN', '*/*.c3d')),
272 | 'mosh_cfg_override': {
273 | 'mocap.unit': 'mm',
274 | }
275 | },
276 | }
277 |
278 | if __name__ == '__main__':
279 | for k in sorted(amass_datasets.keys()):
280 | print(f"'{k}',")
281 |
--------------------------------------------------------------------------------
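Each entry in `amass_datasets` above carries flat, dot-delimited override keys such as `'mocap.unit'` and `'moshpp.optimize_toes'`. With OmegaConf, which this repo already depends on, such a dict can be merged into a nested base configuration through a dotlist. The sketch below shows the pattern on a toy config; it only illustrates how these overrides can be consumed and is not the repo's actual merge code.

```python
from omegaconf import OmegaConf

# toy base config; the real mosh++ configuration has many more fields
base = OmegaConf.create({'mocap': {'unit': 'm', 'rotate': None},
                         'moshpp': {'optimize_dynamics': True}})

override = {'mocap.unit': 'mm', 'moshpp.optimize_dynamics': False}  # as in amass_datasets
dotlist = [f'{k}={v}' for k, v in override.items()]
cfg = OmegaConf.merge(base, OmegaConf.from_dotlist(dotlist))  # dotted keys become nested nodes

assert cfg.mocap.unit == 'mm' and cfg.moshpp.optimize_dynamics is False
```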
/src/soma/data/synthetic_body_dataset.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
31 |
32 | import os
33 | from pathlib import Path
34 | from typing import Union, Dict
35 |
36 | import numpy as np
37 | import tables as pytables
38 | import torch
39 | from human_body_prior.body_model.body_model import BodyModel
40 | from human_body_prior.tools.omni_tools import copy2cpu as c2c
41 | from human_body_prior.tools.omni_tools import makepath
42 | from loguru import logger
43 | from omegaconf import OmegaConf
44 | from tqdm import tqdm
45 |
46 | from soma.data.body_synthesizer import FullBodySynthesizer, betas_populate_source
47 | from soma.data.body_synthesizer import body_populate_source, face_populate_source, hand_populate_source
48 | from soma.data.marker_dataset import dataset_exists
49 |
50 |
51 | def prepare_synthetic_body_dataset(body_dataset_dir: Union[str, Path],
52 | amass_splits: Dict[str, list],
53 | amass_npz_dir: Union[str, Path],
54 | surface_model_fname: Union[str, Path],
55 | unified_frame_rate: int = 30,
56 | num_hand_var_perseq: int = 15,
57 | num_betas: int = 10,
58 | num_expressions: int = 80,
59 | rnd_zrot: bool = True,
60 | gender: str = 'neutral',
61 | animate_face: bool = True,
62 | animate_hand: bool = True,
63 | num_timeseq_frames: int = 1,
64 | num_frames_overlap: int = 0,
65 | babel: Dict[str, list] = None,
66 | augment_by_temporal_inversion: bool = False):
67 | """
68 | To learn a robust model, we exploit AMASS [1] in neutral gender SMPL-X body model
69 | and sub-sample to a unified 30 Hz. To be robust to subject body shape we generate AMASS motions for 3664 bodies
70 | from the CAESAR dataset [2]. Specifically, for training we take parameters from the following mocap sub-datasets
71 | of AMASS: CMU [9], Transitions [23] and Pose Prior [5].
72 | For validation we use HumanEva [40], ACCAD [4], and
73 | TotalCapture [25].
74 |
75 | References:
76 | [1] Naureen Mahmood, Nima Ghorbani, Nikolaus F. Troje, Gerard Pons-Moll, and Michael J. Black.
77 | AMASS: Archive of motion capture as surface shapes. In 2019 IEEE/CVF International
78 | Conference on Computer Vision (ICCV), pages 5441–5450, Oct. 2019.
79 |
80 | [2] K. Robinette, S. Blackwell, H. Daanen, M. Boehmer, S. Fleming, T. Brill, D. Hoeferlin, and D. Burnsides. Civilian
81 | American and European Surface Anthropometry Resource (CAESAR) final report. Technical Report AFRL-HE-WPTR-
82 | 2002-0169, US Air Force Research Laboratory, 2002.
83 |
84 | Args:
85 | body_dataset_dir:
86 | amass_splits:
87 | amass_npz_dir:
88 | surface_model_fname:
89 | unified_frame_rate:
90 | num_hand_var_perseq:
91 | num_betas:
92 | num_expressions:
93 | rnd_zrot:
94 | gender:
95 | animate_face:
96 | animate_hand:
97 | num_timeseq_frames:
98 | num_frames_overlap:
99 | babel:
100 | augment_by_temporal_inversion:
101 |
102 | Returns:
103 |
104 | """
105 | if dataset_exists(body_dataset_dir, split_names=['train', 'vald']):
106 | # we do not produce synthetic test data; we test only on real data
107 | logger.debug(f'Synthetic body dataset already exists at {body_dataset_dir}')
108 | return
109 |
110 | if np.all([(amass_splits[split_name] is not None) and (len(amass_splits[split_name]) != 0) for split_name in
111 | ['vald', 'train']]):
112 | assert len(set(amass_splits['train'] + amass_splits['vald'])) == len(set(amass_splits['train'])) + len(
113 | set(amass_splits['vald'])), \
114 | ValueError('Validation and training sets have overlapping elements')
115 |
116 | log_fname = makepath(body_dataset_dir, 'dataset.log', isfile=True)
117 | log_format = "{module}:{function}:{line} -- {message}"
118 | ds_logger_id = logger.add(log_fname, format=log_format, enqueue=True)
119 |
120 | surface_model_type = BodyModel(surface_model_fname).model_type
121 | acceptable_model_types = ['smplx', 'animal_dog']
122 | assert np.any([k in surface_model_type for k in acceptable_model_types]), ValueError(
123 | f'model_type should be one of {acceptable_model_types}')
124 |
125 | logger.debug(f'Dumping SOMA synthetic_body_dataset at {body_dataset_dir}')
126 | logger.debug(
127 | f'These parameters will be used: unified_frame_rate={unified_frame_rate}, '
128 | f'num_hand_var_perseq={num_hand_var_perseq}, animate_face={animate_face}, '
129 | f'animate_hand={animate_hand}, num_timeseq_frames={num_timeseq_frames}, num_frames_overlap={num_frames_overlap}')
130 |
131 | class BodyDataSetRow(pytables.IsDescription):
132 | betas = pytables.Float32Col(num_timeseq_frames * num_betas) # float (single-precision)
133 | trans = pytables.Float32Col(num_timeseq_frames * 3) # float (single-precision)
134 | root_orient = pytables.Float32Col(num_timeseq_frames * 3) # float (single-precision)
135 | if surface_model_type == 'animal_dog':
136 | raise NotImplementedError('This functionality is not released for current SOMA.')
137 |
138 | elif surface_model_type == 'smplx':
139 | pose_body = pytables.Float32Col(num_timeseq_frames * 21 * 3) # float (single-precision)
140 | if animate_hand:
141 | raise NotImplementedError('This functionality is not released for current SOMA.')
142 | if animate_face:
143 | raise NotImplementedError('This functionality is not released for current SOMA.')
144 |
145 | # joints = pytables.Float32Col(num_timeseq_frames * 165) # float (single-precision)
146 |
147 | mocap_ds = FullBodySynthesizer(surface_model_fname,
148 | unified_frame_rate=unified_frame_rate,
149 | num_hand_var_perseq=num_hand_var_perseq,
150 | num_betas=num_betas,
151 | num_expressions=num_expressions,
152 | augment_by_temporal_inversion=augment_by_temporal_inversion)
153 |
154 | body_npz_fnames = body_populate_source(amass_npz_dir, amass_splits, babel=babel)
155 | face_npz_fnames = face_populate_source() if animate_face else {k: None for k in body_npz_fnames.keys()}
156 | hand_frames = hand_populate_source() if animate_hand else {k: None for k in body_npz_fnames.keys()}
157 | betas = betas_populate_source(amass_npz_dir, amass_splits, gender, num_betas)
158 |
159 | for split_name in ['train', 'vald']: # for testing we use only real data
160 | logger.debug(f'--------------- Dataset Split {split_name.upper()} ------------------')
161 | ds_iter = mocap_ds.sample_mocap_windowed(body_fnames=body_npz_fnames[split_name],
162 | face_fnames=face_npz_fnames[split_name],
163 | hand_frames=hand_frames[split_name],
164 | betas=betas[split_name],
165 | num_timeseq_frames=num_timeseq_frames,
166 | num_frames_overlap=num_frames_overlap,
167 | rnd_zrot=rnd_zrot)
168 | h5_fpath = makepath(body_dataset_dir, split_name, 'data.h5', isfile=True)
169 | if not os.path.exists(h5_fpath):
170 | with pytables.open_file(h5_fpath, mode="w") as h5file:
171 | table = h5file.create_table('/', 'data', BodyDataSetRow)
172 | for data in tqdm(ds_iter):
173 | for k in BodyDataSetRow.columns.keys():
174 | table.row[k] = c2c(data[k]).reshape(-1)
175 | table.row.append()
176 | table.flush()
177 |
178 | assert os.path.exists(h5_fpath), ValueError(f'Data file {h5_fpath} does not exist!')
179 | with pytables.open_file(h5_fpath, mode="r") as h5file:
180 | data = h5file.get_node('/data')
181 | logger.debug(f'Dumping {len(data):d} data points for {split_name} split as final PyTorch .pt files.')
182 |
183 | data_dict = {k: [] for k in data.colnames}
184 | for id in range(len(data)):
185 | cdata = data[id]
186 | for k in data_dict.keys():
187 | data_dict[k].append(cdata[k])
188 |
189 | for k, v in data_dict.items():
190 | outfname = makepath(body_dataset_dir, split_name, f'{k}.pt', isfile=True)
191 | if os.path.exists(outfname): continue
192 | torch.save(torch.from_numpy(np.asarray(v)), outfname)
193 |
194 | body_dataset_cfg = OmegaConf.create({
195 | 'amass_splits': OmegaConf.to_object(amass_splits),
196 | 'gender': gender,
197 | 'unified_frame_rate': unified_frame_rate,
198 | 'num_hand_var_perseq': num_hand_var_perseq,
199 | 'num_betas': num_betas,
200 | 'num_expressions': num_expressions,
201 | 'surface_model_type': surface_model_type,
202 | 'num_timeseq_frames': num_timeseq_frames,
203 | 'num_frames_overlap': num_frames_overlap,
204 | 'animate_face': animate_face,
205 | 'animate_hand': animate_hand,
206 | })
207 | OmegaConf.save(body_dataset_cfg, f=makepath(body_dataset_dir, 'settings.yaml', isfile=True))
208 |
209 | logger.debug(f'body_dataset_dir: {body_dataset_dir}')
210 | logger.remove(ds_logger_id)
211 |
--------------------------------------------------------------------------------
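`prepare_synthetic_body_dataset` above ultimately dumps one `{field}.pt` tensor per column of `BodyDataSetRow` into each split directory. A map-style PyTorch `Dataset` can zip those tensors back into per-sample dictionaries; the sketch below is one plausible consumer, assuming only the on-disk layout produced above (the class name is illustrative).

```python
from pathlib import Path
from typing import Dict

import torch
from torch.utils.data import Dataset

class SyntheticBodySplit(Dataset):
    def __init__(self, split_dir: Path):
        # load every field tensor saved by prepare_synthetic_body_dataset
        self.fields: Dict[str, torch.Tensor] = {
            pt.stem: torch.load(pt) for pt in sorted(Path(split_dir).glob('*.pt'))
        }
        lengths = {k: len(v) for k, v in self.fields.items()}
        assert len(set(lengths.values())) == 1, f'ragged fields: {lengths}'
        self.length = next(iter(lengths.values()))

    def __len__(self) -> int:
        return self.length

    def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:
        # one flattened window of body parameters (betas, trans, root_orient, pose_body, ...)
        return {k: v[idx] for k, v in self.fields.items()}

# usage sketch: ds = SyntheticBodySplit(Path(body_dataset_dir) / 'train')
```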
/src/soma/data/amass_marker_noise_model.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (C) 2021 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the
5 | # Max Planck Institute for Biological Cybernetics. All rights reserved.
6 | #
7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
8 | # on this computer program. You can only use this computer program if you have closed a license agreement
9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution.
11 | # Contact: ps-license@tuebingen.mpg.de
12 | #
13 | # If you use this code in a research publication please cite the following:
14 | #
15 | # @inproceedings{SOMA:ICCV:2021,
16 | # title = {{SOMA}: Solving Optical MoCap Automatically},
17 | # author = {Ghorbani, Nima and Black, Michael J.},
18 | # booktitle = {Proceedings of IEEE/CVF International Conference on Computer Vision (ICCV)},
19 | # month = oct,
20 | # year = {2021},
21 | # doi = {},
22 | # month_numeric = {10}}
23 | #
24 | # You can find complementary content at the project website: https://soma.is.tue.mpg.de/
25 | #
26 | # Code Developed by:
27 | # Nima Ghorbani
28 | # While at Max-Planck Institute for Intelligent Systems, Tübingen, Germany
29 | #
30 | # 2021.06.18
31 |
32 | import glob
33 | import os
34 | from os import path as osp
35 | from pathlib import Path
36 | from typing import Union, Dict
37 |
38 | import numpy as np
39 | import torch
40 | from human_body_prior.tools.omni_tools import create_list_chunks
41 | from human_body_prior.tools.omni_tools import makepath
42 | from loguru import logger
43 |
44 | from moshpp.marker_layout.edit_tools import marker_layout_load
45 | from moshpp.marker_layout.labels_map import general_labels_map
46 | from moshpp.mosh_head import MoSh
47 | from soma.data.marker_dataset import sort_markers_like_superset
48 |
49 |
50 | def amass_marker_noise_model_exists(amass_marker_noise_dir: Union[str, Path], amass_splits: Dict[str, list]) -> bool:
51 | """
52 | Determine whether the AMASS noise model exists for the given amass splits.
53 | If a split is None, it is assumed not wanted.
54 | Args:
55 | amass_marker_noise_dir:
56 | amass_splits:
57 |
58 | Returns:
59 | True/False
60 | """
61 | done = []
62 | for split_name in amass_splits:
63 | if amass_splits[split_name] is None: done.append(True); continue  # an unrequested split counts as done
64 | amass_marker_noise_fname = osp.join(amass_marker_noise_dir, split_name, 'amass_marker_noise_model.npz')
65 | if osp.exists(amass_marker_noise_fname):
66 | done.append(True)
67 | else:
68 | done.append(False)
69 |
70 | if np.all(done):
71 | logger.debug(f'AMASS noise model already exists at {amass_marker_noise_dir}')
72 | return True
73 |
74 | return False
75 |
76 |
77 | def prepare_amass_marker_noise_model(amass_marker_noise_dir: Union[str, Path],
78 | superset_fname: Union[str, Path],
79 | amass_splits: Dict[str, list],
80 | amass_dir: Union[str, Path],
81 | num_timeseq_frames: int = 1,
82 | unified_frame_rate: int = 30,
83 | num_frames_overlap: int = 0,
84 | babel: Dict[str, list] = None):
85 | """
86 | We copy the noise for each label from the real AMASS mocap markers to help generalize
87 | to mocap hardware differences. We create a database of the differences between the simulated and actual markers of
88 | AMASS and draw random samples from this noise model to add to the synthetic marker positions.
89 |
90 | The implementation idea is as follows: for a real mocap sequence, we have a corresponding mosh fit which has pairs of
91 | simulated and observed (real) markers. We can use this in two ways:
92 | first, fit a distribution to the differences in the markers and then sample from it to add noise;
93 | second, and simpler (the way implemented here), assume that the observed errors represent the
94 | distribution and literally draw samples from it.
95 | Specifically, take a random frame of mocap. Take the vector displacements between the simulated and real markers.
96 | Now, for the frame of synthetic markers we created for SOMA, to which we want to add noise,
97 | we add the vectors of displacement corresponding to the labels of the synthetic markers.
98 | algo:
99 | # 1) load the marker layout and create a marker noise dictionary
100 | # 2) load mosh pkl files with simulated and real marker data.
101 | You might need to create these first by running mosh on the actual mocap markers of the amass subsets
102 | # 3) for each mosh fname get the observed and simulated markers, substituting simulated markers for real ones
103 | whenever real markers are not available, i.e. occluded
104 | # 4) compute the displacement and save a windowed copy in the marker noise dictionary
105 |
106 | Args:
107 | amass_marker_noise_dir: the output directory for amass noise model
108 | superset_fname: the path to superset marker layout
109 | amass_splits: {'vald':[], 'train':[]}
110 | amass_dir: amass directory where released npz files and original mosh pkl files exist.
111 | for mosh pkl files one might need to run mosh on real markers of the original dataset.
112 | num_timeseq_frames: if need to train a sequence model this should be larger than 1
113 | unified_frame_rate:
114 | num_frames_overlap:
115 | babel: whether to use BABEL [2]; if given, it should be a dictionary with split names as keys and
116 | lists of amass npz files as values
117 |
118 | References:
119 | [1] https://amass.is.tue.mpg.de/
120 | [2] https://babel.is.tue.mpg.de/
121 | """
122 | assert superset_fname.endswith('.json')
123 | if amass_marker_noise_model_exists(amass_marker_noise_dir, amass_splits): return
124 |
125 | marker_meta = marker_layout_load(superset_fname, labels_map=general_labels_map)
126 | superset_labels = list(marker_meta['marker_vids'].keys())
127 |
128 | log_fname = makepath(amass_marker_noise_dir, 'dataset.log', isfile=True)
129 | log_format = "{module}:{function}:{line} -- {level} -- {message}"
130 | ds_logger_id = logger.add(log_fname, format=log_format, enqueue=True)
131 |
132 | logger.debug(
133 | f'Creating amass marker noise model for superset {superset_fname} which has {len(superset_labels)} markers')
134 | for split_name in amass_splits.keys():
135 |
136 | if amass_splits[split_name] is None: continue
137 | marker_noise_map = {l: [] for l in superset_labels}  # reset per split so noise samples do not leak across splits
138 | amass_marker_noise_fname = makepath(amass_marker_noise_dir, split_name, 'amass_marker_noise_model.npz',
139 | isfile=True)
140 | for ds_name in amass_splits[split_name]:
141 | mosh_stageii_pkl_fnames, used_babel = [], False
142 | if babel and ds_name in babel:
143 | mosh_stageii_pkl_fnames = [fname.replace('.npz', '.pkl') for fname in babel[ds_name]]
144 | if mosh_stageii_pkl_fnames: used_babel = True
145 |
146 | if not mosh_stageii_pkl_fnames:
147 | subset_dir = os.path.join(amass_dir, ds_name)
148 | mosh_stageii_pkl_fnames = glob.glob(os.path.join(subset_dir, '*/*_stageii.pkl'))
149 |
150 | if len(mosh_stageii_pkl_fnames) == 0:
151 | logger.error(f'No mosh_stageii result found for {ds_name} at {subset_dir}')
152 | continue
153 |
154 | mosh_stageii_pkl_fnames = np.random.choice(mosh_stageii_pkl_fnames,
155 | min([20, len(mosh_stageii_pkl_fnames)]), replace=False).tolist()
156 |
157 | logger.debug(
158 | f'Found #{len(mosh_stageii_pkl_fnames)} for split {split_name} from ds_name {ds_name}. used_babel={used_babel}')
159 | for pkl_fname in mosh_stageii_pkl_fnames:
160 |
161 | mosh_data = MoSh.load_as_amass_npz(stageii_pkl_data_or_fname=pkl_fname, include_markers=True)
162 | ds_rate = max(1, int(mosh_data['mocap_frame_rate'] // unified_frame_rate))
163 |
164 | markers_sim = sort_markers_like_superset(mosh_data['markers_sim'][::ds_rate],
165 | mosh_data['labels_obs'][::ds_rate],
166 | superset_labels=superset_labels)
167 | markers_obs = sort_markers_like_superset(mosh_data['markers_obs'][::ds_rate],
168 | mosh_data['labels_obs'][::ds_rate],
169 | superset_labels=superset_labels)
170 |
171 | for tIds in create_list_chunks(range(len(markers_sim)),
172 | group_size=num_timeseq_frames,
173 | overlap_size=num_frames_overlap,
174 | cut_smaller_batches=True):
175 | for lId, l in enumerate(superset_labels):
176 | if l == 'nan': continue
177 | if np.all(markers_obs[tIds, lId] == 0): continue
178 | marker_noise_map[l].append(
(markers_obs[tIds, lId] - markers_sim[tIds, lId]).astype(float))
180 |
181 | uncovered_labels = [k for k, v in marker_noise_map.items() if len(v) == 0]
182 | if len(uncovered_labels):
183 | logger.error(f'split_name {split_name}: No sequence found for labels {uncovered_labels}')
184 |
185 | # make all equal by oversampling
186 | # print({k: len(v) for k, v in marker_noise_map.items()})
187 | num_max_seq_per_label = max([len(v) for k, v in marker_noise_map.items()])
188 | for k, v in marker_noise_map.items():
189 | if len(v) < num_max_seq_per_label:
190 | if len(v) == 0:
191 | marker_noise_map[k] = [np.zeros([num_timeseq_frames, 3]) for _ in range(num_max_seq_per_label)]
192 | else:
193 | over_sampled_ids = np.random.choice(range(len(v)), num_max_seq_per_label - len(v))
194 | for i in over_sampled_ids:
195 | marker_noise_map[k].append(marker_noise_map[k][i])
196 |
197 | amass_marker_noise_model = np.zeros([num_max_seq_per_label, len(superset_labels), num_timeseq_frames, 3],
198 | dtype=float)
199 | for i, l in enumerate(superset_labels):
200 | np.random.shuffle(marker_noise_map[l])
201 | amass_marker_noise_model[:, i] = np.stack(marker_noise_map[l])
202 |
203 | amass_marker_noise_model = amass_marker_noise_model.transpose([0, 2, 1, 3])
204 |
205 | # print({k: len(v) for k, v in marker_noise_map.items()})
206 |
207 | np.savez(amass_marker_noise_fname, amass_marker_noise_model=amass_marker_noise_model)
208 | logger.debug(f'Created AMASS marker noise model at: {amass_marker_noise_fname}')
209 | logger.remove(ds_logger_id)
210 |
211 |
212 | def amass_marker_noise_model(amass_marker_noise_fname: Union[str, Path]):
213 | assert osp.exists(amass_marker_noise_fname), FileNotFoundError(f'Could not find {amass_marker_noise_fname}')
214 |
215 | label_noise_map = torch.from_numpy(np.load(amass_marker_noise_fname)['amass_marker_noise_model']).type(torch.float)
216 |
217 | def produce_once():
218 | i = np.random.choice(len(label_noise_map))
219 | return label_noise_map[i]
220 |
221 | return produce_once
222 |
--------------------------------------------------------------------------------
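`amass_marker_noise_model` above returns a closure that, per call, draws one pre-recorded displacement window of shape `(num_timeseq_frames, num_superset_labels, 3)` from the saved bank. During synthesis, such a draw can simply be added to an equally shaped window of synthetic markers. The sketch below mimics that usage with a randomly generated toy bank in place of the real `.npz` file; all sizes are made up.

```python
import numpy as np
import torch

# toy stand-in for the saved 'amass_marker_noise_model' array
num_windows, num_frames, num_labels = 100, 15, 53
noise_bank = torch.randn(num_windows, num_frames, num_labels, 3) * 0.005  # roughly 5 mm displacements

def produce_once() -> torch.Tensor:
    """Mimics the closure above: draw one random displacement window from the bank."""
    return noise_bank[np.random.choice(len(noise_bank))]

synthetic_markers = torch.zeros(num_frames, num_labels, 3)  # toy window of synthetic markers
noisy_markers = synthetic_markers + produce_once()          # per-label AMASS-style noise applied
```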