├── .gitignore ├── LICENSE ├── README.md ├── assets ├── demo.gif └── teaser.png ├── blender_render ├── LICENSE ├── README.md ├── configs │ ├── README.md │ ├── assets.yaml │ ├── base.yaml │ └── render.yaml ├── mld │ ├── __init__.py │ ├── config.py │ ├── launch │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-310.pyc │ │ │ ├── __init__.cpython-39.pyc │ │ │ ├── blender.cpython-310.pyc │ │ │ ├── blender.cpython-39.pyc │ │ │ ├── prepare.cpython-310.pyc │ │ │ └── prepare.cpython-39.pyc │ │ ├── blender.py │ │ ├── prepare.py │ │ └── tools.py │ ├── render │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-310.pyc │ │ │ ├── __init__.cpython-39.pyc │ │ │ ├── video.cpython-310.pyc │ │ │ └── video.cpython-39.pyc │ │ ├── anim.py │ │ ├── blender │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-310.pyc │ │ │ │ ├── __init__.cpython-39.pyc │ │ │ │ ├── camera.cpython-310.pyc │ │ │ │ ├── camera.cpython-39.pyc │ │ │ │ ├── floor.cpython-310.pyc │ │ │ │ ├── floor.cpython-39.pyc │ │ │ │ ├── joints.cpython-310.pyc │ │ │ │ ├── materials.cpython-310.pyc │ │ │ │ ├── materials.cpython-39.pyc │ │ │ │ ├── meshes.cpython-310.pyc │ │ │ │ ├── meshes.cpython-39.pyc │ │ │ │ ├── render.cpython-310.pyc │ │ │ │ ├── render.cpython-39.pyc │ │ │ │ ├── sampler.cpython-310.pyc │ │ │ │ ├── sampler.cpython-39.pyc │ │ │ │ ├── scene.cpython-310.pyc │ │ │ │ ├── scene.cpython-39.pyc │ │ │ │ ├── tools.cpython-310.pyc │ │ │ │ ├── tools.cpython-39.pyc │ │ │ │ ├── vertices.cpython-310.pyc │ │ │ │ └── vertices.cpython-39.pyc │ │ │ ├── camera.py │ │ │ ├── data.py │ │ │ ├── floor.py │ │ │ ├── joints.py │ │ │ ├── materials.py │ │ │ ├── meshes.py │ │ │ ├── render.py │ │ │ ├── sampler.py │ │ │ ├── scene.py │ │ │ ├── tools.py │ │ │ └── vertices.py │ │ ├── renderer.py │ │ ├── rendermotion.py │ │ ├── video.py │ │ └── visualize.py │ ├── tools │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-310.pyc │ │ │ ├── __init__.cpython-39.pyc │ │ │ ├── runid.cpython-310.pyc │ │ │ └── runid.cpython-39.pyc │ │ ├── geometry.py │ │ ├── logging.py │ │ └── runid.py │ ├── transforms │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── base.cpython-37.pyc │ │ │ └── smpl.cpython-37.pyc │ │ ├── base.py │ │ ├── feats2smpl.py │ │ ├── identity.py │ │ ├── joints2jfeats │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ ├── base.cpython-37.pyc │ │ │ │ ├── rifke.cpython-37.pyc │ │ │ │ └── tools.cpython-37.pyc │ │ │ ├── base.py │ │ │ ├── rifke.py │ │ │ └── tools.py │ │ ├── joints2rots │ │ │ ├── __pycache__ │ │ │ │ ├── config.cpython-37.pyc │ │ │ │ ├── customloss.cpython-37.pyc │ │ │ │ ├── prior.cpython-37.pyc │ │ │ │ └── smplify.cpython-37.pyc │ │ │ ├── config.py │ │ │ ├── customloss.py │ │ │ ├── prior.py │ │ │ └── smplify.py │ │ ├── rotation2xyz.py │ │ ├── rots2joints │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ ├── base.cpython-37.pyc │ │ │ │ └── smplh.cpython-37.pyc │ │ │ ├── base.py │ │ │ └── smplh.py │ │ ├── rots2rfeats │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ ├── base.cpython-37.pyc │ │ │ │ └── smplvelp.cpython-37.pyc │ │ │ ├── base.py │ │ │ └── smplvelp.py │ │ ├── smpl.py │ │ └── xyz.py │ └── utils │ │ ├── __init__.py │ │ ├── __pycache__ │ │ ├── __init__.cpython-310.pyc │ │ ├── __init__.cpython-37.pyc │ │ ├── __init__.cpython-39.pyc │ │ ├── geometry.cpython-37.pyc │ │ ├── joints.cpython-310.pyc │ │ ├── joints.cpython-37.pyc │ │ ├── 
joints.cpython-39.pyc │ │ ├── rotation_conversions.cpython-37.pyc │ │ └── temos_utils.cpython-37.pyc │ │ ├── demo_utils.py │ │ ├── easyconvert.py │ │ ├── fixseed.py │ │ ├── geometry.py │ │ ├── joints.py │ │ ├── logger.py │ │ ├── misc.py │ │ ├── rotation_conversions.py │ │ ├── sample_utils.py │ │ ├── temos_utils.py │ │ └── tensors.py ├── render.py └── requirements.txt ├── common ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-37.pyc │ ├── __init__.cpython-38.pyc │ ├── quaternion.cpython-37.pyc │ └── quaternion.cpython-38.pyc └── quaternion.py ├── data_loaders ├── __pycache__ │ ├── get_data.cpython-37.pyc │ └── tensors.cpython-37.pyc ├── behave │ ├── README.md │ ├── common │ │ ├── __pycache__ │ │ │ ├── quaternion.cpython-37.pyc │ │ │ ├── quaternion.cpython-38.pyc │ │ │ ├── skeleton.cpython-37.pyc │ │ │ └── skeleton.cpython-38.pyc │ │ ├── quaternion.py │ │ └── skeleton.py │ ├── data │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-37.pyc │ │ │ └── dataset.cpython-37.pyc │ │ └── dataset.py │ ├── networks │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-311.pyc │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── __init__.cpython-38.pyc │ │ │ ├── evaluator_wrapper.cpython-37.pyc │ │ │ ├── evaluator_wrapper.cpython-38.pyc │ │ │ ├── modules.cpython-311.pyc │ │ │ ├── modules.cpython-37.pyc │ │ │ ├── modules.cpython-38.pyc │ │ │ ├── trainers.cpython-311.pyc │ │ │ ├── trainers.cpython-37.pyc │ │ │ └── trainers.cpython-38.pyc │ │ ├── evaluator_wrapper.py │ │ ├── modules.py │ │ └── trainers.py │ ├── options │ │ ├── __pycache__ │ │ │ ├── base_options.cpython-37.pyc │ │ │ └── train_options.cpython-37.pyc │ │ ├── base_options.py │ │ ├── evaluate_options.py │ │ └── train_options.py │ ├── scripts │ │ ├── __pycache__ │ │ │ ├── motion_process.cpython-37.pyc │ │ │ └── motion_process.cpython-38.pyc │ │ └── motion_process.py │ └── utils │ │ ├── __pycache__ │ │ ├── get_opt.cpython-311.pyc │ │ ├── get_opt.cpython-37.pyc │ │ ├── get_opt.cpython-38.pyc │ │ ├── metrics.cpython-37.pyc │ │ ├── metrics.cpython-38.pyc │ │ ├── paramUtil.cpython-311.pyc │ │ ├── paramUtil.cpython-37.pyc │ │ ├── paramUtil.cpython-38.pyc │ │ ├── plot_script.cpython-37.pyc │ │ ├── plot_script.cpython-38.pyc │ │ ├── utils.cpython-37.pyc │ │ ├── utils.cpython-38.pyc │ │ ├── word_vectorizer.cpython-311.pyc │ │ ├── word_vectorizer.cpython-37.pyc │ │ └── word_vectorizer.cpython-38.pyc │ │ ├── get_opt.py │ │ ├── metrics.py │ │ ├── paramUtil.py │ │ ├── plot_script.py │ │ ├── utils.py │ │ └── word_vectorizer.py ├── get_data.py ├── humanml_utils.py └── tensors.py ├── dataset ├── behave_opt.txt ├── t2m_mean.npy └── t2m_std.npy ├── diffusion ├── fp16_util.py ├── gaussian_diffusion.py ├── logger.py ├── losses.py ├── nn.py ├── resample.py └── respace.py ├── environment.yml ├── model ├── __pycache__ │ ├── afford_est.cpython-37.pyc │ ├── cfg_sampler.cpython-37.pyc │ ├── cfg_sampler.cpython-38.pyc │ ├── comMDM.cpython-37.pyc │ ├── contact.cpython-37.pyc │ ├── contact.cpython-38.pyc │ ├── hoi_contact.cpython-37.pyc │ ├── hoi_diff.cpython-37.pyc │ ├── joint_model.cpython-37.pyc │ ├── mdm.cpython-37.pyc │ ├── mdm.cpython-38.pyc │ ├── mdm_cross_obj.cpython-37.pyc │ ├── mdm_cross_obj2.cpython-37.pyc │ ├── mdm_hoi_baseline.cpython-37.pyc │ ├── mdm_obj.cpython-37.pyc │ ├── mdm_obj2.cpython-37.pyc │ ├── points_encoder.cpython-37.pyc │ ├── points_encoder.cpython-38.pyc │ ├── rotation2xyz.cpython-311.pyc │ ├── rotation2xyz.cpython-37.pyc │ ├── rotation2xyz.cpython-38.pyc │ ├── smpl.cpython-37.pyc │ └── smpl.cpython-38.pyc ├── 
afford_est.py ├── cfg_sampler.py ├── hoi_diff.py ├── mdm.py ├── points_encoder.py ├── rotation2xyz.py └── smpl.py ├── prepare ├── download_glove.sh ├── download_smpl_files.sh ├── download_t2m_evaluators.sh └── process_behave_raw.sh ├── sample ├── condition.py └── local_generate_obj.py ├── train ├── hoi_diff.py ├── train_affordance.py ├── train_platforms.py └── training_loop.py ├── utils ├── PYTORCH3D_LICENSE ├── README.md ├── action_label.json ├── behave_process.py ├── cal_mean_variance.py ├── common │ ├── __init__.py │ ├── quaternion.py │ └── skeleton.py ├── config.py ├── dist_util.py ├── fixseed.py ├── human_body_prior │ ├── __init__.py │ ├── body_model │ │ ├── __init__.py │ │ ├── body_model.py │ │ ├── lbs.py │ │ ├── parts_segm │ │ │ ├── readme │ │ │ └── smplh │ │ │ │ └── parts_segm.pkl │ │ └── rigid_object_model.py │ ├── data │ │ ├── README.md │ │ ├── __init__.py │ │ ├── dataloader.py │ │ └── prepare_data.py │ ├── models │ │ ├── __init__.py │ │ ├── ik_engine.py │ │ ├── model_components.py │ │ └── vposer_model.py │ ├── tools │ │ ├── __init__.py │ │ ├── angle_continuous_repres.py │ │ ├── configurations.py │ │ ├── model_loader.py │ │ ├── omni_tools.py │ │ ├── rotation_tools.py │ │ └── tgm_conversion.py │ ├── train │ │ ├── README.md │ │ ├── V02_05 │ │ │ ├── V02_05.py │ │ │ ├── V02_05.yaml │ │ │ └── __init__.py │ │ ├── __init__.py │ │ └── vposer_trainer.py │ └── visualizations │ │ ├── __init__.py │ │ └── training_visualization.py ├── misc.py ├── model_util.py ├── motion_representation.py ├── paramUtil.py ├── parser_util.py ├── raw_pose_processing_behave.py ├── rotation_conversions.py └── utils.py └── visualize ├── __pycache__ ├── render_mesh.cpython-311.pyc ├── render_mesh.cpython-37.pyc ├── render_mesh.cpython-38.pyc ├── render_mitsuba.cpython-311.pyc ├── simplify_loc2rot.cpython-37.pyc ├── simplify_loc2rot.cpython-38.pyc ├── vis_utils.cpython-311.pyc ├── vis_utils.cpython-37.pyc └── vis_utils.cpython-38.pyc ├── joints2smpl ├── README.md ├── environment.yaml ├── fit_seq.py ├── smpl_models │ ├── SMPL_downsample_index.pkl │ ├── gmm_08.pkl │ ├── neutral_smpl_mean_params.h5 │ └── smplx_parts_segm.pkl └── src │ ├── __pycache__ │ ├── config.cpython-37.pyc │ ├── config.cpython-38.pyc │ ├── customloss.cpython-37.pyc │ ├── customloss.cpython-38.pyc │ ├── prior.cpython-37.pyc │ ├── prior.cpython-38.pyc │ ├── smplify.cpython-37.pyc │ └── smplify.cpython-38.pyc │ ├── config.py │ ├── customloss.py │ ├── prior.py │ └── smplify.py ├── motions2hik.py ├── render_mesh.py ├── simplify_loc2rot.py └── vis_utils.py /.gitignore: -------------------------------------------------------------------------------- 1 | behave_t2m/* 2 | body_models/* 3 | data/* 4 | t2m/* 5 | HumanML3D/* 6 | save/* 7 | checkpoints/* 8 | dataset/* -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Guy Tevet 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial 
portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /assets/demo.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/assets/demo.gif -------------------------------------------------------------------------------- /assets/teaser.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/assets/teaser.png -------------------------------------------------------------------------------- /blender_render/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Chen Xin 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /blender_render/README.md: -------------------------------------------------------------------------------- 1 | ```bash 2 | blender --background --python render.py -- --cfg=./configs/render.yaml --dir='the directory of your saved results.npy' --mode=sequence --joint_type=HumanML3D 3 | ``` 4 | 5 | 6 | 7 | 8 | 9 | 10 | -------------------------------------------------------------------------------- /blender_render/configs/README.md: -------------------------------------------------------------------------------- 1 | # Configuration 2 | 3 | - [Configuration](#configuration) 4 | - [Experiment Name Explanation](#experiment-name-explanation) 5 | - [Configuration Explanation](#configuration-explanation) 6 | - [Asset Path Configuration](#asset-path-configuration) 7 | - [Network Structure Configuration](#network-structure-configuration) 8 | - [Experiment Configuration](#experiment-configuration) 9 | - [Default Configuration](#default-configuration) 10 | 11 | ## Experiment Name Explanation 12 | 13 | Taking `1222_PELearn_Diff_Latent1_MEncDec49_MdiffEnc49_bs64_clip_uncond75_01` as an example: 14 | 15 | - `1222`: Experiment date, for managing experiments 16 | - `PELearn`: Ablation study abbreviation; here `PELearn` means we use a learnable positional embedding for the ablation study 17 | - `Diff`: Stage flag, indicating whether it is the VAE phase or the diffusion phase; `VAE` stands for the former and `Diff` stands for the latter 18 | - `Latent1`: Latent size; here `Latent1` indicates the latent vector shape is (1,256) 19 | - `MEncDec49`: Number of heads and layers of the transformer-based motion encoder & decoder 20 | - `MdiffEnc49`: Number of heads and layers of the transformer-based diffusion denoiser 21 | - `bs64`: Batch size 22 | - `clip`: Text encoder type; `clip` indicates we use the pretrained CLIP model as our text encoder 23 | - `uncond75_01`: Classifier-free guidance parameters; here `uncond75_01` indicates the classifier-free guidance scale is 7.5 and the probability is 0.1 24 | 25 | ## Configuration Explanation 26 | 27 | We use yaml files for configuration. For training & evaluation, the whole configuration is composed of 4 parts. 28 | 29 | [Asset Path Configuration](#asset-path-configuration) 30 | 31 | [Network Structure Configuration](#network-structure-configuration) 32 | 33 | [Experiment Configuration](#experiment-configuration) 34 | 35 | [Default Configuration](#default-configuration) 36 | 37 | ### Asset Path Configuration 38 | 39 | The asset configuration defines the file paths of resources such as datasets, dependencies and so on. 40 | 41 | **By default**, the program uses [configs/assets.yaml](./assets.yaml) as the asset configuration. You can either directly replace the file paths in configs/assets.yaml with yours, or create a new yaml file following the annotations in [configs/assets.yaml](./assets.yaml) and then add `--cfg_asset` on the command line to specify your own yaml file. 42 | 43 | ### Network Structure Configuration 44 | 45 | The network structure configuration defines the network structure settings. Our model is mainly composed of four parts: Motion VAE, Text Encoder, Diffusion Denoiser, Diffusion Scheduler. 46 | 47 | In addition, we use the evaluators from previous work for fair comparison with other motion generation work, so we also need evaluator networks for evaluation. 48 | 49 | In conclusion, our network mainly comprises the five components below. 50 | 51 | 1. Motion VAE 52 | 2.
Text Encoder 53 | 3. Diffusion Denoiser 54 | 4. Diffusion Scheduler 55 | 5. Evaluators 56 | 57 | **By default**, the program will use the yaml files in the `configs/modules` folder as the configuration for each part of the network. If you want to change the configuration of some part of the network, you have two options: 58 | 59 | 1. Directly modify the `target` to use a different structure and `params` to modify the parameters. 60 | 2. Create a new set of module configurations: you can create 5 new yaml files by following the annotations in the [configs/modules](./modules) files, **in a subfolder of `configs`**. Then specify your modules folder name as `model.target` in the Experiment Configuration. Taking [config_novae_humanml3d.yaml](./config_novae_humanml3d.yaml) as an example, we specify `model.target=modules_novae`, which means the experiment model will use the configuration files in `configs/modules_novae`. 61 | 62 | ### Experiment Configuration 63 | 64 | The experiment configuration defines the settings other than the network structure, such as dataset settings, training settings, evaluation settings and so on. 65 | 66 | For more details of the configuration, you can refer to the annotations in [config_mld_humanml3d.yaml](./config_mld_humanml3d.yaml). 67 | 68 | ### Default Configuration 69 | 70 | The default configuration defines the default settings and will be overwritten by the configurations above. 71 | 72 | **By default**, the program will use the yaml file `configs/base.yaml` as the basic configuration. 73 | -------------------------------------------------------------------------------- /blender_render/configs/assets.yaml: -------------------------------------------------------------------------------- 1 | FOLDER: './experiments' # Experiment files saving path 2 | 3 | TEST: 4 | FOLDER: './results' # Testing files saving path 5 | 6 | DATASET: 7 | SMPL_PATH: './deps/smpl' 8 | TRANSFORM_PATH: './deps/transforms/' 9 | WORD_VERTILIZER_PATH: './deps/glove/' 10 | KIT: 11 | ROOT: './datasets/kit-ml' # KIT directory 12 | SPLIT_ROOT: './datasets/kit-ml' # KIT splits directory 13 | HUMANML3D: 14 | ROOT: './datasets/humanml3d' # HumanML3D directory 15 | SPLIT_ROOT: './datasets/humanml3d' # HumanML3D splits directory 16 | HUMANACT12: 17 | ROOT: ./datasets/HumanAct12Poses 18 | SPLIT_ROOT: ./datasets/HumanAct12Poses 19 | UESTC: 20 | ROOT: ./datasets/uestc 21 | SPLIT_ROOT: ./datasets/uestc 22 | AMASS: 23 | DB_ROOT: /apdcephfs/share_1227775/shingxchen/uicap/data/vibe_db 24 | 25 | model: 26 | bert_path: './deps/distilbert-base-uncased' # bert model path for all text encoders 27 | clip_path: './deps/clip-vit-large-patch14' # clip model path for all text encoders 28 | t2m_path: './deps/t2m/' 29 | 30 | humanact12_rec_path: './deps/actionrecognition' 31 | uestc_rec_path: './deps/actionrecognition' 32 | # Set model path separately for different encoders 33 | # TEXT_ENCODER: 34 | # MODELPATH: './deps/distilbert-base-uncased' # bert model path for text encoder 35 | # TEXT_STYLE_ENCODER: 36 | # MODELPATH: './deps/distilbert-base-uncased' # bert model path for text style encoder 37 | # TEXT_CONTENT_ENCODER: 38 | # MODELPATH: './deps/distilbert-base-uncased' # bert model path for text content encoder 39 | 40 | RENDER: 41 | BLENDER_PATH: '/blender_path/blender-2.93.18-linux-x64/blender' 42 | FACES_PATH: 'smpl_path/smplh/smplh.faces' 43 | FOLDER: ./animations 44 | -------------------------------------------------------------------------------- /blender_render/configs/base.yaml:
-------------------------------------------------------------------------------- 1 | # FOLDER: ./experiments 2 | SEED_VALUE: 1234 3 | DEBUG: True 4 | TRAIN: 5 | SPLIT: 'train' 6 | NUM_WORKERS: 2 # Number of workers 7 | BATCH_SIZE: 4 # Size of batches 8 | START_EPOCH: 0 # Start epoch 9 | END_EPOCH: 2000 # End epoch 10 | RESUME: '' # Experiment path to resume training from 11 | PRETRAINED_VAE: '' 12 | PRETRAINED: '' # Pretrained model path 13 | 14 | OPTIM: 15 | OPTIM.TYPE: 'AdamW' # Optimizer type 16 | OPTIM.LR: 1e-4 # Learning rate 17 | 18 | ABLATION: 19 | VAE_TYPE: 'actor' # vae ablation: actor or mcross 20 | VAE_ARCH: 'encoder_decoder' # mdiffusion vae architecture 21 | PE_TYPE: 'actor' # mdiffusion mld or actor 22 | DIFF_PE_TYPE: 'actor' # mdiffusion mld or actor 23 | SKIP_CONNECT: False # skip connection for denoiser vae 24 | # use linear to expand mean and std rather than expand token nums 25 | MLP_DIST: False 26 | IS_DIST: False # Mcross distribution kl 27 | PREDICT_EPSILON: True # noise or motion 28 | 29 | EVAL: 30 | SPLIT: 'gtest' 31 | BATCH_SIZE: 1 # Evaluating Batch size 32 | NUM_WORKERS: 12 # Number of evaluation workers 33 | 34 | TEST: 35 | TEST_DIR: '' 36 | CHECKPOINTS: '' # Pretrained model path 37 | SPLIT: 'gtest' 38 | BATCH_SIZE: 1 # Testing Batch size 39 | NUM_WORKERS: 12 # Number of test workers 40 | SAVE_PREDICTIONS: False # Whether to save predictions 41 | COUNT_TIME: False # Whether to count time during test 42 | REPLICATION_TIMES: 20 # Number of times to replicate the test 43 | MM_NUM_SAMPLES: 100 # Number of samples for multimodal test 44 | MM_NUM_REPEATS: 30 # Number of repeats for multimodal test 45 | MM_NUM_TIMES: 10 # Number of times to repeat the multimodal test 46 | DIVERSITY_TIMES: 300 # Number of times to repeat the diversity test 47 | REP_I: 0 48 | model: 49 | target: 'modules' 50 | t2m_textencoder: 51 | dim_word: 300 52 | dim_pos_ohot: 15 53 | dim_text_hidden: 512 54 | dim_coemb_hidden: 512 55 | 56 | t2m_motionencoder: 57 | dim_move_hidden: 512 58 | dim_move_latent: 512 59 | dim_motion_hidden: 1024 60 | dim_motion_latent: 512 61 | LOSS: 62 | LAMBDA_LATENT: 1e-5 # Lambda for latent losses 63 | LAMBDA_KL: 1e-5 # Lambda for kl losses 64 | LAMBDA_REC: 1.0 # Lambda for reconstruction losses 65 | LAMBDA_JOINT: 1.0 # Lambda for joint losses 66 | LAMBDA_GEN: 1.0 # Lambda for text-motion generation losses 67 | LAMBDA_CROSS: 1.0 # Lambda for cross-reconstruction losses 68 | LAMBDA_CYCLE: 1.0 # Lambda for cycle losses 69 | LAMBDA_PRIOR: 0.0 70 | DIST_SYNC_ON_STEP: True 71 | METRIC: 72 | FORCE_IN_METER: True 73 | DIST_SYNC_ON_STEP: True 74 | DATASET: 75 | NCLASSES: 10 76 | SAMPLER: 77 | MAX_SQE: -1 78 | MAX_LEN: 196 79 | MIN_LEN: 40 80 | MAX_TEXT_LEN: 20 81 | KIT: 82 | PICK_ONE_TEXT: true 83 | FRAME_RATE: 12.5 84 | UNIT_LEN: 4 85 | HUMANML3D: 86 | PICK_ONE_TEXT: true 87 | FRAME_RATE: 20.0 88 | UNIT_LEN: 4 89 | HUMANACT12: 90 | NUM_FRAMES: 60 91 | POSE_REP: rot6d 92 | GLOB: true 93 | TRANSLATION: true 94 | UESTC: 95 | NUM_FRAMES: 60 96 | POSE_REP: rot6d 97 | GLOB: true 98 | TRANSLATION: true 99 | LOGGER: 100 | SACE_CHECKPOINT_EPOCH: 1 101 | LOG_EVERY_STEPS: 1 102 | VAL_EVERY_STEPS: 10 103 | TENSORBOARD: true 104 | WANDB: 105 | OFFLINE: false 106 | PROJECT: null 107 | RESUME_ID: null 108 | RENDER: 109 | JOINT_TYPE: mmm 110 | INPUT_MODE: npy 111 | DIR: '' 112 | NPY: '' 113 | DENOISING: true 114 | OLDRENDER: true 115 | RES: high 116 | DOWNSAMPLE: true 117 | FPS: 12.5 118 | CANONICALIZE: true 119 | EXACT_FRAME: 0.5 120 | NUM: 7 121 | MODE: sequence 122 | VID_EXT: mp4 123 | ALWAYS_ON_FLOOR:
false 124 | GT: false 125 | DEMO: 126 | MOTION_TRANSFER: false 127 | RENDER: false 128 | FRAME_RATE: 12.5 129 | EXAMPLE: null 130 | -------------------------------------------------------------------------------- /blender_render/configs/render.yaml: -------------------------------------------------------------------------------- 1 | NAME: '___render_do_not_need_name__' # Experiment name 2 | ACCELERATOR: 'gpu' # Devices optioncal: “cpu”, “gpu”, “tpu”, “ipu”, “hpu”, “mps, “auto” 3 | DEVICE: [0] # Index of gpus eg. [0] or [0,1,2,3] 4 | 5 | RENDER: 6 | FOLDER: '___no_need__' 7 | JOINT_TYPE: 'HumanML3D' 8 | INPUT_MODE: 'npy' 9 | DIR: '' 10 | NPY: '___no_need__' 11 | DENOISING: True 12 | OLDRENDER: True 13 | RES: 'high' 14 | DOWNSAMPLE: False 15 | FPS: 20.0 16 | CANONICALIZE: True 17 | EXACT_FRAME: 0.5 18 | NUM: 3 19 | MODE: '___no_need__' #sequence frame video 20 | VID_EXT: 'mp4' 21 | FACES_PATH: 'smpl_path/smplh/smplh.faces' 22 | -------------------------------------------------------------------------------- /blender_render/mld/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/__init__.py -------------------------------------------------------------------------------- /blender_render/mld/launch/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/launch/__init__.py -------------------------------------------------------------------------------- /blender_render/mld/launch/__pycache__/__init__.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/launch/__pycache__/__init__.cpython-310.pyc -------------------------------------------------------------------------------- /blender_render/mld/launch/__pycache__/__init__.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/launch/__pycache__/__init__.cpython-39.pyc -------------------------------------------------------------------------------- /blender_render/mld/launch/__pycache__/blender.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/launch/__pycache__/blender.cpython-310.pyc -------------------------------------------------------------------------------- /blender_render/mld/launch/__pycache__/blender.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/launch/__pycache__/blender.cpython-39.pyc -------------------------------------------------------------------------------- /blender_render/mld/launch/__pycache__/prepare.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/launch/__pycache__/prepare.cpython-310.pyc 
-------------------------------------------------------------------------------- /blender_render/mld/launch/__pycache__/prepare.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/launch/__pycache__/prepare.cpython-39.pyc -------------------------------------------------------------------------------- /blender_render/mld/launch/blender.py: -------------------------------------------------------------------------------- 1 | # Fix blender path 2 | import sys 3 | import os 4 | # local packages 5 | sys.path.append(os.path.expanduser("~/.local/lib/python3.9/site-packages")) 6 | import bpy 7 | import os 8 | from argparse import ArgumentParser 9 | 10 | # Monkey patch argparse such that 11 | # blender / python / hydra parsing works 12 | def parse_args(self, args=None, namespace=None): 13 | if args is not None: 14 | return self.parse_args_bak(args=args, namespace=namespace) 15 | try: 16 | idx = sys.argv.index("--") 17 | args = sys.argv[idx+1:] # the list after '--' 18 | except ValueError as e: # '--' not in the list: 19 | args = [] 20 | return self.parse_args_bak(args=args, namespace=namespace) 21 | 22 | setattr(ArgumentParser, 'parse_args_bak', ArgumentParser.parse_args) 23 | setattr(ArgumentParser, 'parse_args', parse_args) 24 | -------------------------------------------------------------------------------- /blender_render/mld/launch/prepare.py: -------------------------------------------------------------------------------- 1 | import os 2 | import warnings 3 | from pathlib import Path 4 | 5 | import hydra 6 | from mld.tools.runid import generate_id 7 | from omegaconf import OmegaConf 8 | 9 | 10 | # Local paths 11 | def code_path(path=""): 12 | code_dir = hydra.utils.get_original_cwd() 13 | code_dir = Path(code_dir) 14 | return str(code_dir / path) 15 | 16 | 17 | def working_path(path): 18 | return str(Path(os.getcwd()) / path) 19 | 20 | 21 | # fix the id for this run 22 | ID = generate_id() 23 | 24 | 25 | def generate_id(): 26 | return ID 27 | 28 | 29 | def get_last_checkpoint(path, ckpt_name="last.ckpt"): 30 | output_dir = Path(hydra.utils.to_absolute_path(path)) 31 | last_ckpt_path = output_dir / "checkpoints" / ckpt_name 32 | return str(last_ckpt_path) 33 | 34 | 35 | def get_kitname(load_amass_data: bool, load_with_rot: bool): 36 | if not load_amass_data: 37 | return "kit-mmm-xyz" 38 | if load_amass_data and not load_with_rot: 39 | return "kit-amass-xyz" 40 | if load_amass_data and load_with_rot: 41 | return "kit-amass-rot" 42 | 43 | 44 | OmegaConf.register_new_resolver("code_path", code_path) 45 | OmegaConf.register_new_resolver("working_path", working_path) 46 | OmegaConf.register_new_resolver("generate_id", generate_id) 47 | OmegaConf.register_new_resolver("absolute_path", hydra.utils.to_absolute_path) 48 | OmegaConf.register_new_resolver("get_last_checkpoint", get_last_checkpoint) 49 | OmegaConf.register_new_resolver("get_kitname", get_kitname) 50 | 51 | 52 | # Remove warnings 53 | warnings.filterwarnings( 54 | "ignore", ".*Trying to infer the `batch_size` from an ambiguous collection.*" 55 | ) 56 | 57 | warnings.filterwarnings( 58 | "ignore", ".*does not have many workers which may be a bottleneck*" 59 | ) 60 | 61 | warnings.filterwarnings( 62 | "ignore", ".*Our suggested max number of worker in current system is*" 63 | ) 64 | 65 | 66 | # os.environ["HYDRA_FULL_ERROR"] = "1" 67 | os.environ["NUMEXPR_MAX_THREADS"] = "24" 68 | 
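The monkey patch in `blender.py` above exists because Blender parses the full command line itself; only the arguments after the `--` separator (see the launch command in `blender_render/README.md`) are intended for the user script. The snippet below is a minimal, standalone sketch of that convention, not the repo's code: the `script_args` helper and the example flags are hypothetical, while the repo instead patches `ArgumentParser.parse_args` as shown above.

```python
# Standalone sketch (hypothetical helper, not repo code): recover only the
# arguments that follow "--" when a script is launched as
#   blender --background --python render.py -- --cfg=./configs/render.yaml --mode=sequence
import sys
from argparse import ArgumentParser


def script_args(argv=None):
    """Return the argument list after '--', or [] if the separator is absent."""
    argv = sys.argv if argv is None else argv
    try:
        return argv[argv.index("--") + 1:]
    except ValueError:  # no '--' on the command line
        return []


if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("--cfg", default="./configs/render.yaml")
    parser.add_argument("--mode", default="sequence")
    print(parser.parse_args(script_args()))
```

Run outside Blender, the script simply parses whatever follows `--` (or an empty list); launched via `blender ... -- --cfg=... --mode=frame`, it sees only the trailing flags and ignores Blender's own arguments.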
-------------------------------------------------------------------------------- /blender_render/mld/launch/tools.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from omegaconf import DictConfig, OmegaConf 3 | import hydra 4 | import os 5 | 6 | 7 | def resolve_cfg_path(cfg: DictConfig): 8 | working_dir = os.getcwd() 9 | cfg.working_dir = working_dir 10 | -------------------------------------------------------------------------------- /blender_render/mld/render/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/render/__init__.py -------------------------------------------------------------------------------- /blender_render/mld/render/__pycache__/__init__.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/render/__pycache__/__init__.cpython-310.pyc -------------------------------------------------------------------------------- /blender_render/mld/render/__pycache__/__init__.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/render/__pycache__/__init__.cpython-39.pyc -------------------------------------------------------------------------------- /blender_render/mld/render/__pycache__/video.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/render/__pycache__/video.cpython-310.pyc -------------------------------------------------------------------------------- /blender_render/mld/render/__pycache__/video.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/render/__pycache__/video.cpython-39.pyc -------------------------------------------------------------------------------- /blender_render/mld/render/blender/__init__.py: -------------------------------------------------------------------------------- 1 | from .render import render 2 | -------------------------------------------------------------------------------- /blender_render/mld/render/blender/__pycache__/__init__.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/render/blender/__pycache__/__init__.cpython-310.pyc -------------------------------------------------------------------------------- /blender_render/mld/render/blender/__pycache__/__init__.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/render/blender/__pycache__/__init__.cpython-39.pyc -------------------------------------------------------------------------------- /blender_render/mld/render/blender/__pycache__/camera.cpython-310.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/render/blender/__pycache__/camera.cpython-310.pyc -------------------------------------------------------------------------------- /blender_render/mld/render/blender/__pycache__/camera.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/render/blender/__pycache__/camera.cpython-39.pyc -------------------------------------------------------------------------------- /blender_render/mld/render/blender/__pycache__/floor.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/render/blender/__pycache__/floor.cpython-310.pyc -------------------------------------------------------------------------------- /blender_render/mld/render/blender/__pycache__/floor.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/render/blender/__pycache__/floor.cpython-39.pyc -------------------------------------------------------------------------------- /blender_render/mld/render/blender/__pycache__/joints.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/render/blender/__pycache__/joints.cpython-310.pyc -------------------------------------------------------------------------------- /blender_render/mld/render/blender/__pycache__/materials.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/render/blender/__pycache__/materials.cpython-310.pyc -------------------------------------------------------------------------------- /blender_render/mld/render/blender/__pycache__/materials.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/render/blender/__pycache__/materials.cpython-39.pyc -------------------------------------------------------------------------------- /blender_render/mld/render/blender/__pycache__/meshes.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/render/blender/__pycache__/meshes.cpython-310.pyc -------------------------------------------------------------------------------- /blender_render/mld/render/blender/__pycache__/meshes.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/render/blender/__pycache__/meshes.cpython-39.pyc -------------------------------------------------------------------------------- /blender_render/mld/render/blender/__pycache__/render.cpython-310.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/render/blender/__pycache__/render.cpython-310.pyc -------------------------------------------------------------------------------- /blender_render/mld/render/blender/__pycache__/render.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/render/blender/__pycache__/render.cpython-39.pyc -------------------------------------------------------------------------------- /blender_render/mld/render/blender/__pycache__/sampler.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/render/blender/__pycache__/sampler.cpython-310.pyc -------------------------------------------------------------------------------- /blender_render/mld/render/blender/__pycache__/sampler.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/render/blender/__pycache__/sampler.cpython-39.pyc -------------------------------------------------------------------------------- /blender_render/mld/render/blender/__pycache__/scene.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/render/blender/__pycache__/scene.cpython-310.pyc -------------------------------------------------------------------------------- /blender_render/mld/render/blender/__pycache__/scene.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/render/blender/__pycache__/scene.cpython-39.pyc -------------------------------------------------------------------------------- /blender_render/mld/render/blender/__pycache__/tools.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/render/blender/__pycache__/tools.cpython-310.pyc -------------------------------------------------------------------------------- /blender_render/mld/render/blender/__pycache__/tools.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/render/blender/__pycache__/tools.cpython-39.pyc -------------------------------------------------------------------------------- /blender_render/mld/render/blender/__pycache__/vertices.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/render/blender/__pycache__/vertices.cpython-310.pyc -------------------------------------------------------------------------------- /blender_render/mld/render/blender/__pycache__/vertices.cpython-39.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/render/blender/__pycache__/vertices.cpython-39.pyc -------------------------------------------------------------------------------- /blender_render/mld/render/blender/camera.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | 3 | 4 | class Camera: 5 | def __init__(self, *, first_root, mode, is_mesh): 6 | camera = bpy.data.objects['Camera'] 7 | 8 | ## initial position 9 | camera.location.x = 7.36 10 | camera.location.y = -6.93 11 | if is_mesh: 12 | # camera.location.z = 5.45 13 | camera.location.z = 5.6 14 | else: 15 | camera.location.z = 5.2 16 | 17 | # wider point of view 18 | if mode == "sequence": 19 | if is_mesh: 20 | camera.data.lens = 65 21 | else: 22 | camera.data.lens = 85 23 | elif mode == "frame": 24 | if is_mesh: 25 | camera.data.lens = 130 26 | else: 27 | camera.data.lens = 85 28 | elif mode == "video": 29 | if is_mesh: 30 | camera.data.lens = 110 31 | else: 32 | # avoid cutting person 33 | camera.data.lens = 85 34 | # camera.data.lens = 140 35 | 36 | # camera.location.x += 0.75 37 | 38 | self.mode = mode 39 | self.camera = camera 40 | 41 | self.camera.location.x += first_root[0] 42 | self.camera.location.y += first_root[1] 43 | 44 | self._root = first_root 45 | 46 | def update(self, newroot): 47 | delta_root = newroot - self._root 48 | 49 | self.camera.location.x += delta_root[0] 50 | self.camera.location.y += delta_root[1] 51 | 52 | self._root = newroot 53 | -------------------------------------------------------------------------------- /blender_render/mld/render/blender/data.py: -------------------------------------------------------------------------------- 1 | class Data: 2 | def __len__(self): 3 | return self.N 4 | -------------------------------------------------------------------------------- /blender_render/mld/render/blender/floor.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | from .materials import floor_mat 3 | 4 | 5 | def get_trajectory(data, is_mesh): 6 | if is_mesh: 7 | # mean of the vertices 8 | trajectory = data[:, :, [0, 1]].mean(1) 9 | else: 10 | # get the root joint 11 | trajectory = data[:, 0, [0, 1]] 12 | return trajectory 13 | 14 | 15 | def plot_floor(data, big_plane=True): 16 | # Create a floor 17 | minx, miny, _ = data.min(axis=(0, 1)) 18 | maxx, maxy, _ = data.max(axis=(0, 1)) 19 | minz = 0 20 | 21 | location = ((maxx + minx)/2, (maxy + miny)/2, 0) 22 | # a little bit bigger 23 | scale = (1.08*(maxx - minx)/2, 1.08*(maxy - miny)/2, 1) 24 | 25 | bpy.ops.mesh.primitive_plane_add(size=2, enter_editmode=False, align='WORLD', location=location, scale=(1, 1, 1)) 26 | 27 | bpy.ops.transform.resize(value=scale, orient_type='GLOBAL', orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)), orient_matrix_type='GLOBAL', 28 | constraint_axis=(False, True, False), mirror=True, use_proportional_edit=False, 29 | proportional_edit_falloff='SMOOTH', proportional_size=1, use_proportional_connected=False, 30 | use_proportional_projected=False, release_confirm=True) 31 | obj = bpy.data.objects["Plane"] 32 | obj.name = "SmallPlane" 33 | obj.data.name = "SmallPlane" 34 | 35 | if not big_plane: 36 | obj.active_material = floor_mat(color=(0.2, 0.2, 0.2, 1)) 37 | else: 38 | obj.active_material = floor_mat(color=(1.0, 1.0, 1.0, 1)) 39 | 40 | if big_plane: 41 | location = ((maxx + minx)/2, (maxy + miny)/2, -0.01) 42 | bpy.ops.mesh.primitive_plane_add(size=2, 
enter_editmode=False, align='WORLD', location=location, scale=(1, 1, 1)) 43 | 44 | bpy.ops.transform.resize(value=[2*x for x in scale], orient_type='GLOBAL', orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)), orient_matrix_type='GLOBAL', 45 | constraint_axis=(False, True, False), mirror=True, use_proportional_edit=False, 46 | proportional_edit_falloff='SMOOTH', proportional_size=1, use_proportional_connected=False, 47 | use_proportional_projected=False, release_confirm=True) 48 | 49 | obj = bpy.data.objects["Plane"] 50 | obj.name = "BigPlane" 51 | obj.data.name = "BigPlane" 52 | obj.active_material = floor_mat(color=(0.2, 0.2, 0.2, 1)) 53 | 54 | 55 | def show_traj(coords): 56 | None 57 | # create the Curve Datablock 58 | # curveData = bpy.data.curves.new('myCurve', type='CURVE') 59 | # curveData.dimensions = '3D' 60 | # curveData.resolution_u = 2 61 | 62 | # # map coords to spline 63 | # polyline = curveData.splines.new('POLY') 64 | # polyline.points.add(len(coords)-1) 65 | # for i, coord in enumerate(coords): 66 | # x, y = coord 67 | # polyline.points[i].co = (x, y, 0.001, 1) 68 | 69 | # # create Object 70 | # curveOB = bpy.data.objects.new('myCurve', curveData) 71 | # curveData.bevel_depth = 0.01 72 | 73 | # bpy.context.collection.objects.link(curveOB) 74 | -------------------------------------------------------------------------------- /blender_render/mld/render/blender/sampler.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | def get_frameidx(*, mode, nframes, exact_frame, frames_to_keep): 4 | if mode == "sequence": 5 | frameidx = np.linspace(0, nframes - 1, frames_to_keep) 6 | frameidx = np.round(frameidx).astype(int) 7 | frameidx = list(frameidx) 8 | elif mode == "frame": 9 | index_frame = int(exact_frame*nframes) 10 | frameidx = [index_frame] 11 | elif mode == "video": 12 | frameidx = range(0, nframes) 13 | else: 14 | raise ValueError(f"Not support {mode} render mode") 15 | return frameidx 16 | -------------------------------------------------------------------------------- /blender_render/mld/render/blender/scene.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | from .materials import plane_mat # noqa 3 | 4 | 5 | def setup_renderer(denoising=True, oldrender=True, accelerator="gpu", device=[0]): 6 | bpy.context.scene.render.engine = "CYCLES" 7 | bpy.data.scenes[0].render.engine = "CYCLES" 8 | if accelerator.lower() == "gpu": 9 | bpy.context.preferences.addons[ 10 | "cycles" 11 | ].preferences.compute_device_type = "CUDA" 12 | bpy.context.scene.cycles.device = "GPU" 13 | i = 0 14 | bpy.context.preferences.addons["cycles"].preferences.get_devices() 15 | for d in bpy.context.preferences.addons["cycles"].preferences.devices: 16 | if i in device: # gpu id 17 | d["use"] = 1 18 | print(d["name"], "".join(str(i) for i in device)) 19 | else: 20 | d["use"] = 0 21 | i += 1 22 | 23 | if denoising: 24 | bpy.context.scene.cycles.use_denoising = True 25 | 26 | # bpy.context.scene.render.tile_x = 256 27 | # bpy.context.scene.render.tile_y = 256 28 | bpy.context.scene.cycles.samples = 64 29 | # bpy.context.scene.cycles.denoiser = 'OPTIX' 30 | 31 | if not oldrender: 32 | bpy.context.scene.view_settings.view_transform = "Standard" 33 | bpy.context.scene.render.film_transparent = True 34 | bpy.context.scene.display_settings.display_device = "sRGB" 35 | bpy.context.scene.view_settings.gamma = 1.2 36 | bpy.context.scene.view_settings.exposure = -0.75 37 | 38 | 39 | # Setup scene 40 | def 
setup_scene( 41 | res="high", denoising=True, oldrender=True, accelerator="gpu", device=[0] 42 | ): 43 | scene = bpy.data.scenes["Scene"] 44 | assert res in ["ultra", "high", "med", "low"] 45 | if res == "high": 46 | scene.render.resolution_x = 1280 47 | scene.render.resolution_y = 1024 48 | elif res == "med": 49 | scene.render.resolution_x = 1280 // 2 50 | scene.render.resolution_y = 1024 // 2 51 | elif res == "low": 52 | scene.render.resolution_x = 1280 // 4 53 | scene.render.resolution_y = 1024 // 4 54 | elif res == "ultra": 55 | scene.render.resolution_x = 1280 * 2 56 | scene.render.resolution_y = 1024 * 2 57 | 58 | scene.render.film_transparent= True 59 | world = bpy.data.worlds["World"] 60 | world.use_nodes = True 61 | bg = world.node_tree.nodes["Background"] 62 | bg.inputs[0].default_value[:3] = (1.0, 1.0, 1.0) 63 | bg.inputs[1].default_value = 1.0 64 | 65 | # Remove default cube 66 | if "Cube" in bpy.data.objects: 67 | bpy.data.objects["Cube"].select_set(True) 68 | bpy.ops.object.delete() 69 | 70 | bpy.ops.object.light_add( 71 | type="SUN", align="WORLD", location=(0, 0, 0), scale=(1, 1, 1) 72 | ) 73 | bpy.data.objects["Sun"].data.energy = 1.5 74 | 75 | # rotate camera 76 | bpy.ops.object.empty_add( 77 | type="PLAIN_AXES", align="WORLD", location=(0, 0, 0), scale=(1, 1, 1) 78 | ) 79 | bpy.ops.transform.resize( 80 | value=(10, 10, 10), 81 | orient_type="GLOBAL", 82 | orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)), 83 | orient_matrix_type="GLOBAL", 84 | mirror=True, 85 | use_proportional_edit=False, 86 | proportional_edit_falloff="SMOOTH", 87 | proportional_size=1, 88 | use_proportional_connected=False, 89 | use_proportional_projected=False, 90 | ) 91 | bpy.ops.object.select_all(action="DESELECT") 92 | 93 | setup_renderer( 94 | denoising=denoising, oldrender=oldrender, accelerator=accelerator, device=device 95 | ) 96 | return scene 97 | -------------------------------------------------------------------------------- /blender_render/mld/render/blender/tools.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | import numpy as np 3 | 4 | 5 | def mesh_detect(data): 6 | # heuristic 7 | if data.shape[1] > 1000: 8 | return True 9 | return False 10 | 11 | 12 | # see this for more explanation 13 | # https://gist.github.com/iyadahmed/7c7c0fae03c40bd87e75dc7059e35377 14 | # This should be solved with new version of blender 15 | class ndarray_pydata(np.ndarray): 16 | def __bool__(self) -> bool: 17 | return len(self) > 0 18 | 19 | 20 | def load_numpy_vertices_into_blender(vertices, faces, name, mat): 21 | mesh = bpy.data.meshes.new(name) 22 | mesh.from_pydata(vertices, [], faces.view(ndarray_pydata)) 23 | mesh.validate() 24 | 25 | obj = bpy.data.objects.new(name, mesh) 26 | bpy.context.scene.collection.objects.link(obj) 27 | 28 | bpy.ops.object.select_all(action='DESELECT') 29 | obj.select_set(True) 30 | obj.active_material = mat 31 | bpy.context.view_layer.objects.active = obj 32 | bpy.ops.object.shade_smooth() 33 | bpy.ops.object.select_all(action='DESELECT') 34 | return True 35 | 36 | 37 | def delete_objs(names): 38 | if not isinstance(names, list): 39 | names = [names] 40 | # bpy.ops.object.mode_set(mode='OBJECT') 41 | bpy.ops.object.select_all(action='DESELECT') 42 | for obj in bpy.context.scene.objects: 43 | for name in names: 44 | if obj.name.startswith(name) or obj.name.endswith(name): 45 | obj.select_set(True) 46 | bpy.ops.object.delete() 47 | bpy.ops.object.select_all(action='DESELECT') 48 | 
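A hedged sketch of how the helpers in `blender_render/mld/render/blender/tools.py` above could be combined into a per-frame loop. This is not the repo's actual render loop: it assumes Blender's bundled Python (so `bpy` is importable by the helpers), that `blender_render/` is on `sys.path` so `mld.render.blender.tools` resolves, and that the caller supplies the vertex array, face array, and a Blender material.

```python
# Illustrative only: per-frame mesh loading with the tools.py helpers.
# Assumes it runs inside Blender's Python with blender_render/ on sys.path.
import numpy as np

from mld.render.blender.tools import (
    delete_objs,
    load_numpy_vertices_into_blender,
    mesh_detect,
)


def show_frames(data: np.ndarray, faces: np.ndarray, mat) -> None:
    """data has shape (nframes, npoints, 3); faces indexes the per-frame vertices."""
    is_mesh = mesh_detect(data)  # heuristic: >1000 points per frame means a mesh, not joints
    print("mesh input" if is_mesh else "joint input")
    for i, verts in enumerate(data):
        name = f"frame_{i:04d}"
        load_numpy_vertices_into_blender(verts, faces, name, mat)
        # ... render or inspect the current scene here ...
        delete_objs(name)  # drop the temporary object before loading the next frame
```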
-------------------------------------------------------------------------------- /blender_render/mld/render/blender/vertices.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def prepare_vertices(vertices, canonicalize=True): 5 | data = vertices 6 | # Swap axis (gravity=Z instead of Y) 7 | # data = data[..., [2, 0, 1]] 8 | 9 | # Make left/right correct 10 | # data[..., [1]] = -data[..., [1]] 11 | 12 | # Center the first root to the first frame 13 | data -= data[[0], [0], :] 14 | 15 | # Remove the floor 16 | data[..., 2] -= np.min(data[..., 2]) 17 | return data 18 | -------------------------------------------------------------------------------- /blender_render/mld/render/video.py: -------------------------------------------------------------------------------- 1 | import moviepy.editor as mp 2 | import moviepy.video.fx.all as vfx 3 | import os 4 | import imageio 5 | 6 | 7 | def mask_png(frames): 8 | for frame in frames: 9 | im = imageio.imread(frame) 10 | # im[im[:, :, 3] < 1, :] = 255 11 | im[im[:, :, 3] < 1, :] = 214 12 | imageio.imwrite(frame, im[:, :, 0:3]) 13 | return 14 | 15 | 16 | class Video: 17 | def __init__(self, frame_path: str, fps: float = 12.5, res="high"): 18 | frame_path = str(frame_path) 19 | self.fps = fps 20 | 21 | self._conf = {"codec": "libx264", 22 | "fps": self.fps, 23 | "audio_codec": "aac", 24 | "temp_audiofile": "temp-audio.m4a", 25 | "remove_temp": True} 26 | 27 | if res == "low": 28 | bitrate = "500k" 29 | else: 30 | bitrate = "5000k" 31 | 32 | self._conf = {"bitrate": bitrate, 33 | "fps": self.fps} 34 | 35 | # Load video 36 | # video = mp.VideoFileClip(video1_path, audio=False) 37 | # Load with frames 38 | frames = [os.path.join(frame_path, x) 39 | for x in sorted(os.listdir(frame_path))] 40 | 41 | # mask background white for videos 42 | mask_png(frames) 43 | 44 | video = mp.ImageSequenceClip(frames, fps=fps) 45 | self.video = video 46 | self.duration = video.duration 47 | 48 | def add_text(self, text): 49 | # needs ImageMagick 50 | video_text = mp.TextClip(text, 51 | font='Amiri', 52 | color='white', 53 | method='caption', 54 | align="center", 55 | size=(self.video.w, None), 56 | fontsize=30) 57 | video_text = video_text.on_color(size=(self.video.w, video_text.h + 5), 58 | color=(0, 0, 0), 59 | col_opacity=0.6) 60 | # video_text = video_text.set_pos('bottom') 61 | video_text = video_text.set_pos('top') 62 | 63 | self.video = mp.CompositeVideoClip([self.video, video_text]) 64 | 65 | def save(self, out_path): 66 | out_path = str(out_path) 67 | self.video.subclip(0, self.duration).write_videofile( 68 | out_path, **self._conf) 69 | -------------------------------------------------------------------------------- /blender_render/mld/tools/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/tools/__init__.py -------------------------------------------------------------------------------- /blender_render/mld/tools/__pycache__/__init__.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/tools/__pycache__/__init__.cpython-310.pyc -------------------------------------------------------------------------------- /blender_render/mld/tools/__pycache__/__init__.cpython-39.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/tools/__pycache__/__init__.cpython-39.pyc -------------------------------------------------------------------------------- /blender_render/mld/tools/__pycache__/runid.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/tools/__pycache__/runid.cpython-310.pyc -------------------------------------------------------------------------------- /blender_render/mld/tools/__pycache__/runid.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/tools/__pycache__/runid.cpython-39.pyc -------------------------------------------------------------------------------- /blender_render/mld/tools/logging.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import tqdm 3 | 4 | 5 | class LevelsFilter(logging.Filter): 6 | def __init__(self, levels): 7 | self.levels = [getattr(logging, level) for level in levels] 8 | 9 | def filter(self, record): 10 | return record.levelno in self.levels 11 | 12 | 13 | class StreamToLogger(object): 14 | """ 15 | Fake file-like stream object that redirects writes to a logger instance. 16 | """ 17 | def __init__(self, logger, level): 18 | self.logger = logger 19 | self.level = level 20 | self.linebuf = '' 21 | 22 | def write(self, buf): 23 | for line in buf.rstrip().splitlines(): 24 | self.logger.log(self.level, line.rstrip()) 25 | 26 | def flush(self): 27 | pass 28 | 29 | 30 | class TqdmLoggingHandler(logging.Handler): 31 | def __init__(self, level=logging.NOTSET): 32 | super().__init__(level) 33 | 34 | def emit(self, record): 35 | try: 36 | msg = self.format(record) 37 | tqdm.tqdm.write(msg) 38 | self.flush() 39 | except Exception: 40 | self.handleError(record) 41 | -------------------------------------------------------------------------------- /blender_render/mld/tools/runid.py: -------------------------------------------------------------------------------- 1 | # 2 | """ 3 | runid util. 
4 | Taken from wandb.sdk.lib.runid 5 | """ 6 | 7 | import shortuuid # type: ignore 8 | 9 | 10 | def generate_id() -> str: 11 | # ~3t run ids (36**8) 12 | run_gen = shortuuid.ShortUUID(alphabet=list("0123456789abcdefghijklmnopqrstuvwxyz")) 13 | return run_gen.random(8) -------------------------------------------------------------------------------- /blender_render/mld/transforms/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import Transform 2 | from .smpl import SMPLTransform 3 | # from .xyz import XYZTransform 4 | -------------------------------------------------------------------------------- /blender_render/mld/transforms/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/transforms/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /blender_render/mld/transforms/__pycache__/base.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/transforms/__pycache__/base.cpython-37.pyc -------------------------------------------------------------------------------- /blender_render/mld/transforms/__pycache__/smpl.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/transforms/__pycache__/smpl.cpython-37.pyc -------------------------------------------------------------------------------- /blender_render/mld/transforms/base.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass, fields 2 | 3 | 4 | class Transform: 5 | 6 | def collate(self, lst_datastruct): 7 | from mld.datasets.utils import collate_tensor_with_padding 8 | example = lst_datastruct[0] 9 | 10 | def collate_or_none(key): 11 | if example[key] is None: 12 | return None 13 | key_lst = [x[key] for x in lst_datastruct] 14 | return collate_tensor_with_padding(key_lst) 15 | 16 | kwargs = {key: collate_or_none(key) for key in example.datakeys} 17 | 18 | return self.Datastruct(**kwargs) 19 | 20 | 21 | # Inspired from SMPLX library 22 | # need to define "datakeys" and transforms 23 | @dataclass 24 | class Datastruct: 25 | 26 | def __getitem__(self, key): 27 | return getattr(self, key) 28 | 29 | def __setitem__(self, key, value): 30 | self.__dict__[key] = value 31 | 32 | def get(self, key, default=None): 33 | return getattr(self, key, default) 34 | 35 | def __iter__(self): 36 | return self.keys() 37 | 38 | def keys(self): 39 | keys = [t.name for t in fields(self)] 40 | return iter(keys) 41 | 42 | def values(self): 43 | values = [getattr(self, t.name) for t in fields(self)] 44 | return iter(values) 45 | 46 | def items(self): 47 | data = [(t.name, getattr(self, t.name)) for t in fields(self)] 48 | return iter(data) 49 | 50 | def to(self, *args, **kwargs): 51 | for key in self.datakeys: 52 | if self[key] is not None: 53 | self[key] = self[key].to(*args, **kwargs) 54 | return self 55 | 56 | @property 57 | def device(self): 58 | return self[self.datakeys[0]].device 59 | 60 | def detach(self): 61 | 62 | def detach_or_none(tensor): 63 | if tensor is not None: 64 | return tensor.detach() 
65 | return None 66 | 67 | kwargs = {key: detach_or_none(self[key]) for key in self.datakeys} 68 | return self.transforms.Datastruct(**kwargs) 69 | -------------------------------------------------------------------------------- /blender_render/mld/transforms/feats2smpl.py: -------------------------------------------------------------------------------- 1 | from os.path import join as pjoin 2 | 3 | import numpy as np 4 | import torch 5 | 6 | import mld.data.humanml.utils.paramUtil as paramUtil 7 | from mld.data.humanml.data.dataset import Text2MotionDatasetV2 8 | from mld.data.humanml.scripts.motion_process import recover_from_ric 9 | from mld.data.humanml.utils.plot_script import plot_3d_motion 10 | 11 | skeleton = paramUtil.t2m_kinematic_chain 12 | 13 | # convert HumanML3D features to skeleton format for rendering 14 | # def feats2joints(motion, data_root = '../datasets/humanml3d'): 15 | # ''' 16 | # input: 263 features 17 | # output: 22 joints? 18 | # ''' 19 | # mean = torch.from_numpy(np.load(pjoin(data_root, 'Mean.npy'))) 20 | # std = torch.from_numpy(np.load(pjoin(data_root, 'Std.npy'))) 21 | 22 | # motion = motion * std + mean 23 | # motion_rec = recover_from_ric(motion, joints_num=22) 24 | # # motion_rec = motion_rec * 1.3 25 | # return motion_rec 26 | 27 | 28 | def main(): 29 | data_root = '../datasets/humanml3d' 30 | features_path = 'in.npy' 31 | animation_save_path = 'in.mp4' 32 | 33 | fps = 20 34 | mean = np.load(pjoin(data_root, 'Mean.npy')) 35 | std = np.load(pjoin(data_root, 'Std.npy')) 36 | 37 | motion = np.load(features_path) 38 | motion = motion * std + mean 39 | motion_rec = recover_from_ric(torch.tensor(motion), 22).cpu().numpy() 40 | # with open('in_22.npy', 'wb') as f: 41 | # np.save(f,motion_rec) 42 | motion_rec = motion_rec * 1.3 43 | plot_3d_motion(animation_save_path, motion_rec, title='input', fps=fps) 44 | 45 | 46 | if __name__ == '__main__': 47 | main() 48 | -------------------------------------------------------------------------------- /blender_render/mld/transforms/identity.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | from torch import Tensor 3 | 4 | from .base import Datastruct, dataclass, Transform 5 | 6 | 7 | class IdentityTransform(Transform): 8 | def __init__(self, **kwargs): 9 | return 10 | 11 | def Datastruct(self, **kwargs): 12 | return IdentityDatastruct(**kwargs) 13 | 14 | def __repr__(self): 15 | return "IdentityTransform()" 16 | 17 | 18 | @dataclass 19 | class IdentityDatastruct(Datastruct): 20 | transforms: IdentityTransform 21 | 22 | features: Optional[Tensor] = None 23 | 24 | def __post_init__(self): 25 | self.datakeys = ["features"] 26 | 27 | def __len__(self): 28 | return len(self.features) 29 | -------------------------------------------------------------------------------- /blender_render/mld/transforms/joints2jfeats/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import Joints2Jfeats 2 | from .rifke import Rifke 3 | -------------------------------------------------------------------------------- /blender_render/mld/transforms/joints2jfeats/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/transforms/joints2jfeats/__pycache__/__init__.cpython-37.pyc --------------------------------------------------------------------------------
/blender_render/mld/transforms/joints2jfeats/__pycache__/base.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/transforms/joints2jfeats/__pycache__/base.cpython-37.pyc -------------------------------------------------------------------------------- /blender_render/mld/transforms/joints2jfeats/__pycache__/rifke.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/transforms/joints2jfeats/__pycache__/rifke.cpython-37.pyc -------------------------------------------------------------------------------- /blender_render/mld/transforms/joints2jfeats/__pycache__/tools.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/transforms/joints2jfeats/__pycache__/tools.cpython-37.pyc -------------------------------------------------------------------------------- /blender_render/mld/transforms/joints2jfeats/base.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | import torch 4 | from torch import Tensor, nn 5 | from pathlib import Path 6 | 7 | 8 | class Joints2Jfeats(nn.Module): 9 | def __init__(self, path: Optional[str] = None, 10 | normalization: bool = False, 11 | eps: float = 1e-12, 12 | **kwargs) -> None: 13 | if normalization and path is None: 14 | raise TypeError("You should provide a path if normalization is on.") 15 | 16 | super().__init__() 17 | self.normalization = normalization 18 | self.eps = eps 19 | 20 | if normalization: 21 | mean_path = Path(path) / "jfeats_mean.pt" 22 | std_path = Path(path) / "jfeats_std.pt" 23 | self.register_buffer('mean', torch.load(mean_path)) 24 | self.register_buffer('std', torch.load(std_path)) 25 | 26 | def normalize(self, features: Tensor) -> Tensor: 27 | if self.normalization: 28 | features = (features - self.mean)/(self.std + self.eps) 29 | return features 30 | 31 | def unnormalize(self, features: Tensor) -> Tensor: 32 | if self.normalization: 33 | features = features * self.std + self.mean 34 | return features 35 | -------------------------------------------------------------------------------- /blender_render/mld/transforms/joints2jfeats/tools.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn.functional as F 3 | 4 | from mld.utils.joints import mmm_joints, humanml3d_joints 5 | 6 | # Get the indexes of particular body part 7 | 8 | 9 | # .T is deprecated now for reversing a tensor 10 | def T(x): 11 | return x.permute(*torch.arange(x.ndim - 1, -1, -1)) 12 | 13 | 14 | def get_forward_direction(poses, jointstype="mmm"): 15 | if jointstype == "mmm" or jointstype == "mmmns": 16 | joints = mmm_joints 17 | elif jointstype == "humanml3d": 18 | joints = humanml3d_joints 19 | else: 20 | raise TypeError('Only supports mmm, mmmns and humanl3d jointstype') 21 | # Shoulders 22 | LS, RS = joints.index("LS"), joints.index("RS") 23 | # Hips 24 | LH, RH = joints.index("LH"), joints.index("RH") 25 | 26 | across = poses[..., RH, :] - poses[..., LH, :] + poses[..., RS, :] - poses[ 27 | ..., LS, :] 28 | forward = torch.stack((-across[..., 2], across[..., 0]), axis=-1) 29 | 
forward = torch.nn.functional.normalize(forward, dim=-1) 30 | return forward 31 | 32 | 33 | def get_floor(poses, jointstype="mmm"): 34 | if jointstype == "mmm" or jointstype == "mmmns": 35 | joints = mmm_joints 36 | elif jointstype == "humanml3d": 37 | joints = humanml3d_joints 38 | else: 39 | raise TypeError('Only supports mmm, mmmns and humanl3d jointstype') 40 | ndim = len(poses.shape) 41 | # Feet 42 | LM, RM = joints.index("LMrot"), joints.index("RMrot") 43 | LF, RF = joints.index("LF"), joints.index("RF") 44 | foot_heights = poses[..., (LM, LF, RM, RF), 1].min(-1).values 45 | floor_height = softmin(foot_heights, softness=0.5, dim=-1) 46 | return T(floor_height[(ndim - 2) * [None]]) 47 | 48 | 49 | def softmax(x, softness=1.0, dim=None): 50 | maxi, mini = x.max(dim=dim).values, x.min(dim=dim).values 51 | return maxi + torch.log(softness + torch.exp(mini - maxi)) 52 | 53 | 54 | def softmin(x, softness=1.0, dim=0): 55 | return -softmax(-x, softness=softness, dim=dim) 56 | 57 | 58 | def gaussian_filter1d(_inputs, sigma, truncate=4.0): 59 | # Code adapted/mixed from scipy library into pytorch 60 | # https://github.com/scipy/scipy/blob/47bb6febaa10658c72962b9615d5d5aa2513fa3a/scipy/ndimage/filters.py#L211 61 | # and gaussian kernel 62 | # https://github.com/scipy/scipy/blob/47bb6febaa10658c72962b9615d5d5aa2513fa3a/scipy/ndimage/filters.py#L179 63 | # Correspond to mode="nearest" and order = 0 64 | # But works batched 65 | if len(_inputs.shape) == 2: 66 | inputs = _inputs[None] 67 | else: 68 | inputs = _inputs 69 | 70 | sd = float(sigma) 71 | radius = int(truncate * sd + 0.5) 72 | sigma2 = sigma * sigma 73 | x = torch.arange(-radius, 74 | radius + 1, 75 | device=inputs.device, 76 | dtype=inputs.dtype) 77 | phi_x = torch.exp(-0.5 / sigma2 * x**2) 78 | phi_x = phi_x / phi_x.sum() 79 | 80 | # Conv1d weights 81 | groups = inputs.shape[-1] 82 | weights = torch.tile(phi_x, (groups, 1, 1)) 83 | inputs = inputs.transpose(-1, -2) 84 | outputs = F.conv1d(inputs, weights, padding="same", 85 | groups=groups).transpose(-1, -2) 86 | 87 | return outputs.reshape(_inputs.shape) 88 | -------------------------------------------------------------------------------- /blender_render/mld/transforms/joints2rots/__pycache__/config.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/transforms/joints2rots/__pycache__/config.cpython-37.pyc -------------------------------------------------------------------------------- /blender_render/mld/transforms/joints2rots/__pycache__/customloss.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/transforms/joints2rots/__pycache__/customloss.cpython-37.pyc -------------------------------------------------------------------------------- /blender_render/mld/transforms/joints2rots/__pycache__/prior.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/transforms/joints2rots/__pycache__/prior.cpython-37.pyc -------------------------------------------------------------------------------- /blender_render/mld/transforms/joints2rots/__pycache__/smplify.cpython-37.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/transforms/joints2rots/__pycache__/smplify.cpython-37.pyc -------------------------------------------------------------------------------- /blender_render/mld/transforms/joints2rots/config.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from mld.utils.joints import mmm_joints, smplh2mmm_indexes 3 | 4 | # Map joints Name to SMPL joints idx 5 | JOINT_MAP = { 6 | 'MidHip': 0, 7 | 'LHip': 1, 8 | 'LKnee': 4, 9 | 'LAnkle': 7, 10 | 'LFoot': 10, 11 | 'RHip': 2, 12 | 'RKnee': 5, 13 | 'RAnkle': 8, 14 | 'RFoot': 11, 15 | 'LShoulder': 16, 16 | 'LElbow': 18, 17 | 'LWrist': 20, 18 | 'LHand': 22, 19 | 'RShoulder': 17, 20 | 'RElbow': 19, 21 | 'RWrist': 21, 22 | 'RHand': 23, 23 | 'spine1': 3, 24 | 'spine2': 6, 25 | 'spine3': 9, 26 | 'Neck': 12, 27 | 'Head': 15, 28 | 'LCollar': 13, 29 | 'Rcollar': 14, 30 | 'Nose': 24, 31 | 'REye': 26, 32 | 'LEye': 26, 33 | 'REar': 27, 34 | 'LEar': 28, 35 | 'LHeel': 31, 36 | 'RHeel': 34, 37 | 'OP RShoulder': 17, 38 | 'OP LShoulder': 16, 39 | 'OP RHip': 2, 40 | 'OP LHip': 1, 41 | 'OP Neck': 12, 42 | } 43 | 44 | mmm2smpl_correspondence = { 45 | "root": "MidHip", 46 | "BP": "spine1", 47 | "BT": "spine3", 48 | "BLN": "Neck", 49 | "BUN": "Head", 50 | "LS": "LShoulder", 51 | "LE": "LElbow", 52 | "LW": "LWrist", 53 | "RS": "RShoulder", 54 | "RE": "RElbow", 55 | "RW": "RWrist", 56 | "LH": "LHip", 57 | "LK": "LKnee", 58 | "LA": "LAnkle", 59 | "LMrot": "LHeel", 60 | "LF": "LFoot", 61 | "RH": "RHip", 62 | "RK": "RKnee", 63 | "RA": "RAnkle", 64 | "RMrot": "RHeel", 65 | "RF": "RFoot" 66 | } 67 | 68 | full_smpl_idx = range(24) 69 | key_smpl_idx = [0, 1, 4, 7, 2, 5, 8, 17, 19, 21, 16, 18, 20] 70 | 71 | AMASS_JOINT_MAP = { 72 | 'MidHip': 0, 73 | 'LHip': 1, 74 | 'LKnee': 4, 75 | 'LAnkle': 7, 76 | 'LFoot': 10, 77 | 'RHip': 2, 78 | 'RKnee': 5, 79 | 'RAnkle': 8, 80 | 'RFoot': 11, 81 | 'LShoulder': 16, 82 | 'LElbow': 18, 83 | 'LWrist': 20, 84 | 'RShoulder': 17, 85 | 'RElbow': 19, 86 | 'RWrist': 21, 87 | 'spine1': 3, 88 | 'spine2': 6, 89 | 'spine3': 9, 90 | 'Neck': 12, 91 | 'Head': 15, 92 | 'LCollar': 13, 93 | 'Rcollar': 14, 94 | } 95 | amass_idx = range(22) 96 | amass_smpl_idx = range(22) 97 | 98 | # cal mmm in smpl index 99 | smpl2mmm_correspondence = { 100 | val: key 101 | for key, val in mmm2smpl_correspondence.items() 102 | } 103 | smpl2mmm_indexes = [JOINT_MAP[mmm2smpl_correspondence[x]] for x in mmm_joints] 104 | 105 | # cal mmm joints map 106 | MMM_JOINT_MAP = { 107 | val: JOINT_MAP[val] 108 | for key, val in mmm2smpl_correspondence.items() 109 | } 110 | 111 | # mmm_idx = range(21) 112 | # mmm_smpl_dix = smpl2mmm_indexes 113 | # mmm_smpl_dix = smplh2mmm_indexes 114 | # todo - configable 115 | SMPL_MODEL_DIR = "/home/ericpeng/DeepLearning/Projects/human_motion_generation/motion-latent-diffusion/deps/smpl_models/" 116 | GMM_MODEL_DIR = "/home/ericpeng/DeepLearning/Projects/human_motion_generation/motion-latent-diffusion/deps/smpl_models" 117 | SMPL_MEAN_FILE = "/home/ericpeng/DeepLearning/Projects/human_motion_generation/motion-latent-diffusion/deps/smpl_models/neutral_smpl_mean_params.h5" 118 | # for collsion 119 | Part_Seg_DIR = "/home/ericpeng/DeepLearning/Projects/human_motion_generation/motion-latent-diffusion/deps/smpl_models/smplx_parts_segm.pkl" 120 | -------------------------------------------------------------------------------- 
/blender_render/mld/transforms/rotation2xyz.py: -------------------------------------------------------------------------------- 1 | # This code is based on https://github.com/Mathux/ACTOR.git 2 | import torch 3 | import mld.utils.rotation_conversions as geometry 4 | 5 | from .smpl import SMPL, JOINTSTYPE_ROOT 6 | # from .get_model import JOINTSTYPES 7 | JOINTSTYPES = ["a2m", "a2mpl", "smpl", "vibe", "vertices"] 8 | 9 | 10 | class Rotation2xyz(torch.nn.Module): 11 | 12 | def __init__(self, smpl_path): 13 | super().__init__() 14 | self.smpl_model = SMPL(smpl_path).eval() 15 | 16 | def __call__(self, 17 | x, 18 | mask, 19 | pose_rep, 20 | translation, 21 | glob, 22 | jointstype, 23 | vertstrans, 24 | betas=None, 25 | beta=0, 26 | glob_rot=None, 27 | get_rotations_back=False, 28 | **kwargs): 29 | if pose_rep == "xyz": 30 | return x 31 | 32 | if mask is None: 33 | mask = torch.ones((x.shape[0], x.shape[-1]), 34 | dtype=bool, 35 | device=x.device) 36 | 37 | if not glob and glob_rot is None: 38 | raise TypeError( 39 | "You must specify global rotation if glob is False") 40 | 41 | if jointstype not in JOINTSTYPES: 42 | raise NotImplementedError("This jointstype is not implemented.") 43 | 44 | if translation: 45 | x_translations = x[:, -1, :3] 46 | x_rotations = x[:, :-1] 47 | else: 48 | x_rotations = x 49 | 50 | x_rotations = x_rotations.permute(0, 3, 1, 2) 51 | nsamples, time, njoints, feats = x_rotations.shape 52 | 53 | # Compute rotations (convert only masked sequences output) 54 | if pose_rep == "rotvec": 55 | rotations = geometry.axis_angle_to_matrix(x_rotations[mask]) 56 | elif pose_rep == "rotmat": 57 | rotations = x_rotations[mask].view(-1, njoints, 3, 3) 58 | elif pose_rep == "rotquat": 59 | rotations = geometry.quaternion_to_matrix(x_rotations[mask]) 60 | elif pose_rep == "rot6d": 61 | rotations = geometry.rotation_6d_to_matrix(x_rotations[mask]) 62 | else: 63 | raise NotImplementedError("No geometry for this one.") 64 | 65 | if not glob: 66 | global_orient = torch.tensor(glob_rot, device=x.device) 67 | global_orient = geometry.axis_angle_to_matrix(global_orient).view( 68 | 1, 1, 3, 3) 69 | global_orient = global_orient.repeat(len(rotations), 1, 1, 1) 70 | else: 71 | global_orient = rotations[:, 0] 72 | rotations = rotations[:, 1:] 73 | 74 | if betas is None: 75 | betas = torch.zeros( 76 | [rotations.shape[0], self.smpl_model.num_betas], 77 | dtype=rotations.dtype, 78 | device=rotations.device) 79 | betas[:, 1] = beta 80 | # import ipdb; ipdb.set_trace() 81 | out = self.smpl_model(body_pose=rotations, 82 | global_orient=global_orient, 83 | betas=betas) 84 | 85 | # get the desirable joints 86 | joints = out[jointstype] 87 | 88 | x_xyz = torch.empty(nsamples, 89 | time, 90 | joints.shape[1], 91 | 3, 92 | device=x.device, 93 | dtype=x.dtype) 94 | x_xyz[~mask] = 0 95 | x_xyz[mask] = joints 96 | 97 | x_xyz = x_xyz.permute(0, 2, 3, 1).contiguous() 98 | 99 | # the first translation root at the origin on the prediction 100 | if jointstype != "vertices": 101 | rootindex = JOINTSTYPE_ROOT[jointstype] 102 | x_xyz = x_xyz - x_xyz[:, [rootindex], :, :] 103 | 104 | if translation and vertstrans: 105 | # the first translation root at the origin 106 | x_translations = x_translations - x_translations[:, :, [0]] 107 | 108 | # add the translation to all the joints 109 | x_xyz = x_xyz + x_translations[:, None, :, :] 110 | 111 | if get_rotations_back: 112 | return x_xyz, rotations, global_orient 113 | else: 114 | return x_xyz 115 | 
-------------------------------------------------------------------------------- /blender_render/mld/transforms/rots2joints/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import Rots2Joints 2 | from .smplh import SMPLH 3 | -------------------------------------------------------------------------------- /blender_render/mld/transforms/rots2joints/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/transforms/rots2joints/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /blender_render/mld/transforms/rots2joints/__pycache__/base.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/transforms/rots2joints/__pycache__/base.cpython-37.pyc -------------------------------------------------------------------------------- /blender_render/mld/transforms/rots2joints/__pycache__/smplh.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/transforms/rots2joints/__pycache__/smplh.cpython-37.pyc -------------------------------------------------------------------------------- /blender_render/mld/transforms/rots2joints/base.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | import torch 4 | from torch import Tensor, nn 5 | from pathlib import Path 6 | 7 | 8 | class Rots2Joints(nn.Module): 9 | def __init__(self, path: Optional[str] = None, 10 | normalization: bool = False, 11 | eps: float = 1e-12, 12 | **kwargs) -> None: 13 | if normalization and path is None: 14 | raise TypeError("You should provide a path if normalization is on.") 15 | 16 | super().__init__() 17 | self.normalization = normalization 18 | self.eps = eps 19 | 20 | if normalization: 21 | mean_path = Path(path) / "mean.pt" 22 | std_path = Path(path) / "std.pt" 23 | self.register_buffer('mean', torch.load(mean_path)) 24 | self.register_buffer('std', torch.load(std_path)) 25 | 26 | def normalize(self, features: Tensor) -> Tensor: 27 | if self.normalization: 28 | features = (features - self.mean)/(self.std + self.eps) 29 | return features 30 | 31 | def unnormalize(self, features: Tensor) -> Tensor: 32 | if self.normalization: 33 | features = features * self.std + self.mean 34 | return features 35 | -------------------------------------------------------------------------------- /blender_render/mld/transforms/rots2rfeats/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import Rots2Rfeats 2 | from .smplvelp import SMPLVelP 3 | -------------------------------------------------------------------------------- /blender_render/mld/transforms/rots2rfeats/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/transforms/rots2rfeats/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- 
/blender_render/mld/transforms/rots2rfeats/__pycache__/base.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/transforms/rots2rfeats/__pycache__/base.cpython-37.pyc -------------------------------------------------------------------------------- /blender_render/mld/transforms/rots2rfeats/__pycache__/smplvelp.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/transforms/rots2rfeats/__pycache__/smplvelp.cpython-37.pyc -------------------------------------------------------------------------------- /blender_render/mld/transforms/rots2rfeats/base.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | import torch 4 | from torch import Tensor, nn 5 | from pathlib import Path 6 | 7 | 8 | class Rots2Rfeats(nn.Module): 9 | def __init__(self, path: Optional[str] = None, 10 | normalization: bool = False, 11 | eps: float = 1e-12, 12 | **kwargs) -> None: 13 | if normalization and path is None: 14 | raise TypeError("You should provide a path if normalization is on.") 15 | 16 | super().__init__() 17 | self.normalization = normalization 18 | self.eps = eps 19 | 20 | if normalization: 21 | mean_path = Path(path) / "rfeats_mean.pt" 22 | std_path = Path(path) / "rfeats_std.pt" 23 | self.register_buffer('mean', torch.load(mean_path)) 24 | self.register_buffer('std', torch.load(std_path)) 25 | 26 | def normalize(self, features: Tensor) -> Tensor: 27 | if self.normalization: 28 | features = (features - self.mean)/(self.std + self.eps) 29 | return features 30 | 31 | def unnormalize(self, features: Tensor) -> Tensor: 32 | if self.normalization: 33 | features = features * self.std + self.mean 34 | return features 35 | -------------------------------------------------------------------------------- /blender_render/mld/transforms/rots2rfeats/smplvelp.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | import torch 4 | from torch import Tensor 5 | from einops import rearrange 6 | 7 | from mld.utils.temos_utils import matrix_to, nfeats_of, to_matrix 8 | import mld.utils.geometry as geometry 9 | 10 | from .base import Rots2Rfeats 11 | 12 | 13 | class SMPLVelP(Rots2Rfeats): 14 | 15 | def __init__(self, 16 | path: Optional[str] = None, 17 | normalization: bool = False, 18 | pose_rep: str = "rot6d", 19 | canonicalize: bool = False, 20 | offset: bool = True, 21 | **kwargs) -> None: 22 | super().__init__(path=path, normalization=normalization) 23 | self.canonicalize = canonicalize 24 | self.pose_rep = pose_rep 25 | self.nfeats = nfeats_of(pose_rep) 26 | self.offset = offset 27 | 28 | def forward(self, data) -> Tensor: 29 | matrix_poses, trans = data.rots, data.trans 30 | # matrix_poses: [nframes, 22, 3, 3] 31 | 32 | # extract the root gravity axis 33 | # for smpl it is the last coordinate 34 | root_y = trans[..., 2] 35 | trajectory = trans[..., [0, 1]] 36 | 37 | # Comoute the difference of trajectory (for X and Y axis) 38 | vel_trajectory = torch.diff(trajectory, dim=-2) 39 | # 0 for the first one => keep the dimentionality 40 | vel_trajectory = torch.cat( 41 | (0 * vel_trajectory[..., [0], :], vel_trajectory), dim=-2) 42 | 43 | # first normalize the data 44 | if 
self.canonicalize: 45 | global_orient = matrix_poses[..., 0, :, :] 46 | # remove the rotation 47 | rot2d = geometry.matrix_to_axis_angle(global_orient[..., 0, :, :]) 48 | # Remove the first rotation along the vertical axis 49 | # construct this by extracting only the vertical component of the rotation 50 | rot2d[..., :2] = 0 51 | 52 | if self.offset: 53 | # add a bit more rotation 54 | rot2d[..., 2] += torch.pi / 2 55 | 56 | rot2d = geometry.axis_angle_to_matrix(rot2d) 57 | 58 | # turn all the rotations by the same amount 59 | global_orient = torch.einsum("...kj,...kl->...jl", rot2d, 60 | global_orient) 61 | 62 | matrix_poses = torch.cat( 63 | (global_orient[..., None, :, :], matrix_poses[..., 1:, :, :]), 64 | dim=-3) 65 | 66 | # Turn the trajectory as well 67 | vel_trajectory = torch.einsum("...kj,...lk->...lj", 68 | rot2d[..., :2, :2], vel_trajectory) 69 | 70 | poses = matrix_to(self.pose_rep, matrix_poses) 71 | features = torch.cat( 72 | (root_y[..., None], vel_trajectory, 73 | rearrange(poses, "... joints rot -> ... (joints rot)")), 74 | dim=-1) 75 | features = self.normalize(features) 76 | return features 77 | 78 | def extract(self, features): 79 | root_y = features[..., 0] 80 | vel_trajectory = features[..., 1:3] 81 | poses_features = features[..., 3:] 82 | poses = rearrange(poses_features, 83 | "... (joints rot) -> ... joints rot", 84 | rot=self.nfeats) 85 | return root_y, vel_trajectory, poses 86 | 87 | def inverse(self, features): 88 | features = self.unnormalize(features) 89 | root_y, vel_trajectory, poses = self.extract(features) 90 | 91 | # integrate the trajectory 92 | trajectory = torch.cumsum(vel_trajectory, dim=-2) 93 | # First frame should be 0, but if inferred it is better to ensure it 94 | trajectory = trajectory - trajectory[..., [0], :] 95 | 96 | # Get back the translation 97 | trans = torch.cat([trajectory, root_y[..., None]], dim=-1) 98 | matrix_poses = to_matrix(self.pose_rep, poses) 99 | 100 | from mld.transforms.smpl import RotTransDatastruct  # was temos.transforms.smpl; the temos package is not bundled with this renderer 101 | return RotTransDatastruct(rots=matrix_poses, trans=trans) 102 | -------------------------------------------------------------------------------- /blender_render/mld/transforms/xyz.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | from torch import Tensor 3 | 4 | from .base import Datastruct, dataclass, Transform 5 | from mld.datasets.utils import collate_tensor_with_padding 6 | 7 | from .joints2jfeats import Joints2Jfeats 8 | 9 | 10 | class XYZTransform(Transform): 11 | 12 | def __init__(self, joints2jfeats: Joints2Jfeats, **kwargs): 13 | self.joints2jfeats = joints2jfeats 14 | 15 | def Datastruct(self, **kwargs): 16 | return XYZDatastruct(_joints2jfeats=self.joints2jfeats, 17 | transforms=self, 18 | **kwargs) 19 | 20 | def __repr__(self): 21 | return "XYZTransform()" 22 | 23 | 24 | @dataclass 25 | class XYZDatastruct(Datastruct): 26 | transforms: XYZTransform 27 | _joints2jfeats: Joints2Jfeats 28 | 29 | features: Optional[Tensor] = None 30 | joints_: Optional[Tensor] = None 31 | jfeats_: Optional[Tensor] = None 32 | 33 | def __post_init__(self): 34 | self.datakeys = ["features", "joints_", "jfeats_"] 35 | # starting point 36 | if self.features is not None and self.jfeats_ is None: 37 | self.jfeats_ = self.features 38 | 39 | @property 40 | def joints(self): 41 | # Cached value 42 | if self.joints_ is not None: 43 | return self.joints_ 44 | 45 | # self.jfeats_ should be defined 46 | assert self.jfeats_ is not None 47 | 48 |
self._joints2jfeats.to(self.jfeats.device) 49 | self.joints_ = self._joints2jfeats.inverse(self.jfeats) 50 | return self.joints_ 51 | 52 | @property 53 | def jfeats(self): 54 | # Cached value 55 | if self.jfeats_ is not None: 56 | return self.jfeats_ 57 | 58 | # self.joints_ should be defined 59 | assert self.joints_ is not None 60 | 61 | self._joints2jfeats.to(self.joints.device) 62 | self.jfeats_ = self._joints2jfeats(self.joints) 63 | return self.jfeats_ 64 | 65 | def __len__(self): 66 | return len(self.jfeats) 67 | -------------------------------------------------------------------------------- /blender_render/mld/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/utils/__init__.py -------------------------------------------------------------------------------- /blender_render/mld/utils/__pycache__/__init__.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/utils/__pycache__/__init__.cpython-310.pyc -------------------------------------------------------------------------------- /blender_render/mld/utils/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/utils/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /blender_render/mld/utils/__pycache__/__init__.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/utils/__pycache__/__init__.cpython-39.pyc -------------------------------------------------------------------------------- /blender_render/mld/utils/__pycache__/geometry.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/utils/__pycache__/geometry.cpython-37.pyc -------------------------------------------------------------------------------- /blender_render/mld/utils/__pycache__/joints.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/utils/__pycache__/joints.cpython-310.pyc -------------------------------------------------------------------------------- /blender_render/mld/utils/__pycache__/joints.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/utils/__pycache__/joints.cpython-37.pyc -------------------------------------------------------------------------------- /blender_render/mld/utils/__pycache__/joints.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/utils/__pycache__/joints.cpython-39.pyc 
-------------------------------------------------------------------------------- /blender_render/mld/utils/__pycache__/rotation_conversions.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/utils/__pycache__/rotation_conversions.cpython-37.pyc -------------------------------------------------------------------------------- /blender_render/mld/utils/__pycache__/temos_utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/blender_render/mld/utils/__pycache__/temos_utils.cpython-37.pyc -------------------------------------------------------------------------------- /blender_render/mld/utils/demo_utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | 4 | 5 | # load example data 6 | def load_example_input(txt_path): 7 | file = open(txt_path, "r") 8 | Lines = file.readlines() 9 | count = 0 10 | texts, lens = [], [] 11 | # Strips the newline character 12 | for line in Lines: 13 | count += 1 14 | s = line.strip() 15 | s_l = s.split(" ")[0] 16 | s_t = s[(len(s_l) + 1):] 17 | lens.append(int(s_l)) 18 | texts.append(s_t) 19 | print("Length-{}: {}".format(s_l, s_t)) 20 | return texts, lens 21 | 22 | 23 | # render batch 24 | def render_batch(npy_dir, execute_python="./scripts/visualize_motion.sh", mode="sequence"): 25 | os.system(f"{execute_python} {npy_dir} {mode}") 26 | 27 | 28 | # render 29 | def render(execute_python, npy_path, jointtype, cfg_path): 30 | # execute_python = "/apdcephfs/share_1227775/shingxchen/libs/blender_bpy/blender-2.93.2-linux-x64/blender" 31 | # execute_python = "/apdcephfs/share_1227775/mingzhenzhu/jiangbiao/libs/blender-2.93.2-linux-x64/blender" 32 | export_scripts = "render.py" 33 | 34 | os.system( 35 | f"{execute_python} --background --python {export_scripts} -- --cfg={cfg_path} --npy={npy_path} --joint_type={jointtype}" 36 | ) 37 | 38 | fig_path = Path(str(npy_path).replace(".npy", ".png")) 39 | return fig_path 40 | 41 | 42 | # origin render 43 | # def render(npy_path, jointtype): 44 | # execute_python = '/apdcephfs/share_1227775/shingxchen/libs/blender_bpy/blender-2.93.2-linux-x64/blender' 45 | # export_scripts = 'render.py' 46 | 47 | # os.system(f"{execute_python} --background --python {export_scripts} -- npy={npy_path} jointstype={jointtype}") 48 | 49 | # fig_path = Path(str(npy_path).replace(".npy",".png")) 50 | # return fig_path 51 | 52 | # export fbx with hand params from pkl files 53 | # refer to /apdcephfs/share_1227775/shingxchen/AIMotion/TMOST/scripts/fbx_output_smplx.py 54 | def export_fbx_hand(pkl_path): 55 | input = pkl_path 56 | output = pkl_path.replace(".pkl", ".fbx") 57 | 58 | execute_python = "/apdcephfs/share_1227775/shingxchen/libs/blender_bpy/blender-2.93.2-linux-x64/blender" 59 | export_scripts = "./scripts/fbx_output_smplx.py" 60 | os.system( 61 | f"{execute_python} -noaudio --background --python {export_scripts}\ 62 | --input {input} \ 63 | --output {output}" 64 | ) 65 | 66 | 67 | # export fbx without hand params from pkl files 68 | # refer to /apdcephfs/share_1227775/shingxchen/AIMotion/TMOST/scripts/fbx_output.py 69 | def export_fbx(pkl_path): 70 | input = pkl_path 71 | output = pkl_path.replace(".pkl", ".fbx") 72 | 73 | execute_python =
"/apdcephfs/share_1227775/shingxchen/libs/blender_bpy/blender-2.93.2-linux-x64/blender" 74 | export_scripts = "./scripts/fbx_output.py" 75 | os.system( 76 | f"{execute_python} -noaudio --background --python {export_scripts}\ 77 | --input {input} \ 78 | --output {output}" 79 | ) 80 | -------------------------------------------------------------------------------- /blender_render/mld/utils/easyconvert.py: -------------------------------------------------------------------------------- 1 | import mld.utils.geometry as geometry 2 | 3 | 4 | def nfeats_of(rottype): 5 | if rottype in ["rotvec", "axisangle"]: 6 | return 3 7 | elif rottype in ["rotquat", "quaternion"]: 8 | return 4 9 | elif rottype in ["rot6d", "6drot", "rotation6d"]: 10 | return 6 11 | elif rottype in ["rotmat"]: 12 | return 9 13 | else: 14 | raise TypeError("This rotation type doesn't have features.") 15 | 16 | 17 | def axis_angle_to(newtype, rotations): 18 | if newtype in ["matrix"]: 19 | rotations = geometry.axis_angle_to_matrix(rotations) 20 | return rotations 21 | elif newtype in ["rotmat"]: 22 | rotations = geometry.axis_angle_to_matrix(rotations) 23 | rotations = matrix_to("rotmat", rotations) 24 | return rotations 25 | elif newtype in ["rot6d", "6drot", "rotation6d"]: 26 | rotations = geometry.axis_angle_to_matrix(rotations) 27 | rotations = matrix_to("rot6d", rotations) 28 | return rotations 29 | elif newtype in ["rotquat", "quaternion"]: 30 | rotations = geometry.axis_angle_to_quaternion(rotations) 31 | return rotations 32 | elif newtype in ["rotvec", "axisangle"]: 33 | return rotations 34 | else: 35 | raise NotImplementedError 36 | 37 | 38 | def matrix_to(newtype, rotations): 39 | if newtype in ["matrix"]: 40 | return rotations 41 | if newtype in ["rotmat"]: 42 | rotations = rotations.reshape((*rotations.shape[:-2], 9)) 43 | return rotations 44 | elif newtype in ["rot6d", "6drot", "rotation6d"]: 45 | rotations = geometry.matrix_to_rotation_6d(rotations) 46 | return rotations 47 | elif newtype in ["rotquat", "quaternion"]: 48 | rotations = geometry.matrix_to_quaternion(rotations) 49 | return rotations 50 | elif newtype in ["rotvec", "axisangle"]: 51 | rotations = geometry.matrix_to_axis_angle(rotations) 52 | return rotations 53 | else: 54 | raise NotImplementedError 55 | 56 | 57 | def to_matrix(oldtype, rotations): 58 | if oldtype in ["matrix"]: 59 | return rotations 60 | if oldtype in ["rotmat"]: 61 | rotations = rotations.reshape((*rotations.shape[:-2], 3, 3)) 62 | return rotations 63 | elif oldtype in ["rot6d", "6drot", "rotation6d"]: 64 | rotations = geometry.rotation_6d_to_matrix(rotations) 65 | return rotations 66 | elif oldtype in ["rotquat", "quaternion"]: 67 | rotations = geometry.quaternion_to_matrix(rotations) 68 | return rotations 69 | elif oldtype in ["rotvec", "axisangle"]: 70 | rotations = geometry.axis_angle_to_matrix(rotations) 71 | return rotations 72 | else: 73 | raise NotImplementedError 74 | -------------------------------------------------------------------------------- /blender_render/mld/utils/fixseed.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import random 4 | 5 | 6 | def fixseed(seed): 7 | random.seed(seed) 8 | np.random.seed(seed) 9 | torch.manual_seed(seed) 10 | 11 | 12 | SEED = 10 13 | EVALSEED = 0 14 | # Provokes a warning: not fully functional yet 15 | # torch.set_deterministic(True) 16 | torch.backends.cudnn.benchmark = False 17 | 18 | fixseed(SEED) 19 |
-------------------------------------------------------------------------------- /blender_render/mld/utils/logger.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | import os 3 | import time 4 | import logging 5 | from omegaconf import OmegaConf 6 | from pytorch_lightning.utilities.rank_zero import rank_zero_only 7 | 8 | 9 | def create_logger(cfg, phase='train'): 10 | # root dir set by cfg 11 | root_output_dir = Path(cfg.FOLDER) 12 | # set up logger 13 | if not root_output_dir.exists(): 14 | print('=> creating {}'.format(root_output_dir)) 15 | root_output_dir.mkdir() 16 | 17 | cfg_name = cfg.NAME 18 | model = cfg.model.model_type 19 | cfg_name = os.path.basename(cfg_name).split('.')[0] 20 | 21 | final_output_dir = root_output_dir / model / cfg_name 22 | cfg.FOLDER_EXP = str(final_output_dir) 23 | 24 | time_str = time.strftime('%Y-%m-%d-%H-%M-%S') 25 | 26 | new_dir(cfg, phase, time_str, final_output_dir) 27 | 28 | head = '%(asctime)-15s %(message)s' 29 | logger = config_logger(final_output_dir, time_str, phase, head) 30 | if logger is None: 31 | logger = logging.getLogger() 32 | logger.setLevel(logging.CRITICAL) 33 | logging.basicConfig(format=head) 34 | return logger 35 | 36 | 37 | @rank_zero_only 38 | def config_logger(final_output_dir, time_str, phase, head): 39 | log_file = '{}_{}_{}.log'.format('log', time_str, phase) 40 | final_log_file = final_output_dir / log_file 41 | logging.basicConfig(filename=str(final_log_file)) 42 | logger = logging.getLogger() 43 | logger.setLevel(logging.INFO) 44 | console = logging.StreamHandler() 45 | formatter = logging.Formatter(head) 46 | console.setFormatter(formatter) 47 | logging.getLogger('').addHandler(console) 48 | file_handler = logging.FileHandler(final_log_file, 'w') 49 | file_handler.setFormatter(logging.Formatter(head)) 50 | file_handler.setLevel(logging.INFO) 51 | logging.getLogger('').addHandler(file_handler) 52 | return logger 53 | 54 | 55 | @rank_zero_only 56 | def new_dir(cfg, phase, time_str, final_output_dir): 57 | # new experiment folder 58 | cfg.TIME = str(time_str) 59 | if os.path.exists( 60 | final_output_dir) and cfg.TRAIN.RESUME is None and not cfg.DEBUG: 61 | file_list = sorted(os.listdir(final_output_dir), reverse=True) 62 | for item in file_list: 63 | if item.endswith('.log'): 64 | os.rename(str(final_output_dir), 65 | str(final_output_dir) + '_' + cfg.TIME) 66 | break 67 | final_output_dir.mkdir(parents=True, exist_ok=True) 68 | # write config yaml 69 | config_file = '{}_{}_{}.yaml'.format('config', time_str, phase) 70 | final_config_file = final_output_dir / config_file 71 | OmegaConf.save(config=cfg, f=final_config_file) 72 | -------------------------------------------------------------------------------- /blender_render/mld/utils/misc.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | def to_numpy(tensor): 5 | if torch.is_tensor(tensor): 6 | return tensor.cpu().numpy() 7 | elif type(tensor).__module__ != 'numpy': 8 | raise ValueError("Cannot convert {} to numpy array".format( 9 | type(tensor))) 10 | return tensor 11 | 12 | 13 | def to_torch(ndarray): 14 | if type(ndarray).__module__ == 'numpy': 15 | return torch.from_numpy(ndarray) 16 | elif not torch.is_tensor(ndarray): 17 | raise ValueError("Cannot convert {} to torch tensor".format( 18 | type(ndarray))) 19 | return ndarray 20 | 21 | 22 | def cleanexit(): 23 | import sys 24 | import os 25 | try: 26 | sys.exit(0) 27 | except SystemExit: 28 | 
os._exit(0) 29 | 30 | -------------------------------------------------------------------------------- /blender_render/mld/utils/sample_utils.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from pathlib import Path 3 | logger = logging.getLogger(__name__) 4 | 5 | def cfg_mean_nsamples_resolution(cfg): 6 | if cfg.mean and cfg.number_of_samples > 1: 7 | logger.error("All the samples will be the mean.. cfg.number_of_samples=1 will be forced.") 8 | cfg.number_of_samples = 1 9 | 10 | return cfg.number_of_samples == 1 11 | 12 | 13 | def get_path(sample_path: Path, is_amass: bool, gender: str, split: str, onesample: bool, mean: bool, fact: float): 14 | extra_str = ("_mean" if mean else "") if onesample else "_multi" 15 | fact_str = "" if fact == 1 else f"{fact}_" 16 | gender_str = gender + "_" if is_amass else "" 17 | path = sample_path / f"{fact_str}{gender_str}{split}{extra_str}" 18 | return path 19 | -------------------------------------------------------------------------------- /blender_render/mld/utils/tensors.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | def lengths_to_mask(lengths): 5 | max_len = max(lengths) 6 | mask = torch.arange(max_len, device=lengths.device).expand( 7 | len(lengths), max_len) < lengths.unsqueeze(1) 8 | return mask 9 | 10 | 11 | def collate_tensors(batch): 12 | dims = batch[0].dim() 13 | max_size = [max([b.size(i) for b in batch]) for i in range(dims)] 14 | size = (len(batch),) + tuple(max_size) 15 | canvas = batch[0].new_zeros(size=size) 16 | for i, b in enumerate(batch): 17 | sub_tensor = canvas[i] 18 | for d in range(dims): 19 | sub_tensor = sub_tensor.narrow(d, 0, b.size(d)) 20 | sub_tensor.add_(b) 21 | return canvas 22 | 23 | 24 | def collate(batch): 25 | databatch = [b[0] for b in batch] 26 | labelbatch = [b[1] for b in batch] 27 | lenbatch = [len(b[0][0][0]) for b in batch] 28 | 29 | databatchTensor = collate_tensors(databatch) 30 | labelbatchTensor = torch.as_tensor(labelbatch) 31 | lenbatchTensor = torch.as_tensor(lenbatch) 32 | 33 | maskbatchTensor = lengths_to_mask(lenbatchTensor) 34 | # x - [bs, njoints, nfeats, lengths] 35 | # - nfeats, the representation of a joint 36 | # y - [bs] 37 | # mask - [bs, lengths] 38 | # lengths - [bs] 39 | batch = {"x": databatchTensor, "y": labelbatchTensor, 40 | "mask": maskbatchTensor, 'lengths': lenbatchTensor} 41 | return batch 42 | 43 | 44 | # slow version with padding 45 | def collate_data3d_slow(batch): 46 | batchTensor = {} 47 | for key in batch[0].keys(): 48 | databatch = [b[key] for b in batch] 49 | batchTensor[key] = collate_tensors(databatch) 50 | batch = batchTensor 51 | # theta - [bs, lengths, 85], theta shape (85,) 52 | # - (np.array([1., 0., 0.]), pose(72), shape(10)), axis=0) 53 | # kp_2d - [bs, lengths, njoints, nfeats], nfeats (x,y,weight) 54 | # kp_3d - [bs, lengths, njoints, nfeats], nfeats (x,y,z) 55 | # w_smpl - [bs, lengths] zeros 56 | # w_3d - [bs, lengths] zeros 57 | return batch 58 | 59 | def collate_data3d(batch): 60 | batchTensor = {} 61 | for key in batch[0].keys(): 62 | databatch = [b[key] for b in batch] 63 | if key == "paths": 64 | batchTensor[key] = databatch 65 | else: 66 | batchTensor[key] = torch.stack(databatch,axis=0) 67 | batch = batchTensor 68 | # theta - [bs, lengths, 85], theta shape (85,) 69 | # - (np.array([1., 0., 0.]), pose(72), shape(10)), axis=0) 70 | # kp_2d - [bs, lengths, njoints, nfeats], nfeats (x,y,weight) 71 | # kp_3d - [bs, lengths, njoints, 
nfeats], nfeats (x,y,z) 72 | # w_smpl - [bs, lengths] zeros 73 | # w_3d - [bs, lengths] zeros 74 | return batch 75 | -------------------------------------------------------------------------------- /blender_render/requirements.txt: -------------------------------------------------------------------------------- 1 | pytorch_lightning 2 | torchmetrics==0.7 3 | omegaconf 4 | shortuuid 5 | tqdm 6 | pandas 7 | # sklearn 8 | chumpy 9 | transformers 10 | psutil 11 | einops 12 | yacs 13 | wandb 14 | rich 15 | # for fitting 16 | smplx==0.1.28 17 | trimesh==3.9.24 18 | # pyyaml==5.4.1 19 | h5py 20 | joblib==1.2.0 21 | scikit-image 22 | spacy 23 | diffusers 24 | ftfy -------------------------------------------------------------------------------- /common/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/common/__init__.py -------------------------------------------------------------------------------- /common/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/common/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /common/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/common/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /common/__pycache__/quaternion.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/common/__pycache__/quaternion.cpython-37.pyc -------------------------------------------------------------------------------- /common/__pycache__/quaternion.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/common/__pycache__/quaternion.cpython-38.pyc -------------------------------------------------------------------------------- /data_loaders/__pycache__/get_data.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/__pycache__/get_data.cpython-37.pyc -------------------------------------------------------------------------------- /data_loaders/__pycache__/tensors.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/__pycache__/tensors.cpython-37.pyc -------------------------------------------------------------------------------- /data_loaders/behave/README.md: -------------------------------------------------------------------------------- 1 | This code is based on https://github.com/EricGuo5513/text-to-motion.git -------------------------------------------------------------------------------- /data_loaders/behave/common/__pycache__/quaternion.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/common/__pycache__/quaternion.cpython-37.pyc -------------------------------------------------------------------------------- /data_loaders/behave/common/__pycache__/quaternion.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/common/__pycache__/quaternion.cpython-38.pyc -------------------------------------------------------------------------------- /data_loaders/behave/common/__pycache__/skeleton.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/common/__pycache__/skeleton.cpython-37.pyc -------------------------------------------------------------------------------- /data_loaders/behave/common/__pycache__/skeleton.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/common/__pycache__/skeleton.cpython-38.pyc -------------------------------------------------------------------------------- /data_loaders/behave/data/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/data/__init__.py -------------------------------------------------------------------------------- /data_loaders/behave/data/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/data/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /data_loaders/behave/data/__pycache__/dataset.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/data/__pycache__/dataset.cpython-37.pyc -------------------------------------------------------------------------------- /data_loaders/behave/networks/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/networks/__init__.py -------------------------------------------------------------------------------- /data_loaders/behave/networks/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/networks/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /data_loaders/behave/networks/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/networks/__pycache__/__init__.cpython-37.pyc 
-------------------------------------------------------------------------------- /data_loaders/behave/networks/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/networks/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /data_loaders/behave/networks/__pycache__/evaluator_wrapper.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/networks/__pycache__/evaluator_wrapper.cpython-37.pyc -------------------------------------------------------------------------------- /data_loaders/behave/networks/__pycache__/evaluator_wrapper.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/networks/__pycache__/evaluator_wrapper.cpython-38.pyc -------------------------------------------------------------------------------- /data_loaders/behave/networks/__pycache__/modules.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/networks/__pycache__/modules.cpython-311.pyc -------------------------------------------------------------------------------- /data_loaders/behave/networks/__pycache__/modules.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/networks/__pycache__/modules.cpython-37.pyc -------------------------------------------------------------------------------- /data_loaders/behave/networks/__pycache__/modules.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/networks/__pycache__/modules.cpython-38.pyc -------------------------------------------------------------------------------- /data_loaders/behave/networks/__pycache__/trainers.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/networks/__pycache__/trainers.cpython-311.pyc -------------------------------------------------------------------------------- /data_loaders/behave/networks/__pycache__/trainers.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/networks/__pycache__/trainers.cpython-37.pyc -------------------------------------------------------------------------------- /data_loaders/behave/networks/__pycache__/trainers.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/networks/__pycache__/trainers.cpython-38.pyc 
-------------------------------------------------------------------------------- /data_loaders/behave/options/__pycache__/base_options.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/options/__pycache__/base_options.cpython-37.pyc -------------------------------------------------------------------------------- /data_loaders/behave/options/__pycache__/train_options.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/options/__pycache__/train_options.cpython-37.pyc -------------------------------------------------------------------------------- /data_loaders/behave/options/evaluate_options.py: -------------------------------------------------------------------------------- 1 | from options.base_options import BaseOptions 2 | 3 | 4 | class TestOptions(BaseOptions): 5 | def initialize(self): 6 | BaseOptions.initialize(self) 7 | self.parser.add_argument('--batch_size', type=int, default=1, help='Batch size') 8 | self.parser.add_argument('--start_mov_len', type=int, default=10) 9 | self.parser.add_argument('--est_length', action="store_true", help="Whether to use sampled motion length") 10 | 11 | 12 | self.parser.add_argument('--repeat_times', type=int, default=3, help="Number of generation rounds for each text description") 13 | self.parser.add_argument('--split_file', type=str, default='test.txt') 14 | self.parser.add_argument('--text_file', type=str, default="./input.txt", help='Path of text description for motion generation') 15 | self.parser.add_argument('--which_epoch', type=str, default="latest", help='Checkpoint that will be used') 16 | self.parser.add_argument('--result_path', type=str, default="./eval_results/", help='Path to save generation results') 17 | self.parser.add_argument('--num_results', type=int, default=40, help='Number of descriptions that will be used') 18 | self.parser.add_argument('--ext', type=str, default='default', help='Save file path extension') 19 | 20 | self.is_train = False 21 | -------------------------------------------------------------------------------- /data_loaders/behave/scripts/__pycache__/motion_process.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/scripts/__pycache__/motion_process.cpython-37.pyc -------------------------------------------------------------------------------- /data_loaders/behave/scripts/__pycache__/motion_process.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/scripts/__pycache__/motion_process.cpython-38.pyc -------------------------------------------------------------------------------- /data_loaders/behave/utils/__pycache__/get_opt.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/utils/__pycache__/get_opt.cpython-311.pyc -------------------------------------------------------------------------------- 
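The TestOptions class in data_loaders/behave/options/evaluate_options.py above only adds evaluation-specific flags on top of BaseOptions. As a rough illustration of how this options pattern is typically consumed, here is a minimal, self-contained sketch that uses a hypothetical stand-in for BaseOptions (the real base_options.py defines many more arguments and is not reproduced here):

import argparse

class BaseOptionsSketch:  # hypothetical stand-in, NOT the repo's BaseOptions
    def __init__(self):
        self.parser = argparse.ArgumentParser()
        self.is_train = True

    def initialize(self):
        self.parser.add_argument('--name', type=str, default='Comp_v6_KLD01', help='Experiment name')

    def parse(self):
        self.initialize()
        opt = self.parser.parse_args()
        opt.is_train = self.is_train
        return opt

class TestOptionsSketch(BaseOptionsSketch):
    def initialize(self):
        BaseOptionsSketch.initialize(self)
        self.parser.add_argument('--which_epoch', type=str, default='latest', help='Checkpoint that will be used')
        self.parser.add_argument('--repeat_times', type=int, default=3, help='Generation rounds per text description')
        self.is_train = False

if __name__ == '__main__':
    opt = TestOptionsSketch().parse()
    print(opt.name, opt.which_epoch, opt.repeat_times, opt.is_train)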
/data_loaders/behave/utils/__pycache__/get_opt.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/utils/__pycache__/get_opt.cpython-37.pyc -------------------------------------------------------------------------------- /data_loaders/behave/utils/__pycache__/get_opt.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/utils/__pycache__/get_opt.cpython-38.pyc -------------------------------------------------------------------------------- /data_loaders/behave/utils/__pycache__/metrics.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/utils/__pycache__/metrics.cpython-37.pyc -------------------------------------------------------------------------------- /data_loaders/behave/utils/__pycache__/metrics.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/utils/__pycache__/metrics.cpython-38.pyc -------------------------------------------------------------------------------- /data_loaders/behave/utils/__pycache__/paramUtil.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/utils/__pycache__/paramUtil.cpython-311.pyc -------------------------------------------------------------------------------- /data_loaders/behave/utils/__pycache__/paramUtil.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/utils/__pycache__/paramUtil.cpython-37.pyc -------------------------------------------------------------------------------- /data_loaders/behave/utils/__pycache__/paramUtil.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/utils/__pycache__/paramUtil.cpython-38.pyc -------------------------------------------------------------------------------- /data_loaders/behave/utils/__pycache__/plot_script.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/utils/__pycache__/plot_script.cpython-37.pyc -------------------------------------------------------------------------------- /data_loaders/behave/utils/__pycache__/plot_script.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/utils/__pycache__/plot_script.cpython-38.pyc -------------------------------------------------------------------------------- /data_loaders/behave/utils/__pycache__/utils.cpython-37.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/utils/__pycache__/utils.cpython-37.pyc -------------------------------------------------------------------------------- /data_loaders/behave/utils/__pycache__/utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/utils/__pycache__/utils.cpython-38.pyc -------------------------------------------------------------------------------- /data_loaders/behave/utils/__pycache__/word_vectorizer.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/utils/__pycache__/word_vectorizer.cpython-311.pyc -------------------------------------------------------------------------------- /data_loaders/behave/utils/__pycache__/word_vectorizer.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/utils/__pycache__/word_vectorizer.cpython-37.pyc -------------------------------------------------------------------------------- /data_loaders/behave/utils/__pycache__/word_vectorizer.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/data_loaders/behave/utils/__pycache__/word_vectorizer.cpython-38.pyc -------------------------------------------------------------------------------- /data_loaders/behave/utils/get_opt.py: -------------------------------------------------------------------------------- 1 | import os 2 | from argparse import Namespace 3 | import re 4 | from os.path import join as pjoin 5 | from data_loaders.behave.utils.word_vectorizer import POS_enumerator 6 | 7 | 8 | def is_float(numStr): 9 | flag = False 10 | numStr = str(numStr).strip().lstrip('-').lstrip('+') # strip any leading plus/minus sign 11 | try: 12 | reg = re.compile(r'^[-+]?[0-9]+\.[0-9]+$') 13 | res = reg.match(str(numStr)) 14 | if res: 15 | flag = True 16 | except Exception as ex: 17 | print("is_float() - error: " + str(ex)) 18 | return flag 19 | 20 | 21 | def is_number(numStr): 22 | flag = False 23 | numStr = str(numStr).strip().lstrip('-').lstrip('+') # strip any leading plus/minus sign 24 | if str(numStr).isdigit(): 25 | flag = True 26 | return flag 27 | 28 | 29 | def get_opt(opt_path, device, use_global=False, wo_obj_motion=False): 30 | opt = Namespace() 31 | opt_dict = vars(opt) 32 | 33 | skip = ('-------------- End ----------------', 34 | '------------ Options -------------', 35 | '\n') 36 | print('Reading', opt_path) 37 | with open(opt_path) as f: 38 | for line in f: 39 | if line.strip() not in skip: 40 | # print(line.strip()) 41 | key, value = line.strip().split(': ') 42 | if value in ('True', 'False'): 43 | opt_dict[key] = (value == 'True') # bool('False') is truthy, so compare the string 44 | elif is_float(value): 45 | opt_dict[key] = float(value) 46 | elif is_number(value): 47 | opt_dict[key] = int(value) 48 | else: 49 | opt_dict[key] = str(value) 50 | 51 | # print(opt) 52 | opt_dict['which_epoch'] = 'latest' 53 | opt.save_root = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.name) 54 | opt.model_dir = pjoin(opt.save_root, 'model') 55
| opt.meta_dir = pjoin(opt.save_root, 'meta') 56 | opt.use_global = use_global 57 | opt.wo_obj_motion = wo_obj_motion 58 | 59 | if opt.dataset_name == 't2m_behave': 60 | opt.data_root = './dataset/behave_t2m/' 61 | opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs_local') 62 | opt.dim_pose = 263 + 6 63 | 64 | opt.afford_dir = pjoin(opt.data_root,'affordance_data') 65 | opt.text_dir = pjoin(opt.data_root, 'texts') 66 | opt.joints_num = 22 67 | opt.max_motion_length = 196 68 | else: 69 | raise KeyError('Dataset not recognized') 70 | 71 | opt.dim_word = 300 72 | opt.num_classes = 200 // opt.unit_length 73 | opt.dim_pos_ohot = len(POS_enumerator) 74 | opt.is_train = False 75 | opt.is_continue = False 76 | opt.device = device 77 | 78 | return opt -------------------------------------------------------------------------------- /data_loaders/behave/utils/paramUtil.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | # Define a kinematic tree for the skeletal struture 4 | kit_kinematic_chain = [[0, 11, 12, 13, 14, 15], [0, 16, 17, 18, 19, 20], [0, 1, 2, 3, 4], [3, 5, 6, 7], [3, 8, 9, 10]] 5 | 6 | kit_raw_offsets = np.array( 7 | [ 8 | [0, 0, 0], 9 | [0, 1, 0], 10 | [0, 1, 0], 11 | [0, 1, 0], 12 | [0, 1, 0], 13 | [1, 0, 0], 14 | [0, -1, 0], 15 | [0, -1, 0], 16 | [-1, 0, 0], 17 | [0, -1, 0], 18 | [0, -1, 0], 19 | [1, 0, 0], 20 | [0, -1, 0], 21 | [0, -1, 0], 22 | [0, 0, 1], 23 | [0, 0, 1], 24 | [-1, 0, 0], 25 | [0, -1, 0], 26 | [0, -1, 0], 27 | [0, 0, 1], 28 | [0, 0, 1] 29 | ] 30 | ) 31 | 32 | t2m_raw_offsets = np.array([[0,0,0], 33 | [1,0,0], 34 | [-1,0,0], 35 | [0,1,0], 36 | [0,-1,0], 37 | [0,-1,0], 38 | [0,1,0], 39 | [0,-1,0], 40 | [0,-1,0], 41 | [0,1,0], 42 | [0,0,1], 43 | [0,0,1], 44 | [0,1,0], 45 | [1,0,0], 46 | [-1,0,0], 47 | [0,0,1], 48 | [0,-1,0], 49 | [0,-1,0], 50 | [0,-1,0], 51 | [0,-1,0], 52 | [0,-1,0], 53 | [0,-1,0]]) 54 | 55 | t2m_kinematic_chain = [[0, 2, 5, 8, 11], [0, 1, 4, 7, 10], [0, 3, 6, 9, 12, 15], [9, 14, 17, 19, 21], [9, 13, 16, 18, 20]] 56 | t2m_left_hand_chain = [[20, 22, 23, 24], [20, 34, 35, 36], [20, 25, 26, 27], [20, 31, 32, 33], [20, 28, 29, 30]] 57 | t2m_right_hand_chain = [[21, 43, 44, 45], [21, 46, 47, 48], [21, 40, 41, 42], [21, 37, 38, 39], [21, 49, 50, 51]] 58 | 59 | 60 | kit_tgt_skel_id = '03950' 61 | 62 | t2m_tgt_skel_id = '000021' 63 | 64 | -------------------------------------------------------------------------------- /data_loaders/behave/utils/word_vectorizer.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pickle 3 | from os.path import join as pjoin 4 | 5 | POS_enumerator = { 6 | 'VERB': 0, 7 | 'NOUN': 1, 8 | 'DET': 2, 9 | 'ADP': 3, 10 | 'NUM': 4, 11 | 'AUX': 5, 12 | 'PRON': 6, 13 | 'ADJ': 7, 14 | 'ADV': 8, 15 | 'Loc_VIP': 9, 16 | 'Body_VIP': 10, 17 | 'Obj_VIP': 11, 18 | 'Act_VIP': 12, 19 | 'Desc_VIP': 13, 20 | 'OTHER': 14, 21 | } 22 | 23 | Loc_list = ('left', 'right', 'clockwise', 'counterclockwise', 'anticlockwise', 'forward', 'back', 'backward', 24 | 'up', 'down', 'straight', 'curve') 25 | 26 | Body_list = ('arm', 'chin', 'foot', 'feet', 'face', 'hand', 'mouth', 'leg', 'waist', 'eye', 'knee', 'shoulder', 'thigh') 27 | 28 | Obj_List = ('stair', 'dumbbell', 'chair', 'window', 'floor', 'car', 'ball', 'handrail', 'baseball', 'basketball', 'trashbin', 29 | 'yogaball', 'yogamat', 'suitcase', 'tablesmall', 'tablesquare', 'backpack', 'boxlong', 'boxsmall', 'boxtiny', 30 | 'boxlarge', 'boxmedium', 'plasticcontainer', 'stool', 
'toolbox', 'monitor', 'chairwood', 'chairblack' ) 31 | 32 | Act_list = ('walk', 'run', 'swing', 'pick', 'bring', 'kick', 'put', 'squat', 'throw', 'hop', 'dance', 'jump', 'turn', 33 | 'stumble', 'dance', 'stop', 'sit', 'lift', 'lower', 'raise', 'wash', 'stand', 'kneel', 'stroll', 34 | 'rub', 'bend', 'balance', 'flap', 'jog', 'shuffle', 'lean', 'rotate', 'spin', 'spread', 'climb', 'hold') 35 | 36 | Desc_list = ('slowly', 'carefully', 'fast', 'careful', 'slow', 'quickly', 'happy', 'angry', 'sad', 'happily', 37 | 'angrily', 'sadly') 38 | 39 | VIP_dict = { 40 | 'Loc_VIP': Loc_list, 41 | 'Body_VIP': Body_list, 42 | 'Obj_VIP': Obj_List, 43 | 'Act_VIP': Act_list, 44 | 'Desc_VIP': Desc_list, 45 | } 46 | 47 | 48 | class WordVectorizer(object): 49 | def __init__(self, meta_root, prefix): 50 | vectors = np.load(pjoin(meta_root, '%s_data.npy'%prefix)) 51 | words = pickle.load(open(pjoin(meta_root, '%s_words.pkl'%prefix), 'rb')) 52 | word2idx = pickle.load(open(pjoin(meta_root, '%s_idx.pkl'%prefix), 'rb')) 53 | self.word2vec = {w: vectors[word2idx[w]] for w in words} 54 | 55 | def _get_pos_ohot(self, pos): 56 | pos_vec = np.zeros(len(POS_enumerator)) 57 | if pos in POS_enumerator: 58 | pos_vec[POS_enumerator[pos]] = 1 59 | else: 60 | pos_vec[POS_enumerator['OTHER']] = 1 61 | return pos_vec 62 | 63 | def __len__(self): 64 | return len(self.word2vec) 65 | 66 | def __getitem__(self, item): 67 | 68 | word, pos = item.split('/') 69 | if word in self.word2vec: 70 | word_vec = self.word2vec[word] 71 | vip_pos = None 72 | for key, values in VIP_dict.items(): 73 | if word in values: 74 | vip_pos = key 75 | break 76 | if vip_pos is not None: 77 | pos_vec = self._get_pos_ohot(vip_pos) 78 | else: 79 | pos_vec = self._get_pos_ohot(pos) 80 | else: 81 | word_vec = self.word2vec['unk'] 82 | pos_vec = self._get_pos_ohot('OTHER') 83 | return word_vec, pos_vec -------------------------------------------------------------------------------- /data_loaders/get_data.py: -------------------------------------------------------------------------------- 1 | from torch.utils.data import DataLoader 2 | from data_loaders.tensors import collate as all_collate 3 | from data_loaders.tensors import t2m_collate, t2m_behave_collate, t2m_contact_collate, t2m_omomo_collate 4 | from dataclasses import dataclass 5 | 6 | def get_dataset_class(name): 7 | if name == "humanml": 8 | from data_loaders.humanml.data.dataset import HumanML3D 9 | return HumanML3D 10 | elif name == "behave": 11 | from data_loaders.behave.data.dataset import Behave 12 | return Behave 13 | else: 14 | raise ValueError(f'Unsupported dataset name [{name}]') 15 | 16 | @dataclass 17 | class DatasetConfig: 18 | name: str 19 | batch_size: int 20 | num_frames: int 21 | split: str = 'train' 22 | hml_mode: str = 'train' 23 | training_stage: int = 1 24 | 25 | 26 | def get_collate_fn(name, hml_mode='train', training_stage=1): 27 | if hml_mode == 'gt' and name in ["humanml", "kit"]: 28 | from data_loaders.humanml.data.dataset import collate_fn as t2m_eval_collate 29 | return t2m_eval_collate 30 | if hml_mode == 'gt' and name in ["behave"]: 31 | from data_loaders.behave.data.dataset import collate_fn as t2m_eval_collate 32 | return t2m_eval_collate 33 | if hml_mode == 'gt' and name in ["omomo"]: 34 | from data_loaders.omomo.data.dataset import collate_fn as t2m_eval_collate 35 | return t2m_eval_collate 36 | if name in ["humanml", "kit"]: 37 | return t2m_collate 38 | elif name in ["behave"] and training_stage==1: 39 | return t2m_contact_collate 40 | elif name in ["behave"] and 
training_stage==2: 41 | return t2m_behave_collate 42 | else: 43 | return all_collate 44 | 45 | 46 | def get_dataset(conf: DatasetConfig): 47 | DATA = get_dataset_class(conf.name) 48 | if conf.name in ["humanml", "behave", "omomo"]: 49 | dataset = DATA(split=conf.split, 50 | mode=conf.hml_mode, 51 | num_frames=conf.num_frames, 52 | training_stage=conf.training_stage) 53 | else: 54 | raise NotImplementedError() 55 | dataset = DATA(split=split, num_frames=num_frames) 56 | return dataset 57 | 58 | 59 | 60 | def get_dataset_loader(conf: DatasetConfig): 61 | # name, batch_size, num_frames, split='train', hml_mode='train' 62 | dataset = get_dataset(conf) 63 | collate = get_collate_fn(conf.name, conf.hml_mode, conf.training_stage) 64 | 65 | loader = DataLoader( 66 | dataset, batch_size=conf.batch_size, shuffle=True, 67 | num_workers=8, drop_last=True, collate_fn=collate, 68 | ) 69 | return loader -------------------------------------------------------------------------------- /data_loaders/humanml_utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | HML_JOINT_NAMES = [ 4 | 'pelvis', 5 | 'left_hip', 6 | 'right_hip', 7 | 'spine1', 8 | 'left_knee', 9 | 'right_knee', 10 | 'spine2', 11 | 'left_ankle', 12 | 'right_ankle', 13 | 'spine3', 14 | 'left_foot', 15 | 'right_foot', 16 | 'neck', 17 | 'left_collar', 18 | 'right_collar', 19 | 'head', 20 | 'left_shoulder', 21 | 'right_shoulder', 22 | 'left_elbow', 23 | 'right_elbow', 24 | 'left_wrist', 25 | 'right_wrist', 26 | ] 27 | 28 | NUM_HML_JOINTS = len(HML_JOINT_NAMES) # 22 SMPLH body joints 29 | 30 | HML_LOWER_BODY_JOINTS = [HML_JOINT_NAMES.index(name) for name in ['pelvis', 'left_hip', 'right_hip', 'left_knee', 'right_knee', 'left_ankle', 'right_ankle', 'left_foot', 'right_foot',]] 31 | SMPL_UPPER_BODY_JOINTS = [i for i in range(len(HML_JOINT_NAMES)) if i not in HML_LOWER_BODY_JOINTS] 32 | 33 | 34 | # Recover global angle and positions for rotation data 35 | # root_rot_velocity (B, seq_len, 1) 36 | # root_linear_velocity (B, seq_len, 2) 37 | # root_y (B, seq_len, 1) 38 | # ric_data (B, seq_len, (joint_num - 1)*3) 39 | # rot_data (B, seq_len, (joint_num - 1)*6) 40 | # local_velocity (B, seq_len, joint_num*3) 41 | # foot contact (B, seq_len, 4) 42 | HML_ROOT_BINARY = np.array([True] + [False] * (NUM_HML_JOINTS-1)) 43 | HML_ROOT_MASK = np.concatenate(([True]*(1+2+1), 44 | HML_ROOT_BINARY[1:].repeat(3), 45 | HML_ROOT_BINARY[1:].repeat(6), 46 | HML_ROOT_BINARY.repeat(3), 47 | [False] * 4)) 48 | HML_LOWER_BODY_JOINTS_BINARY = np.array([i in HML_LOWER_BODY_JOINTS for i in range(NUM_HML_JOINTS)]) 49 | HML_LOWER_BODY_MASK = np.concatenate(([True]*(1+2+1), 50 | HML_LOWER_BODY_JOINTS_BINARY[1:].repeat(3), 51 | HML_LOWER_BODY_JOINTS_BINARY[1:].repeat(6), 52 | HML_LOWER_BODY_JOINTS_BINARY.repeat(3), 53 | [True]*4)) 54 | HML_UPPER_BODY_MASK = ~HML_LOWER_BODY_MASK -------------------------------------------------------------------------------- /dataset/behave_opt.txt: -------------------------------------------------------------------------------- 1 | ------------ Options ------------- 2 | batch_size: 32 3 | checkpoints_dir: ./checkpoints 4 | dataset_name: t2m_behave 5 | decomp_name: Decomp_SP001_SM001_H512 6 | dim_att_vec: 512 7 | dim_dec_hidden: 1024 8 | dim_movement2_dec_hidden: 512 9 | dim_movement_dec_hidden: 512 10 | dim_movement_enc_hidden: 512 11 | dim_movement_latent: 512 12 | dim_msd_hidden: 512 13 | dim_pos_hidden: 1024 14 | dim_pri_hidden: 1024 15 | dim_seq_de_hidden: 512 16 | 
dim_seq_en_hidden: 512 17 | dim_text_hidden: 512 18 | dim_z: 128 19 | early_stop_count: 3 20 | estimator_mod: bigru 21 | eval_every_e: 5 22 | feat_bias: 5 23 | fixed_steps: 5 24 | gpu_id: 3 25 | input_z: False 26 | is_continue: True 27 | is_train: True 28 | lambda_fake: 10 29 | lambda_gan_l: 0.1 30 | lambda_gan_mt: 0.1 31 | lambda_gan_mv: 0.1 32 | lambda_kld: 0.01 33 | lambda_rec: 1 34 | lambda_rec_init: 1 35 | lambda_rec_mot: 1 36 | lambda_rec_mov: 1 37 | log_every: 50 38 | lr: 0.0002 39 | max_sub_epoch: 50 40 | max_text_len: 20 41 | n_layers_dec: 1 42 | n_layers_msd: 2 43 | n_layers_pos: 1 44 | n_layers_pri: 1 45 | n_layers_seq_de: 2 46 | n_layers_seq_en: 1 47 | name: Comp_v6_KLD01 48 | num_experts: 4 49 | save_every_e: 10 50 | save_latest: 500 51 | text_enc_mod: bigru 52 | tf_ratio: 0.4 53 | unit_length: 4 54 | -------------- End ---------------- 55 | -------------------------------------------------------------------------------- /dataset/t2m_mean.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/dataset/t2m_mean.npy -------------------------------------------------------------------------------- /dataset/t2m_std.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/dataset/t2m_std.npy -------------------------------------------------------------------------------- /model/__pycache__/afford_est.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/model/__pycache__/afford_est.cpython-37.pyc -------------------------------------------------------------------------------- /model/__pycache__/cfg_sampler.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/model/__pycache__/cfg_sampler.cpython-37.pyc -------------------------------------------------------------------------------- /model/__pycache__/cfg_sampler.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/model/__pycache__/cfg_sampler.cpython-38.pyc -------------------------------------------------------------------------------- /model/__pycache__/comMDM.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/model/__pycache__/comMDM.cpython-37.pyc -------------------------------------------------------------------------------- /model/__pycache__/contact.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/model/__pycache__/contact.cpython-37.pyc -------------------------------------------------------------------------------- /model/__pycache__/contact.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/model/__pycache__/contact.cpython-38.pyc 
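dataset/behave_opt.txt above is the plain-text options dump that get_opt() in data_loaders/behave/utils/get_opt.py reads back into a Namespace. The following self-contained sketch re-implements the same typing rules (a simplification, not the repo function) to show how 'key: value' lines become typed attributes:

import re
from argparse import Namespace

HEADER = '------------ Options -------------'
FOOTER = '-------------- End ----------------'

def parse_opt_lines(lines):
    # Simplified re-implementation of get_opt()'s typing rules: bools, then floats,
    # then ints, then plain strings; header/footer separator lines are skipped.
    opt = Namespace()
    for raw in lines:
        line = raw.strip()
        if line in (HEADER, FOOTER, ''):
            continue
        key, value = line.split(': ')
        if value in ('True', 'False'):
            setattr(opt, key, value == 'True')        # compare strings; bool('False') would be True
        elif re.fullmatch(r'[-+]?[0-9]+\.[0-9]+', value):
            setattr(opt, key, float(value))
        elif value.lstrip('+-').isdigit():
            setattr(opt, key, int(value))
        else:
            setattr(opt, key, value)
    return opt

sample = [HEADER, 'batch_size: 32', 'lr: 0.0002', 'is_train: True', 'name: Comp_v6_KLD01', FOOTER]
opt = parse_opt_lines(sample)
print(opt.batch_size, opt.lr, opt.is_train, opt.name)    # 32 0.0002 True Comp_v6_KLD01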
-------------------------------------------------------------------------------- /model/__pycache__/hoi_contact.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/model/__pycache__/hoi_contact.cpython-37.pyc -------------------------------------------------------------------------------- /model/__pycache__/hoi_diff.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/model/__pycache__/hoi_diff.cpython-37.pyc -------------------------------------------------------------------------------- /model/__pycache__/joint_model.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/model/__pycache__/joint_model.cpython-37.pyc -------------------------------------------------------------------------------- /model/__pycache__/mdm.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/model/__pycache__/mdm.cpython-37.pyc -------------------------------------------------------------------------------- /model/__pycache__/mdm.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/model/__pycache__/mdm.cpython-38.pyc -------------------------------------------------------------------------------- /model/__pycache__/mdm_cross_obj.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/model/__pycache__/mdm_cross_obj.cpython-37.pyc -------------------------------------------------------------------------------- /model/__pycache__/mdm_cross_obj2.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/model/__pycache__/mdm_cross_obj2.cpython-37.pyc -------------------------------------------------------------------------------- /model/__pycache__/mdm_hoi_baseline.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/model/__pycache__/mdm_hoi_baseline.cpython-37.pyc -------------------------------------------------------------------------------- /model/__pycache__/mdm_obj.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/model/__pycache__/mdm_obj.cpython-37.pyc -------------------------------------------------------------------------------- /model/__pycache__/mdm_obj2.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/model/__pycache__/mdm_obj2.cpython-37.pyc -------------------------------------------------------------------------------- /model/__pycache__/points_encoder.cpython-37.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/model/__pycache__/points_encoder.cpython-37.pyc -------------------------------------------------------------------------------- /model/__pycache__/points_encoder.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/model/__pycache__/points_encoder.cpython-38.pyc -------------------------------------------------------------------------------- /model/__pycache__/rotation2xyz.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/model/__pycache__/rotation2xyz.cpython-311.pyc -------------------------------------------------------------------------------- /model/__pycache__/rotation2xyz.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/model/__pycache__/rotation2xyz.cpython-37.pyc -------------------------------------------------------------------------------- /model/__pycache__/rotation2xyz.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/model/__pycache__/rotation2xyz.cpython-38.pyc -------------------------------------------------------------------------------- /model/__pycache__/smpl.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/model/__pycache__/smpl.cpython-37.pyc -------------------------------------------------------------------------------- /model/__pycache__/smpl.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/model/__pycache__/smpl.cpython-38.pyc -------------------------------------------------------------------------------- /model/cfg_sampler.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import torch.nn as nn 4 | from copy import deepcopy 5 | 6 | # A wrapper model for Classifier-free guidance **SAMPLING** only 7 | # https://arxiv.org/abs/2207.12598 8 | class ClassifierFreeSampleModel(nn.Module): 9 | 10 | def __init__(self, model): 11 | super().__init__() 12 | self.model = model # model is the actual model to run 13 | 14 | assert self.model.cond_mask_prob > 0, 'Cannot run a guided diffusion on a model that has not been trained with no conditions' 15 | 16 | # pointers to inner model 17 | # self.rot2xyz = self.model.rot2xyz 18 | # self.translation = self.model.translation 19 | self.njoints = self.model.njoints 20 | self.nfeats = self.model.nfeats 21 | # self.data_rep = self.model.data_rep 22 | self.cond_mode = self.model.cond_mode 23 | 24 | def forward(self, x, timesteps, y=None): 25 | cond_mode = self.model.cond_mode 26 | assert cond_mode in ['text', 'action'] 27 | y_uncond = deepcopy(y) 28 | y_uncond['uncond'] = True 29 | out = self.model(x, timesteps, y) 30 | out_uncond = self.model(x, timesteps, y_uncond) 31 | 32 | 
return out_uncond + (y['scale'].view(-1, 1, 1, 1) * (out - out_uncond)) 33 | 34 | 35 | 36 | class UnconditionedModel(nn.Module): 37 | """This is a wrapper around a model that forces unconditional sampling. 38 | Note that when accessing the model's attributes, it returns the wrapped model's attributes. 39 | This does not apply to functions, though.""" 40 | def __init__(self, model): 41 | super().__init__() 42 | vars(self)['model'] = model 43 | assert model.cond_mask_prob > 0, 'Cannot run unconditional generation on a model that has not been trained with no conditions' 44 | 45 | def __getattr__(self, name: str): 46 | model = vars(self)['model'] 47 | return getattr(model, name) 48 | 49 | 50 | def forward(self, x, timesteps, y=None): 51 | y_uncond = deepcopy(y) 52 | y_uncond['uncond'] = True 53 | out_uncond = self.model(x, timesteps, y_uncond) 54 | return out_uncond 55 | 56 | def parameters(self): 57 | return self.model.parameters() 58 | 59 | 60 | 61 | 62 | def wrap_model(model, args): 63 | if args.guidance_param not in [0., 1.]: 64 | return ClassifierFreeSampleModel(model) # wrapping model with the classifier-free sampler 65 | elif args.guidance_param == 0: 66 | return UnconditionedModel(model) 67 | else: 68 | return model 69 | -------------------------------------------------------------------------------- /model/points_encoder.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from pointnet2_ops.pointnet2_modules import PointnetSAModuleMSG 4 | 5 | 6 | # https://github.com/erikwijmans/Pointnet2_PyTorch/blob/master/pointnet2/models/pointnet2_msg_sem.py 7 | class PointNet2Encoder(nn.Module): 8 | """ 9 | c_in: input point feature dimension excluding xyz 10 | """ 11 | def __init__(self, c_in=6, c_out=128, num_keypoints=256): 12 | super(PointNet2Encoder, self).__init__() 13 | self.SA_modules = nn.ModuleList() 14 | self.SA_modules.append( 15 | PointnetSAModuleMSG( 16 | npoint=1024, 17 | radii=[0.05, 0.1], 18 | nsamples=[16, 32], 19 | mlps=[[c_in, 16, 16, 32], [c_in, 32, 32, 64]], 20 | use_xyz=True, 21 | ) 22 | ) 23 | c_out_0 = 32 + 64 24 | 25 | c_in = c_out_0 26 | self.SA_modules.append( 27 | PointnetSAModuleMSG( 28 | npoint=num_keypoints, # 256 29 | radii=[0.1, 0.2], 30 | nsamples=[16, 32], 31 | mlps=[[c_in, 64, 64, 128], [c_in, 64, 96, 128]], 32 | use_xyz=True, 33 | ) 34 | ) 35 | c_out_1 = 128 + 128 36 | 37 | self.num_keypoints = num_keypoints 38 | self.c_out = c_out 39 | self.Linear = nn.Linear(c_out_1, c_out - 3) 40 | 41 | def _break_up_pc(self, pc): 42 | xyz = pc[..., 0:3].contiguous() 43 | features = pc[..., 3:].transpose(1, 2).contiguous() if pc.size(-1) > 3 else None 44 | 45 | return xyz, features 46 | 47 | def forward(self, pointcloud): 48 | r""" 49 | Forward pass of the network 50 | Parameters 51 | ---------- 52 | pointcloud: Variable(torch.cuda.FloatTensor) 53 | (B, N, 3 + input_channels) tensor 54 | Point cloud to run predictions on 55 | Each point in the point-cloud MUST 56 | be formatted as (x, y, z, features...)
57 | """ 58 | # B, P, C = pointcloud.shape 59 | # pointcloud = pointcloud.reshape(B*I, P, C) 60 | xyz, features = self._break_up_pc(pointcloud) 61 | 62 | l_xyz, l_features = [xyz], [features] 63 | for i in range(len(self.SA_modules)): 64 | li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i]) 65 | l_xyz.append(li_xyz) 66 | l_features.append(li_features) 67 | 68 | # print(l_xyz[-1].shape, l_features[-1].shape) 69 | local_keypoints = torch.cat((l_xyz[-1], 70 | self.Linear(l_features[-1].transpose(1, 2))), dim=-1) # B*I x Pb x C 71 | return local_keypoints #.reshape(B, I, self.num_keypoints, self.c_out) -------------------------------------------------------------------------------- /model/rotation2xyz.py: -------------------------------------------------------------------------------- 1 | # This code is based on https://github.com/Mathux/ACTOR.git 2 | import torch 3 | import utils.rotation_conversions as geometry 4 | 5 | 6 | from model.smpl import SMPL, JOINTSTYPE_ROOT 7 | # from .get_model import JOINTSTYPES 8 | JOINTSTYPES = ["a2m", "a2mpl", "smpl", "vibe", "vertices"] 9 | 10 | 11 | class Rotation2xyz: 12 | def __init__(self, device, dataset='amass'): 13 | self.device = device 14 | self.dataset = dataset 15 | self.smpl_model = SMPL().eval().to(device) 16 | 17 | def __call__(self, x, mask, pose_rep, translation, glob, 18 | jointstype, vertstrans, betas=None, beta=0, 19 | glob_rot=None, get_rotations_back=False, **kwargs): 20 | if pose_rep == "xyz": 21 | return x 22 | 23 | if mask is None: 24 | mask = torch.ones((x.shape[0], x.shape[-1]), dtype=bool, device=x.device) 25 | 26 | if not glob and glob_rot is None: 27 | raise TypeError("You must specify global rotation if glob is False") 28 | 29 | if jointstype not in JOINTSTYPES: 30 | raise NotImplementedError("This jointstype is not implemented.") 31 | 32 | if translation: 33 | x_translations = x[:, -1, :3] 34 | x_rotations = x[:, :-1] 35 | else: 36 | x_rotations = x 37 | 38 | x_rotations = x_rotations.permute(0, 3, 1, 2) 39 | nsamples, time, njoints, feats = x_rotations.shape 40 | 41 | # Compute rotations (convert only masked sequences output) 42 | if pose_rep == "rotvec": 43 | rotations = geometry.axis_angle_to_matrix(x_rotations[mask]) 44 | elif pose_rep == "rotmat": 45 | rotations = x_rotations[mask].view(-1, njoints, 3, 3) 46 | elif pose_rep == "rotquat": 47 | rotations = geometry.quaternion_to_matrix(x_rotations[mask]) 48 | elif pose_rep == "rot6d": 49 | rotations = geometry.rotation_6d_to_matrix(x_rotations[mask]) 50 | else: 51 | raise NotImplementedError("No geometry for this one.") 52 | 53 | if not glob: 54 | global_orient = torch.tensor(glob_rot, device=x.device) 55 | global_orient = geometry.axis_angle_to_matrix(global_orient).view(1, 1, 3, 3) 56 | global_orient = global_orient.repeat(len(rotations), 1, 1, 1) 57 | else: 58 | global_orient = rotations[:, 0] 59 | rotations = rotations[:, 1:] 60 | 61 | if betas is None: 62 | betas = torch.zeros([rotations.shape[0], self.smpl_model.num_betas], 63 | dtype=rotations.dtype, device=rotations.device) 64 | betas[:, 1] = beta 65 | # import ipdb; ipdb.set_trace() 66 | 67 | out = self.smpl_model(body_pose=rotations, global_orient=global_orient, betas=betas) 68 | 69 | # get the desirable joints 70 | joints = out[jointstype] 71 | 72 | x_xyz = torch.empty(nsamples, time, joints.shape[1], 3, device=x.device, dtype=x.dtype) 73 | x_xyz[~mask] = 0 74 | x_xyz[mask] = joints 75 | 76 | x_xyz = x_xyz.permute(0, 2, 3, 1).contiguous() 77 | 78 | # the first translation root at the origin on the 
prediction 79 | if jointstype != "vertices": 80 | rootindex = JOINTSTYPE_ROOT[jointstype] 81 | x_xyz = x_xyz - x_xyz[:, [rootindex], :, :] 82 | 83 | if translation and vertstrans: 84 | # the first translation root at the origin 85 | x_translations = x_translations - x_translations[:, :, [0]] 86 | 87 | # add the translation to all the joints 88 | x_xyz = x_xyz + x_translations[:, None, :, :] 89 | 90 | if get_rotations_back: 91 | return x_xyz, rotations, global_orient 92 | else: 93 | return x_xyz 94 | -------------------------------------------------------------------------------- /model/smpl.py: -------------------------------------------------------------------------------- 1 | # This code is based on https://github.com/Mathux/ACTOR.git 2 | import numpy as np 3 | import torch 4 | 5 | import contextlib 6 | 7 | from smplx import SMPLLayer as _SMPLLayer 8 | from smplx.lbs import vertices2joints 9 | 10 | 11 | # action2motion_joints = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 21, 24, 38] 12 | # change 0 and 8 13 | action2motion_joints = [8, 1, 2, 3, 4, 5, 6, 7, 0, 9, 10, 11, 12, 13, 14, 21, 24, 38] 14 | 15 | from utils.config import SMPL_MODEL_PATH, JOINT_REGRESSOR_TRAIN_EXTRA 16 | 17 | JOINTSTYPE_ROOT = {"a2m": 0, # action2motion 18 | "smpl": 0, 19 | "a2mpl": 0, # set(smpl, a2m) 20 | "vibe": 8} # 0 is the 8 position: OP MidHip below 21 | 22 | JOINT_MAP = { 23 | 'OP Nose': 24, 'OP Neck': 12, 'OP RShoulder': 17, 24 | 'OP RElbow': 19, 'OP RWrist': 21, 'OP LShoulder': 16, 25 | 'OP LElbow': 18, 'OP LWrist': 20, 'OP MidHip': 0, 26 | 'OP RHip': 2, 'OP RKnee': 5, 'OP RAnkle': 8, 27 | 'OP LHip': 1, 'OP LKnee': 4, 'OP LAnkle': 7, 28 | 'OP REye': 25, 'OP LEye': 26, 'OP REar': 27, 29 | 'OP LEar': 28, 'OP LBigToe': 29, 'OP LSmallToe': 30, 30 | 'OP LHeel': 31, 'OP RBigToe': 32, 'OP RSmallToe': 33, 'OP RHeel': 34, 31 | 'Right Ankle': 8, 'Right Knee': 5, 'Right Hip': 45, 32 | 'Left Hip': 46, 'Left Knee': 4, 'Left Ankle': 7, 33 | 'Right Wrist': 21, 'Right Elbow': 19, 'Right Shoulder': 17, 34 | 'Left Shoulder': 16, 'Left Elbow': 18, 'Left Wrist': 20, 35 | 'Neck (LSP)': 47, 'Top of Head (LSP)': 48, 36 | 'Pelvis (MPII)': 49, 'Thorax (MPII)': 50, 37 | 'Spine (H36M)': 51, 'Jaw (H36M)': 52, 38 | 'Head (H36M)': 53, 'Nose': 24, 'Left Eye': 26, 39 | 'Right Eye': 25, 'Left Ear': 28, 'Right Ear': 27 40 | } 41 | 42 | JOINT_NAMES = [ 43 | 'OP Nose', 'OP Neck', 'OP RShoulder', 44 | 'OP RElbow', 'OP RWrist', 'OP LShoulder', 45 | 'OP LElbow', 'OP LWrist', 'OP MidHip', 46 | 'OP RHip', 'OP RKnee', 'OP RAnkle', 47 | 'OP LHip', 'OP LKnee', 'OP LAnkle', 48 | 'OP REye', 'OP LEye', 'OP REar', 49 | 'OP LEar', 'OP LBigToe', 'OP LSmallToe', 50 | 'OP LHeel', 'OP RBigToe', 'OP RSmallToe', 'OP RHeel', 51 | 'Right Ankle', 'Right Knee', 'Right Hip', 52 | 'Left Hip', 'Left Knee', 'Left Ankle', 53 | 'Right Wrist', 'Right Elbow', 'Right Shoulder', 54 | 'Left Shoulder', 'Left Elbow', 'Left Wrist', 55 | 'Neck (LSP)', 'Top of Head (LSP)', 56 | 'Pelvis (MPII)', 'Thorax (MPII)', 57 | 'Spine (H36M)', 'Jaw (H36M)', 58 | 'Head (H36M)', 'Nose', 'Left Eye', 59 | 'Right Eye', 'Left Ear', 'Right Ear' 60 | ] 61 | 62 | 63 | # adapted from VIBE/SPIN to output smpl_joints, vibe joints and action2motion joints 64 | class SMPL(_SMPLLayer): 65 | """ Extension of the official SMPL implementation to support more joints """ 66 | 67 | def __init__(self, model_path=SMPL_MODEL_PATH, **kwargs): 68 | kwargs["model_path"] = model_path 69 | 70 | # remove the verbosity for the 10-shapes beta parameters 71 | with contextlib.redirect_stdout(None): 72 | super(SMPL, 
self).__init__(**kwargs) 73 | 74 | J_regressor_extra = np.load(JOINT_REGRESSOR_TRAIN_EXTRA) 75 | self.register_buffer('J_regressor_extra', torch.tensor(J_regressor_extra, dtype=torch.float32)) 76 | vibe_indexes = np.array([JOINT_MAP[i] for i in JOINT_NAMES]) 77 | a2m_indexes = vibe_indexes[action2motion_joints] 78 | smpl_indexes = np.arange(24) 79 | a2mpl_indexes = np.unique(np.r_[smpl_indexes, a2m_indexes]) 80 | 81 | self.maps = {"vibe": vibe_indexes, 82 | "a2m": a2m_indexes, 83 | "smpl": smpl_indexes, 84 | "a2mpl": a2mpl_indexes} 85 | 86 | def forward(self, *args, **kwargs): 87 | smpl_output = super(SMPL, self).forward(*args, **kwargs) 88 | 89 | extra_joints = vertices2joints(self.J_regressor_extra, smpl_output.vertices) 90 | all_joints = torch.cat([smpl_output.joints, extra_joints], dim=1) 91 | 92 | output = {"vertices": smpl_output.vertices} 93 | 94 | for joinstype, indexes in self.maps.items(): 95 | output[joinstype] = all_joints[:, indexes] 96 | 97 | return output -------------------------------------------------------------------------------- /prepare/download_glove.sh: -------------------------------------------------------------------------------- 1 | echo -e "Downloading glove (in use by the evaluators, not by MDM itself)" 2 | gdown --fuzzy https://drive.google.com/file/d/1cmXKUT31pqd7_XpJAiWEo1K81TMYHA5n/view?usp=sharing 3 | rm -rf glove 4 | 5 | unzip glove.zip 6 | echo -e "Cleaning\n" 7 | rm glove.zip 8 | 9 | echo -e "Downloading done!" -------------------------------------------------------------------------------- /prepare/download_smpl_files.sh: -------------------------------------------------------------------------------- 1 | mkdir -p body_models 2 | cd body_models/ 3 | 4 | echo -e "The smpl files will be stored in the 'body_models/smpl/' folder\n" 5 | gdown "https://drive.google.com/uc?id=1INYlGA76ak_cKGzvpOV2Pe6RkYTlXTW2" 6 | rm -rf smpl 7 | 8 | unzip smpl.zip 9 | echo -e "Cleaning\n" 10 | rm smpl.zip 11 | 12 | echo -e "Downloading done!" -------------------------------------------------------------------------------- /prepare/download_t2m_evaluators.sh: -------------------------------------------------------------------------------- 1 | echo -e "Downloading T2M evaluators" 2 | # gdown --fuzzy https://drive.google.com/file/d/1DSaKqWX2HlwBtVH5l7DdW96jeYUIXsOP/view 3 | # gdown --fuzzy https://drive.google.com/file/d/1tX79xk0fflp07EZ660Xz1RAFE33iEyJR/view 4 | gdown 1AYsmEG8I3fAAoraT4vau0GnesWBWyeT8 5 | tar -xvzf t2m.tar.gz 6 | # rm -rf t2m 7 | # rm -rf kit 8 | 9 | # unzip t2m.zip 10 | # unzip kit.zip 11 | echo -e "Cleaning\n" 12 | # rm t2m.zip 13 | # rm kit.zip 14 | rm t2m.tar.gz 15 | 16 | echo -e "Downloading done!" -------------------------------------------------------------------------------- /prepare/process_behave_raw.sh: -------------------------------------------------------------------------------- 1 | echo -e "spliting raw behave dataset" 2 | 3 | # cd dataset 4 | 5 | # mkdir behave-30fps-params 6 | 7 | # tar -xvf behave-30fps-params-v1.tar -C ./behave-30fps-params/ 8 | # echo -e "Cleaning\n" 9 | # rm behave-30fps-params-v1.tar 10 | 11 | # echo -e "Cleaning Done!\n" 12 | 13 | echo -e "spliting data now!\n" 14 | 15 | # cd .. 16 | python utils/behave_process.py 17 | 18 | echo -e "Done!" 
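model/cfg_sampler.py earlier in this listing implements classifier-free guidance at sampling time: the denoiser is run once with the text condition and once unconditionally, and the two predictions are blended with a per-sample scale. Below is a self-contained toy sketch of that blending step; the ToyDenoiser is a stand-in, and only the guidance formula out_uncond + scale * (out - out_uncond) mirrors ClassifierFreeSampleModel.forward:

import torch
import torch.nn as nn

class ToyDenoiser(nn.Module):
    # Stand-in for MDM/HOIDiff; the attributes below are the ones
    # ClassifierFreeSampleModel expects (cond_mask_prob, cond_mode, njoints, nfeats).
    cond_mask_prob = 0.1
    cond_mode = 'text'
    njoints, nfeats = 263, 1

    def forward(self, x, timesteps, y=None):
        shift = 0.0 if y.get('uncond', False) else 1.0   # conditioning shifts the toy output
        return x + shift

model = ToyDenoiser()
x = torch.zeros(2, 263, 1, 196)                          # (batch, njoints, nfeats, frames)
y = {'text': ['a person lifts a box'] * 2, 'scale': torch.full((2,), 2.5)}

out = model(x, None, y)                                  # conditional prediction
out_uncond = model(x, None, {**y, 'uncond': True})       # unconditional prediction
guided = out_uncond + y['scale'].view(-1, 1, 1, 1) * (out - out_uncond)
print(guided.mean().item())                              # 2.5: guidance amplifies the conditional direction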
-------------------------------------------------------------------------------- /train/hoi_diff.py: -------------------------------------------------------------------------------- 1 | # This code is based on https://github.com/openai/guided-diffusion 2 | """ 3 | Train a diffusion model on images. 4 | """ 5 | 6 | import os 7 | import json 8 | import torch 9 | from utils.fixseed import fixseed 10 | from utils.parser_util import train_args 11 | from utils import dist_util 12 | from train.training_loop import TrainLoop 13 | from data_loaders.get_data import DatasetConfig, get_dataset_loader 14 | from utils.model_util import create_model_and_diffusion, load_pretrained_mdm, load_split_mdm 15 | from train.train_platforms import ClearmlPlatform, TensorboardPlatform, NoPlatform # required for the eval operation 16 | from model.mdm import MDM 17 | from diffusion.gaussian_diffusion import LocalMotionDiffusion 18 | 19 | def main(): 20 | args = train_args() 21 | fixseed(args.seed) 22 | train_platform_type = eval(args.train_platform_type) 23 | train_platform = train_platform_type(args.save_dir) 24 | train_platform.report_args(args, name='Args') 25 | 26 | if args.save_dir is None: 27 | raise FileNotFoundError('save_dir was not specified.') 28 | elif os.path.exists(args.save_dir) and not args.overwrite: 29 | raise FileExistsError('save_dir [{}] already exists.'.format(args.save_dir)) 30 | elif not os.path.exists(args.save_dir): 31 | os.makedirs(args.save_dir) 32 | args_path = os.path.join(args.save_dir, 'args.json') 33 | with open(args_path, 'w') as fw: 34 | json.dump(vars(args), fw, indent=4, sort_keys=True) 35 | 36 | dist_util.setup_dist(args.device) 37 | 38 | print("creating data loader...") 39 | data_conf = DatasetConfig( 40 | name=args.dataset, 41 | batch_size=args.batch_size, 42 | num_frames=args.num_frames, 43 | # use_global=args.global_3d, 44 | training_stage=2 45 | ) 46 | data = get_dataset_loader(data_conf) 47 | 48 | print("creating model and diffusion...") 49 | from model.hoi_diff import HOIDiff 50 | model, diffusion = create_model_and_diffusion(args, data, ModelClass=HOIDiff, DiffusionClass=LocalMotionDiffusion) 51 | 52 | 53 | print(f"Loading checkpoints from [{args.pretrained_path}]...") 54 | state_dict = torch.load(args.pretrained_path, map_location='cpu') 55 | 56 | 57 | if args.multi_backbone_split == 0: 58 | load_pretrained_mdm(model, state_dict) 59 | else: 60 | load_split_mdm(model, state_dict, args.multi_backbone_split) 61 | 62 | 63 | model.to(dist_util.dev()) 64 | model.rot2xyz.smpl_model.eval() 65 | 66 | print('Total params: %.2fM' % (sum(p.numel() for p in model.parameters_wo_clip()) / 1000000.0)) 67 | print('Trainable params: %.2fM' % (sum(p.numel() for p in model.trainable_parameters()) / 1000000.0)) 68 | print("Training...") 69 | TrainLoop(args, train_platform, model, diffusion, data).run_loop() 70 | train_platform.close() 71 | 72 | if __name__ == "__main__": 73 | main() 74 | -------------------------------------------------------------------------------- /train/train_affordance.py: -------------------------------------------------------------------------------- 1 | # This code is based on https://github.com/openai/guided-diffusion 2 | """ 3 | Train a diffusion model on images. 
4 | """ 5 | 6 | import os 7 | import json 8 | import torch 9 | from utils.fixseed import fixseed 10 | from utils.parser_util import train_args 11 | from utils import dist_util 12 | from train.training_loop import TrainLoop 13 | from data_loaders.get_data import DatasetConfig, get_dataset_loader 14 | from utils.model_util import create_model_and_diffusion, load_pretrained_mdm 15 | from train.train_platforms import ClearmlPlatform, TensorboardPlatform, NoPlatform # required for the eval operation 16 | from model.afford_est import AffordEstimation 17 | from diffusion.gaussian_diffusion import AffordDiffusion 18 | 19 | def main(): 20 | args = train_args() 21 | fixseed(args.seed) 22 | train_platform_type = eval(args.train_platform_type) 23 | train_platform = train_platform_type(args.save_dir) 24 | train_platform.report_args(args, name='Args') 25 | 26 | if args.save_dir is None: 27 | raise FileNotFoundError('save_dir was not specified.') 28 | elif os.path.exists(args.save_dir) and not args.overwrite: 29 | raise FileExistsError('save_dir [{}] already exists.'.format(args.save_dir)) 30 | elif not os.path.exists(args.save_dir): 31 | os.makedirs(args.save_dir) 32 | args_path = os.path.join(args.save_dir, 'args.json') 33 | with open(args_path, 'w') as fw: 34 | json.dump(vars(args), fw, indent=4, sort_keys=True) 35 | 36 | dist_util.setup_dist(args.device) 37 | 38 | print("creating data loader...") 39 | data_conf = DatasetConfig( 40 | name=args.dataset, 41 | batch_size=args.batch_size, 42 | num_frames=args.num_frames, 43 | training_stage=1 44 | ) 45 | data = get_dataset_loader(data_conf) 46 | 47 | print("creating model and diffusion...") 48 | model, diffusion = create_model_and_diffusion(args, data, ModelClass=AffordEstimation, DiffusionClass=AffordDiffusion) 49 | model.to(dist_util.dev()) 50 | 51 | 52 | print('Total params: %.2fM' % (sum(p.numel() for p in model.parameters_wo_clip()) / 1000000.0)) 53 | print("Training...") 54 | TrainLoop(args, train_platform, model, diffusion, data).run_loop() 55 | train_platform.close() 56 | 57 | if __name__ == "__main__": 58 | main() 59 | -------------------------------------------------------------------------------- /train/train_platforms.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | class TrainPlatform: 4 | def __init__(self, save_dir): 5 | pass 6 | 7 | def report_scalar(self, name, value, iteration, group_name=None): 8 | pass 9 | 10 | def report_args(self, args, name): 11 | pass 12 | 13 | def close(self): 14 | pass 15 | 16 | 17 | class ClearmlPlatform(TrainPlatform): 18 | def __init__(self, save_dir): 19 | from clearml import Task 20 | path, name = os.path.split(save_dir) 21 | self.task = Task.init(project_name='motion_diffusion', 22 | task_name=name, 23 | output_uri=path) 24 | self.logger = self.task.get_logger() 25 | 26 | def report_scalar(self, name, value, iteration, group_name): 27 | self.logger.report_scalar(title=group_name, series=name, iteration=iteration, value=value) 28 | 29 | def report_args(self, args, name): 30 | self.task.connect(args, name=name) 31 | 32 | def close(self): 33 | self.task.close() 34 | 35 | 36 | class TensorboardPlatform(TrainPlatform): 37 | def __init__(self, save_dir): 38 | from torch.utils.tensorboard import SummaryWriter 39 | self.writer = SummaryWriter(log_dir=save_dir) 40 | 41 | def report_scalar(self, name, value, iteration, group_name=None): 42 | self.writer.add_scalar(f'{group_name}/{name}', value, iteration) 43 | 44 | def close(self): 45 | self.writer.close() 46 
| 47 | 48 | class NoPlatform(TrainPlatform): 49 | def __init__(self, save_dir): 50 | pass 51 | 52 | 53 | -------------------------------------------------------------------------------- /utils/PYTORCH3D_LICENSE: -------------------------------------------------------------------------------- 1 | BSD License 2 | 3 | For PyTorch3D software 4 | 5 | Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 6 | 7 | Redistribution and use in source and binary forms, with or without modification, 8 | are permitted provided that the following conditions are met: 9 | 10 | * Redistributions of source code must retain the above copyright notice, this 11 | list of conditions and the following disclaimer. 12 | 13 | * Redistributions in binary form must reproduce the above copyright notice, 14 | this list of conditions and the following disclaimer in the documentation 15 | and/or other materials provided with the distribution. 16 | 17 | * Neither the name Facebook nor the names of its contributors may be used to 18 | endorse or promote products derived from this software without specific 19 | prior written permission. 20 | 21 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 22 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 23 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 24 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR 25 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 26 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 27 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 28 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 30 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
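train/train_platforms.py above gives the training scripts a pluggable logging backend: hoi_diff.py and train_affordance.py resolve train_platform_type by name and only ever call report_args, report_scalar, and close. A small usage sketch, assuming the repository root is on PYTHONPATH (TensorboardPlatform additionally requires tensorboard to be installed):

from train.train_platforms import NoPlatform, TensorboardPlatform

def dummy_training_loop(platform, steps=3):
    for step in range(steps):
        loss = 1.0 / (step + 1)
        platform.report_scalar('loss', loss, iteration=step, group_name='train')
    platform.close()

if __name__ == '__main__':
    dummy_training_loop(NoPlatform(save_dir=None))                       # every call is a no-op
    # dummy_training_loop(TensorboardPlatform(save_dir='./runs/demo'))   # writes TensorBoard events instead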
-------------------------------------------------------------------------------- /utils/cal_mean_variance.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import sys 3 | import os 4 | from os.path import join as pjoin 5 | 6 | 7 | # root_rot_velocity (B, seq_len, 1) 8 | # root_linear_velocity (B, seq_len, 2) 9 | # root_y (B, seq_len, 1) 10 | # ric_data (B, seq_len, (joint_num - 1)*3) 11 | # rot_data (B, seq_len, (joint_num - 1)*6) 12 | # local_velocity (B, seq_len, joint_num*3) 13 | # foot contact (B, seq_len, 4) 14 | def mean_variance(data_dir, save_dir, joints_num): 15 | file_list = os.listdir(data_dir) 16 | data_list = [] 17 | 18 | for file in file_list: 19 | data = np.load(pjoin(data_dir, file)) 20 | if np.isnan(data).any(): 21 | print(file) 22 | continue 23 | data_list.append(data) 24 | 25 | data = np.concatenate(data_list, axis=0) 26 | print(data.shape) 27 | Mean = data.mean(axis=0) 28 | Std = data.std(axis=0) 29 | Std[0:1] = Std[0:1].mean() / 1.0 30 | Std[1:3] = Std[1:3].mean() / 1.0 31 | Std[3:4] = Std[3:4].mean() / 1.0 32 | Std[4: 4+(joints_num - 1) * 3] = Std[4: 4+(joints_num - 1) * 3].mean() / 1.0 33 | Std[4+(joints_num - 1) * 3: 4+(joints_num - 1) * 9] = Std[4+(joints_num - 1) * 3: 4+(joints_num - 1) * 9].mean() / 1.0 34 | Std[4+(joints_num - 1) * 9: 4+(joints_num - 1) * 9 + joints_num*3] = Std[4+(joints_num - 1) * 9: 4+(joints_num - 1) * 9 + joints_num*3].mean() / 1.0 35 | Std[4 + (joints_num - 1) * 9 + joints_num * 3: 4 + (joints_num - 1) * 9 + joints_num * 3 +4 ] = Std[4 + (joints_num - 1) * 9 + joints_num * 3: 4 + (joints_num - 1) * 9 + joints_num * 3 + 4].mean() / 1.0 36 | Std[4 + (joints_num - 1) * 9 + joints_num * 3 +4 : 4 + (joints_num - 1) * 9 + joints_num * 3 + 7] = Std[4 + (joints_num - 1) * 9 + joints_num * 3 +4 :4 + (joints_num - 1) * 9 + joints_num * 3 +7 ].mean() / 1.0 37 | Std[4 + (joints_num - 1) * 9 + joints_num * 3 +7 : ] = Std[4 + (joints_num - 1) * 9 + joints_num * 3 +7 : ].mean() / 1.0 38 | assert 8 + (joints_num - 1) * 9 + joints_num * 3 + 6 == Std.shape[-1] 39 | 40 | 41 | np.save(pjoin(save_dir, 'Mean_local.npy'), Mean) 42 | np.save(pjoin(save_dir, 'Std_local.npy'), Std) 43 | 44 | return Mean, Std 45 | 46 | 47 | if __name__ == '__main__': 48 | data_dir = './dataset/behave_t2m/new_joint_vecs_local/' 49 | save_dir = './dataset/behave_t2m/' 50 | mean, std = mean_variance(data_dir, save_dir, 22) 51 | # print(mean) 52 | # print(Std) -------------------------------------------------------------------------------- /utils/common/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/utils/common/__init__.py -------------------------------------------------------------------------------- /utils/config.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | SMPL_DATA_PATH = "./body_models/smpl" 4 | 5 | SMPL_KINTREE_PATH = os.path.join(SMPL_DATA_PATH, "kintree_table.pkl") 6 | SMPL_MODEL_PATH = os.path.join(SMPL_DATA_PATH, "SMPL_NEUTRAL.pkl") 7 | JOINT_REGRESSOR_TRAIN_EXTRA = os.path.join(SMPL_DATA_PATH, 'J_regressor_extra.npy') 8 | 9 | ROT_CONVENTION_TO_ROT_NUMBER = { 10 | 'legacy': 23, 11 | 'no_hands': 21, 12 | 'full_hands': 51, 13 | 'mitten_hands': 33, 14 | } 15 | 16 | GENDERS = ['neutral', 'male', 'female'] 17 | NUM_BETAS = 10 -------------------------------------------------------------------------------- 
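The comment block at the top of utils/cal_mean_variance.py above spells out the per-frame feature layout. The short check below confirms it adds up to the dim_pose = 263 + 6 used for t2m_behave in data_loaders/behave/utils/get_opt.py and to the total asserted in mean_variance(); treating the six extra channels as two groups of three follows the Std slicing above, while their interpretation as object-motion channels is an inference, not stated in this file:

joints_num = 22
root = 1 + 2 + 1                    # root_rot_velocity + root_linear_velocity + root_y
ric = (joints_num - 1) * 3          # ric_data: local joint positions
rot = (joints_num - 1) * 6          # rot_data: 6d joint rotations
vel = joints_num * 3                # local_velocity
contact = 4                         # foot contact labels
human = root + ric + rot + vel + contact
extra = 6                           # additional channels, normalized as two groups of three above
print(human, human + extra)         # 263 269
assert human + extra == 8 + (joints_num - 1) * 9 + joints_num * 3 + 6   # matches the assert in mean_variance()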
/utils/dist_util.py: -------------------------------------------------------------------------------- 1 | """ 2 | Helpers for distributed training. 3 | """ 4 | 5 | import socket 6 | 7 | import torch as th 8 | import torch.distributed as dist 9 | 10 | # Change this to reflect your cluster layout. 11 | # The GPU for a given rank is (rank % GPUS_PER_NODE). 12 | GPUS_PER_NODE = 8 13 | 14 | SETUP_RETRY_COUNT = 3 15 | 16 | used_device = 0 17 | 18 | def setup_dist(device=0): 19 | """ 20 | Setup a distributed process group. 21 | """ 22 | global used_device 23 | used_device = device 24 | if dist.is_initialized(): 25 | return 26 | # os.environ["CUDA_VISIBLE_DEVICES"] = str(device) # f"{MPI.COMM_WORLD.Get_rank() % GPUS_PER_NODE}" 27 | 28 | # comm = MPI.COMM_WORLD 29 | # backend = "gloo" if not th.cuda.is_available() else "nccl" 30 | 31 | # if backend == "gloo": 32 | # hostname = "localhost" 33 | # else: 34 | # hostname = socket.gethostbyname(socket.getfqdn()) 35 | # os.environ["MASTER_ADDR"] = comm.bcast(hostname, root=0) 36 | # os.environ["RANK"] = str(comm.rank) 37 | # os.environ["WORLD_SIZE"] = str(comm.size) 38 | 39 | # port = comm.bcast(_find_free_port(), root=used_device) 40 | # os.environ["MASTER_PORT"] = str(port) 41 | # dist.init_process_group(backend=backend, init_method="env://") 42 | 43 | 44 | def dev(): 45 | """ 46 | Get the device to use for torch.distributed. 47 | """ 48 | global used_device 49 | if th.cuda.is_available() and used_device>=0: 50 | return th.device(f"cuda:{used_device}") 51 | return th.device("cpu") 52 | 53 | 54 | def load_state_dict(path, **kwargs): 55 | """ 56 | Load a PyTorch file without redundant fetches across MPI ranks. 57 | """ 58 | return th.load(path, **kwargs) 59 | 60 | 61 | def sync_params(params): 62 | """ 63 | Synchronize a sequence of Tensors across ranks from rank 0. 64 | """ 65 | for p in params: 66 | with th.no_grad(): 67 | dist.broadcast(p, 0) 68 | 69 | 70 | def _find_free_port(): 71 | try: 72 | s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 73 | s.bind(("", 0)) 74 | s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) 75 | return s.getsockname()[1] 76 | finally: 77 | s.close() 78 | -------------------------------------------------------------------------------- /utils/fixseed.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import random 4 | 5 | 6 | def fixseed(seed): 7 | torch.backends.cudnn.benchmark = False 8 | random.seed(seed) 9 | np.random.seed(seed) 10 | torch.manual_seed(seed) 11 | 12 | 13 | # SEED = 10 14 | # EVALSEED = 0 15 | # # Provoc warning: not fully functionnal yet 16 | # # torch.set_deterministic(True) 17 | # torch.backends.cudnn.benchmark = False 18 | # fixseed(SEED) 19 | -------------------------------------------------------------------------------- /utils/human_body_prior/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Copyright (C) 2019 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG), 4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the 5 | # Max Planck Institute for Biological Cybernetics. All rights reserved. 6 | # 7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights 8 | # on this computer program. 
You can only use this computer program if you have closed a license agreement 9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right. 10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution. 11 | # Contact: ps-license@tuebingen.mpg.de 12 | # 13 | # 14 | # If you use this code in a research publication please consider citing the following: 15 | # 16 | # Expressive Body Capture: 3D Hands, Face, and Body from a Single Image 17 | # 18 | # 19 | # Code Developed by: 20 | # Nima Ghorbani 21 | # 22 | # 2018.01.02 23 | -------------------------------------------------------------------------------- /utils/human_body_prior/body_model/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Copyright (C) 2019 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG), 4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the 5 | # Max Planck Institute for Biological Cybernetics. All rights reserved. 6 | # 7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights 8 | # on this computer program. You can only use this computer program if you have closed a license agreement 9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right. 10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution. 11 | # Contact: ps-license@tuebingen.mpg.de 12 | # 13 | # 14 | # If you use this code in a research publication please consider citing the following: 15 | # 16 | # Expressive Body Capture: 3D Hands, Face, and Body from a Single Image 17 | # 18 | # 19 | # Code Developed by: 20 | # Nima Ghorbani 21 | # 22 | # 2018.01.02 23 | -------------------------------------------------------------------------------- /utils/human_body_prior/body_model/parts_segm/readme: -------------------------------------------------------------------------------- 1 | ### Parts segmentation file obtained from https://github.com/vchoutas/torch-mesh-isect#examples and put here for convenience -------------------------------------------------------------------------------- /utils/human_body_prior/body_model/rigid_object_model.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Copyright (C) 2019 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG), 4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the 5 | # Max Planck Institute for Biological Cybernetics. All rights reserved. 6 | # 7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights 8 | # on this computer program. You can only use this computer program if you have closed a license agreement 9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right. 10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution. 
11 | # Contact: ps-license@tuebingen.mpg.de 12 | # 13 | # 14 | # If you use this code in a research publication please consider citing the following: 15 | # 16 | # Expressive Body Capture: 3D Hands, Face, and Body from a Single Image 17 | # 18 | # 19 | # Code Developed by: 20 | # Nima Ghorbani 21 | # 22 | # 2018.12.13 23 | 24 | import numpy as np 25 | 26 | import torch 27 | import torch.nn as nn 28 | 29 | # from smplx.lbs import lbs 30 | from human_body_prior.body_model.lbs import lbs 31 | # import trimesh # dont use this package for loading meshes since it messes up the order of vertices 32 | from psbody.mesh import Mesh 33 | from human_body_prior.body_model.lbs import batch_rodrigues 34 | 35 | class RigidObjectModel(nn.Module): 36 | 37 | def __init__(self, plpath, batch_size=1, dtype=torch.float32): 38 | super(RigidObjectModel, self).__init__() 39 | 40 | trans = torch.tensor(np.zeros((batch_size, 3)), dtype=dtype, requires_grad=True) 41 | self.register_parameter('trans', nn.Parameter(trans, requires_grad=True)) 42 | 43 | root_orient = torch.tensor(np.zeros((batch_size, 3)), dtype=dtype, requires_grad=True) 44 | self.register_parameter('root_orient', nn.Parameter(root_orient, requires_grad=True)) 45 | 46 | mesh = Mesh(filename=plpath) 47 | 48 | self.rigid_v = torch.from_numpy(np.repeat(mesh.v[np.newaxis], batch_size, axis=0)).type(dtype) 49 | self.f = torch.from_numpy(mesh.f.astype(np.int32)) 50 | 51 | def forward(self, root_orient, trans): 52 | if root_orient is None: root_orient = self.root_orient 53 | if trans is None: trans = self.trans 54 | verts = torch.bmm(self.rigid_v, batch_rodrigues(root_orient)) + trans.view(-1,1,3) 55 | 56 | res = {} 57 | res['v'] = verts 58 | res['f'] = self.f 59 | 60 | class result_meta(object): pass 61 | 62 | res_class = result_meta() 63 | for k, v in res.items(): 64 | res_class.__setattr__(k, v) 65 | res = res_class 66 | 67 | return res 68 | -------------------------------------------------------------------------------- /utils/human_body_prior/data/README.md: -------------------------------------------------------------------------------- 1 | # Preparing VPoser Training Dataset 2 | The Human Body Prior, VPoser, presented here is trained on [AMASS](https://amass.is.tue.mpg.de/) dataset. 3 | AMASS is a large collection of human marker based optical mocap data as [SMPL](http://smpl.is.tue.mpg.de/) body model parameters. 4 | VPoser code here is implemented in [PyTorch](https://pytorch.org/), therefore, the data preparation code, 5 | turns AMASS data into pytorch readable *.pt* files in three stages: 6 | 7 | ***Stage I*** turns the AMASS numpy *.npz* files into PyTorch *.pt* files. 8 | For this, first you would need to download body parameters from the AMASS webpage: https://amass.is.tue.mpg.de/dataset. 9 | Then you have to select subsets of AMASS to be used for each data splits, e.g. train/validation/test. 10 | Here we follow the recommended data splits of AMASS, that is: 11 | 12 | ```python 13 | amass_splits = { 14 | 'vald': ['HumanEva', 'MPI_HDM05', 'SFU', 'MPI_mosh'], 15 | 'test': ['Transitions_mocap', 'SSM_synced'], 16 | 'train': ['CMU', 'MPI_Limits', 'TotalCapture', 'Eyes_Japan_Dataset', 'KIT', 'BML', 'EKUT', 'TCD_handMocap', 'ACCAD'] 17 | } 18 | amass_splits['train'] = list(set(amass_splits['train']).difference(set(amass_splits['test'] + amass_splits['vald']))) 19 | ``` 20 | 21 | During this stage, we also subsample the original data, so that we only take every some frames of the original mocap 22 | to be included in the final data files. 
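As a rough, illustrative sketch of what Stage I amounts to for a single mocap file (the `poses`/`trans` field names follow the AMASS *.npz* layout; the subsampling stride and output path are assumptions, not the values used by `prepare_vposer_datasets`):

```python
import numpy as np
import torch

KEEP_EVERY = 10  # assumed stride; the actual subsampling rate is a preparation setting

npz = np.load('amass/ACCAD/some_subject/some_motion_poses.npz')  # hypothetical AMASS file
poses = torch.from_numpy(npz['poses'][::KEEP_EVERY]).float()     # SMPL pose parameters per frame
trans = torch.from_numpy(npz['trans'][::KEEP_EVERY]).float()     # root translation per frame

torch.save({'poses': poses, 'trans': trans}, 'train/some_motion.pt')
```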
23 | 24 | ***Stage II*** turns the AMASS pytorch files into HDF5, *h5* files and along the process augments the data with extra fields or noise. 25 | Using pytorch in the middle stage helps to parallelize augmentation tasks. 26 | Furthermore, we use HDF5 files for the middle stage so that they can be used in other deep learning frameworks as well. 27 | 28 | ***Stage III*** again converts the augmented HDF5 files into final pytorch files that should be provided to the current VPoser training script. 29 | 30 | During the process, the data preparation code can dump a log file to make it possible to track how data for different 31 | experiments has been produced. 32 | 33 | Below is a full python script example to prepare a VPoser training data: 34 | 35 | ```python 36 | import os 37 | from human_body_prior.tools.omni_tools import makepath, log2file 38 | from human_body_prior.data.prepare_data import prepare_vposer_datasets 39 | 40 | expr_code = 'SOME_UNIQUE_ID' 41 | 42 | amass_dir = 'THE_PATH_TO_AMASS_NPZ_FILES' 43 | 44 | vposer_datadir = makepath('OUTPUT_DATA_PATH/%s' % (expr_code)) 45 | 46 | logger = log2file(os.path.join(vposer_datadir, '%s.log' % (expr_code))) 47 | logger('[%s] Preparing data for training VPoser.'%expr_code) 48 | 49 | amass_splits = { 50 | 'vald': ['HumanEva', 'MPI_HDM05', 'SFU', 'MPI_mosh'], 51 | 'test': ['Transitions_mocap', 'SSM_synced'], 52 | 'train': ['CMU', 'MPI_Limits', 'TotalCapture', 'Eyes_Japan_Dataset', 'KIT', 'BML', 'EKUT', 'TCD_handMocap', 'ACCAD'] 53 | } 54 | amass_splits['train'] = list(set(amass_splits['train']).difference(set(amass_splits['test'] + amass_splits['vald']))) 55 | 56 | prepare_vposer_datasets(vposer_datadir,amass_splits,amass_dir,logger=logger) 57 | ``` 58 | 59 | ## Note 60 | If you consider training your own VPoser for your research using AMASS dataset, then please follow its respective citation guideline. -------------------------------------------------------------------------------- /utils/human_body_prior/data/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Copyright (C) 2019 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG), 4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the 5 | # Max Planck Institute for Biological Cybernetics. All rights reserved. 6 | # 7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights 8 | # on this computer program. You can only use this computer program if you have closed a license agreement 9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right. 10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution. 11 | # Contact: ps-license@tuebingen.mpg.de 12 | # 13 | # 14 | # If you use this code in a research publication please consider citing the following: 15 | # 16 | # Expressive Body Capture: 3D Hands, Face, and Body from a Single Image 17 | # 18 | # 19 | # Code Developed by: 20 | # Nima Ghorbani 21 | # 22 | # 2018.01.02 23 | -------------------------------------------------------------------------------- /utils/human_body_prior/data/dataloader.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Copyright (C) 2019 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. 
(MPG), 4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the 5 | # Max Planck Institute for Biological Cybernetics. All rights reserved. 6 | # 7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights 8 | # on this computer program. You can only use this computer program if you have closed a license agreement 9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right. 10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution. 11 | # Contact: ps-license@tuebingen.mpg.de 12 | # 13 | # 14 | # If you use this code in a research publication please consider citing the following: 15 | # 16 | # Expressive Body Capture: 3D Hands, Face, and Body from a Single Image 17 | # 18 | # 19 | # Code Developed by: 20 | # Nima Ghorbani 21 | # 22 | # 2018.01.02 23 | 24 | import glob, os 25 | 26 | import torch 27 | from torch.utils.data import Dataset 28 | from configer import Configer 29 | 30 | class VPoserDS(Dataset): 31 | """AMASS: a pytorch loader for unified human motion capture dataset. http://amass.is.tue.mpg.de/""" 32 | 33 | def __init__(self, dataset_dir, data_fields=[]): 34 | assert os.path.exists(dataset_dir) 35 | self.ds = {} 36 | for data_fname in glob.glob(os.path.join(dataset_dir, '*.pt')): 37 | k = os.path.basename(data_fname).replace('.pt','') 38 | if len(data_fields) != 0 and k not in data_fields: continue 39 | self.ds[k] = torch.load(data_fname).type(torch.float32) 40 | 41 | dataset_ps_fname = glob.glob(os.path.join(dataset_dir, '..', '*.ini')) 42 | if len(dataset_ps_fname): 43 | self.ps = Configer(default_ps_fname=dataset_ps_fname[0], dataset_dir=dataset_dir) 44 | 45 | def __len__(self): 46 | k = list(self.ds.keys())[0] 47 | return len(self.ds[k]) 48 | 49 | def __getitem__(self, idx): 50 | return self.fetch_data(idx) 51 | 52 | def fetch_data(self, idx): 53 | data = {k: self.ds[k][idx] for k in self.ds.keys()} 54 | return data 55 | 56 | -------------------------------------------------------------------------------- /utils/human_body_prior/models/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Copyright (C) 2019 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG), 4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the 5 | # Max Planck Institute for Biological Cybernetics. All rights reserved. 6 | # 7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights 8 | # on this computer program. You can only use this computer program if you have closed a license agreement 9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right. 10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution. 
11 | # Contact: ps-license@tuebingen.mpg.de 12 | # 13 | # 14 | # If you use this code in a research publication please consider citing the following: 15 | # 16 | # Expressive Body Capture: 3D Hands, Face, and Body from a Single Image 17 | # 18 | # 19 | # Code Developed by: 20 | # Nima Ghorbani 21 | # 22 | # 2020.12.12 -------------------------------------------------------------------------------- /utils/human_body_prior/models/model_components.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Copyright (C) 2019 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG), 4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the 5 | # Max Planck Institute for Biological Cybernetics. All rights reserved. 6 | # 7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights 8 | # on this computer program. You can only use this computer program if you have closed a license agreement 9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right. 10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution. 11 | # Contact: ps-license@tuebingen.mpg.de 12 | # 13 | # 14 | # If you use this code in a research publication please consider citing the following: 15 | # 16 | # Expressive Body Capture: 3D Hands, Face, and Body from a Single Image 17 | # 18 | # 19 | # Code Developed by: 20 | # Nima Ghorbani 21 | # 22 | # 2020.12.12 23 | 24 | from torch import nn 25 | 26 | class View(nn.Module): 27 | def __init__(self, *args): 28 | super(View, self).__init__() 29 | self.shape = args 30 | self._name = 'reshape' 31 | 32 | def forward(self, x): 33 | return x.view(self.shape) 34 | 35 | class BatchFlatten(nn.Module): 36 | def __init__(self): 37 | super(BatchFlatten, self).__init__() 38 | self._name = 'batch_flatten' 39 | 40 | def forward(self, x): 41 | return x.view(x.shape[0], -1) -------------------------------------------------------------------------------- /utils/human_body_prior/tools/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Copyright (C) 2019 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG), 4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the 5 | # Max Planck Institute for Biological Cybernetics. All rights reserved. 6 | # 7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights 8 | # on this computer program. You can only use this computer program if you have closed a license agreement 9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right. 10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution. 
11 | # Contact: ps-license@tuebingen.mpg.de 12 | # 13 | # 14 | # If you use this code in a research publication please consider citing the following: 15 | # 16 | # Expressive Body Capture: 3D Hands, Face, and Body from a Single Image 17 | # 18 | # 19 | # Code Developed by: 20 | # Nima Ghorbani 21 | # 22 | # 2020.12.12 23 | -------------------------------------------------------------------------------- /utils/human_body_prior/tools/angle_continuous_repres.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Copyright (C) 2019 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG), 4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the 5 | # Max Planck Institute for Biological Cybernetics. All rights reserved. 6 | # 7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights 8 | # on this computer program. You can only use this computer program if you have closed a license agreement 9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right. 10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution. 11 | # Contact: ps-license@tuebingen.mpg.de 12 | # 13 | # 14 | # If you use this code in a research publication please consider citing the following: 15 | # 16 | # Expressive Body Capture: 3D Hands, Face, and Body from a Single Image 17 | # 18 | # 19 | # Code Developed by: 20 | # Nima Ghorbani 21 | # 22 | # 2020.12.12 23 | import torch.nn.functional as F 24 | import torch 25 | from torch import nn 26 | 27 | import numpy as np 28 | 29 | # numpy implementation of yi zhou's method 30 | def norm(v): 31 | return v/np.linalg.norm(v) 32 | 33 | def gs(M): 34 | a1 = M[:,0] 35 | a2 = M[:,1] 36 | b1 = norm(a1) 37 | b2 = norm((a2-np.dot(b1,a2)*b1)) 38 | b3 = np.cross(b1,b2) 39 | return np.vstack([b1,b2,b3]).T 40 | 41 | # input sz bszx3x2 42 | def bgs(d6s): 43 | 44 | bsz = d6s.shape[0] 45 | b1 = F.normalize(d6s[:,:,0], p=2, dim=1) 46 | a2 = d6s[:,:,1] 47 | c = torch.bmm(b1.view(bsz,1,-1),a2.view(bsz,-1,1)).view(bsz,1)*b1 48 | b2 = F.normalize(a2-c,p=2,dim=1) 49 | b3=torch.cross(b1,b2,dim=1) 50 | return torch.stack([b1,b2,b3],dim=1).permute(0,2,1) 51 | 52 | 53 | class geodesic_loss_R(nn.Module): 54 | def __init__(self, reduction='batchmean'): 55 | super(geodesic_loss_R, self).__init__() 56 | 57 | self.reduction = reduction 58 | self.eps = 1e-6 59 | 60 | # batch geodesic loss for rotation matrices 61 | def bgdR(self,m1,m2): 62 | batch = m1.shape[0] 63 | m = torch.bmm(m1, m2.transpose(1, 2)) # batch*3*3 64 | 65 | cos = (m[:, 0, 0] + m[:, 1, 1] + m[:, 2, 2] - 1) / 2 66 | cos = torch.min(cos, m1.new(np.ones(batch))) 67 | cos = torch.max(cos, m1.new(np.ones(batch)) * -1) 68 | 69 | return torch.acos(cos) 70 | 71 | def forward(self, ypred, ytrue): 72 | theta = self.bgdR(ypred,ytrue) 73 | if self.reduction == 'mean': 74 | return torch.mean(theta) 75 | if self.reduction == 'batchmean': 76 | breakpoint() 77 | return torch.mean(torch.sum(theta, dim=theta.shape[1:])) 78 | 79 | else: 80 | return theta -------------------------------------------------------------------------------- /utils/human_body_prior/tools/configurations.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Copyright (C) 2019 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. 
(MPG), 4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the 5 | # Max Planck Institute for Biological Cybernetics. All rights reserved. 6 | # 7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights 8 | # on this computer program. You can only use this computer program if you have closed a license agreement 9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right. 10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution. 11 | # Contact: ps-license@tuebingen.mpg.de 12 | # 13 | # 14 | # If you use this code in a research publication please consider citing the following: 15 | # 16 | # Expressive Body Capture: 3D Hands, Face, and Body from a Single Image 17 | # 18 | # 19 | # Code Developed by: 20 | # Nima Ghorbani 21 | # 22 | # 2020.12.12 23 | from dotmap import DotMap 24 | import os 25 | import yaml 26 | 27 | def load_config(default_ps_fname=None, **kwargs): 28 | if isinstance(default_ps_fname, str): 29 | assert os.path.exists(default_ps_fname), FileNotFoundError(default_ps_fname) 30 | assert default_ps_fname.lower().endswith('.yaml'), NotImplementedError('Only .yaml files are accepted.') 31 | default_ps = yaml.safe_load(open(default_ps_fname, 'r')) 32 | else: 33 | default_ps = {} 34 | 35 | default_ps.update(kwargs) 36 | 37 | return DotMap(default_ps, _dynamic=False) 38 | 39 | def dump_config(data, fname): 40 | ''' 41 | dump current configuration to an ini file 42 | :param fname: 43 | :return: 44 | ''' 45 | with open(fname, 'w') as file: 46 | yaml.dump(data.toDict(), file) 47 | return fname 48 | -------------------------------------------------------------------------------- /utils/human_body_prior/tools/model_loader.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Copyright (C) 2019 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG), 4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the 5 | # Max Planck Institute for Biological Cybernetics. All rights reserved. 6 | # 7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights 8 | # on this computer program. You can only use this computer program if you have closed a license agreement 9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right. 10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution. 
11 | # Contact: ps-license@tuebingen.mpg.de 12 | # 13 | # 14 | # If you use this code in a research publication please consider citing the following: 15 | # 16 | # Expressive Body Capture: 3D Hands, Face, and Body from a Single Image 17 | # 18 | # 19 | # Code Developed by: Nima Ghorbani 20 | # 2018.01.02 21 | 22 | import os, glob 23 | import numpy as np 24 | from human_body_prior.tools.configurations import load_config, dump_config 25 | import os.path as osp 26 | 27 | def exprdir2model(expr_dir): 28 | 29 | if not os.path.exists(expr_dir): raise ValueError('Could not find the experiment directory: %s' % expr_dir) 30 | 31 | model_snapshots_dir = osp.join(expr_dir, 'snapshots') 32 | available_ckpts = sorted(glob.glob(osp.join(model_snapshots_dir, '*.ckpt')), key=osp.getmtime) 33 | assert len(available_ckpts) > 0, ValueError('No checck points found at {}'.format(model_snapshots_dir)) 34 | trained_weigths_fname = available_ckpts[-1] 35 | 36 | model_ps_fname = glob.glob(osp.join('/', '/'.join(trained_weigths_fname.split('/')[:-2]), '*.yaml')) 37 | if len(model_ps_fname) == 0: 38 | model_ps_fname = glob.glob(osp.join('/'.join(trained_weigths_fname.split('/')[:-2]), '*.yaml')) 39 | 40 | model_ps_fname = model_ps_fname[0] 41 | model_ps = load_config(default_ps_fname=model_ps_fname) 42 | 43 | model_ps.logging.best_model_fname = trained_weigths_fname 44 | 45 | return model_ps, trained_weigths_fname 46 | 47 | 48 | def load_model(expr_dir, model_code=None, remove_words_in_model_weights=None, load_only_ps=False, disable_grad=True, custom_ps = None): 49 | ''' 50 | 51 | :param expr_dir: 52 | :param model_code: an imported module 53 | from supercap.train.supercap_smpl import SuperCap, then pass SuperCap to this function 54 | :param if True will load the model definition used for training, and not the one in current repository 55 | :return: 56 | ''' 57 | import importlib 58 | import torch 59 | 60 | model_ps, trained_weigths_fname = exprdir2model(expr_dir) 61 | if load_only_ps: return model_ps 62 | if custom_ps is not None: model_ps = custom_ps 63 | assert model_code is not None, ValueError('mode_code should be provided') 64 | model_instance = model_code(model_ps) 65 | if disable_grad: # i had to do this. torch.no_grad() couldnt achieve what i was looking for 66 | for param in model_instance.parameters(): 67 | param.requires_grad = False 68 | state_dict = torch.load(trained_weigths_fname)['state_dict'] 69 | if remove_words_in_model_weights is not None: 70 | words = '{}'.format(remove_words_in_model_weights) 71 | state_dict = {k.replace(words, '') if k.startswith(words) else k: v for k, v in state_dict.items()} 72 | 73 | ## keys that were in the model trained file and not in the current model 74 | instance_model_keys = list(model_instance.state_dict().keys()) 75 | trained_model_keys = list(state_dict.keys()) 76 | wts_in_model_not_in_file = set(instance_model_keys).difference(set(trained_model_keys)) 77 | ## keys that are in the current model not in the training weights 78 | wts_in_file_not_in_model = set(trained_model_keys).difference(set(instance_model_keys)) 79 | # assert len(wts_in_model_not_in_file) == 0, ValueError('Some model weights are not present in the pretrained file. {}'.format(wts_in_model_not_in_file)) 80 | 81 | state_dict = {k:v for k, v in state_dict.items() if k in instance_model_keys} 82 | model_instance.load_state_dict(state_dict, strict=False) # Todo fix the issues so that we can set the strict to true. 
The body model uses unnecessary registered buffers 83 | model_instance.eval() 84 | 85 | return model_instance, model_ps 86 | 87 | 88 | -------------------------------------------------------------------------------- /utils/human_body_prior/train/README.md: -------------------------------------------------------------------------------- 1 | # Train VPoser from Scratch 2 | To train your own VPoser with new configuration duplicate the provided **V02_05** folder while setting a new experiment ID 3 | and change the settings as you desire. 4 | First you would need to download the 5 | [AMASS](https://amass.is.tue.mpg.de/) dataset, then following the [data preparation tutorial](../data/README.md) 6 | prepare the data for training. 7 | Following is a code snippet for training that can be found in the [example training experiment](https://github.com/nghorbani/human_body_prior/blob/master/src/human_body_prior/train/V02_05/V02_05.py): 8 | 9 | ```python 10 | import glob 11 | import os.path as osp 12 | 13 | from human_body_prior.tools.configurations import load_config 14 | from human_body_prior.train.vposer_trainer import train_vposer_once 15 | 16 | def main(): 17 | expr_id = 'V02_05' 18 | 19 | default_ps_fname = glob.glob(osp.join(osp.dirname(__file__), '*.yaml'))[0] 20 | 21 | vp_ps = load_config(default_ps_fname) 22 | 23 | vp_ps.train_parms.batch_size = 128 24 | 25 | vp_ps.general.expr_id = expr_id 26 | 27 | total_jobs = [] 28 | total_jobs.append(vp_ps.toDict().copy()) 29 | 30 | print('#training_jobs to be done: {}'.format(len(total_jobs))) 31 | if len(total_jobs) == 0: 32 | print('No jobs to be done') 33 | return 34 | 35 | for job in total_jobs: 36 | train_vposer_once(job) 37 | ``` 38 | The above code uses yaml configuration files to handle experiment settings. 39 | It loads the default settings in *.yaml* and overloads it with your new args. 40 | 41 | The training code, will dump a log file along with tensorboard readable events file. -------------------------------------------------------------------------------- /utils/human_body_prior/train/V02_05/V02_05.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Copyright (C) 2019 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG), 4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the 5 | # Max Planck Institute for Biological Cybernetics. All rights reserved. 6 | # 7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights 8 | # on this computer program. You can only use this computer program if you have closed a license agreement 9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right. 10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution. 
11 | # Contact: ps-license@tuebingen.mpg.de 12 | # 13 | # 14 | # If you use this code in a research publication please consider citing the following: 15 | # 16 | # Expressive Body Capture: 3D Hands, Face, and Body from a Single Image 17 | # 18 | # 19 | # Code Developed by: 20 | # Nima Ghorbani 21 | # 22 | # 2020.12.12 23 | 24 | import glob 25 | import os.path as osp 26 | 27 | from human_body_prior.tools.configurations import load_config 28 | from human_body_prior.train.vposer_trainer import train_vposer_once 29 | 30 | def main(): 31 | expr_id = 'V02_05' 32 | 33 | default_ps_fname = glob.glob(osp.join(osp.dirname(__file__), '*.yaml'))[0] 34 | 35 | vp_ps = load_config(default_ps_fname) 36 | 37 | vp_ps.train_parms.batch_size = 128 38 | 39 | vp_ps.general.expr_id = expr_id 40 | 41 | total_jobs = [] 42 | total_jobs.append(vp_ps.toDict().copy()) 43 | 44 | print('#training_jobs to be done: {}'.format(len(total_jobs))) 45 | if len(total_jobs) == 0: 46 | print('No jobs to be done') 47 | return 48 | 49 | for job in total_jobs: 50 | train_vposer_once(job) 51 | 52 | 53 | if __name__ == '__main__': 54 | main() -------------------------------------------------------------------------------- /utils/human_body_prior/train/V02_05/V02_05.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | body_model: 3 | gender: neutral 4 | bm_fname: ../../../../support_data/dowloads/models/smplx/neutral/model.npz 5 | 6 | general: 7 | verbosity: 0 8 | expr_id: 9 | dataset_id: V02_03 #SMPLx neutral 10 | rnd_seed: 100 11 | work_basedir: ../../../../support_data/training/training_experiments 12 | dataset_basedir: ../../../../support_data/training/data 13 | 14 | logging: 15 | expr_msg: 16 | num_bodies_to_display: 25 17 | work_dir: 18 | dataset_dir: 19 | render_during_training: False 20 | best_model_fname: 21 | 22 | train_parms: 23 | batch_size: 24 | num_epochs: 100 25 | restore_optimizer: False 26 | gen_optimizer: 27 | type: Adam 28 | args: 29 | lr: 0.001 30 | weight_decay: 0.00001 31 | lr_scheduler: 32 | type: ReduceLROnPlateau 33 | args: 34 | # metrics: val_loss 35 | verbose: true 36 | patience: 5 37 | early_stopping: 38 | monitor: val_loss 39 | min_delta: 0.0 40 | patience: 10 41 | verbose: True 42 | mode: min 43 | keep_extra_loss_terms_until_epoch: 15 44 | loss_weights: 45 | loss_kl_wt: 0.005 46 | loss_rec_wt: 4 47 | loss_matrot_wt: 2 48 | loss_jtr_wt: 2 49 | 50 | 51 | data_parms: 52 | num_workers: 5 # Used for dataloaders 53 | amass_dir: support_data/dowloads/amass/smplx_neutral 54 | num_timeseq_frames: 1 55 | amass_splits: 56 | vald: 57 | # - HumanEva 58 | # - MPI_HDM05 59 | # - SFU 60 | # - MPI_mosh 61 | - BMLrub_vald 62 | train: 63 | - CMU 64 | - BMLrub_train 65 | # - MPI_Limits 66 | # - TotalCapture 67 | # - Eyes_Japan_Dataset 68 | # - KIT 69 | # - BMLrub 70 | # - EKUT 71 | # - TCD_handMocap 72 | # - ACCAD 73 | # - BMLmovi 74 | test: 75 | - BMLrub_test 76 | # - Transitions_mocap 77 | # - SSM_synced 78 | # - DFaust_67 79 | 80 | 81 | model_params: 82 | num_neurons : 512 83 | latentD : 32 84 | 85 | -------------------------------------------------------------------------------- /utils/human_body_prior/train/V02_05/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Copyright (C) 2019 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG), 4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the 5 | # Max Planck Institute for Biological Cybernetics. 
All rights reserved. 6 | # 7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights 8 | # on this computer program. You can only use this computer program if you have closed a license agreement 9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right. 10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution. 11 | # Contact: ps-license@tuebingen.mpg.de 12 | # 13 | # 14 | # If you use this code in a research publication please consider citing the following: 15 | # 16 | # Expressive Body Capture: 3D Hands, Face, and Body from a Single Image 17 | # 18 | # 19 | # Code Developed by: 20 | # Nima Ghorbani 21 | # 22 | # 2020.12.12 23 | -------------------------------------------------------------------------------- /utils/human_body_prior/train/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Copyright (C) 2019 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG), 4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the 5 | # Max Planck Institute for Biological Cybernetics. All rights reserved. 6 | # 7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights 8 | # on this computer program. You can only use this computer program if you have closed a license agreement 9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right. 10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution. 11 | # Contact: ps-license@tuebingen.mpg.de 12 | # 13 | # 14 | # If you use this code in a research publication please consider citing the following: 15 | # 16 | # Expressive Body Capture: 3D Hands, Face, and Body from a Single Image 17 | # 18 | # 19 | # Code Developed by: 20 | # Nima Ghorbani 21 | # 22 | # 2018.01.02 23 | -------------------------------------------------------------------------------- /utils/human_body_prior/visualizations/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Copyright (C) 2019 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG), 4 | # acting on behalf of its Max Planck Institute for Intelligent Systems and the 5 | # Max Planck Institute for Biological Cybernetics. All rights reserved. 6 | # 7 | # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights 8 | # on this computer program. You can only use this computer program if you have closed a license agreement 9 | # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right. 10 | # Any use of the computer program without a valid license is prohibited and liable to prosecution. 
11 | # Contact: ps-license@tuebingen.mpg.de 12 | # 13 | # 14 | # If you use this code in a research publication please consider citing the following: 15 | # 16 | # Expressive Body Capture: 3D Hands, Face, and Body from a Single Image 17 | # 18 | # 19 | # Code Developed by: 20 | # Nima Ghorbani 21 | # 22 | # 2020.12.12 23 | -------------------------------------------------------------------------------- /utils/misc.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | def to_numpy(tensor): 5 | if torch.is_tensor(tensor): 6 | return tensor.cpu().numpy() 7 | elif type(tensor).__module__ != 'numpy': 8 | raise ValueError("Cannot convert {} to numpy array".format( 9 | type(tensor))) 10 | return tensor 11 | 12 | 13 | def to_torch(ndarray): 14 | if type(ndarray).__module__ == 'numpy': 15 | return torch.from_numpy(ndarray) 16 | elif not torch.is_tensor(ndarray): 17 | raise ValueError("Cannot convert {} to torch tensor".format( 18 | type(ndarray))) 19 | return ndarray 20 | 21 | 22 | def cleanexit(): 23 | import sys 24 | import os 25 | try: 26 | sys.exit(0) 27 | except SystemExit: 28 | os._exit(0) 29 | 30 | def load_model_wo_clip(model, state_dict): 31 | missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) 32 | assert len(unexpected_keys) == 0 33 | assert all([k.startswith('clip_model.') for k in missing_keys]) 34 | 35 | def freeze_joints(x, joints_to_freeze): 36 | # Freezes selected joint *rotations* as they appear in the first frame 37 | # x [bs, [root+n_joints], joint_dim(6), seqlen] 38 | frozen = x.detach().clone() 39 | frozen[:, joints_to_freeze, :, :] = frozen[:, joints_to_freeze, :, :1] 40 | return frozen 41 | -------------------------------------------------------------------------------- /utils/paramUtil.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | # Define a kinematic tree for the skeletal struture 4 | kit_kinematic_chain = [[0, 11, 12, 13, 14, 15], [0, 16, 17, 18, 19, 20], [0, 1, 2, 3, 4], [3, 5, 6, 7], [3, 8, 9, 10]] 5 | 6 | kit_raw_offsets = np.array( 7 | [ 8 | [0, 0, 0], 9 | [0, 1, 0], 10 | [0, 1, 0], 11 | [0, 1, 0], 12 | [0, 1, 0], 13 | [1, 0, 0], 14 | [0, -1, 0], 15 | [0, -1, 0], 16 | [-1, 0, 0], 17 | [0, -1, 0], 18 | [0, -1, 0], 19 | [1, 0, 0], 20 | [0, -1, 0], 21 | [0, -1, 0], 22 | [0, 0, 1], 23 | [0, 0, 1], 24 | [-1, 0, 0], 25 | [0, -1, 0], 26 | [0, -1, 0], 27 | [0, 0, 1], 28 | [0, 0, 1] 29 | ] 30 | ) 31 | 32 | t2m_raw_offsets = np.array([[0,0,0], 33 | [1,0,0], 34 | [-1,0,0], 35 | [0,1,0], 36 | [0,-1,0], 37 | [0,-1,0], 38 | [0,1,0], 39 | [0,-1,0], 40 | [0,-1,0], 41 | [0,1,0], 42 | [0,0,1], 43 | [0,0,1], 44 | [0,1,0], 45 | [1,0,0], 46 | [-1,0,0], 47 | [0,0,1], 48 | [0,-1,0], 49 | [0,-1,0], 50 | [0,-1,0], 51 | [0,-1,0], 52 | [0,-1,0], 53 | [0,-1,0]]) 54 | 55 | t2m_kinematic_chain = [[0, 2, 5, 8, 11], [0, 1, 4, 7, 10], [0, 3, 6, 9, 12, 15], [9, 14, 17, 19, 21], [9, 13, 16, 18, 20]] 56 | t2m_left_hand_chain = [[20, 22, 23, 24], [20, 34, 35, 36], [20, 25, 26, 27], [20, 31, 32, 33], [20, 28, 29, 30]] 57 | t2m_right_hand_chain = [[21, 43, 44, 45], [21, 46, 47, 48], [21, 40, 41, 42], [21, 37, 38, 39], [21, 49, 50, 51]] 58 | 59 | 60 | kit_tgt_skel_id = '03950' 61 | 62 | t2m_tgt_skel_id = '000021' 63 | 64 | -------------------------------------------------------------------------------- /visualize/__pycache__/render_mesh.cpython-311.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/visualize/__pycache__/render_mesh.cpython-311.pyc -------------------------------------------------------------------------------- /visualize/__pycache__/render_mesh.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/visualize/__pycache__/render_mesh.cpython-37.pyc -------------------------------------------------------------------------------- /visualize/__pycache__/render_mesh.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/visualize/__pycache__/render_mesh.cpython-38.pyc -------------------------------------------------------------------------------- /visualize/__pycache__/render_mitsuba.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/visualize/__pycache__/render_mitsuba.cpython-311.pyc -------------------------------------------------------------------------------- /visualize/__pycache__/simplify_loc2rot.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/visualize/__pycache__/simplify_loc2rot.cpython-37.pyc -------------------------------------------------------------------------------- /visualize/__pycache__/simplify_loc2rot.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/visualize/__pycache__/simplify_loc2rot.cpython-38.pyc -------------------------------------------------------------------------------- /visualize/__pycache__/vis_utils.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/visualize/__pycache__/vis_utils.cpython-311.pyc -------------------------------------------------------------------------------- /visualize/__pycache__/vis_utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/visualize/__pycache__/vis_utils.cpython-37.pyc -------------------------------------------------------------------------------- /visualize/__pycache__/vis_utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/visualize/__pycache__/vis_utils.cpython-38.pyc -------------------------------------------------------------------------------- /visualize/joints2smpl/README.md: -------------------------------------------------------------------------------- 1 | # joints2smpl 2 | fit SMPL model using 3D joints 3 | 4 | ## Prerequisites 5 | We have tested the code on Ubuntu 18.04/20.04 with CUDA 10.2/11.3 6 | 7 | ## Installation 8 | First you have to make sure that you have all dependencies in place. 9 | The simplest way to do is to use the [anaconda](https://www.anaconda.com/). 
10 | 11 | You can create an anaconda environment called `fit3d` using 12 | ``` 13 | conda env create -f environment.yaml 14 | conda activate fit3d 15 | ``` 16 | 17 | ## Download SMPL models 18 | Download [SMPL Female and Male](https://smpl.is.tue.mpg.de/) and [SMPL Netural](https://smplify.is.tue.mpg.de/), and rename the files and extract them to `/smpl_models/smpl/`, eventually, the `/smpl_models` folder should have the following structure: 19 | ``` 20 | smpl_models 21 | └-- smpl 22 | └-- SMPL_FEMALE.pkl 23 | └-- SMPL_MALE.pkl 24 | └-- SMPL_NEUTRAL.pkl 25 | ``` 26 | 27 | ## Demo 28 | ### Demo for sequences 29 | python fit_seq.py --files test_motion2.npy 30 | 31 | The results will locate in ./demo/demo_results/ 32 | 33 | ## Citation 34 | If you find this project useful for your research, please consider citing: 35 | ``` 36 | @article{zuo2021sparsefusion, 37 | title={Sparsefusion: Dynamic human avatar modeling from sparse rgbd images}, 38 | author={Zuo, Xinxin and Wang, Sen and Zheng, Jiangbin and Yu, Weiwei and Gong, Minglun and Yang, Ruigang and Cheng, Li}, 39 | journal={IEEE Transactions on Multimedia}, 40 | volume={23}, 41 | pages={1617--1629}, 42 | year={2021} 43 | } 44 | ``` 45 | 46 | ## References 47 | We indicate if a function or script is borrowed externally inside each file. Here are some great resources we 48 | benefit: 49 | 50 | - Shape/Pose prior and some functions are borrowed from [VIBE](https://github.com/mkocabas/VIBE). 51 | - SMPL models and layer is from [SMPL-X model](https://github.com/vchoutas/smplx). 52 | - Some functions are borrowed from [HMR-pytorch](https://github.com/MandyMo/pytorch_HMR). 53 | -------------------------------------------------------------------------------- /visualize/joints2smpl/environment.yaml: -------------------------------------------------------------------------------- 1 | name: fit3d 2 | channels: 3 | - conda-forge 4 | - pytorch 5 | - defaults 6 | - pytorch3d 7 | - open3d-admin 8 | - anaconda 9 | dependencies: 10 | - pip=21.1.3 11 | - numpy=1.20.3 12 | - numpy-base=1.20.3 13 | - matplotlib=3.4.2 14 | - matplotlib-base=3.4.2 15 | - pandas=1.3.1 16 | - python=3.7.6 17 | - pytorch=1.7.1 18 | - tensorboardx=2.2 19 | - cudatoolkit=10.2.89 20 | - torchvision=0.8.2 21 | - einops=0.3.0 22 | - pytorch3d=0.4.0 23 | - tqdm=4.61.2 24 | - trimesh=3.9.24 25 | - joblib=1.0.1 26 | - open3d=0.13.0 27 | - pip: 28 | - h5py==2.9.0 29 | - chumpy==0.70 30 | - smplx==0.1.28 31 | -------------------------------------------------------------------------------- /visualize/joints2smpl/smpl_models/SMPL_downsample_index.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/visualize/joints2smpl/smpl_models/SMPL_downsample_index.pkl -------------------------------------------------------------------------------- /visualize/joints2smpl/smpl_models/neutral_smpl_mean_params.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/visualize/joints2smpl/smpl_models/neutral_smpl_mean_params.h5 -------------------------------------------------------------------------------- /visualize/joints2smpl/src/__pycache__/config.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/visualize/joints2smpl/src/__pycache__/config.cpython-37.pyc -------------------------------------------------------------------------------- /visualize/joints2smpl/src/__pycache__/config.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/visualize/joints2smpl/src/__pycache__/config.cpython-38.pyc -------------------------------------------------------------------------------- /visualize/joints2smpl/src/__pycache__/customloss.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/visualize/joints2smpl/src/__pycache__/customloss.cpython-37.pyc -------------------------------------------------------------------------------- /visualize/joints2smpl/src/__pycache__/customloss.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/visualize/joints2smpl/src/__pycache__/customloss.cpython-38.pyc -------------------------------------------------------------------------------- /visualize/joints2smpl/src/__pycache__/prior.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/visualize/joints2smpl/src/__pycache__/prior.cpython-37.pyc -------------------------------------------------------------------------------- /visualize/joints2smpl/src/__pycache__/prior.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/visualize/joints2smpl/src/__pycache__/prior.cpython-38.pyc -------------------------------------------------------------------------------- /visualize/joints2smpl/src/__pycache__/smplify.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/visualize/joints2smpl/src/__pycache__/smplify.cpython-37.pyc -------------------------------------------------------------------------------- /visualize/joints2smpl/src/__pycache__/smplify.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neu-vi/HOI-Diff/2d247bfb7e139eea0392e2514a448bcb4bebff28/visualize/joints2smpl/src/__pycache__/smplify.cpython-38.pyc -------------------------------------------------------------------------------- /visualize/joints2smpl/src/config.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | # Map joints Name to SMPL joints idx 4 | JOINT_MAP = { 5 | 'MidHip': 0, 6 | 'LHip': 1, 'LKnee': 4, 'LAnkle': 7, 'LFoot': 10, 7 | 'RHip': 2, 'RKnee': 5, 'RAnkle': 8, 'RFoot': 11, 8 | 'LShoulder': 16, 'LElbow': 18, 'LWrist': 20, 'LHand': 22, 9 | 'RShoulder': 17, 'RElbow': 19, 'RWrist': 21, 'RHand': 23, 10 | 'spine1': 3, 'spine2': 6, 'spine3': 9, 'Neck': 12, 'Head': 15, 11 | 'LCollar':13, 'Rcollar' :14, 12 | 'Nose':24, 'REye':26, 'LEye':26, 'REar':27, 'LEar':28, 13 | 'LHeel': 31, 'RHeel': 34, 14 | 'OP RShoulder': 17, 'OP 
LShoulder': 16, 15 | 'OP RHip': 2, 'OP LHip': 1, 16 | 'OP Neck': 12, 17 | } 18 | 19 | full_smpl_idx = range(24) 20 | key_smpl_idx = [0, 1, 4, 7, 2, 5, 8, 17, 19, 21, 16, 18, 20] 21 | 22 | 23 | AMASS_JOINT_MAP = { 24 | 'MidHip': 0, 25 | 'LHip': 1, 'LKnee': 4, 'LAnkle': 7, 'LFoot': 10, 26 | 'RHip': 2, 'RKnee': 5, 'RAnkle': 8, 'RFoot': 11, 27 | 'LShoulder': 16, 'LElbow': 18, 'LWrist': 20, 28 | 'RShoulder': 17, 'RElbow': 19, 'RWrist': 21, 29 | 'spine1': 3, 'spine2': 6, 'spine3': 9, 'Neck': 12, 'Head': 15, 30 | 'LCollar':13, 'Rcollar' :14, 31 | } 32 | amass_idx = range(22) 33 | amass_smpl_idx = range(22) 34 | 35 | 36 | SMPL_MODEL_DIR = "./body_models/" 37 | GMM_MODEL_DIR = "./visualize/joints2smpl/smpl_models/" 38 | SMPL_MEAN_FILE = "./visualize/joints2smpl/smpl_models/neutral_smpl_mean_params.h5" 39 | # for collsion 40 | Part_Seg_DIR = "./visualize/joints2smpl/smpl_models/smplx_parts_segm.pkl" -------------------------------------------------------------------------------- /visualize/motions2hik.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | 4 | from utils.rotation_conversions import rotation_6d_to_matrix, matrix_to_euler_angles 5 | from visualize.simplify_loc2rot import joints2smpl 6 | 7 | """ 8 | Utility function to convert model output to a representation used by HumanIK skeletons in Maya and Motion Builder 9 | by converting joint positions to joint rotations in degrees. Based on visualize.vis_utils.npy2obj 10 | """ 11 | 12 | # Mapping of SMPL joint index to HIK joint Name 13 | JOINT_MAP = [ 14 | 'Hips', 15 | 'LeftUpLeg', 16 | 'RightUpLeg', 17 | 'Spine', 18 | 'LeftLeg', 19 | 'RightLeg', 20 | 'Spine1', 21 | 'LeftFoot', 22 | 'RightFoot', 23 | 'Spine2', 24 | 'LeftToeBase', 25 | 'RightToeBase', 26 | 'Neck', 27 | 'LeftShoulder', 28 | 'RightShoulder', 29 | 'Head', 30 | 'LeftArm', 31 | 'RightArm', 32 | 'LeftForeArm', 33 | 'RightForeArm', 34 | 'LeftHand', 35 | 'RightHand' 36 | ] 37 | 38 | 39 | def motions2hik(motions, device=0, cuda=True): 40 | """ 41 | Utility function to convert model output to a representation used by HumanIK skeletons in Maya and Motion Builder 42 | by converting joint positions to joint rotations in degrees. 
Based on visualize.vis_utils.npy2obj 43 | 44 | :param motions: numpy array containing MDM model output [num_reps, num_joints, num_params (xyz), num_frames 45 | :param device: 46 | :param cuda: 47 | 48 | :returns: JSON serializable dict to be used with the Replicate API implementation 49 | """ 50 | 51 | nreps, njoints, nfeats, nframes = motions.shape 52 | j2s = joints2smpl(num_frames=nframes, device_id=device, cuda=cuda) 53 | 54 | thetas = [] 55 | root_translation = [] 56 | for rep_idx in range(nreps): 57 | rep_motions = motions[rep_idx].transpose(2, 0, 1) # [nframes, njoints, 3] 58 | 59 | if nfeats == 3: 60 | print(f'Running SMPLify for repetition [{rep_idx + 1}] of {nreps}, it may take a few minutes.') 61 | motion_tensor, opt_dict = j2s.joint2smpl(rep_motions) # [nframes, njoints, 3] 62 | motion = motion_tensor.cpu().numpy() 63 | 64 | elif nfeats == 6: 65 | motion = rep_motions 66 | thetas.append(rep_motions) 67 | 68 | # Convert 6D rotation representation to Euler angles 69 | thetas_6d = motion[0, :-1, :, :nframes].transpose(2, 0, 1) # [nframes, njoints, 6] 70 | thetas_deg = [] 71 | for frame, d6 in enumerate(thetas_6d): 72 | thetas_deg.append([_rotation_6d_to_euler(d6)]) 73 | 74 | thetas.append([np.concatenate(thetas_deg, axis=0)]) 75 | root_translation.append([motion[0, -1, :3, :nframes].transpose(1, 0)]) # [nframes, 3] 76 | 77 | thetas = np.concatenate(thetas, axis=0)[:nframes] 78 | root_translation = np.concatenate(root_translation, axis=0)[:nframes] 79 | 80 | data_dict = { 81 | 'joint_map': JOINT_MAP, 82 | 'thetas': thetas.tolist(), # [nreps, nframes, njoints, 3 (deg)] 83 | 'root_translation': root_translation.tolist(), # [nreps, nframes, 3 (xyz)] 84 | } 85 | 86 | return data_dict 87 | 88 | 89 | def _rotation_6d_to_euler(d6): 90 | """ 91 | Converts 6D rotation representation by Zhou et al. [1] to euler angles 92 | using Gram--Schmidt orthogonalisation per Section B of [1]. 
93 | 94 | :param d6: numpy Array 6D rotation representation, of size (*, 6) 95 | :returns: JSON serializable dict to be used with the Replicate API implementation 96 | :returns: euler angles in degrees as a numpy array with shape (*, 3) 97 | """ 98 | rot_mat = rotation_6d_to_matrix(torch.tensor(d6)) 99 | rot_eul_rad = matrix_to_euler_angles(rot_mat, 'XYZ') 100 | eul_deg = torch.rad2deg(rot_eul_rad).numpy() 101 | 102 | return eul_deg 103 | 104 | -------------------------------------------------------------------------------- /visualize/render_mesh.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | from visualize import vis_utils 4 | import shutil 5 | from tqdm import tqdm 6 | 7 | if __name__ == '__main__': 8 | parser = argparse.ArgumentParser() 9 | parser.add_argument("--input_path", type=str, required=True, help='stick figure mp4 file to be rendered.') 10 | parser.add_argument("--cuda", type=bool, default=True, help='') 11 | parser.add_argument("--device", type=int, default=0, help='') 12 | parser.add_argument("--obj_mesh_path", type=str, default='/work/vig/xiaogangp/codes/hoi-motion_pretrained/object_mesh') 13 | params = parser.parse_args() 14 | 15 | assert params.input_path.endswith('.mp4') 16 | parsed_name = os.path.basename(params.input_path).replace('.mp4', '').replace('sample', '').replace('rep', '') 17 | sample_i, rep_i = [int(e) for e in parsed_name.split('_')] 18 | npy_path = os.path.join(os.path.dirname(params.input_path), 'results.npy') 19 | out_npy_path = params.input_path.replace('.mp4', '_smpl_params.npy') 20 | out_obj_npy_path = params.input_path.replace('.mp4', '_obj_params.npy') 21 | assert os.path.exists(npy_path) 22 | results_dir = params.input_path.replace('.mp4', '_obj') 23 | if os.path.exists(results_dir): 24 | shutil.rmtree(results_dir) 25 | os.makedirs(results_dir) 26 | 27 | # object 28 | npy2obj_object = vis_utils.npy2obj_object(npy_path, params.obj_mesh_path, sample_i, rep_i, 29 | device=params.device, cuda=params.cuda, if_color=True) 30 | 31 | # # human 32 | npy2obj = vis_utils.npy2obj(npy_path, sample_i, rep_i, 33 | device=params.device, cuda=params.cuda, if_color=True) 34 | 35 | # print('Saving obj files to [{}]'.format(os.path.abspath(results_dir))) 36 | # for frame_i in tqdm(range(npy2obj.real_num_frames)): 37 | # npy2obj.save_ply(os.path.join(results_dir, 'frame{:03d}.ply'.format(frame_i)), frame_i) 38 | # npy2obj_object.save_ply(os.path.join(results_dir, 'obj_frame{:03d}.ply'.format(frame_i)), sample_i, frame_i) 39 | 40 | print('Saving SMPL params to [{}]'.format(os.path.abspath(out_npy_path))) 41 | npy2obj.save_npy(out_npy_path) 42 | npy2obj_object.save_npy(out_obj_npy_path) 43 | 44 | 45 | # blender -b -noaudio --python render.py -- --cfg=./configs/render.yaml --dir=../save/contact_lamda_0.1/samples_contact_000020025_seed10/ --mode=sequence --joint_type=HumanML3D --------------------------------------------------------------------------------
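Two brief notes on the visualization entry points above. `visualize/render_mesh.py` expects `--input_path` to be a `sample{XX}_rep{YY}.mp4` that sits next to the `results.npy` it was generated from, and it writes `*_smpl_params.npy` and `*_obj_params.npy` next to that input. As a small sanity check for the 6D-rotation helper in `visualize/motions2hik.py` (assuming the repository root is on `PYTHONPATH`), the 6D vector formed by the first two columns of the identity rotation should map to zero Euler angles:

```python
import numpy as np
from visualize.motions2hik import _rotation_6d_to_euler

# First two columns of the identity rotation, flattened into the 6D representation.
d6_identity = np.array([[1.0, 0.0, 0.0, 0.0, 1.0, 0.0]], dtype=np.float32)

euler_deg = _rotation_6d_to_euler(d6_identity)  # shape (1, 3), angles in degrees
print(euler_deg)                                # expected: [[0. 0. 0.]]
assert np.allclose(euler_deg, 0.0, atol=1e-4)
```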