├── data_folder
│   ├── smpl
│   │   ├── __init__.py
│   │   ├── LICENSE
│   │   └── smpl_np.py
│   └── taxonomy.json
├── humangenerator
│   ├── util
│   │   ├── __init__.py
│   │   ├── amass_util.py
│   │   ├── cloth3d_util.py
│   │   ├── IO.py
│   │   ├── blender_util.py
│   │   └── smplutils.py
│   ├── avail_datasets.yaml
│   ├── amass.json
│   ├── __init__.py
│   ├── generator.py
│   ├── cloth3d_gen.py
│   └── amass_gen.py
├── .gitignore
├── LICENSE.md
├── convert_fbx.py
├── notes.md
├── start_blend_debug.py
├── README.md
└── generate_sequence.py
--------------------------------------------------------------------------------
/data_folder/smpl/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/humangenerator/util/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/humangenerator/avail_datasets.yaml:
--------------------------------------------------------------------------------
1 | datasets: ["cloth3d","amass"]
2 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | textures/
2 | smpl_data.npz
3 | .idea/
4 | *.pyc
--------------------------------------------------------------------------------
/humangenerator/amass.json:
--------------------------------------------------------------------------------
1 | {
2 |   "sub_dataset_id": "CMU",
3 |   "num_betas": 10,
4 |   "num_dmpls": 8,
5 |   "subject_ids": "131"
6 | }
--------------------------------------------------------------------------------
/data_folder/taxonomy.json:
--------------------------------------------------------------------------------
1 | [
2 |   {
3 |     "_comment": "this file contains the paths to the currently supported datasets",
4 |     "sub_data_id": "CMU",
5 |     "path": ""
6 |   }
7 | ]
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | For licensing information, please refer to the main repository of the project located [here](https://github.com/eliabntt/GRADE-RR/). The same terms and conditions apply.
2 | -------------------------------------------------------------------------------- /humangenerator/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | # check the python version, only python 3.X is allowed: 5 | if sys.version_info.major < 3: 6 | raise Exception("HumanGenerator requires at least python 3.X to run.") 7 | 8 | sys.path.remove(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) 9 | from .util.blender_util import * 10 | from data_folder.smpl.smpl_np import SMPLModel 11 | from .generator import * 12 | -------------------------------------------------------------------------------- /data_folder/smpl/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 CalciferZh 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
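A minimal sketch of how the `SMPLModel` wrapper imported by `humangenerator/__init__.py` above (and defined in `data_folder/smpl/smpl_np.py` further below) can be exercised on its own. The model path is an assumption following the notes.md layout, and this assumes the pickle exposes the keys `smpl_np.py` expects:

```python
import numpy as np
from data_folder.smpl.smpl_np import SMPLModel

# assumed location; see notes.md for how to obtain the pkl files
model_path = "data_folder/surreal/datageneration/smpl_data/smpl/models/basicModel_m_lbs_10_207_0_v1.0.0.pkl"
smpl = SMPLModel(model_path)

verts, joints = smpl.set_params(
    pose=np.zeros((24, 3)),  # per-joint axis-angle rotations, root orientation first
    beta=np.zeros(10),       # shape PCA coefficients
    trans=np.zeros(3),       # global translation
)
print(verts.shape, joints.shape)  # (6890, 3) vertices and (24, 3) joints in the rest pose
```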
--------------------------------------------------------------------------------
/convert_fbx.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | import bpy
4 | import humangenerator as hgen
5 | import argparse
6 | import ipdb
7 | import sys
8 | import yaml
9 | 
10 | parser = argparse.ArgumentParser()
11 | parser.add_argument("--fbx", help="Path to the fbx file")
12 | parser.add_argument("--output_dir", help="Path to where the data should be saved")
13 | parser.add_argument("--temp_dir", help="Path to where the data should be temporarily saved")
14 | parser.add_argument("--usd", help="True if export usd is necessary, default to false", default="False")
15 | args = parser.parse_args()
16 | 
17 | 
18 | out_dir = args.output_dir
19 | if not os.path.exists(out_dir):
20 |     os.makedirs(out_dir)
21 | fbx = args.fbx
22 | for o in bpy.context.scene.objects:
23 |     o.select_set(True)
24 | 
25 | # Call the delete operator only once, with everything selected
26 | bpy.ops.object.delete()
27 | 
28 | with open(os.path.join(out_dir, f"out.txt"), "w") as file_out, open(
29 |         os.path.join(out_dir, f"err.txt"), "w") as file_err:
30 |     try:
31 |         sys.stdout = file_out
32 |         sys.stderr = file_err
33 |         bpy.ops.import_scene.fbx(filepath=fbx)
34 | 
35 |         filepath = os.path.join(out_dir, os.path.basename(fbx[:-4]) + ".usd")
36 |         temp_filepath = os.path.join(args.temp_dir, os.path.basename(fbx[:-4]) + ".usd")
37 | 
38 |         # fixed: 'temp_path' was undefined here; export_data expects the temporary directory
39 |         hgen.export_data(args.temp_dir, out_dir, os.path.basename(fbx[:-4]), False, None, {}, {}, False, args.usd.lower() == "true")
40 | 
41 |         bpy.ops.object.select_all(action='SELECT')
42 |         bpy.ops.object.delete()
43 |         succeed = True
44 |     except:
45 |         import traceback
46 |         sys.stderr.write('error\n')
47 |         sys.stderr.write(traceback.format_exc())
48 |     finally:
49 |         sys.stdout.flush()
50 |         sys.stderr.flush()
51 |         sys.stdout = sys.__stdout__
52 |         sys.stderr = sys.__stderr__
--------------------------------------------------------------------------------
/notes.md:
--------------------------------------------------------------------------------
1 | Installation instructions
2 | 
3 | From the `generate_people` folder
4 | 
5 | ```
6 | mkdir data_folder
7 | cd data_folder
8 | git clone https://github.com/gulvarol/surreact surreal
9 | ```
10 | 
11 | - Download the following two fbx files for SMPL for Maya from https://smpl.is.tue.mpg.de/ using your credentials. Please comply with their license. The files are `basicModel_f_lbs_10_207_0_v1.0.2.fbx` and `basicModel_m_lbs_10_207_0_v1.0.2.fbx` and can be downloaded with this [link](https://download.is.tue.mpg.de/download.php?domain=smpl&sfile=SMPL_maya.zip). Place them in `.../surreal/datageneration/smpl_data`.
12 | 
13 | - download this [pkl](https://raw.githubusercontent.com/gulvarol/surreal/master/datageneration/pkl/segm_per_v_overlap.pkl) and place it in `.../surreal/datageneration/smpl_data`
14 | 
15 | - get [SMPL_python_v.1.0.0](https://download.is.tue.mpg.de/download.php?domain=smpl&sfile=SMPL_python_v.1.0.0.zip). Extract the basicModel\_[m,f]\_lbs\_10\_207\_0\_v1.0.0.pkl files. Place those two files in `.../surreal/datageneration/smpl_data/smpl/models/basicModel_{f,m}_lbs_10_207_0_v1.0.0.pkl`.
Run `mv basicmodel_m_lbs_10_207_0_v1.0.0.pkl basicModel_m_lbs_10_207_0_v1.0.0.pkl` (the male model is shipped with a lowercase name).
16 | 
17 | - `cp .../surreal/datageneration/misc/prepare_smpl_data/extract_J_regressors.py .../surreal/datageneration/smpl_data/smpl/`
18 | - run `python3 extract_J_regressors.py`
19 | 
20 | ## Surreal Textures
21 | - Accept the surreal terms and get an account (you will need the username and password to download the textures)
22 | 
23 | - get the download script https://github.com/gulvarol/surreal/blob/master/download/download_smpl_data.sh and place it somewhere you like;
24 |   let's call this location "loc"
25 | 
26 | - download this file https://github.com/gulvarol/surreal/blob/master/download/files/files_smpl_data.txt
27 |   and place it at "loc/files/files_smpl_data.txt" (alongside the download script)
28 | 
29 |   essentially you have ./loc/{script,files/files_smpl_data.txt}
30 | 
31 | - call the download script with `./download_smpl_data.sh /yourpath/surreal/datageneration/smpl_data username_surreal pw_surreal`
32 | _____
33 | 
34 | At this point you should have
35 | smpl_data/basicModel_{f,m}_lbs_10_207_0_v1.0.2.fbx
36 | smpl_data/smpl/models/basicModel_{f,m}_lbs_10_207_0_v1.0.0.pkl
37 | smpl_data/segm_per_v_overlap.pkl
38 | smpl_data/joint_regressors.pkl
39 | _____
40 | 
41 | ## For AMASS
42 | 
43 | - create a `body_models` folder in `data_folder`
44 | - create `smplh` and `dmpls` folders inside it
45 | - download [dmpls](https://download.is.tue.mpg.de/download.php?domain=smpl&sfile=dmpls.tar.xz) (DMPLs compatible with SMPL) and [smplh](https://mano.is.tue.mpg.de/download.php), choosing the `Extended SMPLH model for AMASS` (accepting the respective licenses), and extract them there.
46 | 
47 | NOTE:
48 | If exporting WITH the cache, the hand movement will be complete; if exporting WITHOUT the cache it will not be, as the base model for the blendshapes is the SMPL model WITHOUT hands. It shouldn't be too difficult to adapt the code to your needs eventually.
49 | TESTED ONLY WITH CMU DATA
--------------------------------------------------------------------------------
/start_blend_debug.py:
--------------------------------------------------------------------------------
1 | import bpy
2 | import sys
3 | import ipdb
4 | import os
5 | from pathlib import Path
6 | from bl_ui.space_text import TEXT_MT_editor_menus
7 | 
8 | repo_root_directory = os.path.join(os.path.dirname(__file__), ".")
9 | sys.path.append(repo_root_directory)
10 | 
11 | argv = sys.argv[sys.argv.index("--") + 1:]
12 | bpy.context.window.workspace = bpy.data.workspaces["Scripting"]
13 | bpy.context.view_layer.update()
14 | if argv[0].endswith(".py"):
15 |     print(f"Loading: {os.path.join(os.path.dirname(os.path.abspath(__file__)), argv[0])}")
16 |     text = bpy.data.texts.load(os.path.join(os.path.dirname(os.path.abspath(__file__)), argv[0]))
17 |     sys.argv = argv[:]
18 |     print(f"New argv: {sys.argv}")
19 | else:
20 |     print("First argument should be the script file")
21 |     exit(-1)
22 | 
23 | # Declare the operator that runs the loaded HumanGenerator script
24 | class RunHumanGeneratorOperator(bpy.types.Operator):
25 |     bl_idname = "wm.run_humangenerator"
26 |     bl_label = "Run Human Generator"
27 |     bl_description = "This operator runs the loaded HumanGenerator script and also makes sure to unload all modules before starting."
28 |     bl_options = {"REGISTER"}
29 | 
30 |     def execute(self, context):
31 |         # Delete all loaded humangenerator modules, as they are cached inside Blender
32 |         for module in list(sys.modules.keys()):
33 |             if module.startswith("humangenerator"):
34 |                 del sys.modules[module]
35 | 
36 |         # Make sure the parent of the humangenerator folder is in sys.path
37 |         import_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "."))
38 |         if import_path not in sys.path:
39 |             sys.path.append(import_path)
40 | 
41 |         # Run the script
42 |         try:
43 |             bpy.ops.text.run_script()
44 |         except RuntimeError:
45 |             # Skip irrelevant error messages (the relevant stacktrace+error has already been printed at this point)
46 |             pass
47 |         return {"FINISHED"}
48 | 
49 | bpy.utils.register_class(RunHumanGeneratorOperator)
50 | 
51 | def draw(self, context):
52 |     layout = self.layout
53 | 
54 |     st = context.space_data
55 |     text = st.text
56 |     is_syntax_highlight_supported = st.is_syntax_highlight_supported()
57 |     layout.template_header()
58 | 
59 |     TEXT_MT_editor_menus.draw_collapsible(context, layout)
60 | 
61 |     if text and text.is_modified:
62 |         row = layout.row(align=True)
63 |         row.alert = True
64 |         row.operator("text.resolve_conflict", text="", icon='HELP')
65 | 
66 |     layout.separator_spacer()
67 | 
68 |     row = layout.row(align=True)
69 |     row.template_ID(st, "text", new="text.new",
70 |                     unlink="text.unlink", open="text.open")
71 | 
72 |     if text:
73 |         is_osl = text.name.endswith((".osl", ".oso"))
74 |         if is_osl:
75 |             row.operator("node.shader_script_update",
76 |                          text="", icon='FILE_REFRESH')
77 |         else:
78 |             row = layout.row()
79 |             row.active = is_syntax_highlight_supported
80 |             # The following line has changed compared to the original code, it starts our operator instead of text.run_script
81 |             row.operator("wm.run_humangenerator", text="Run")
82 | 
83 |     layout.separator_spacer()
84 | 
85 |     row = layout.row(align=True)
86 |     row.prop(st, "show_line_numbers", text="")
87 |     row.prop(st, "show_word_wrap", text="")
88 | 
89 |     syntax = row.row(align=True)
90 |     syntax.active = is_syntax_highlight_supported
91 |     syntax.prop(st, "show_syntax_highlight", text="")
92 | 
93 | # Set our draw function as the default draw function for text area headers
94 | bpy.types.TEXT_HT_header.draw = draw
95 | 
96 | # Put the text into the scripting tool
97 | for area in bpy.data.workspaces["Scripting"].screens[0].areas.values():
98 |     if area.type == 'TEXT_EDITOR':
99 |         area.spaces.active.text = text
--------------------------------------------------------------------------------
/humangenerator/generator.py:
--------------------------------------------------------------------------------
1 | import os
2 | from random import choice
3 | import bpy
4 | from .util.smplutils import SMPL_Body, rotate_vector
5 | from .cloth3d_gen import *
6 | from .amass_gen import *
7 | from .util.blender_util import export_stl_data, write_pkl_data, write_usd
8 | 
9 | 
10 | 
11 | 
12 | def get_processor(dataset, parent_path, with_cache, path_out, path_samples, smpl_models, write_verts, config={}):
13 |     if dataset == "cloth3d":
14 |         return cloth3d(parent_path, with_cache, path_out, path_samples, smpl_models, write_verts), path_samples
15 |     if dataset == "amass":  # todo fixme
16 |         tmp_obj = amass(parent_path, with_cache, path_out, path_samples, smpl_models, write_verts, config)
17 |         return tmp_obj, path_samples
18 |     raise Exception("NOT A VALID DATASET")
19 | 
20 | 
21 | def export_data(temp_path, path_out, sample, with_cache, frame, info, orient, write_verts, usd=True):
22 |     try:
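# (overview of the block below: optionally write the USD first, then locate the selected
#  Body/Armature objects, then dump the STL trace and the pkl metadata; any exception
#  makes export_data return False)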
23 |         if usd:
24 |             write_usd(temp_path, path_out, sample + ('_with_cache' if with_cache else ''), with_cache,
25 |                       frame is None, 0 if frame is None else frame)
26 |         for obj in bpy.data.objects.values():
27 |             if "body" in obj.name.lower() and obj.select_get():
28 |                 ob = obj
29 |             elif "armature" in obj.name.lower() and obj.select_get():
30 |                 arm_ob = obj
31 | 
32 |         export_stl_data(path_out, sample + ('_with_cache' if with_cache else ''),
33 |                         [ob for ob in bpy.data.objects if ob.select_get()], orient)
34 |         write_pkl_data(path_out, sample + ('_with_cache' if with_cache else ''), arm_ob, ob, info, write_verts=write_verts)
35 |     except:
36 |         return False
37 |     return True
38 | 
39 | 
40 | def create_outfolder_structure(path_out, subfolder_name, with_cache):
41 |     # both branches of the original if/else built the very same path, so no with_cache check is needed
42 |     path_cache = os.path.join(path_out, subfolder_name, 'view_cache')
43 |     if not os.path.exists(path_cache):
44 |         os.makedirs(path_cache)
45 |     return path_cache
46 | 
47 | 
48 | 
49 | 
50 | 
51 | 
52 | class generator:
53 |     def __init__(self, smpl_path, write_verts=False):
54 |         self.SMPL_PATH = smpl_path
55 | 
56 |     def pick_skin_texture(self, split_name='all', clothing_option="grey", gender="m"):
57 |         if gender == "f":
58 |             with open(
59 |                     os.path.join(self.SMPL_PATH, "textures", "female_{}.txt".format(split_name))
60 |             ) as f:
61 |                 txt_paths = f.read().splitlines()
62 |         else:
63 |             with open(
64 |                     os.path.join(self.SMPL_PATH, "textures", "male_{}.txt".format(split_name))
65 |             ) as f:
66 |                 txt_paths = f.read().splitlines()
67 | 
68 |         # if using only one source of clothing
69 |         if clothing_option == "nongrey":
70 |             txt_paths = [k for k in txt_paths if "nongrey" in k]
71 |         elif clothing_option == "grey":
72 |             txt_paths = [k for k in txt_paths if "nongrey" not in k]
73 |         elif clothing_option == "same":
74 |             # original default texture
75 |             txt_paths = ["textures/male/nongrey_male_0244.jpg"]
76 |         elif clothing_option == "all":
77 |             txt_paths = [k for k in txt_paths]
78 | 
79 |         # random clothing texture
80 |         cloth_img_name = choice(txt_paths)
81 |         cloth_img_name = os.path.join(self.SMPL_PATH, cloth_img_name)
82 |         print("Picked skin texture: {}".format(cloth_img_name))
83 |         return cloth_img_name
84 | 
85 |     def create_material_SMPL(self, gender="m", person_no=0, clothing_option="grey", split_name="all"):
86 |         print("Creating SMPL texture material")
87 |         cloth_img_name = self.pick_skin_texture(split_name, clothing_option, gender)
88 |         material = bpy.data.materials.new(name=f"Material_{person_no}")
89 |         material.use_nodes = True
90 | 
91 |         # Add nodes
92 |         tree = material.node_tree
93 |         nodes = tree.nodes
94 |         # Principled BSDF
95 |         bsdf = nodes['Principled BSDF']
96 |         # Image
97 |         img = nodes.new('ShaderNodeTexImage')
98 |         img.image = bpy.data.images.load(cloth_img_name)
99 |         # Links
100 |         tree.links.new(img.outputs[0], bsdf.inputs[0])
101 |         return material
102 | 
103 |     def load_SMPLs_objects(self):
104 |         # create the material for SMPL
105 |         material = self.create_material_SMPL("m", 0)
106 |         print("Male Material Created")
107 |         smpl_body_list = []
108 |         # create the SMPL_Body object
109 |         smpl_body_list.append(
110 |             SMPL_Body(self.SMPL_PATH, material, 0, "male", person_no=0)
111 |         )
112 |         print("Male created")
113 | 
114 |         material = self.create_material_SMPL("f", 1)
115 |         print("Female material created")
116 |         smpl_body_list.append(
117 |             SMPL_Body(self.SMPL_PATH, material, 0, "female", person_no=1)
118 |         )
119 | 
print("Female created") 120 | return smpl_body_list 121 | -------------------------------------------------------------------------------- /humangenerator/cloth3d_gen.py: -------------------------------------------------------------------------------- 1 | from humangenerator.util.blender_util import * 2 | import bpy 3 | from .util.cloth3d_util import loadInfo, bodyCache, loadGarment 4 | import humangenerator as hgen 5 | from pathlib import Path 6 | 7 | class cloth3d: 8 | def __init__(self, parent_path, with_cache, path_out, path_samples, smpl_models, write_verts): 9 | from humangenerator.generator import generator 10 | # temporary usd export path, we cannot directly write in mounted network drives sometimes 11 | temp_path = os.path.join(parent_path, 'usd_exports') 12 | # surreal path for textures 13 | smpl_path = os.path.join(parent_path, "surreal", "datageneration", "smpl_data") 14 | 15 | self.generator = generator(smpl_path) 16 | self.with_cache = with_cache 17 | self.path_out = path_out 18 | self.path_samples = path_samples 19 | self.smpl = smpl_models 20 | self.temp_path = temp_path 21 | self.write_verts = (write_verts == "True") 22 | 23 | def animateSMPL(self, sample, smpl_ob, info, j): 24 | if self.with_cache: 25 | bodyCache(self.path_cache, sample, info, smpl_ob.ob, self.smpl) 26 | 27 | # generate blendshapes + trans 28 | s = info['shape'] 29 | smpl_ob.reset_joint_positions(s, bpy.data.scenes["Scene"]) 30 | if len(info['poses'].shape) > 1: 31 | N = info['poses'].shape[1] 32 | else: 33 | sys.stderr.write('Error animation is ONLY ONE FRAME \n') 34 | N = 1 35 | for i in range(N): 36 | if N > 1: 37 | p = info['poses'][:, i] 38 | t = info['trans'][:, i].reshape((3,)) - j[0] 39 | else: 40 | p = info['poses'][:] 41 | t = info['trans'][:].reshape((3,)) - j[0] 42 | bpy.data.scenes["Scene"].frame_set(i) 43 | smpl_ob.apply_trans_pose_shape(t, p, s, i, with_blendshapes=not self.with_cache) 44 | 45 | def generate_SMPLbody_animation(self, sample, info, gender, index): 46 | print("Generate Animation..") 47 | if len(info['poses'].shape) > 1: 48 | p = info['poses'][:, 0].reshape((24, 3)) 49 | t = info['trans'][:, 0].reshape((3,)) 50 | else: 51 | p = info['poses'][:].reshape((24, 3)) 52 | t = info['trans'][:].reshape((3,)) 53 | 54 | s = info['shape'] 55 | v, j = self.smpl[gender].set_params(pose=p, beta=s, trans=t) 56 | 57 | cloth_img_name = self.generator.pick_skin_texture(gender=gender, clothing_option="grey") 58 | img = bpy.data.materials[f'Material_{index}'].node_tree.nodes["Image Texture"] 59 | img.image = bpy.data.images.load(cloth_img_name) 60 | material = bpy.data.materials[f'Material_{index}'] 61 | 62 | self.smpl_body_list[index].refine_SMPL(material, j, info['zrot']) 63 | self.animateSMPL(sample, self.smpl_body_list[index], info, j) 64 | 65 | # Smooth 66 | bpy.ops.object.shade_smooth() 67 | 68 | def loadCloth3DSequence(self, sample: str, info: dict, frame: int = None): 69 | if len(info['poses'].shape) > 1: 70 | bpy.context.scene.frame_end = info['poses'].shape[-1] - 1 71 | else: 72 | bpy.context.scene.frame_end = 1 73 | bpy.ops.object.select_all(action='DESELECT') 74 | # delete current garments 75 | for obj in bpy.data.objects.values(): 76 | if 'body' not in obj.name.lower() and 'armature' not in obj.name.lower(): 77 | obj.select_set(True) 78 | bpy.ops.object.delete() 79 | 80 | # Load new garments 81 | for garment in info['outfit']: 82 | loadGarment(self.path_samples, self.path_cache, sample, garment, info) 83 | 84 | for obj in bpy.data.objects.values(): 85 | obj.select_set(False) 86 | 87 | 
gender = 'm' if info['gender'] else 'f' 88 | index = 0 if info['gender'] else 1 89 | self.generate_SMPLbody_animation(sample, info, gender, index) 90 | 91 | bpy.context.view_layer.objects.active = bpy.data.objects[f'Armature_{index}'] 92 | arm_obj = bpy.data.objects[f'Armature_{index}'] 93 | bpy.context.scene.frame_current = bpy.context.scene.frame_start 94 | 95 | for obj in bpy.data.objects.values(): 96 | if 'body' not in obj.name.lower() and 'armature' not in obj.name.lower(): 97 | obj.select_set(True) 98 | obj.parent = arm_obj 99 | obj.rotation_euler = [0, 0, 0] 100 | obj.select_set(False) 101 | 102 | for obj in bpy.data.objects.values(): 103 | if 'armature' not in obj.name.lower() and 'body' not in obj.name.lower(): 104 | obj.select_set(True) 105 | else: 106 | if str(index) in obj.name: 107 | obj.select_set(True) 108 | 109 | if frame != None and frame >= 0 and frame <= bpy.context.scene.frame_end: 110 | bpy.context.scene.frame_current = frame 111 | 112 | def process_sample(self, sample: str, frame: int, smpl_body_list): 113 | # load info 114 | info = loadInfo(os.path.join(self.path_samples, sample, 'info.mat')) 115 | 116 | self.smpl_body_list = smpl_body_list 117 | subfolder_name = Path(sample).stem + ('_with_cache' if self.with_cache else '') 118 | self.path_cache = hgen.create_outfolder_structure(self.path_out, subfolder_name, self.with_cache) 119 | 120 | if frame is None: 121 | self.loadCloth3DSequence(sample, info) 122 | else: 123 | self.loadCloth3DSequence(sample, info, frame) 124 | 125 | bpy.ops.wm.save_as_mainfile(filepath=os.path.join(self.path_out, subfolder_name, subfolder_name + ".blend")) 126 | return hgen.export_data(self.temp_path, self.path_out, Path(sample).stem, self.with_cache, frame, info, info['zrot'], self.write_verts) -------------------------------------------------------------------------------- /data_folder/smpl/smpl_np.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import numpy as np 3 | import pickle 4 | 5 | class SMPLModel(): 6 | def __init__(self, model_path): 7 | """ 8 | SMPL model. 9 | 10 | Parameter: 11 | --------- 12 | model_path: Path to the SMPL model parameters, pre-processed by 13 | `preprocess.py`. 14 | 15 | """ 16 | with open(model_path, 'rb') as f: 17 | if sys.version_info[0] == 2: 18 | params = pickle.load(f) # Python 2.x 19 | elif sys.version_info[0] == 3: 20 | params = pickle.load(f, encoding='latin1') # Python 3.x 21 | self.J_regressor = params['J_regressor'] 22 | self.weights = params['weights'] 23 | self.posedirs = params['posedirs'] 24 | self.v_template = params['v_template'] 25 | self.shapedirs = params['shapedirs'] 26 | self.faces = params['f'] 27 | self.kintree_table = params['kintree_table'] 28 | 29 | id_to_col = { 30 | self.kintree_table[1, i]: i for i in range(self.kintree_table.shape[1]) 31 | } 32 | self.parent = { 33 | i: id_to_col[self.kintree_table[0, i]] 34 | for i in range(1, self.kintree_table.shape[1]) 35 | } 36 | 37 | self.pose_shape = [24, 3] 38 | self.beta_shape = [10] 39 | self.trans_shape = [3] 40 | 41 | self.pose = np.zeros(self.pose_shape) 42 | self.beta = np.zeros(self.beta_shape) 43 | self.trans = np.zeros(self.trans_shape) 44 | 45 | self.verts = None 46 | self.J = None 47 | self.R = None 48 | 49 | self.update() 50 | 51 | def set_params(self, pose=None, beta=None, trans=None): 52 | """ 53 | Set pose, shape, and/or translation parameters of SMPL model. Verices of the 54 | model will be updated and returned. 
55 | 56 | Parameters: 57 | --------- 58 | pose: Also known as 'theta', a [24,3] matrix indicating child joint rotation 59 | relative to parent joint. For root joint it's global orientation. 60 | Represented in a axis-angle format. 61 | 62 | beta: Parameter for model shape. A vector of shape [10]. Coefficients for 63 | PCA component. Only 10 components were released by MPI. 64 | 65 | trans: Global translation of shape [3]. 66 | 67 | Return: 68 | ------ 69 | Updated vertices. 70 | 71 | """ 72 | if pose is not None: 73 | self.pose = pose 74 | if beta is not None: 75 | self.beta = beta 76 | if trans is not None: 77 | self.trans = trans 78 | self.update() 79 | return self.verts, self.J 80 | 81 | def update(self): 82 | """ 83 | Called automatically when parameters are updated. 84 | 85 | """ 86 | # how beta affect body shape 87 | v_shaped = self.shapedirs.dot(self.beta) + self.v_template 88 | # joints location 89 | self.J = self.J_regressor.dot(v_shaped) 90 | pose_cube = self.pose.reshape((-1, 1, 3)) 91 | # rotation matrix for each joint 92 | self.R = self.rodrigues(pose_cube) 93 | I_cube = np.broadcast_to( 94 | np.expand_dims(np.eye(3), axis=0), 95 | (self.R.shape[0]-1, 3, 3) 96 | ) 97 | lrotmin = (self.R[1:] - I_cube).ravel() 98 | # how pose affect body shape in zero pose 99 | v_posed = v_shaped + self.posedirs.dot(lrotmin) 100 | # world transformation of each joint 101 | G = np.empty((self.kintree_table.shape[1], 4, 4)) 102 | G[0] = self.with_zeros(np.hstack((self.R[0], self.J[0, :].reshape([3, 1])))) 103 | for i in range(1, self.kintree_table.shape[1]): 104 | G[i] = G[self.parent[i]].dot( 105 | self.with_zeros( 106 | np.hstack( 107 | [self.R[i],((self.J[i, :]-self.J[self.parent[i],:]).reshape([3,1]))] 108 | ) 109 | ) 110 | ) 111 | G = G - self.pack( 112 | np.matmul( 113 | G, 114 | np.hstack([self.J, np.zeros([24, 1])]).reshape([24, 4, 1]) 115 | ) 116 | ) 117 | # transformation of each vertex 118 | T = np.tensordot(self.weights, G, axes=[[1], [0]]) 119 | rest_shape_h = np.hstack((v_posed, np.ones([v_posed.shape[0], 1]))) 120 | v = np.matmul(T, rest_shape_h.reshape([-1, 4, 1])).reshape([-1, 4])[:, :3] 121 | self.verts = v + self.trans.reshape([1, 3]) 122 | 123 | def rodrigues(self, r): 124 | """ 125 | Rodrigues' rotation formula that turns axis-angle vector into rotation 126 | matrix in a batch-ed manner. 127 | 128 | Parameter: 129 | ---------- 130 | r: Axis-angle rotation vector of shape [batch_size, 1, 3]. 131 | 132 | Return: 133 | ------- 134 | Rotation matrix of shape [batch_size, 3, 3]. 135 | 136 | """ 137 | theta = np.linalg.norm(r, axis=(1, 2), keepdims=True) 138 | # avoid zero divide 139 | theta = np.maximum(theta, np.finfo(np.float64).tiny) 140 | r_hat = r / theta 141 | cos = np.cos(theta) 142 | z_stick = np.zeros(theta.shape[0]) 143 | m = np.dstack([ 144 | z_stick, -r_hat[:, 0, 2], r_hat[:, 0, 1], 145 | r_hat[:, 0, 2], z_stick, -r_hat[:, 0, 0], 146 | -r_hat[:, 0, 1], r_hat[:, 0, 0], z_stick] 147 | ).reshape([-1, 3, 3]) 148 | i_cube = np.broadcast_to( 149 | np.expand_dims(np.eye(3), axis=0), 150 | [theta.shape[0], 3, 3] 151 | ) 152 | A = np.transpose(r_hat, axes=[0, 2, 1]) 153 | B = r_hat 154 | dot = np.matmul(A, B) 155 | R = cos * i_cube + (1 - cos) * dot + np.sin(theta) * m 156 | return R 157 | 158 | def with_zeros(self, x): 159 | """ 160 | Append a [0, 0, 0, 1] vector to a [3, 4] matrix. 161 | 162 | Parameter: 163 | --------- 164 | x: Matrix to be appended. 
165 | 
166 |     Return:
167 |     ------
168 |     Matrix after appending of shape [4, 4]
169 | 
170 |     """
171 |     return np.vstack((x, np.array([[0.0, 0.0, 0.0, 1.0]])))
172 | 
173 |   def pack(self, x):
174 |     """
175 |     Append zero matrices of shape [4, 3] to vectors of [4, 1] shape in a batched
176 |     manner.
177 | 
178 |     Parameter:
179 |     ----------
180 |     x: Matrices to be appended of shape [batch_size, 4, 1]
181 | 
182 |     Return:
183 |     ------
184 |     Matrix of shape [batch_size, 4, 4] after appending.
185 | 
186 |     """
187 |     return np.dstack((np.zeros((x.shape[0], 4, 3)), x))
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Human animations to USD
2 | 
3 | ## This repository is part of the [GRADE](https://eliabntt.github.io/GRADE-RR/home) project
4 | 
5 | ### This was tested on Windows, using the Omniverse-suggested drivers and CUDA version.
6 | 
7 | The goal of this code is to show how you can convert any SMPL-based animation to a USD-based animation.
8 | The script can manage both mesh caches and skeletal animations: it can export point-sequence-based animations as well as skeletal-based ones.
9 | 
10 | ### Installation instructions
11 | 
12 | Install the Blender connector from the Omniverse launcher. This code was tested with version 3.4.0-usd.101.0 (main branch). For the paper we used 3.1.0-usd.100.1.10.
13 | 
14 | Some limitations of 3.1.0-usd.100.1.10:
15 | - you might need to use the mesh cache modifier instead of the blendshapes. There is a _minimal_ difference that arises when loading the animation in Omniverse's products.
16 | - keep textures with absolute paths. You can replace them whenever you want afterwards with our tool [USD_text_replace](https://github.com/eliabntt/GRADE-RR/tree/main/scripts/process_paths)
17 | 
18 | Install the necessary *dependencies*. Locate the Blender installation path and run `python.exe -m pip install ipdb pyquaternion scipy torch pyyaml chumpy`.
19 | e.g. in my case `C:\User\ebonetto\AppData\Local\ov\pkg\blender-3.4.0-usd.101.0\Release\3.4\python\bin\python.exe -m pip install ipdb pyquaternion scipy torch pyyaml chumpy`
20 | 
21 | Additionally, you need to follow [this]() to add the missing installation files that we cannot redistribute because of licensing.
22 | 
23 | ### Already supported datasets and how to expand
24 | 
25 | We already support two datasets: [Cloth3D](https://chalearnlap.cvc.uab.cat/dataset/38/description/) and [AMASS](https://amass.is.tue.mpg.de/).
26 | 
27 | If you want to add a different sub-dataset for AMASS, you need to add it to the `data_folder/taxonomy.json` file.
28 | 
29 | ### Run the code
30 | 
31 | *From the cloned repository main folder*
32 | 
33 | `\AppData\Local\ov\pkg\blender-3.4.0-usd.101.0\Release\blender.exe --python-use-system-env --python-exit-code 0 --python start_blend_debug.py -- generate_sequence.py --dataset ... --output_dir ... --samples_dir ... --last_sample ... --parent_path ... --sample_id ...`
34 | 
35 | The parameters are explained in the code or are self-explanatory.
36 | `dataset` can be either of `[cloth3d, amass]`. With `amass` a configuration file must be included (e.g. `--config_file this_repo\humangenerator\amass.json`). We provide a sample config [here](https://github.com/eliabntt/generate_people/blob/main/humangenerator/amass.json).
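For reference, `subject_ids` in that config is split on whitespace by `amass_gen.py` (`config['subject_ids'].split()`), so several subjects can be listed in one string; a sketch, where the extra subject IDs are purely illustrative:

```json
{
  "sub_dataset_id": "CMU",
  "num_betas": 10,
  "num_dmpls": 8,
  "subject_ids": "131 132 143"
}
```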
37 | 
38 | Note that AMASS processes its folder directly (by querying subfolders), unlike Cloth3D, for which you need to give the main parent folder (e.g. `cloth3d/train_t1`).
39 | 
40 | `sample_id`: if it is an ID, that ID will be processed; otherwise, set it to `all` or leave it empty and the whole set of data will be processed.
41 | 
42 | `last_sample` is used when `sample_id` is empty and signals where to restart the processing.
43 | 
44 | When running multiple generations, the code automatically and periodically _cleans_ the whole simulation environment, including textures and materials, to avoid crashes.
45 | 
46 | - Cloth3D single sample example `--python-use-system-env --python-exit-code 0 --python start_blend_debug.py -- generate_sequence.py --dataset cloth3d --output_dir outdir --samples_dir cloth3d\train --last_sample 01056 --parent_path D:\generate_people\data_folder\ --sample_id 01056`
47 | 
48 | - AMASS `--python-use-system-env --python-exit-code 0 --python start_blend_debug.py -- generate_sequence.py --dataset amass --output_dir D:\cloth3d\exported_usd --samples_dir D:\AMASS\CMU\ --parent_path D:\Cloth3D_to_usd\data_folder\ --config_file D:\Cloth3D_to_usd\humangenerator\amass.json`
49 | 
50 | ### How does it work
51 | 
52 | The texture of the person is picked at random. In the Cloth3D case the candidates are the textures with underwear; with AMASS, the ones with clothes.
53 | 
54 | You can export the SMPL information, the vertex info, the USD file, the STL trace of the animation, and much more.
55 | 
56 | You can also suppress the output from the shell. However, the USD exporter forcibly writes directly to stdout; I have found no redirect strategy that works.
57 | 
58 | The system will replicate the input folder structure in the output folder.
59 | 
60 | You can also select a single frame.
61 | 
62 | You are encouraged to extend this and create pull requests.
63 | 
64 | Cloth3D clothes are loaded and exported as MeshCaches.
65 | 
66 | For the human animations you can choose either mode.
67 | 
68 | ### How to edit
69 | 
70 | You can create your own processor by creating a new class [here](https://github.com/eliabntt/generate_people/tree/main/humangenerator), adding your dataset name [here](https://github.com/eliabntt/generate_people/blob/main/humangenerator/avail_datasets.yaml), and adding the corresponding branch [here](https://github.com/eliabntt/generate_people/blob/main/humangenerator/generator.py#L17); a minimal skeleton is sketched at the end of this section.
71 | 
72 | In practice, you need to write your own python `dataset_gen.py`.
73 | 
74 | That file needs to have a `process_sample` method, which will then be called by the main script.
75 | 
76 | Within `process_sample` you take care either of a single sample (CLOTH3D) or of the whole folder (AMASS). Your choice.
77 | 
78 | The processing covers everything from loading the animation to writing the data.
79 | 
80 | In the main script there is a call to `get_processor` that returns `processor, PATH_SAMPLES`; `processor` is the instance of the class you just created.
81 | 
82 | A few lines below you find `res = processor.process_sample(sample, frame, smpl_body_list)`.
83 | 
84 | ### Some notes
85 | The exported USDs default to 24 fps. We did not investigate this much. You can change it by converting the USD to its text form and editing the fps value (the 4th line) to 30. This value influences how the mesh is loaded into the simulation by the scripts used in GRADE.
86 | In our work we did NOT change this value.
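To make the "How to edit" recipe above concrete, here is a minimal skeleton of such a processor. The class name `mydata` and its internals are illustrative only; the `export_data` call mirrors the signature in `humangenerator/generator.py`:

```python
import os
import humangenerator as hgen

class mydata:
    def __init__(self, parent_path, with_cache, path_out, path_samples, smpl_models, write_verts):
        self.with_cache = with_cache
        self.path_out = path_out
        self.path_samples = path_samples
        self.smpl = smpl_models
        self.write_verts = (write_verts == "True")
        # temporary usd export path, same convention as the cloth3d/amass processors
        self.temp_path = os.path.join(parent_path, 'usd_exports')

    def process_sample(self, sample: str, frame: int, smpl_body_list):
        # 1) load the sample's annotations, 2) animate the SMPL bodies in smpl_body_list,
        # 3) export through the shared helper; info and orient (0 here) are placeholders
        info = {}
        return hgen.export_data(self.temp_path, self.path_out, sample, self.with_cache,
                                frame, info, 0, self.write_verts)
```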
87 | 88 | 89 | _______ 90 | 91 | ### LICENSING 92 | 93 | For licensing information, please refer to the main repository located [here](https://github.com/eliabntt/GRADE-RR/). 94 | __________ 95 | ### CITATION 96 | If you find this work useful please cite our work based on [this](https://github.com/eliabntt/GRADE-RR#citation) information 97 | 98 | __________ 99 | 100 | ### Acknowledgment 101 | Code based on 102 | - [blenderproc](https://github.com/DLR-RM/BlenderProc/) 103 | - [amass](https://amass.is.tue.mpg.de/) 104 | - [Cloth3D starter kit](http://158.109.8.102/CLOTH3D/StarterKit.zip) 105 | - [surreact](https://github.com/gulvarol/surreact) and [surreal](https://github.com/gulvarol/surreal) 106 | -------------------------------------------------------------------------------- /humangenerator/amass_gen.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from humangenerator.util.blender_util import * 3 | import bpy 4 | from .util.amass_util import loadInfo, bodyCache, _load_parametric_body_model, _get_supported_mocap_datasets, \ 5 | _get_sequence_path 6 | import humangenerator as hgen 7 | 8 | 9 | class amass: 10 | def __init__(self, parent_path, with_cache, path_out, path_samples, smpl_models, write_verts, config): 11 | # temporary usd export path, we cannot directly write in mounted network drives sometimes 12 | temp_path = os.path.join(parent_path, 'usd_exports') 13 | # surreal path for textures 14 | smpl_path = os.path.join(parent_path, "surreal", "datageneration", "smpl_data") 15 | 16 | from humangenerator.generator import generator 17 | self.generator = generator(smpl_path) 18 | self.with_cache = with_cache 19 | self.path_out = path_out 20 | self.path_samples = path_samples 21 | self.smpl = smpl_models 22 | self.sub_dataset_id = config['sub_dataset_id'] 23 | self.num_betas = config['num_betas'] 24 | self.num_dmpls = config['num_dmpls'] 25 | self.subject_ids = config['subject_ids'].split() 26 | self.write_verts = (write_verts == "True") 27 | 28 | self.temp_path = temp_path 29 | self.body_model_m, self.faces_m = _load_parametric_body_model(parent_path, "male", self.num_betas, 30 | self.num_dmpls) 31 | self.body_model_f, self.faces_f = _load_parametric_body_model(parent_path, "female", self.num_betas, 32 | self.num_dmpls) 33 | 34 | taxonomy_file_path = os.path.join(parent_path, "taxonomy.json") 35 | self.supported_datasets = _get_supported_mocap_datasets(taxonomy_file_path, path_samples) 36 | 37 | def animateSMPL(self, sample, smpl_ob, info, body_model): 38 | if self.with_cache: 39 | bodyCache(self.path_cache, sample, info, smpl_ob.ob, body_model, self.num_betas, self.num_dmpls) 40 | 41 | # generate blendshapes + trans 42 | s = info['betas'][:10] 43 | smpl_ob.reset_joint_positions(s, bpy.data.scenes["Scene"]) 44 | 45 | for i in range(info['poses'].shape[0]): 46 | p = np.append(info['poses'][i][:66].reshape(-1, 3), [[0, 0, 0], [0, 0, 0]], 0) 47 | t = info['trans'][i].reshape((3,)) 48 | bpy.data.scenes["Scene"].frame_set(i) 49 | smpl_ob.apply_trans_pose_shape(t, p, s, i, with_blendshapes=not self.with_cache) 50 | 51 | def generate_SMPLbody_animation(self, sample, info, gender, index, body_model): 52 | print("Generate Animation..") 53 | 54 | orient = info['poses'][0, :3][2] 55 | p = np.append(info['poses'][0][:66].reshape(-1, 3), [[0, 0, 0], [0, 0, 0]], 0) 56 | t = info['trans'][0].reshape((3,)) 57 | s = info['betas'][:10] 58 | v, j = self.smpl[gender].set_params(pose=p, beta=s, trans=t) 59 | cloth_img_name = 
self.generator.pick_skin_texture(gender=gender, clothing_option="all") 60 | img = bpy.data.materials[f'Material_{index}'].node_tree.nodes["Image Texture"] 61 | img.image = bpy.data.images.load(cloth_img_name) 62 | material = bpy.data.materials[f'Material_{index}'] 63 | 64 | self.smpl_body_list[index].refine_SMPL(material, j, orient) # info['zrot'] 65 | 66 | self.animateSMPL(sample, self.smpl_body_list[index], info, body_model) 67 | 68 | # Smooth 69 | bpy.ops.object.shade_smooth() 70 | 71 | def loadAmassSequence(self, sample: str, info: dict, body_model, frame: int = None): 72 | bpy.context.scene.frame_end = info['poses'].shape[0] - 1 73 | 74 | bpy.ops.object.select_all(action='DESELECT') 75 | # delete current garments 76 | for obj in bpy.data.objects.values(): 77 | if 'body' not in obj.name.lower() and 'armature' not in obj.name.lower(): 78 | obj.select_set(True) 79 | bpy.ops.object.delete() 80 | 81 | for obj in bpy.data.objects.values(): 82 | obj.select_set(False) 83 | 84 | gender = 'm' if info['gender'] == 'male' else 'f' 85 | index = 0 if info['gender'] == 'male' else 1 86 | self.generate_SMPLbody_animation(sample, info, gender, index, body_model) 87 | 88 | bpy.context.view_layer.objects.active = bpy.data.objects[f'Armature_{index}'] 89 | arm_obj = bpy.data.objects[f'Armature_{index}'] 90 | bpy.context.scene.frame_current = bpy.context.scene.frame_start 91 | 92 | for obj in bpy.data.objects.values(): 93 | if 'body' not in obj.name.lower() and 'armature' not in obj.name.lower(): 94 | obj.select_set(True) 95 | obj.parent = arm_obj 96 | obj.rotation_euler = [0, 0, 0] 97 | obj.select_set(False) 98 | 99 | for obj in bpy.data.objects.values(): 100 | if 'armature' not in obj.name.lower() and 'body' not in obj.name.lower(): 101 | obj.select_set(True) 102 | else: 103 | if str(index) in obj.name: 104 | obj.select_set(True) 105 | 106 | if frame != None and frame >= 0 and frame <= bpy.context.scene.frame_end: 107 | bpy.context.scene.frame_current = frame 108 | 109 | def process_sample(self, sample: str, frame: int, smpl_body_list): 110 | # load info 111 | if sample in self.subject_ids: 112 | for subject_id in os.listdir(os.path.join(self.path_samples, sample)): 113 | sequence_path, main_path = _get_sequence_path(self.supported_datasets, self.sub_dataset_id, sample, 114 | subject_id) 115 | info = loadInfo(sequence_path) 116 | 117 | self.smpl_body_list = smpl_body_list 118 | subfolder_name = Path(subject_id).stem + ('_with_cache' if self.with_cache else '') 119 | self.path_cache = hgen.create_outfolder_structure(self.path_out, subfolder_name, self.with_cache) 120 | 121 | if frame is None: 122 | self.loadAmassSequence(sample, info, self.body_model_m if info["gender"] == "male" else self.body_model_f) 123 | else: 124 | self.loadAmassSequence(sample, info, self.body_model_m if info["gender"] == "male" else self.body_model_f, 125 | frame) 126 | 127 | bpy.ops.wm.save_as_mainfile(filepath=os.path.join(self.path_out, subfolder_name, subfolder_name + ".blend")) 128 | my_l = list(info.keys()) 129 | new_info = {} 130 | for i in my_l: 131 | new_info[i] = info[i] 132 | hgen.export_data(self.temp_path, self.path_out, Path(subject_id).stem, self.with_cache, frame, new_info, 133 | info['poses'][0, :3][2], self.write_verts) 134 | 135 | return True -------------------------------------------------------------------------------- /humangenerator/util/amass_util.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import glob 3 | import os 4 | import random 5 | 
from .IO import readPC2, writePC2
6 | import bpy, sys, torch
7 | from .blender_util import mesh_cache
8 | from typing import Tuple
9 | 
10 | def bodyCache(path_cache, sample, info, ob, body_model, num_betas, num_dmpls):
11 |     print("Processing Body Cache")
12 | 
13 |     pc2_path = os.path.join(path_cache, sample + '.pc2')
14 | 
15 |     # V is produced below: computed through the body model, or read back from the cached PC2 file
16 | 
17 |     bdata = info
18 |     time_length = len(bdata['trans'])
19 |     comp_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
20 |     body_params = {
21 |         'root_orient': torch.Tensor(bdata['poses'][:, :3]).to(comp_device),  # controls the global root orientation
22 |         'pose_body': torch.Tensor(bdata['poses'][:, 3:66]).to(comp_device),  # controls the body
23 |         'pose_hand': torch.Tensor(bdata['poses'][:, 66:]).to(comp_device),  # controls the finger articulation
24 |         'trans': torch.Tensor(bdata['trans']).to(comp_device),  # controls the global body position
25 |         'betas': torch.Tensor(np.repeat(bdata['betas'][:num_betas][np.newaxis], repeats=time_length, axis=0)).to(
26 |             comp_device),  # controls the body shape. Body shape is static
27 |         'dmpls': torch.Tensor(bdata['dmpls'][:, :num_dmpls]).to(comp_device)  # controls soft tissue dynamics
28 |     }
29 | 
30 |     body_trans_root = body_model(
31 |         **{k: v for k, v in body_params.items() if k in ['pose_body', 'betas', 'pose_hand', 'dmpls',
32 |                                                          'trans', 'root_orient']})
33 |     if not os.path.isfile(pc2_path):
34 |         V = body_trans_root.v.data.cpu().numpy()
35 |         print("Writing PC2 file...")
36 |         writePC2(pc2_path, V)
37 |     else:
38 |         V = readPC2(pc2_path)['V']
39 | 
40 |     if V.shape[1] != len(ob.data.vertices):
41 |         sys.stderr.write("ERROR IN THE VERTEX COUNT FOR THE BODY!!!!!")
42 |         sys.stderr.flush()
43 | 
44 |     mesh_cache(ob, pc2_path)
45 |     bpy.ops.object.shade_smooth()
46 |     return body_trans_root
47 | 
48 | def loadInfo(sequence_path):
49 | 
50 |     if os.path.exists(sequence_path):
51 |         # load the AMASS sequence file, which contains the coefficients for the whole motion sequence
52 |         sequence_body_data = np.load(sequence_path)
53 |         # return the per-frame body data
54 |         return sequence_body_data
55 |     else:
56 |         raise Exception(
57 |             "Invalid sequence/subject category identifiers, please choose a "
58 |             "valid one. Used path: {}".format(sequence_path))
59 | 
60 | def _get_sequence_path(supported_mocap_datasets: dict, used_sub_dataset_id: str, used_subject_id: str, used_sequence_id: str) -> Tuple[str, str]:
61 |     """ Build the path to the requested mocap sequence and to its subject folder within the chosen sub-dataset
62 | 
63 |     :param supported_mocap_datasets: A dict which maps sub dataset names to their paths.
64 |     :param used_sub_dataset_id: Identifier for the sub dataset, the dataset which the human pose object should be extracted from.
65 |     :param used_subject_id: Type of motion from which the pose should be extracted; this is a dataset-dependent parameter.
66 |     :param used_sequence_id: Sequence id in the dataset; sequences are the motions recorded to represent a certain action.
67 |     :return: the path to the sequence file and the path to the subject folder.
Type: Tuple[str, str]
68 |     """
69 | 
70 | 
71 |     # check if the sub_dataset is supported
72 |     if used_sub_dataset_id in supported_mocap_datasets:
73 |         # get path from dictionary
74 |         sub_dataset_path = supported_mocap_datasets[used_sub_dataset_id]
75 |         # concatenate path to the specific subject
76 |         if not used_subject_id:
77 |             # if none was selected, pick a random one
78 |             possible_subject_ids = glob.glob(os.path.join(sub_dataset_path, "*"))
79 |             possible_subject_ids.sort()
80 |             if len(possible_subject_ids) > 0:
81 |                 used_subject_id_str = os.path.basename(random.choice(possible_subject_ids))
82 |             else:
83 |                 raise Exception("No subjects found in folder: {}".format(sub_dataset_path))
84 |         else:
85 |             try:
86 |                 used_subject_id_str = "{:02d}".format(int(used_subject_id))
87 |             except:
88 |                 used_subject_id_str = used_subject_id
89 | 
90 |         subject_path = os.path.join(sub_dataset_path, used_subject_id_str)
91 |         sequence_path = os.path.join(subject_path, used_sequence_id)
92 |         return sequence_path, subject_path
93 |     else:
94 |         raise Exception(
95 |             "The requested mocap dataset is not yet supported, please choose another one from the following "
96 |             "supported datasets: {}".format([key for key, value in supported_mocap_datasets.items()]))
97 | 
98 | def _load_parametric_body_model(data_path: str, used_body_model_gender: str, num_betas: int,
99 |                                 num_dmpls: int) -> Tuple["BodyModel", np.array]:
100 |     """ loads the parametric body model that is used to generate the mesh object
101 | 
102 |     :return: the loaded body model and its faces. Type: tuple.
103 |     """
104 |     import torch
105 |     from human_body_prior.body_model.body_model import BodyModel
106 | 
107 |     bm_path = os.path.join(data_path, 'body_models', 'smplh', used_body_model_gender, 'model.npz')  # body model
108 |     dmpl_path = os.path.join(data_path, 'body_models', 'dmpls', used_body_model_gender, 'model.npz')  # deformation model
109 |     if not os.path.exists(bm_path) or not os.path.exists(dmpl_path):
110 |         raise Exception("Parametric body model doesn't exist, please follow the download instructions in the For AMASS section of notes.md")
111 |     comp_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
112 |     body_model = BodyModel(bm_path=bm_path, num_betas=num_betas, num_dmpls=num_dmpls, path_dmpl=dmpl_path).to(comp_device)
113 |     faces = body_model.f.detach().cpu().numpy()
114 |     return body_model, faces
115 | 
116 | def _get_supported_mocap_datasets(taxonomy_file_path: str, data_path: str) -> dict:
117 |     """ get the latest list of supported mocap datasets from the taxonomy json file and build the supported_mocap_datasets mapping
118 | 
119 |     :param taxonomy_file_path: path to the taxonomy.json file which contains the supported datasets and their respective paths. Type: string.
120 |     :param data_path: path to the AMASS dataset root folder. Type: string.
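    :return: dict mapping each supported sub-dataset name to its folder below `data_path`. Type: dict.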
121 | """ 122 | import json 123 | # dictionary contains mocap dataset name and path to its sub folder within the main dataset, dictionary will 124 | # be filled from taxonomy.json file which indicates the supported datastests 125 | supported_mocap_datasets = {} 126 | if os.path.exists(taxonomy_file_path): 127 | with open(taxonomy_file_path, "r") as f: 128 | loaded_data = json.load(f) 129 | for block in loaded_data: 130 | if "sub_data_id" in block: 131 | sub_dataset_id = block["sub_data_id"] 132 | supported_mocap_datasets[sub_dataset_id] = os.path.join(data_path, block["path"]) 133 | else: 134 | raise Exception("The taxonomy file could not be found: {}".format(taxonomy_file_path)) 135 | 136 | return supported_mocap_datasets -------------------------------------------------------------------------------- /humangenerator/util/cloth3d_util.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import scipy.io as sio 3 | from math import cos, sin 4 | from .blender_util import readOBJ, createBPYObj, setMaterial, mesh_cache, convert_meshcache 5 | import os, sys 6 | from .IO import readPC2, writePC2 7 | import bpy 8 | 9 | def loadInfo(path: str): 10 | ''' 11 | this function should be called instead of direct sio.loadmat 12 | as it cures the problem of not properly recovering python dictionaries 13 | from mat files. It calls the function check keys to cure all entries 14 | which are still mat-objects 15 | ''' 16 | data = sio.loadmat(path, struct_as_record=False, squeeze_me=True) 17 | del data['__globals__'] 18 | del data['__header__'] 19 | del data['__version__'] 20 | return _check_keys(data) 21 | 22 | def _check_keys(dict): 23 | ''' 24 | checks if entries in dictionary are mat-objects. If yes 25 | todict is called to change them to nested dictionaries 26 | ''' 27 | for key in dict: 28 | if isinstance(dict[key], sio.matlab.mio5_params.mat_struct): 29 | dict[key] = _todict(dict[key]) 30 | return dict 31 | 32 | def _todict(matobj): 33 | ''' 34 | A recursive function which constructs from matobjects nested dictionaries 35 | ''' 36 | dict = {} 37 | for strg in matobj._fieldnames: 38 | elem = matobj.__dict__[strg] 39 | if isinstance(elem, sio.matlab.mio5_params.mat_struct): 40 | dict[strg] = _todict(elem) 41 | elif isinstance(elem, np.ndarray) and np.any([isinstance(item, sio.matlab.mio5_params.mat_struct) for item in elem]): 42 | dict[strg] = [None] * len(elem) 43 | for i,item in enumerate(elem): 44 | if isinstance(item, sio.matlab.mio5_params.mat_struct): 45 | dict[strg][i] = _todict(item) 46 | else: 47 | dict[strg][i] = item 48 | else: 49 | dict[strg] = elem 50 | return dict 51 | 52 | # Computes matrix of rotation around z-axis for 'zrot' radians 53 | def zRotMatrix(zrot): 54 | c, s = cos(zrot), sin(zrot) 55 | return np.array([[c, -s, 0], 56 | [s, c, 0], 57 | [0, 0, 1]], np.float32) 58 | """ CAMERA """ 59 | def intrinsic(): 60 | RES_X = 640 61 | RES_Y = 480 62 | f_mm = 50 # blender default 63 | sensor_w_mm = 36 # blender default 64 | sensor_h_mm = sensor_w_mm * RES_Y / RES_X 65 | 66 | fx_px = f_mm * RES_X / sensor_w_mm; 67 | fy_px = f_mm * RES_Y / sensor_h_mm; 68 | 69 | u = RES_X / 2; 70 | v = RES_Y / 2; 71 | 72 | return np.array([[fx_px, 0, u], 73 | [0, fy_px, v], 74 | [0, 0, 1]], np.float32) 75 | 76 | def extrinsic(camLoc): 77 | R_w2bc = np.array([[0, 1, 0], 78 | [0, 0, 1], 79 | [1, 0, 0]], np.float32) 80 | 81 | T_w2bc = -1 * R_w2bc.dot(camLoc) 82 | 83 | R_bc2cv = np.array([[1, 0, 0], 84 | [0, -1, 0], 85 | [0, 0, -1]], np.float32) 86 | 87 | 
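# compose the rotations: world -> Blender camera (R_w2bc), then Blender camera -> OpenCV-style camera (R_bc2cv)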
R_w2cv = R_bc2cv.dot(R_w2bc) 88 | T_w2cv = R_bc2cv.dot(T_w2bc) 89 | 90 | return np.concatenate((R_w2cv, T_w2cv[:,None]), axis=1) 91 | 92 | def proj(camLoc): 93 | return intrinsic().dot(extrinsic(camLoc)) 94 | 95 | """ 96 | Mesh to UV map 97 | Computes correspondences between 3D mesh and UV map 98 | NOTE: 3D mesh vertices can have multiple correspondences with UV vertices 99 | """ 100 | def mesh2UV(F, Ft): 101 | m2uv = {v: set() for f in F for v in f} 102 | for f, ft in zip(F, Ft): 103 | for v, vt in zip(f, ft): 104 | m2uv[v].add(vt) 105 | # m2uv = {k:list(v) for k,v in m2uv.items()} 106 | return m2uv 107 | 108 | # Maps UV coordinates to texture space (pixel) 109 | IMG_SIZE = 2048 # all image textures have this squared size 110 | def uv_to_pixel(vt): 111 | px = vt * IMG_SIZE # scale to image plane 112 | px %= IMG_SIZE # wrap to [0, IMG_SIZE] 113 | # Note that Blender graphic engines invert vertical axis 114 | return int(px[0]), int(IMG_SIZE - px[1]) # texel X, texel Y 115 | 116 | 117 | def loadGarment(path_sample, path_cache, sample, garment, info): 118 | print("Processing Garment Cache") 119 | print(f"Loading {garment}") 120 | texture = info['outfit'][garment]['texture'] 121 | # Read OBJ file and create BPY object 122 | V, F, Vt, Ft = readOBJ(os.path.join(path_sample, sample, garment + '.obj')) 123 | ob = createBPYObj(V, F, Vt, Ft, name=sample + '_' + garment) 124 | # z-rot 125 | ob.rotation_euler[2] = info['zrot'] 126 | # Convert cache PC16 to PC2 127 | 128 | pc2_path = os.path.join(path_cache, 129 | sample + '_' + garment + '.pc2' 130 | ) 131 | if not os.path.isfile(pc2_path): 132 | # Convert PC16 to PC2 (and move to view_cache folder) 133 | # Add trans to vertex locations 134 | pc16_path = os.path.join(path_sample, sample, garment + '.pc16') 135 | V = readPC2(pc16_path, True)['V'] 136 | for i in range(V.shape[0]): 137 | sys.stdout.write('\r' + str(i + 1) + '/' + str(V.shape[0])) 138 | sys.stdout.flush() 139 | if V.shape[0] > 1: 140 | V[i] += info['trans'][:, i][None] 141 | else: 142 | V[i] += info['trans'][:][None] 143 | writePC2(pc2_path, V) 144 | else: 145 | V = readPC2(pc2_path)['V'] 146 | 147 | if V.shape[1] != len(ob.data.vertices): 148 | sys.stderr.write("ERROR IN THE VERTEX COUNT!!!!!") 149 | sys.stderr.flush() 150 | 151 | mesh_cache(ob, pc2_path) 152 | # necessary to have this in the old version of the code with the old omni-blender 153 | # convert_meshcache(bpy.ops.object) 154 | 155 | # Set material 156 | setMaterial(path_sample, ob, sample, garment, texture) 157 | # Smooth 158 | bpy.ops.object.shade_smooth() 159 | print(f"\nLoaded {garment}.\n") 160 | 161 | 162 | def bodyCache(path_cache, sample, info, ob, smpl): 163 | print("Processing Body Cache") 164 | pc2_path = os.path.join(path_cache, sample + '.pc2') 165 | if not os.path.isfile(pc2_path): 166 | # Compute body sequence 167 | print("Computing body sequence...") 168 | print("") 169 | gender = 'm' if info['gender'] else 'f' 170 | if len(info['poses'].shape)>1: 171 | N = info['poses'].shape[1] 172 | else: 173 | N = 1 174 | V = np.zeros((N, 6890, 3), np.float32) 175 | for i in range(N): 176 | sys.stdout.write('\r' + str(i + 1) + '/' + str(N)) 177 | sys.stdout.flush() 178 | s = info['shape'] 179 | if N == 1: 180 | p = info['poses'][:].reshape((24, 3)) 181 | t = info['trans'][:].reshape((3,)) 182 | else: 183 | p = info['poses'][:, i].reshape((24, 3)) 184 | t = info['trans'][:, i].reshape((3,)) 185 | v, j = smpl[gender].set_params(pose=p, beta=s, trans=t) 186 | V[i] = v - j[0:1] 187 | print("") 188 | print("Writing PC2 file...") 
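# persist the per-frame body vertices so later runs read the cache instead of re-posing the SMPL model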
189 | writePC2(pc2_path, V) 190 | else: 191 | V = readPC2(pc2_path)['V'] 192 | 193 | if V.shape[1] != len(ob.data.vertices): 194 | sys.stderr.write("ERROR IN THE VERTEX COUNT FOR THE BODY!!!!!") 195 | sys.stderr.flush() 196 | 197 | mesh_cache(ob, pc2_path) 198 | bpy.ops.object.shade_smooth() -------------------------------------------------------------------------------- /humangenerator/util/IO.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | from struct import pack, unpack 4 | 5 | """ 6 | Reads OBJ files 7 | Only handles vertices, faces and UV maps 8 | Input: 9 | - file: path to .obj file 10 | Outputs: 11 | - V: 3D vertices 12 | - F: 3D faces 13 | - Vt: UV vertices 14 | - Ft: UV faces 15 | Correspondence between mesh and UV map is implicit in F to Ft correspondences 16 | If no UV map data in .obj file, it shall return Vt=None and Ft=None 17 | """ 18 | def readOBJ(file): 19 | V, Vt, F, Ft = [], [], [], [] 20 | with open(file, 'r') as f: 21 | T = f.readlines() 22 | for t in T: 23 | # 3D vertex 24 | if t.startswith('v '): 25 | v = [float(n) for n in t.replace('v ','').split(' ')] 26 | V += [v] 27 | # UV vertex 28 | elif t.startswith('vt '): 29 | v = [float(n) for n in t.replace('vt ','').split(' ')] 30 | Vt += [v] 31 | # Face 32 | elif t.startswith('f '): 33 | idx = [n.split('/') for n in t.replace('f ','').split(' ')] 34 | f = [int(n[0]) - 1 for n in idx] 35 | F += [f] 36 | # UV face 37 | if '/' in t: 38 | f = [int(n[1]) - 1 for n in idx] 39 | Ft += [f] 40 | V = np.array(V, np.float32) 41 | Vt = np.array(Vt, np.float32) 42 | if Ft: assert len(F) == len(Ft), 'Inconsistent .obj file, mesh and UV map do not have the same number of faces' 43 | else: Vt, Ft = None, None 44 | return V, F, Vt, Ft 45 | 46 | """ 47 | Writes OBJ files 48 | Only handles vertices, faces and UV maps 49 | Inputs: 50 | - file: path to .obj file (overwrites if exists) 51 | - V: 3D vertices 52 | - F: 3D faces 53 | - Vt: UV vertices 54 | - Ft: UV faces 55 | Correspondence between mesh and UV map is implicit in F to Ft correspondences 56 | If no UV map data as input, it will write only 3D data in .obj file 57 | """ 58 | def writeOBJ(file, V, F, Vt=None, Ft=None): 59 | if not Vt is None: 60 | assert len(F) == len(Ft), 'Inconsistent data, mesh and UV map do not have the same number of faces' 61 | 62 | with open(file, 'w') as file: 63 | # Vertices 64 | for v in V: 65 | line = 'v ' + ' '.join([str(_) for _ in v]) + '\n' 66 | file.write(line) 67 | # UV verts 68 | if not Vt is None: 69 | for v in Vt: 70 | line = 'vt ' + ' '.join([str(_) for _ in v]) + '\n' 71 | file.write(line) 72 | # 3D Faces / UV faces 73 | if Ft: 74 | F = [[str(i+1)+'/'+str(j+1) for i,j in zip(f,ft)] for f,ft in zip(F,Ft)] 75 | else: 76 | F = [[str(i + 1) for i in f] for f in F] 77 | for f in F: 78 | line = 'f ' + ' '.join(f) + '\n' 79 | file.write(line) 80 | 81 | """ 82 | Reads PC2 files, and proposed format PC16 files 83 | Inputs: 84 | - file: path to .pc2/.pc16 file 85 | - float16: False for PC2 files, True for PC16 86 | Output: 87 | - data: dictionary with .pc2/.pc16 file data 88 | NOTE: 16-bit floats lose precision with high values (positive or negative), 89 | we do not recommend using this format for data outside range [-2, 2] 90 | """ 91 | def readPC2(file, float16=False): 92 | # assert file.endswith('.pc2') and not float16 or file.endswith('.pc16') and float16, 'File format not consistent with specified input format' 93 | data = {} 94 | bytes = 2 if float16 else 4 95 | dtype = 
np.float16 if float16 else np.float32 96 | with open(file, 'rb') as f: 97 | # Header 98 | data['sign'] = f.read(12) 99 | # data['version'] = int.from_bytes(f.read(4), 'little') 100 | data['version'] = unpack('<i', f.read(4))[0] 101 | # Num points 102 | # data['nPoints'] = int.from_bytes(f.read(4), 'little') 103 | data['nPoints'] = unpack('<i', f.read(4))[0] 104 | # Start frame 105 | data['startFrame'] = unpack('<f', f.read(4))[0] 106 | # Sample rate 107 | data['sampleRate'] = unpack('<f', f.read(4))[0] 108 | # Number of samples 109 | data['nSamples'] = unpack('<i', f.read(4))[0] 110 | # Animation data 111 | size = data['nPoints'] * data['nSamples'] * 3 * bytes 112 | data['V'] = np.frombuffer(f.read(size), dtype=dtype).astype(np.float32) 113 | data['V'] = data['V'].reshape(data['nSamples'], data['nPoints'], 3) 114 | return data 115 | 116 | """ 117 | Reads a specific frame of PC2/PC16 files 118 | Inputs: 119 | - file: path to .pc2/.pc16 file 120 | - frame: index of the frame to read 121 | - float16: False for PC2 files, True for PC16 122 | Output: 123 | - T: frame data (N. Points x 3) 124 | NOTE: 16-bit floats lose precision with high values (positive or negative), 125 | we do not recommend using this format for data outside range [-2, 2] 126 | """ 127 | def readPC2Frame(file, frame, float16=False): 128 | assert file.endswith('.pc2') and not float16 or file.endswith('.pc16') and float16, 'File format not consistent with specified input format' 129 | assert frame >= 0 and isinstance(frame,int), 'Frame must be a positive integer' 130 | bytes = 2 if float16 else 4 131 | dtype = np.float16 if float16 else np.float32 132 | with open(file,'rb') as f: 133 | # Num points 134 | f.seek(16) 135 | # nPoints = int.from_bytes(f.read(4), 'little') 136 | nPoints = unpack('<i', f.read(4))[0] 137 | # Num samples 138 | f.seek(28) 139 | # nSamples = int.from_bytes(f.read(4), 'little') 140 | nSamples = unpack('<i', f.read(4))[0] 141 | if frame > nSamples: 142 | print("Frame index outside size") 143 | print("\tN. frame: " + str(frame)) 144 | print("\tN. samples: " + str(nSamples)) 145 | return 146 | # Read frame 147 | size = nPoints * 3 * bytes 148 | f.seek(size * frame, 1) # offset from current '1' 149 | T = np.frombuffer(f.read(size), dtype=dtype).astype(np.float32) 150 | return T.reshape(nPoints, 3) 151 | 152 | """ 153 | Writes PC2 and PC16 files 154 | Inputs: 155 | - file: path to file (overwrites if exists) 156 | - V: 3D animation data as a three dimensional array (N. Frames x N. Vertices x 3) 157 | - float16: False for writing as PC2 file, True for PC16 158 | This function assumes 'startFrame' to be 0 and 'sampleRate' to be 1 159 | NOTE: 16-bit floats lose precision with high values (positive or negative), 160 | we do not recommend using this format for data outside range [-2, 2] 161 | """ 162 | def writePC2(file, V, float16=False): 163 | assert file.endswith('.pc2') and not float16 or file.endswith('.pc16') and float16, 'File format not consistent with specified input format' 164 | if float16: V = V.astype(np.float16) 165 | else: V = V.astype(np.float32) 166 | with open(file, 'wb') as f: 167 | # Create the header 168 | headerFormat='<12siiffi' 169 | headerStr = pack(headerFormat, b'POINTCACHE2\0', 170 | 1, V.shape[1], 0, 1, V.shape[0]) 171 | f.write(headerStr) 172 | # Write vertices 173 | f.write(V.tobytes()) 174 | 175 | """ 176 | Reads proposed compressed file format for mesh topology. 177 | Inputs: 178 | - fname: name of the file to read 179 | Outputs: 180 | - F: faces of the mesh, as triangles 181 | """ 182 | def readFaceBIN(fname): 183 | if '.' in os.path.basename(fname) and not fname.endswith('.bin'): 184 | print("File name extension should be '.bin'") 185 | return 186 | elif not '.' in os.path.basename(fname): fname += '.bin' 187 | with open(fname, 'rb') as f: 188 | F = np.frombuffer(f.read(), dtype=np.uint16).astype(np.int32) 189 | return F.reshape((-1,3)) 190 | 191 | """ 192 | Compress mesh topology into uint16 (Note that this imposes a maximum of 65,536 vertices). 193 | Writes this data into the specified file. 194 | Inputs: 195 | - fname: name of the file to be created (provide NO extension) 196 | - F: faces. MUST be an Nx3 array 197 | """ 198 | def writeFaceBIN(fname, F): 199 | assert type(F) is np.ndarray, "Make sure faces is an Nx3 NumPy array" 200 | assert len(F.shape) == 2 and F.shape[1] == 3, "Faces have the wrong shape (should be Nx3)" 201 | if '.' in os.path.basename(fname) and not fname.endswith('.bin'): 202 | print("File name extension should be '.bin'") 203 | return 204 | elif not '.' in os.path.basename(fname): fname += '.bin' 205 | F = F.astype(np.uint16) 206 | with open(fname, 'wb') as f: 207 | f.write(F.tobytes())
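# --- Editor's usage sketch (not part of the original file) ---------------------
# A minimal round trip through the PC2 helpers above; it assumes only numpy and
# this module, and sizes and the /tmp path are arbitrary:
#
#   import numpy as np
#   from humangenerator.util.IO import writePC2, readPC2
#
#   V = np.random.rand(10, 6890, 3).astype(np.float32)  # 10 frames of a 6890-vertex mesh
#   writePC2('/tmp/demo.pc2', V)            # header packed as '<12siiffi', see writePC2
#   data = readPC2('/tmp/demo.pc2')
#   assert data['nPoints'] == 6890 and data['nSamples'] == 10
#   assert data['V'].shape == (10, 6890, 3)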
-------------------------------------------------------------------------------- /generate_sequence.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import humangenerator 4 | import bpy 5 | import humangenerator as hgen 6 | import argparse 7 | import ipdb 8 | import sys 9 | import yaml 10 | 11 | parser = argparse.ArgumentParser() 12 | parser.add_argument("--dataset", help="Dataset from which you want to generate data") 13 | parser.add_argument("--output_dir", help="Path to where the data should be saved") 14 | parser.add_argument("--samples_dir", help="Path where the data is stored") 15 | parser.add_argument("--last_sample", 16 | help="Last sample processed; this must be the FULL name of the folder (e.g. 00001). This WILL be processed", 17 | default="") 18 | parser.add_argument("--parent_path", help="Path containing the subfolders for the datasets (with the pkl models)", 19 | default="") 20 | parser.add_argument("--sample_id", help="ID of the sample; if empty, process all", default="all") 21 | parser.add_argument("--with_cache", help="Write \"False\" if generating blendshapes", default="True") 22 | parser.add_argument("--suppress_out", help="Write \"False\" if output in console", default="False") 23 | parser.add_argument("--write_verts", help="Write \"True\" if you want to write verts info in the pkl", default="False") 24 | parser.add_argument("--frame", help="The n-th frame to generate. Default all", default="all") 25 | parser.add_argument("--config_file", help="json file containing the configuration", default="") 26 | parser.add_argument("--exp_name", 27 | help="The name of the \"experiment\" of the dataset. By default the name of the samples_dir folder", 28 | default="") 29 | 30 | 31 | # structure should be `parent_path/[surreal/datageneration/smpl_data,body_models/{smplh,dmpls}]` 32 | args = parser.parse_args()
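# Editor's note (not part of the original file): an example invocation, as a
# sketch only -- the script imports bpy, so it must run under a Python that can
# import it (e.g. Blender's bundled interpreter), and it opens
# humangenerator/avail_datasets.yaml with a relative path, so run it from the
# repository root. All paths below are placeholders:
#
#   python generate_sequence.py --dataset cloth3d \
#       --samples_dir /data/cloth3d/train --output_dir /data/out \
#       --parent_path /data/models --sample_id 00001 --with_cache True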
The avail ones are {avail_datasets}") 43 | exit(-1) 44 | else: 45 | print(f"Processing {args.dataset} data") 46 | 47 | found = (args.last_sample == "") 48 | 49 | try: 50 | WITH_CACHE = (False if args.with_cache == "False" else True) 51 | parent_path = args.parent_path 52 | 53 | smpl_body_list = [] 54 | # Init SMPL models 55 | smpl_path = os.path.join(parent_path, "surreal", "datageneration", "smpl_data") 56 | smpl_models = { 57 | 'f': hgen.SMPLModel(os.path.join(smpl_path, 'smpl', 'models', 'basicModel_f_lbs_10_207_0_v1.0.0.pkl')), 58 | 'm': hgen.SMPLModel(os.path.join(smpl_path, 'smpl', 'models', 'basicModel_m_lbs_10_207_0_v1.0.0.pkl')), 59 | } 60 | 61 | if args.frame != "all": 62 | try: 63 | frame = int(args.frame) 64 | except: 65 | print("Error converting frame to int, considering the WHOLE sequence") 66 | frame = None 67 | else: 68 | frame = None 69 | print("Whole sequence considered") 70 | print("This will export only the whole sequence") 71 | 72 | hgen.init() 73 | 74 | # Parse args 75 | PATH_SAMPLES = args.samples_dir 76 | 77 | if args.exp_name == "": 78 | exp_name = os.path.split(PATH_SAMPLES)[-1] 79 | else: 80 | exp_name = args.exp_name 81 | 82 | PATH_OUT = os.path.join(args.output_dir, exp_name) 83 | if not os.path.exists(PATH_OUT): 84 | os.makedirs(PATH_OUT) 85 | 86 | if args.config_file == "": 87 | config = {} 88 | else: 89 | if os.path.exists(args.config_file): 90 | with open(args.config_file, "r") as f: 91 | config = json.load(f) 92 | else: 93 | raise Exception("The taxonomy file could not be found: {}".format(args.config_file)) 94 | 95 | processor, PATH_SAMPLES = hgen.get_processor(args.dataset, parent_path, WITH_CACHE, PATH_OUT, PATH_SAMPLES, 96 | smpl_models, args.write_verts.lower() == "false", config) 97 | sample_id = args.sample_id 98 | if sample_id != "all": 99 | print("Processing single sample") 100 | # Check if sample exists 101 | if not os.path.isdir(os.path.join(PATH_SAMPLES, sample_id)): 102 | print("Specified sample does not exist") 103 | exit(-1) 104 | else: 105 | sample_id = [sample_id] 106 | else: 107 | print("Processing all samples") 108 | sample_id = os.listdir(PATH_SAMPLES) 109 | if not sample_id: 110 | print("No subfolder found") 111 | exit(-1) 112 | 113 | if len(smpl_body_list) == 0: 114 | smpl_body_list = processor.generator.load_SMPLs_objects() 115 | 116 | found = (args.last_sample == "") 117 | 118 | sample_id.sort() 119 | 120 | clean_cnt = 1 121 | for sample in sample_id: 122 | if not found: 123 | if sample == args.last_sample: 124 | found = True 125 | else: 126 | continue 127 | if clean_cnt % 100 == 0: 128 | clean_cnt = 0 129 | hgen.init() 130 | smpl_body_list = processor.generator.load_SMPLs_objects() 131 | 132 | clean_cnt += 1 133 | print("------------------------------") 134 | print(f"Processing {sample}") 135 | isdone = False 136 | count = 0 137 | while (not isdone and count <= 5): 138 | hgen.deselect() 139 | if len(sample_id) > 1: 140 | hgen.clean_mesh_and_textures( 141 | exclude=['Material_0', 'Material_1', 'Armature_0', 'Armature_1', 'body_0', 'body_1']) 142 | print("Scene cleaned!\n\n") 143 | 144 | count += 1 145 | path_sample = os.path.join(PATH_OUT, sample + ('_with_cache' if WITH_CACHE else '')) 146 | if not os.path.exists(path_sample): 147 | os.makedirs(path_sample) 148 | with open(os.path.join(path_sample, f"out_{count}.txt"), "w") as file_out, open( 149 | os.path.join(path_sample, f"err_{count}.txt"), "w") as file_err: 150 | # file logging 151 | try: 152 | if args.suppress_out == "True": 153 | sys.stdout = file_out 154 | sys.stderr = 
-------------------------------------------------------------------------------- /humangenerator/util/blender_util.py: -------------------------------------------------------------------------------- 1 | import os 2 | import bpy 3 | from humangenerator.util.IO import readOBJ, readPC2, writePC2 4 | import numpy as np 5 | import bmesh 6 | import sys 7 | import pickle as pkl 8 | import shutil 9 | import random 10 | 11 | PI = 3.14159 12 | 13 | """ Scene """ 14 | def init(): 15 | clean() 16 | # scene 17 | return scene() 18 | 19 | def clean(): 20 | for collection in dir(bpy.data): 21 | data_structure = getattr(bpy.data, collection) 22 | # Check that it is a data collection 23 | if isinstance(data_structure, bpy.types.bpy_prop_collection) and hasattr(data_structure, 24 | "remove") and collection not in [ 25 | "texts"]: 26 | # Go over all entities in that collection 27 | for block in data_structure: 28 | # Remove everything besides the default scene 29 | if not isinstance(block, bpy.types.Scene) or block.name != "Scene": 30 | data_structure.remove(block) 31 | 32 | def clean_mesh_and_textures(exclude=[]): 33 | # ensure everything is lowercase 34 | exclude = [i.lower() for i in exclude] 35 | 36 | for block in bpy.data.objects: 37 | if block.users == 0 or block.name.lower() not in exclude: 38 | bpy.data.objects.remove(block) 39 | 40 | for block in bpy.data.meshes: 41 | if block.users == 0: 42 | bpy.data.meshes.remove(block) 43 | 44 | for block in bpy.data.materials: 45 | if block.users == 0 and block.name.lower() not in exclude: 46 | bpy.data.materials.remove(block) 47 | 48 | for block in bpy.data.textures: 49 | if block.users == 0: 50 | bpy.data.textures.remove(block) 51 | 52 | for block in bpy.data.images: 53 | bpy.data.images.remove(block) 54 | 55 | for block in bpy.data.shape_keys: 56 | if block.users == 0: 57 | pass # bpy.data.shape_keys has no remove(); shape-key datablocks are freed together with their mesh 58 | 59 | for block in bpy.data.actions: 60 | if block.users == 0: 61 | bpy.data.actions.remove(block) 62 | 63 | 64 | def scene(): 65 | scene = bpy.data.scenes["Scene"] 66 | scene.render.engine = "CYCLES" 67 | # bpy.data.materials['Material'].use_nodes = True 68 | scene.cycles.shading_system = True 69 | scene.use_nodes = True 70 | scene.render.film_transparent = True 71 | scene.frame_current = 0 72 | 73 | scene.render.fps = 30 74 | scene.render.resolution_x = 640 75 | scene.render.resolution_y = 480 76 | return scene 77 | 78 | 79 | """ BPY obj manipulation """ 80 | 81 | 82 | def select(ob, only=True): 83 | if type(ob) is str: ob = bpy.data.objects[ob] 84 | if only: deselect() 85 | ob.select_set(True) 86 | bpy.context.view_layer.objects.active = ob 87 | return ob 88 | 89 | 90 | 
def deselect(): 91 | for obj in bpy.data.objects.values(): 92 | obj.select_set(False) 93 | bpy.context.view_layer.objects.active = None 94 | 95 | 96 | def delete(ob): 97 | select(ob) 98 | bpy.ops.object.delete() 99 | 100 | 101 | def createBPYObj(V, F, Vt=None, Ft=None, name='new_obj'): 102 | # Create obj 103 | mesh = bpy.data.meshes.new('mesh') 104 | ob = bpy.data.objects.new(name, mesh) 105 | # Add to collection 106 | bpy.context.collection.objects.link(ob) 107 | select(ob) 108 | mesh = bpy.context.object.data 109 | bm = bmesh.new() 110 | # Vertices 111 | for v in V: 112 | bm.verts.new(v) 113 | bm.verts.ensure_lookup_table() 114 | # Faces 115 | for f in F: 116 | v = [bm.verts[i] for i in f] 117 | bm.faces.new(v) 118 | bm.to_mesh(mesh) 119 | bm.free() 120 | # UV Map 121 | if not Vt is None: 122 | # Create UV layer 123 | ob.data.uv_layers.new() 124 | # Assign UV coords 125 | iloop = 0 126 | for f in Ft: 127 | for i in f: 128 | ob.data.uv_layers['UVMap'].data[iloop].uv = Vt[i] 129 | iloop += 1 130 | return ob 131 | 132 | 133 | 134 | def convert_meshcache(ob: bpy.ops.object, offset=0): 135 | # Converts a MeshCache or Cloth modifiers to ShapeKeys 136 | bpy.context.scene.frame_current = bpy.context.scene.frame_start 137 | for frame in range(bpy.context.scene.frame_end + 1): 138 | bpy.context.scene.frame_current = frame 139 | 140 | # for alembic files converted to PC2 and loaded as MeshCache 141 | bpy.ops.object.modifier_apply_as_shapekey(keep_modifier=True, modifier="MeshCache") 142 | 143 | # loop through shapekeys and add as keyframe per frame 144 | # https://blender.stackexchange.com/q/149045/87258 145 | bpy.context.scene.frame_current = bpy.context.scene.frame_start 146 | for frame in range(bpy.context.scene.frame_end + 1): 147 | bpy.context.scene.frame_current = frame 148 | 149 | shapekey = bpy.data.shape_keys[-1] 150 | for i, keyblock in enumerate(shapekey.key_blocks): 151 | if keyblock.name != "Basis": 152 | curr = i - 1 153 | if curr != frame: 154 | keyblock.value = 0 155 | keyblock.keyframe_insert("value", frame=frame) 156 | else: 157 | keyblock.value = 1 158 | keyblock.keyframe_insert("value", frame=frame) 159 | 160 | bpy.ops.object.modifier_remove(modifier="MeshCache") 161 | 162 | 163 | def setMaterial(path_sample, ob, sample, garment, texture): 164 | mat = bpy.data.materials.new(name=sample + '_' + garment + '_Material') 165 | mat.use_nodes = True 166 | ob.data.materials.append(mat) 167 | if texture['type'] == 'color': 168 | mat.node_tree.nodes['Principled BSDF'].inputs[0].default_value = texture['data'].tolist() + [1] 169 | elif texture['type'] == 'pattern': 170 | # Read pattern 171 | img_path = os.path.join(path_sample, sample, garment + '.png') 172 | # Add nodes 173 | tree = mat.node_tree 174 | nodes = tree.nodes 175 | # Principled BSDf 176 | bsdf = nodes['Principled BSDF'] 177 | # Image 178 | img = nodes.new('ShaderNodeTexImage') 179 | try: 180 | img.image = bpy.data.images.load(img_path) 181 | # Links 182 | tree.links.new(img.outputs[0], bsdf.inputs[0]) 183 | except: 184 | mat.node_tree.nodes['Principled BSDF'].inputs[0].default_value = [random.random(), random.random(), 185 | random.random(), 1] 186 | 187 | 188 | """ Modifiers """ 189 | def mesh_cache(ob, cache, scale=1): 190 | ob = select(ob) 191 | bpy.ops.object.modifier_add(type='MESH_CACHE') 192 | ob.modifiers['MeshCache'].cache_format = 'PC2' 193 | ob.modifiers['MeshCache'].filepath = cache 194 | ob.modifiers['MeshCache'].frame_scale = scale 195 | 196 | 197 | def write_usd(temppath, filepath, filename, with_cache, 
export_animation=True, sf=0, ef=-1, frame_step=1): 198 | outpath = os.path.join(filepath, filename) 199 | filepath = os.path.join(filepath, filename, filename + ".usd") 200 | if ef == -1: 201 | ef = bpy.context.scene.frame_end 202 | 203 | print(f"\nExporting usd to {filepath}\n") 204 | 205 | print(f"With blendshapes = {not with_cache}") 206 | bpy.ops.wm.usd_export(filepath=os.path.join(temppath, filename + ".usd"), 207 | filemode=8, display_type='DEFAULT', sort_method='DEFAULT', 208 | selected_objects_only=True, visible_objects_only=True, export_animation=export_animation, 209 | export_hair=True, export_vertices=True, export_vertex_colors=True, 210 | export_vertex_groups=True, export_face_maps=True, export_uvmaps=True, export_normals=True, 211 | export_transforms=True, export_materials=True, export_meshes=True, export_lights=True, 212 | export_cameras=False, export_blendshapes=(not with_cache), 213 | export_curves=True, export_particles=True, export_armatures=True, use_instancing=False, 214 | evaluation_mode='VIEWPORT', default_prim_path=f"/body_{filename}", 215 | root_prim_path=f"/body_{filename}", material_prim_path=f"/body_{filename}/materials", 216 | generate_cycles_shaders=False, generate_preview_surface=True, generate_mdl=True, 217 | convert_uv_to_st=True, convert_orientation=True, 218 | convert_to_cm=True, export_global_forward_selection='Y', export_global_up_selection='Z', 219 | export_child_particles=False, 220 | export_as_overs=False, merge_transform_and_shape=False, export_custom_properties=True, 221 | add_properties_namespace=False, export_identity_transforms=False, 222 | apply_subdiv=True, author_blender_name=True, vertex_data_as_face_varying=False, 223 | frame_step=frame_step, start=sf, end=ef, override_shutter=False, 224 | init_scene_frame_range=True, export_textures=True, relative_paths=True, 225 | light_intensity_scale=1, 226 | convert_light_to_nits=True, scale_light_radius=True, convert_world_material=True, 227 | fix_skel_root=True, xform_op_mode='SRT') 228 | shutil.move(os.path.join(temppath, filename + ".usd"), filepath) 229 | shutil.move(os.path.join(temppath, "textures"), os.path.join(outpath, "textures")) 230 | 231 | 232 | def export_stl_data(filepath, filename, lobs, zrot): 233 | context = bpy.context 234 | 235 | dg = context.evaluated_depsgraph_get() 236 | scene = context.scene 237 | coll = context.collection 238 | 239 | step = 5 240 | for ob in lobs: 241 | if ob.type != 'MESH': 242 | print(ob.name) 243 | print(ob.type) 244 | ob.select_set(False) 245 | continue 246 | bpy.context.view_layer.objects.active = ob 247 | rings = [] 248 | me = ob.data 249 | nverts = len(me.vertices) 250 | nedges = len(me.edges) 251 | bm = bmesh.new() 252 | f = scene.frame_start 253 | while f <= scene.frame_end: 254 | scene.frame_set(f) 255 | bm.from_object(ob, dg, cage=True) 256 | bmesh.ops.remove_doubles(bm, verts=bm.verts, dist=0.02) 257 | # bmesh.ops.transform(bm, verts=bm.verts[:], matrix=ob.matrix_world) 258 | f += step 259 | rings.append(bm.edges[:]) 260 | print("Frames processed, going to build the rings") 261 | # build from rings 262 | prev = rings.pop() 263 | while rings: 264 | ring = rings.pop() 265 | bmesh.ops.bridge_loops(bm, edges=ring + prev) 266 | prev = ring 267 | 268 | rme = bpy.data.meshes.new("Rib") 269 | bm.to_mesh(rme) 270 | copy = bpy.data.objects.new("Rib", rme) 271 | coll.objects.link(copy) 272 | print("DONE " + ob.name) 273 | 274 | for ob in bpy.data.objects: 275 | if 'Rib' in ob.name: 276 | ob.select_set(True) 277 | bpy.context.view_layer.objects.active = ob 278 | else: 279 | ob.select_set(False) 280 | bpy.ops.object.join() 281 | ob = bpy.context.view_layer.objects.active 282 | ob.select_set(True) 283 | ob.rotation_euler = [0, 0, zrot] 284 | bpy.ops.export_mesh.stl(filepath=os.path.join(filepath, filename, filename + ".stl"), check_existing=True, 285 | use_selection=True, global_scale=1, ascii=False, use_mesh_modifiers=False, batch_mode='OFF', 286 | axis_forward='Y', axis_up='Z') 287 | bpy.ops.object.delete() 288 | 289 | 290 | def write_pkl_data(filepath, filename, arm_ob, ob, info, frame_step=1, write_verts=False): 291 | bpy.context.scene.frame_current = bpy.context.scene.frame_start 292 | N = int(np.ceil((bpy.context.scene.frame_end - bpy.context.scene.frame_start + 1) / frame_step)) 293 | n_bones = len(arm_ob.pose.bones) - 1 294 | n_verts = len(ob.data.vertices) 295 | if write_verts: 296 | d = { 297 | 'frame': [], 298 | 'bones': np.zeros((N, n_bones, 3), np.float32), 299 | 'info': info, 300 | 'verts': np.zeros((N, n_verts, 3), np.float32), 301 | 'sf': bpy.context.scene.frame_start, 302 | 'ef': bpy.context.scene.frame_end + 1, 303 | 'nframes': frame_step 304 | } 305 | else: 306 | d = { 307 | 'frame': [], 308 | 'bones': np.zeros((N, n_bones, 3), np.float32), 309 | 'info': info, 310 | 'sf': bpy.context.scene.frame_start, 311 | 'ef': bpy.context.scene.frame_end + 1, 312 | 'nframes': frame_step 313 | } 314 | select(ob) 315 | dg = bpy.context.evaluated_depsgraph_get() 316 | 317 | cnt = 0 318 | for f in range(bpy.context.scene.frame_start, bpy.context.scene.frame_end + 1, frame_step): # step by frame_step so cnt never exceeds N 319 | sys.stdout.write('\r' + str(f) + '/' + str(N * frame_step)) 320 | sys.stdout.flush() 321 | bpy.context.scene.frame_current = f 322 | bpy.context.view_layer.update() 323 | 324 | d['frame'].append(f) 325 | 326 | select(ob) 327 | tmp = ob.evaluated_get(dg) 328 | me = tmp.to_mesh() 329 | if write_verts: 330 | d['verts'][cnt] = np.reshape([ob.matrix_world @ v.co for v in me.vertices], (n_verts, 3)) 331 | 332 | select(arm_ob) 333 | d['bones'][cnt] = np.reshape([arm_ob.matrix_world @ bone.head for bone in arm_ob.pose.bones[1:]], (n_bones, 3)) 334 | cnt += 1 335 | 336 | if not os.path.exists(os.path.join(filepath, filename)): 337 | os.makedirs(os.path.join(filepath, filename)) 338 | filepath = os.path.join(filepath, filename, filename + ".pkl") 339 | 340 | out = open(filepath, 'wb') 341 | pkl.dump(d, out) 342 | out.close()
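# --- Editor's usage sketch (not part of the original file) ---------------------
# Reading back the pickle written by write_pkl_data above; the keys mirror the
# dict it builds, the path is a placeholder:
#
#   import pickle as pkl
#   with open('/data/out/exp/00001/00001.pkl', 'rb') as f:
#       d = pkl.load(f)
#   d['bones'].shape   # (N. frames, N. bones, 3) world-space bone head positions
#   d['sf'], d['ef']   # scene start frame and end frame + 1
#   'verts' in d       # True only when write_verts=True was requested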
-------------------------------------------------------------------------------- /humangenerator/util/smplutils.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | from bpy_extras.object_utils import world_to_camera_view 3 | from mathutils import Matrix # mathutils.Quaternion would be shadowed by pyquaternion's Quaternion imported below 4 | import numpy as np 5 | import pickle as pkl 6 | import os 7 | import math 8 | from pyquaternion import Quaternion 9 | 10 | # computes rotation matrix through Rodrigues formula as in cv2.Rodrigues 11 | def Rodrigues(rotvec): 12 | theta = np.linalg.norm(rotvec) 13 | r = (rotvec / theta).reshape(3, 1) if theta > 0.0 else rotvec 14 | cost = np.cos(theta) 15 | mat = np.asarray([[0, -r[2], r[1]], [r[2], 0, -r[0]], [-r[1], r[0], 0]]) 16 | return cost * np.eye(3) + (1 - cost) * r.dot(r.T) + np.sin(theta) * mat 17 | 18 | # transformation between pose and blendshapes 19 | def rodrigues2bshapes(pose): 20 | rod_rots = np.asarray(pose).reshape(24, 3) 21 | mat_rots = [Rodrigues(rod_rot) for rod_rot in rod_rots] 22 | bshapes = np.concatenate( 23 | [(mat_rot - np.eye(3)).ravel() for mat_rot in mat_rots[1:]] 24 | ) 25 | return mat_rots, bshapes 26 | 27 | def rotate_vector(vector, 
axis, angle): 28 | """ 29 | Rotate a vector around an axis by an angle. 30 | """ 31 | q = Quaternion(axis=axis, angle=angle) 32 | return q.rotate(vector) 33 | 34 | class SMPL_Body: 35 | def __init__(self, smpl_data_folder, material, j, gender="female", person_no=0, zrot=0): 36 | # load fbx model 37 | bpy.ops.import_scene.fbx( 38 | filepath=os.path.join( 39 | smpl_data_folder, 40 | "basicModel_{}_lbs_10_207_0_v1.0.2.fbx".format(gender[0]), 41 | ), 42 | axis_forward="Y", 43 | axis_up="Z", 44 | global_scale=100, 45 | ) 46 | J_regressors = pkl.load( 47 | open(os.path.join(smpl_data_folder, "joint_regressors.pkl"), "rb") 48 | ) 49 | # 24 x 6890 regressor from vertices to joints 50 | self.joint_regressor = J_regressors["J_regressor_{}".format(gender)] 51 | self.j = j 52 | 53 | armature_name = "Armature_{}".format(person_no) 54 | bpy.context.active_object.name = armature_name 55 | 56 | self.gender_name = "{}_avg".format(gender[0]) 57 | 58 | self.obj_name = "body_{:d}".format(person_no) 59 | bpy.data.objects[armature_name].children[0].name = self.obj_name 60 | # not the default self.gender_name because each time fbx is loaded it adds some suffix 61 | self.ob = bpy.data.objects[self.obj_name] 62 | 63 | # Rename the armature 64 | self.ob.data.use_auto_smooth = False # autosmooth creates artifacts 65 | # assign the existing spherical harmonics material 66 | self.ob.active_material = bpy.data.materials["Material_{}".format(person_no)] 67 | bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN', center='MEDIAN') 68 | 69 | # clear existing animation data 70 | # self.ob.shape_key_clear() 71 | self.ob.data.shape_keys.animation_data_clear() 72 | 73 | self.arm_ob = bpy.data.objects[armature_name] 74 | self.arm_ob.animation_data_clear() 75 | 76 | self.setState0() 77 | # self.ob.select = True # blender < 2.8x 78 | self.ob.select_set(True) 79 | # bpy.context.scene.objects.active = self.ob # blender < 2.8x 80 | bpy.context.view_layer.objects.active = self.ob 81 | self.smpl_data_folder = smpl_data_folder 82 | self.materials = self.create_segmentation(material, smpl_data_folder) 83 | 84 | # unblocking both the pose and the blendshape limits 85 | for k in self.ob.data.shape_keys.key_blocks.keys(): 86 | self.ob.data.shape_keys.key_blocks[k].slider_min = -100 87 | self.ob.data.shape_keys.key_blocks[k].slider_max = 100 88 | # bpy.context.scene.objects.active = self.arm_ob # blender < 2.8x 89 | bpy.context.view_layer.objects.active = self.arm_ob 90 | 91 | # order 92 | self.part_match = { 93 | "root": "root", 94 | "bone_00": "Pelvis", 95 | "bone_01": "L_Hip", 96 | "bone_02": "R_Hip", 97 | "bone_03": "Spine1", 98 | "bone_04": "L_Knee", 99 | "bone_05": "R_Knee", 100 | "bone_06": "Spine2", 101 | "bone_07": "L_Ankle", 102 | "bone_08": "R_Ankle", 103 | "bone_09": "Spine3", 104 | "bone_10": "L_Foot", 105 | "bone_11": "R_Foot", 106 | "bone_12": "Neck", 107 | "bone_13": "L_Collar", 108 | "bone_14": "R_Collar", 109 | "bone_15": "Head", 110 | "bone_16": "L_Shoulder", 111 | "bone_17": "R_Shoulder", 112 | "bone_18": "L_Elbow", 113 | "bone_19": "R_Elbow", 114 | "bone_20": "L_Wrist", 115 | "bone_21": "R_Wrist", 116 | "bone_22": "L_Hand", 117 | "bone_23": "R_Hand", 118 | } 119 | 120 | def refine_SMPL(self, material, j, zrot): 121 | self.j = j 122 | self.arm_ob.rotation_euler = [0, 0, zrot] 123 | self.ob.data.shape_keys.animation_data_clear() 124 | self.arm_ob.animation_data_clear() 125 | 126 | self.ob.select_set(True) 127 | bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN', center='MEDIAN') 128 | 129 | # bpy.context.scene.objects.active = 
self.ob # blender < 2.8x 130 | bpy.context.view_layer.objects.active = self.ob 131 | self.materials = self.create_segmentation(material, self.smpl_data_folder) 132 | for k in self.ob.data.shape_keys.key_blocks.keys(): 133 | self.ob.data.shape_keys.key_blocks[k].slider_min = -10 134 | self.ob.data.shape_keys.key_blocks[k].slider_max = 10 135 | 136 | # bpy.context.scene.objects.active = self.arm_ob # blender < 2.8x 137 | bpy.context.view_layer.objects.active = self.arm_ob 138 | 139 | 140 | def setState0(self): 141 | for ob in bpy.data.objects.values(): 142 | # ob.select = False # blender < 2.8x 143 | ob.select_set(False) 144 | # bpy.context.scene.objects.active = None # blender < 2.8x 145 | bpy.context.view_layer.objects.active = None 146 | 147 | # create one material per part as defined in a pickle with the segmentation 148 | # this is useful to render the segmentation in a material pass 149 | def create_segmentation(self, material, smpl_path): 150 | print("Creating materials segmentation") 151 | sorted_parts = [ 152 | "hips", 153 | "leftUpLeg", 154 | "rightUpLeg", 155 | "spine", 156 | "leftLeg", 157 | "rightLeg", 158 | "spine1", 159 | "leftFoot", 160 | "rightFoot", 161 | "spine2", 162 | "leftToeBase", 163 | "rightToeBase", 164 | "neck", 165 | "leftShoulder", 166 | "rightShoulder", 167 | "head", 168 | "leftArm", 169 | "rightArm", 170 | "leftForeArm", 171 | "rightForeArm", 172 | "leftHand", 173 | "rightHand", 174 | "leftHandIndex1", 175 | "rightHandIndex1", 176 | ] 177 | part2num = {part: (ipart + 1) for ipart, part in enumerate(sorted_parts)} 178 | materials = {} 179 | vgroups = {} 180 | with open(os.path.join(smpl_path,"segm_per_v_overlap.pkl"), "rb") as f: 181 | vsegm = pkl.load(f) 182 | 183 | if len(self.ob.material_slots) <= 1: 184 | bpy.ops.object.material_slot_remove() 185 | 186 | parts = sorted(vsegm.keys()) 187 | existing = False 188 | cnt = 0 189 | for part in parts: 190 | vs = vsegm[part] 191 | # vgroups[part] = self.ob.vertex_groups.new(part) # blender < 2.8x 192 | if part not in self.ob.vertex_groups: 193 | vgroups[part] = self.ob.vertex_groups.new(name=part) 194 | vgroups[part].add(vs, 1.0, "ADD") 195 | else: 196 | existing = True 197 | 198 | bpy.ops.object.vertex_group_set_active(group=part) 199 | materials[part] = material.copy() 200 | materials[part].pass_index = part2num[part] 201 | if not existing: 202 | bpy.ops.object.material_slot_add() 203 | self.ob.material_slots[-1].material = materials[part] 204 | 205 | bpy.ops.object.mode_set(mode="EDIT") 206 | bpy.ops.mesh.select_all(action="DESELECT") 207 | bpy.ops.object.vertex_group_select() 208 | bpy.ops.object.material_slot_assign() 209 | bpy.ops.object.mode_set(mode="OBJECT") 210 | else: 211 | self.ob.material_slots[cnt].material = materials[part] 212 | cnt += 1 213 | for scene_material in bpy.data.materials: 214 | if not scene_material.users and len(scene_material.name) != len(material.name): 215 | bpy.data.materials.remove(scene_material) 216 | return materials 217 | 218 | def quaternion_multiply(self, quaternion1, quaternion0): 219 | w0, x0, y0, z0 = quaternion0 220 | w1, x1, y1, z1 = quaternion1 221 | return np.array([-x1 * x0 - y1 * y0 - z1 * z0 + w1 * w0, 222 | x1 * w0 + y1 * z0 - z1 * y0 + w1 * x0, 223 | -x1 * z0 + y1 * w0 + z1 * x0 + w1 * y0, 224 | x1 * y0 - y1 * x0 + z1 * w0 + w1 * z0], dtype=np.float64) 225 | 226 | 227 | def euler_from_quaternion(self, quat): 228 | """ 229 | Convert a quaternion into euler angles (roll, pitch, yaw) 230 | roll is rotation around x in radians (counterclockwise) 231 | pitch is rotation 
around y in radians (counterclockwise) 232 | yaw is rotation around z in radians (counterclockwise); all three angles are returned in degrees 233 | """ 234 | w,x,y,z = quat 235 | t0 = +2.0 * (w * x + y * z) 236 | t1 = +1.0 - 2.0 * (x * x + y * y) 237 | roll_x = math.atan2(t0, t1) 238 | 239 | t2 = +2.0 * (w * y - z * x) 240 | t2 = +1.0 if t2 > +1.0 else t2 241 | t2 = -1.0 if t2 < -1.0 else t2 242 | pitch_y = math.asin(t2) 243 | 244 | t3 = +2.0 * (w * z + x * y) 245 | t4 = +1.0 - 2.0 * (y * y + z * z) 246 | yaw_z = math.atan2(t3, t4) 247 | 248 | return roll_x * 180 / math.pi, pitch_y * 180 / math.pi, yaw_z * 180 / math.pi # in degrees 249 | 250 | def apply_trans_pose_shape(self, trans, pose, shape, frame=None, with_blendshapes=True): 251 | """ 252 | Apply trans pose and shape to character 253 | """ 254 | # transform pose into rotation matrices (for pose) and pose blendshapes 255 | mrots, bsh = rodrigues2bshapes(pose) 256 | 257 | # set the location of the first bone to the translation parameter 258 | mytrans = [0,0,0] 259 | mytrans[2] = trans[2] 260 | mytrans[1] = trans[1] 261 | mytrans[0] = trans[0] 262 | 263 | self.arm_ob.pose.bones[self.gender_name + "_Pelvis"].location = mytrans 264 | if frame is not None: 265 | self.arm_ob.pose.bones[self.gender_name + "_root"].keyframe_insert( 266 | "location", frame=frame 267 | ) 268 | self.arm_ob.pose.bones[self.gender_name + "_root"].keyframe_insert( 269 | "rotation_quaternion", frame=frame 270 | ) 271 | 272 | # set the pose of each bone to the quaternion specified by pose 273 | for ibone, mrot in enumerate(mrots): 274 | bone = self.arm_ob.pose.bones[ 275 | self.gender_name + "_" + self.part_match["bone_{:02d}".format(ibone)] 276 | ] 277 | bone.rotation_quaternion = Matrix(mrot).to_quaternion() 278 | 279 | if frame is not None: 280 | bone.keyframe_insert("rotation_quaternion", frame=frame) 281 | bone.keyframe_insert("location", frame=frame) 282 | 283 | # apply pose blendshapes 284 | if with_blendshapes: 285 | for ibshape, bshape in enumerate(bsh): 286 | self.ob.data.shape_keys.key_blocks[ 287 | "Pose{:03d}".format(ibshape) 288 | ].value = bshape 289 | if frame is not None: 290 | self.ob.data.shape_keys.key_blocks[ 291 | "Pose{:03d}".format(ibshape) 292 | ].keyframe_insert("value", index=-1, frame=frame) 293 | 294 | # apply shape blendshapes 295 | for ibshape, shape_elem in enumerate(shape): 296 | self.ob.data.shape_keys.key_blocks[ 297 | "Shape{:03d}".format(ibshape) 298 | ].value = shape_elem 299 | if frame is not None: 300 | self.ob.data.shape_keys.key_blocks[ 301 | "Shape{:03d}".format(ibshape) 302 | ].keyframe_insert("value", index=-1, frame=frame) 303 | else: 304 | mod = self.ob.modifiers.get('Armature') 305 | if mod is not None: self.ob.modifiers.remove(mod) 306 | 307 | def reset_joint_positions(self, shape, scene): 308 | orig_trans = np.asarray( 309 | self.arm_ob.pose.bones[self.gender_name + "_Pelvis"].location 310 | ).copy() 311 | # zero the pose and trans to obtain joint positions in zero pose 312 | self.apply_trans_pose_shape(orig_trans, np.zeros(72), shape) 313 | 314 | bpy.ops.wm.memory_statistics() 315 | depsgraph = bpy.context.evaluated_depsgraph_get() 316 | me = self.ob.evaluated_get(depsgraph).to_mesh() 317 | 318 | num_vertices = len(me.vertices) # 6890 319 | reg_vs = np.empty((num_vertices, 3)) 320 | for iiv in range(num_vertices): 321 | reg_vs[iiv] = me.vertices[iiv].co 322 | # bpy.data.meshes.remove(me) # blender < 2.8x 323 | self.ob.evaluated_get(depsgraph).to_mesh_clear() 324 | 325 | # regress joint positions in rest pose 326 | joint_xyz = self.j 327 | 328 | # adapt joint positions 
in rest pose 329 | # self.arm_ob.hide = False 330 | # Added this line 331 | # bpy.context.scene.objects.active = self.arm_ob # blender < 2.8x 332 | bpy.context.view_layer.objects.active = self.arm_ob 333 | bpy.ops.object.mode_set(mode="EDIT") 334 | # self.arm_ob.hide = True 335 | for ibone in range(24): 336 | bb = self.arm_ob.data.edit_bones[ 337 | self.gender_name + "_" + self.part_match["bone_{:02d}".format(ibone)] 338 | ] 339 | bboffset = bb.tail - bb.head 340 | bb.head = joint_xyz[ibone] 341 | bb.tail = bb.head + bboffset 342 | bpy.ops.object.mode_set(mode="OBJECT") --------------------------------------------------------------------------------
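Editor's appendix (a sketch, not part of the repository): the pose-to-blendshape bookkeeping in humangenerator/util/smplutils.py, restated as a standalone numpy check. rodrigues2bshapes turns a 72-dim axis-angle pose into 24 rotation matrices, and the 23 flattened non-root matrices drive one 'PoseNNN' shape key per entry:

    import numpy as np

    pose = np.zeros(72)             # 24 SMPL joints x 3 axis-angle components
    rod_rots = pose.reshape(24, 3)  # one rotation vector per joint, as in rodrigues2bshapes
    # (R - I).ravel() contributes 9 values per non-root joint, so a zero pose
    # yields 207 zeros -- matching shape keys 'Pose000'..'Pose206' and the
    # "..._lbs_10_207_0_..." naming of the SMPL model files used above.
    n_bshapes = (24 - 1) * 9
    print(n_bshapes)                # -> 207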