├── list.npy ├── removeinnercloth.py ├── make_pairs.py ├── bodyextractor.py ├── visualize_mesh_sequence.py ├── reorder_smpl_files.py ├── sort_unsort_file.py ├── render_model.py ├── show_humaneva.py ├── ton_align_full.py ├── vis_joints.py ├── bvh.py ├── cvtjoint_viton2smpl.py ├── README.txt ├── cloth2smplmask.m ├── smpl3dclothxfer.py ├── smpl3dclothxfer_v2.py ├── boundary_matching.py ├── smpltemplate.py ├── smpl3dclothxfer_v4.py ├── smpl3dclothxfer_v7.py └── smpl2mask.py /list.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ahnHeejune/smplvton01/HEAD/list.npy -------------------------------------------------------------------------------- /removeinnercloth.py: -------------------------------------------------------------------------------- 1 | """ 2 | Remove inner cloth parts 3 | 4 | (c) 2020 Matiur Rahman Minar and Thai Thanh Tuan @ icomlab.seoutech.ac.kr 5 | 6 | 7 | Description 8 | ============ 9 | Remove inner cloth part from cloth image and mask based on the mask labels. 10 | 11 | """ 12 | import os 13 | import cv2 14 | import numpy as np 15 | 16 | 17 | def remove_inner_cloth(img_dir, mask_dir): 18 | img_files = os.listdir(img_dir) 19 | 20 | for each in img_files: 21 | 22 | try: 23 | print("processing ", each) 24 | 25 | img_file = os.path.join(img_dir, each) 26 | mask_file = os.path.join(mask_dir, each) 27 | 28 | img = cv2.imread(img_file) 29 | mask = cv2.imread(mask_file) 30 | 31 | img_white = np.zeros([img.shape[0], img.shape[1], 3], dtype=np.uint8) 32 | img_white.fill(255) 33 | 34 | img = img * (mask != 2) + img_white * (mask == 2) 35 | new_mask = mask * (mask == 1) 36 | 37 | cv2.imwrite(img_file, img) 38 | cv2.imwrite(mask_file, new_mask) 39 | except Exception as err: 40 | print("issue with ", each, err) 41 | 42 | 43 | if __name__ == '__main__': 44 | img_dir_path = "./results/viton/c2dw/" 45 | mask_dir_path = "./results/viton/c2dwmask/" 46 | 47 | remove_inner_cloth(img_dir_path, mask_dir_path) 48 | 49 | -------------------------------------------------------------------------------- /make_pairs.py: -------------------------------------------------------------------------------- 1 | ''' 2 | make pairs from 0 to N in a directory 3 | 4 | (c) 2020 Matiur Rahman Minar and Heejune Ahn @ icomlab.seoutech.ac.kr 5 | 6 | 7 | Description 8 | ============ 9 | 10 | 11 | ''' 12 | import os 13 | import json 14 | import numpy as np 15 | 16 | 17 | def make_pairs_file(img_path, cloth_path): 18 | 19 | # 1. get the list 20 | img_files = os.listdir(img_path) 21 | cloth_files = os.listdir(cloth_path) 22 | 23 | the_file = open('viton_test_pairs.txt', 'a') 24 | 25 | count = 1 26 | for each in zip(img_files, cloth_files): 27 | if count <= 100: 28 | the_file.write(each[0] + ' ' + each[1] + '\n') 29 | count += 1 30 | 31 | 32 | def make_pairs_file_from_clothes(cloth_path): 33 | 34 | # 1. get the list 35 | # img_files = os.listdir(img_path) 36 | cloth_files = os.listdir(cloth_path) 37 | 38 | the_file = open('viton_test_pairs.txt', 'a') 39 | 40 | for each in cloth_files: 41 | the_file.write(each[:-4].replace('_1', '_0') + ' ' + each[:-4] + '\n') 42 | 43 | 44 | def make_pairs_file_from_warped_clothes(cloth_path): 45 | 46 | # 1. 
get the list
47 |     cloth_files = os.listdir(cloth_path)
48 | 
49 |     the_file = open('viton_test_pairs.txt', 'a')
50 | 
51 |     for each in cloth_files:
52 |         the_file.write(each[:-4].split("_")[0] + '_0 ' + each[:-4].split("_")[0] + '_1\n')
53 | 
54 | 
55 | if __name__ =='__main__':
56 | 
57 |     cloth_2dw_dir = "./results/viton/c2dw/"
58 |     cloth_3dw_dir = "./results/viton/c3dw/"
59 | 
60 |     # make_pairs_file(img_dir, cloth_dir)
61 |     make_pairs_file_from_clothes(cloth_2dw_dir)
62 |     # make_pairs_file_from_warped_clothes(cloth_3dw_dir)
63 | 
64 | 
--------------------------------------------------------------------------------
/bodyextractor.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2 as cv
3 | from matplotlib import pyplot as plt
4 | import sys
5 | 
6 | def extract(srcimg, xoffset = 50, yoffset = 50, tobgcolor = (0,0,0)):
7 |     """
8 |     grabcut with offsets,
9 |     assuming that the foreground can be bounded by a rectangle.
10 |     code from: https://docs.opencv.org/3.4.3/d8/d83/tutorial_py_grabcut.html
11 |     """
12 | 
13 |     img = srcimg.copy()
14 |     mask = np.zeros(img.shape[:2], np.uint8)
15 |     bgdModel = np.zeros((1, 65), np.float64)
16 |     fgdModel = np.zeros((1, 65), np.float64)
17 | 
18 |     w, h = img.shape[1], img.shape[0]
19 | 
20 |     rect = (xoffset, yoffset, w - xoffset*2, h - yoffset*2)  # e.g. (50,50,450,290)
21 |     cv.rectangle(img, (xoffset, yoffset), (w - xoffset, h - yoffset), (0, 255, 0), 3)
22 | 
23 |     cv.grabCut(img, mask, rect, bgdModel, fgdModel, 5, cv.GC_INIT_WITH_RECT)
24 |     mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
25 |     masked = img*mask2[:, :, np.newaxis]
26 | 
27 |     '''
28 |     plt.subplot(1,3,1)
29 |     plt.imshow(img[:,:, ::-1])
30 |     plt.title('original')
31 |     plt.subplot(1,3,2)
32 |     plt.imshow(masked[:, :, ::-1])
33 |     plt.title('segmented')
34 |     plt.subplot(1,3,3)
35 |     '''
36 |     img[mask2 == 0] = tobgcolor
37 | 
38 |     '''
39 |     plt.imshow(img)
40 |     plt.title('bg color changed')
41 |     plt.show()
42 |     '''
43 |     return img
44 | 
45 | 
46 | if __name__ == "__main__":
47 | 
48 |     if len(sys.argv) < 4:
49 |         print("usage: python ", sys.argv[0], " filename xoffset yoffset")
50 |         sys.exit(1)
51 | 
52 |     img = cv.imread(sys.argv[1])
53 |     result = extract(img, int(sys.argv[2]), int(sys.argv[3]))
54 |     plt.imshow(result[:, :, ::-1]); plt.show()   # OpenCV is BGR; reverse channels for display
55 | 
--------------------------------------------------------------------------------
/visualize_mesh_sequence.py:
--------------------------------------------------------------------------------
1 | #!/usr/local/bin/ipython --gui=wx
2 | """
3 | Copyright 2016 Max Planck Society, Federica Bogo, Angjoo Kanazawa. All rights reserved.
4 | This software is provided for research purposes only. 
5 | By using this software you agree to the terms of the SMPLify license here: 6 | http://smplify.is.tue.mpg.de/license 7 | 8 | This script visualizes the mesh (face & vertices) contained in the provided 9 | meshes.hdf5 files 10 | Requires vtkpython and mayavi: 11 | sudo apt-get install libvtk5-dev python-vtk 12 | pip install mayavi 13 | 14 | """ 15 | from argparse import ArgumentParser 16 | import h5py 17 | from itertools import count 18 | from mayavi import mlab 19 | import numpy as np 20 | 21 | 22 | def main(hdf5_path): 23 | 24 | with h5py.File(hdf5_path, 'r') as f: 25 | all_verts = np.array(f.get('all_verts')) 26 | faces = np.array(f.get('faces')) 27 | 28 | fig = mlab.figure(1, bgcolor=(1, 1, 1)) 29 | 30 | @mlab.animate(delay=1000, ui=True) 31 | def animation(): 32 | for i in count(): 33 | frame = i % all_verts.shape[2] 34 | verts = all_verts[:, :, frame].T 35 | mlab.clf() 36 | mlab.triangular_mesh( 37 | verts[:, 0], 38 | verts[:, 1], 39 | verts[:, 2], 40 | faces, 41 | color=(.9, .7, .7)) 42 | fig.scene.z_minus_view() 43 | mlab.view(azimuth=180) 44 | mlab.title('mesh %d' % i, size=0.5, height=0, color=(0, 0, 0)) 45 | yield 46 | 47 | a = animation() 48 | mlab.show() 49 | 50 | 51 | if __name__ == '__main__': 52 | parser = ArgumentParser(description='Visuzalize mesh.hdf5 files') 53 | parser.add_argument( 54 | 'path', 55 | type=str, 56 | default='../results/lsp/meshes.hdf5', 57 | nargs='?', 58 | help='Path to meshes.hdf5') 59 | args = parser.parse_args() 60 | main(args.path) 61 | -------------------------------------------------------------------------------- /reorder_smpl_files.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Reorder/Rename human images and smple params into original viton names 3 | 4 | (c) 2019 Matiur Rahman Minar and Heejune Ahn @ icomlab.seoutech.ac.kr 5 | 6 | 7 | Description 8 | ============ 9 | 10 | 11 | ''' 12 | import os 13 | import sys 14 | import numpy as np 15 | 16 | 17 | def reorder_human_images(list_file, image_dir): 18 | image_names = os.listdir(image_dir) 19 | # pf = open(list_file) 20 | filenp = np.load(list_file) 21 | 22 | for each in zip(image_names, filenp): 23 | src_path = image_dir + each[0] 24 | dst_path = image_dir + each[1].split(" ")[0] 25 | os.rename(src_path, dst_path) 26 | print("Converting " + each[0] + " to " + each[1].split(" ")[0]) 27 | 28 | 29 | def reorder_smpl_params(list_file, smpl_dir): 30 | # all_files = os.listdir(smpl_dir) 31 | # pf = open(list_file) 32 | filenp = np.load(list_file) 33 | 34 | count = 0 35 | # for each in pf.readlines(): 36 | for each in filenp: 37 | fname = str(each.decode("utf-8")) 38 | src_smpl_path = os.path.join(smpl_dir + '%04d.pkl' % count) 39 | dst_smpl_path = os.path.join(smpl_dir + fname.split(" ")[0].replace(".jpg", ".pkl")) 40 | os.rename(src_smpl_path, dst_smpl_path) 41 | 42 | src_image_path = os.path.join(smpl_dir + '%04d.png' % count) 43 | dst_image_path = os.path.join(smpl_dir + fname.split(" ")[0].replace(".jpg", ".png")) 44 | os.rename(src_image_path, dst_image_path) 45 | 46 | print("Converting " + src_smpl_path + " to " + fname.split(" ")[0].replace(".jpg", ".pkl")) 47 | count = count + 1 48 | 49 | 50 | if __name__ =='__main__': 51 | 52 | if len(sys.argv) < 3: 53 | print('usage: %s listnpyfile smpldir' % sys.argv[0]) 54 | exit() 55 | 56 | # list_file = "test_pairs.txt" # viton original test pairs file 57 | list_file = "list.npy" # viton original test files names' list 58 | 59 | # image_dir = "./images/viton/" # human images directory path 60 | smpl_dir 
= "./results/viton/smpl/" # saved smpl parameters directory path 61 | 62 | # ============Re-order=========== 63 | # reorder_human_images(list_file, image_dir) 64 | reorder_smpl_params(list_file, smpl_dir) 65 | -------------------------------------------------------------------------------- /sort_unsort_file.py: -------------------------------------------------------------------------------- 1 | ''' 2 | sort file and rename from 0 to N in a directory 3 | unsort it back 4 | 5 | (c) 2019 matiur Rahman and heejune Ahn @ icomlab.seoutech.ac.kr 6 | 7 | 8 | Description 9 | ============ 10 | 11 | 12 | ''' 13 | import os 14 | import json 15 | import numpy as np 16 | import sys 17 | 18 | 19 | def rename_by_sort(dir_path): 20 | 21 | # 1. get the list 22 | files = os.listdir(dir_path) 23 | files.sort() # make the ordering 24 | 25 | filesnp = np.array(files) 26 | 27 | for count, each in enumerate(files): 28 | print(count, " converting ", each , 'to', 'viton_' + format(count, '06d') + '.jpg') 29 | src_path = dir_path + each 30 | dst_path = dir_path + 'viton_' + format(count, '06d') + '.jpg' 31 | os.rename(src_path, dst_path) 32 | 33 | # save the ordering 34 | np.save('viton_list.npy', filesnp) 35 | 36 | 37 | def rename_by_list(dir_path): 38 | 39 | print('loading....' + dir_path + 'viton_list.npy') 40 | 41 | #with np.load(dir_path + 'list.npy') as filelist: # zip file loading 42 | filesnp = np.load('list.npy') 43 | fileslist = filesnp.tolist() 44 | 45 | for count, each in enumerate(fileslist): 46 | print(count, " converting ", 'viton_' + format(count, '06d') + '.jpg', 'to', each) 47 | src_path = dir_path + 'viton_' + format(count, '06d') + '.jpg' 48 | dst_path = dir_path + each 49 | os.rename(src_path, dst_path) 50 | 51 | 52 | def rename_images_by_sort(): 53 | 54 | img_dir = "./images/" 55 | rename_by_sort(img_dir) 56 | 57 | def restore_images_name(): 58 | 59 | c = input('finished renaming. restore? (1 for yes):') 60 | if c == 1: 61 | rename_by_list(img_dir) 62 | 63 | 64 | def check_list(file_path): 65 | filesnp = np.load(file_path) 66 | fileslist = filesnp.tolist() 67 | for idx, each in enumerate(fileslist): 68 | print(idx, ":", each) 69 | 70 | def remove_ext_list(file_path): 71 | filenp = np.load(file_path) 72 | filelist = filenp.tolist() 73 | for idx, each in enumerate(filelist): 74 | #print(idx, ":", filelist[idx]) 75 | filelist[idx] = filelist[idx].replace('.jpg','') # remove the .jpg ext 76 | 77 | _ = input('>>') 78 | 79 | for idx, each in enumerate(filelist): 80 | print(idx, ":", each) 81 | 82 | filenp = np.array(filelist) 83 | np.save('viton_list_numbers.npy', filenp) 84 | 85 | def restore_name(sys.argv): 86 | 87 | if len(sys.argv) < 5: 88 | print('usage: %s listnpyfile srcdir dstdir extension'%sys.argv[0]) 89 | return 90 | 91 | filenp = np.load(file_path) 92 | filelist= os.listdir(dir_path) 93 | for i in range(filelist): 94 | src_path = dir_path + '/' + '%06d'%i + '.' + extension 95 | dst_path = dir_path + '/' + filenp[i] + '.' + extension 96 | os.rename(src_path, dst_path) 97 | 98 | if __name__ =='__main__': 99 | 100 | #remove_ext_list(sys.argv[1]) 101 | #check_list(sys.argv[1]) 102 | 103 | #======================= 104 | restore_name(sys.argv) 105 | 106 | 107 | -------------------------------------------------------------------------------- /render_model.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright 2016 Max Planck Society, Federica Bogo, Angjoo Kanazawa. All rights reserved. 3 | This software is provided for research purposes only. 
4 | By using this software you agree to the terms of the SMPLify license here: 5 | http://smplify.is.tue.mpg.de/license 6 | 7 | Utility script for rendering the SMPL model using OpenDR. 8 | """ 9 | 10 | import numpy as np 11 | from opendr.camera import ProjectPoints 12 | from opendr.renderer import ColoredRenderer 13 | from opendr.lighting import LambertianPointLight 14 | import cv2 15 | 16 | colors = { 17 | 'pink': [.7, .7, .9], 18 | 'neutral': [.9, .9, .8], 19 | 'capsule': [.7, .75, .5], 20 | 'yellow': [.5, .7, .75], 21 | } 22 | 23 | 24 | def _create_renderer(w=640, 25 | h=480, 26 | rt=np.zeros(3), 27 | t=np.zeros(3), 28 | f=None, 29 | c=None, 30 | k=None, 31 | near=.5, 32 | far=10.): 33 | 34 | f = np.array([w, w]) / 2. if f is None else f 35 | c = np.array([w, h]) / 2. if c is None else c 36 | k = np.zeros(5) if k is None else k 37 | 38 | rn = ColoredRenderer() 39 | 40 | rn.camera = ProjectPoints(rt=rt, t=t, f=f, c=c, k=k) 41 | rn.frustum = {'near': near, 'far': far, 'height': h, 'width': w} 42 | return rn 43 | 44 | 45 | def _rotateY(points, angle): 46 | """Rotate the points by a specified angle.""" 47 | ry = np.array([ 48 | [np.cos(angle), 0., np.sin(angle)], [0., 1., 0.], 49 | [-np.sin(angle), 0., np.cos(angle)] 50 | ]) 51 | return np.dot(points, ry) 52 | 53 | 54 | def simple_renderer(rn, verts, faces, yrot=np.radians(120)): 55 | 56 | # Rendered model color 57 | color = colors['pink'] 58 | 59 | rn.set(v=verts, f=faces, vc=color, bgcolor=np.ones(3)) 60 | 61 | albedo = rn.vc 62 | 63 | # Construct Back Light (on back right corner) 64 | rn.vc = LambertianPointLight( 65 | f=rn.f, 66 | v=rn.v, 67 | num_verts=len(rn.v), 68 | light_pos=_rotateY(np.array([-200, -100, -100]), yrot), 69 | vc=albedo, 70 | light_color=np.array([1, 1, 1])) 71 | 72 | # Construct Left Light 73 | rn.vc += LambertianPointLight( 74 | f=rn.f, 75 | v=rn.v, 76 | num_verts=len(rn.v), 77 | light_pos=_rotateY(np.array([800, 10, 300]), yrot), 78 | vc=albedo, 79 | light_color=np.array([1, 1, 1])) 80 | 81 | # Construct Right Light 82 | rn.vc += LambertianPointLight( 83 | f=rn.f, 84 | v=rn.v, 85 | num_verts=len(rn.v), 86 | light_pos=_rotateY(np.array([-500, 500, 1000]), yrot), 87 | vc=albedo, 88 | light_color=np.array([.7, .7, .7])) 89 | 90 | return rn.r 91 | 92 | 93 | def get_alpha(imtmp, bgval=1.): 94 | h, w = imtmp.shape[:2] 95 | alpha = (~np.all(imtmp == bgval, axis=2)).astype(imtmp.dtype) 96 | 97 | b_channel, g_channel, r_channel = cv2.split(imtmp) 98 | 99 | im_RGBA = cv2.merge( 100 | (b_channel, g_channel, r_channel, alpha.astype(imtmp.dtype))) 101 | return im_RGBA 102 | 103 | 104 | def render_model(verts, faces, w, h, cam, near=0.5, far=25, img=None): 105 | rn = _create_renderer( 106 | w=w, h=h, near=near, far=far, rt=cam.rt, t=cam.t, f=cam.f, c=cam.c) 107 | # Uses img as background, otherwise white background. 108 | if img is not None: 109 | rn.background_image = img / 255. if img.max() > 1 else img 110 | 111 | imtmp = simple_renderer(rn, verts, faces) 112 | 113 | # If white bg, make transparent. 114 | if img is None: 115 | imtmp = get_alpha(imtmp) 116 | 117 | return imtmp 118 | -------------------------------------------------------------------------------- /show_humaneva.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright 2016 Max Planck Society, Federica Bogo, Angjoo Kanazawa. All rights reserved. 3 | This software is provided for research purposes only. 
4 | By using this software you agree to the terms of the SMPLify license here: 5 | http://smplify.is.tue.mpg.de/license 6 | 7 | An example script that shows how to use the fit parameter and pose SMPL. 8 | Also visualizes the detected joints. 9 | """ 10 | 11 | if __name__ == '__main__': 12 | from os.path import join 13 | import cPickle as pickle 14 | import numpy as np 15 | from glob import glob 16 | import matplotlib.pyplot as plt 17 | 18 | from smpl_webuser.serialization import load_model 19 | from opendr.camera import ProjectPoints 20 | 21 | from render_model import render_model 22 | 23 | import argparse 24 | parser = argparse.ArgumentParser(description='run SMPLify on LSP dataset') 25 | parser.add_argument( 26 | 'base_dir', 27 | # default='/scratch1/projects/smplify_public/', 28 | default='/home/heejune/Work/VTON/SMPLify/mpips-smplify_public_v2/smplify_public/', 29 | nargs='?', 30 | help="Directory that contains results/, i.e." 31 | "the directory you untared human_eva_results.tar.gz") 32 | parser.add_argument( 33 | '-seq', 34 | default='S1_Walking', 35 | nargs='?', 36 | help="Human Eva sequence name: S{1,2,3}_{Walking,Box} ") 37 | args = parser.parse_args() 38 | 39 | base_dir = args.base_dir 40 | model_dir = join(base_dir, 'code', 'models') 41 | 42 | seq = args.seq + '_1_C1' 43 | 44 | data_dir = join(base_dir, 'results/human_eva', seq) 45 | 46 | results_path = join(data_dir, 'all_results.pkl') 47 | joints_path = join(data_dir, 'est_joints.npz') 48 | 49 | if 'S1' in seq: 50 | model_path = join(model_dir, 'basicModel_f_lbs_10_207_0_v1.0.0.pkl') 51 | else: 52 | model_path = join(model_dir, 'basicmodel_m_lbs_10_207_0_v1.0.0.pkl') 53 | 54 | # Load everything: 55 | # SMPL model 56 | model = load_model(model_path) 57 | # detected joints 58 | est = np.load(joints_path)['est_joints'] 59 | 60 | # SMPL parameters + camera 61 | print('opening %s' % results_path) 62 | with open(results_path, 'r') as f: 63 | res = pickle.load(f) 64 | poses = res['poses'] 65 | betas = res['betas'] 66 | 67 | # Camera rotation is always at identity, 68 | # The rotation of the body is encoded by the first 3 bits of poses. 69 | cam_ts = res['cam_ts'] 70 | focal_length = res['focal_length'] 71 | principal_pt = res['principal_pt'] 72 | 73 | # Setup camera: 74 | cam = ProjectPoints( 75 | f=focal_length, rt=np.zeros(3), k=np.zeros(5), c=principal_pt) 76 | 77 | h = 480 78 | w = 640 79 | 80 | # Corresponding ids of detected and SMPL joints (except head) 81 | lsp_ids = range(0, 12) 82 | smpl_ids = [8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20] 83 | 84 | plt.ion() 85 | 86 | for i, (joints, pose, beta, 87 | cam_t) in enumerate(zip(est.T, poses, betas, cam_ts)): 88 | 89 | joints = joints[lsp_ids, :] 90 | 91 | # Pose the model: 92 | model.pose[:] = pose 93 | # Shape the model: the model requires 10 dimensional betas, 94 | # pad it with 0 if length of beta is less than 10 95 | model.betas[:] = np.hstack((beta, np.zeros(10 - len(beta)))) 96 | 97 | # Set camera location. 98 | cam.t = cam_t 99 | # make it project SMPL joints in 3D. 100 | cam.v = model.J_transformed[smpl_ids] 101 | 102 | # projected SMPL joints in image coordinate. 103 | smpl_joints = cam.r 104 | 105 | # Render this. 
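# (NOTE: render_model() returns a float image in [0, 1]; the scaling by
#  255 and the uint8 cast below make it displayable with plt.imshow.)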
106 | res_im = (render_model(model.r, model.f, w, h, cam) * 107 | 255.).astype('uint8') 108 | plt.show() 109 | plt.subplot(121) 110 | plt.imshow(np.ones((h, w, 3))) 111 | plt.scatter(smpl_joints[:, 0], smpl_joints[:, 1], c='w') 112 | plt.scatter(joints[:, 0], joints[:, 1], c=joints[:, 2]) 113 | plt.axis('off') 114 | plt.subplot(122) 115 | plt.cla() 116 | plt.imshow(res_im) 117 | plt.axis('off') 118 | plt.show() 119 | plt.pause(1) 120 | raw_input('Press any key to continue...') 121 | 122 | plt.off() 123 | -------------------------------------------------------------------------------- /ton_align_full.py: -------------------------------------------------------------------------------- 1 | """ 2 | Try-on 3D warped cloth with target human by part-alignment 3 | Run with Python 3.x 4 | """ 5 | 6 | import os 7 | import cv2 8 | from PIL import Image 9 | import numpy as np 10 | import matplotlib.pyplot as plt 11 | 12 | 13 | """ 14 | LIP labels 15 | 16 | [(0, 0, 0), # 0=Background 17 | (128, 0, 0), # 1=Hat 18 | (255, 0, 0), # 2=Hair 19 | (0, 85, 0), # 3=Glove 20 | (170, 0, 51), # 4=Sunglasses 21 | (255, 85, 0), # 5=UpperClothes 22 | (0, 0, 85), # 6=Dress 23 | (0, 119, 221), # 7=Coat 24 | (85, 85, 0), # 8=Socks 25 | (0, 85, 85), # 9=Pants 26 | (85, 51, 0), # 10=Jumpsuits 27 | (52, 86, 128), # 11=Scarf 28 | (0, 128, 0), # 12=Skirt 29 | (0, 0, 255), # 13=Face 30 | (51, 170, 221), # 14=LeftArm 31 | (0, 255, 255), # 15=RightArm 32 | (85, 255, 170), # 16=LeftLeg 33 | (170, 255, 85), # 17=RightLeg 34 | (255, 255, 0), # 18=LeftShoe 35 | (255, 170, 0) # 19=RightShoe 36 | (189, 170, 160) # 20=Skin/Neck 37 | ] 38 | """ 39 | 40 | 41 | def ton_align(im_path, im_parse_path, im_parse_vis_path, c_name, c_path, save_dir, viz=False): 42 | im = Image.open(im_path) 43 | im_parse = Image.open(im_parse_path) 44 | im_parse_vis = Image.open(im_parse_vis_path) 45 | c = Image.open(c_path) 46 | 47 | if viz: 48 | plt.subplot(1, 3, 1) 49 | plt.imshow(im) 50 | plt.axis('off') 51 | plt.title('im') 52 | plt.draw() 53 | 54 | plt.subplot(1, 3, 2) 55 | plt.imshow(im_parse_vis) 56 | plt.axis('off') 57 | plt.title('parse') 58 | plt.draw() 59 | 60 | plt.subplot(1, 3, 3) 61 | plt.imshow(c) 62 | plt.axis('off') 63 | plt.title('cloth') 64 | plt.draw() 65 | 66 | plt.show() 67 | 68 | parse_array = np.array(im_parse) 69 | 70 | parse_bg = (parse_array == 0) 71 | 72 | im_bg = np.zeros_like(im) 73 | im_bg[:] = 255 74 | im_bg = im_bg * parse_bg - (1 - parse_bg) # [-1,1], fill 0 for other parts 75 | 76 | parse_top = (parse_array == 1) + \ 77 | (parse_array == 2) + \ 78 | (parse_array == 4) + \ 79 | (parse_array == 13) 80 | 81 | im_top = im * parse_top - (1 - parse_top) # [-1,1], fill 0 for other parts 82 | 83 | parse_cloth = (parse_array == 0) + \ 84 | (parse_array == 3) + \ 85 | (parse_array == 5) + \ 86 | (parse_array == 6) + \ 87 | (parse_array == 7) + \ 88 | (parse_array == 10) + \ 89 | (parse_array == 11) + \ 90 | (parse_array == 14) + \ 91 | (parse_array == 15) + \ 92 | (parse_array == 20) 93 | 94 | im_cloth = c * parse_cloth - (1 - parse_cloth) # [-1,1], fill 0 for other parts 95 | 96 | parse_bottom = (parse_array == 8) + \ 97 | (parse_array == 9) + \ 98 | (parse_array == 12) + \ 99 | (parse_array == 16) + \ 100 | (parse_array == 17) + \ 101 | (parse_array == 18) + \ 102 | (parse_array == 19) 103 | 104 | im_bottom = im * parse_bottom - (1 - parse_bottom) # [-1,1], fill 0 for other parts 105 | 106 | # ton_img = im_bg + im_top + im_cloth + im_bottom 107 | ton_img = im_top + im_bottom + im_cloth 108 | ton_img[ton_img <= 0] = 255 109 | 110 | if 
viz: 111 | plt.subplot(1, 4, 1) 112 | plt.imshow(im_top) 113 | plt.axis('off') 114 | plt.title('top') 115 | plt.draw() 116 | 117 | plt.subplot(1, 4, 2) 118 | plt.imshow(im_cloth) 119 | plt.axis('off') 120 | plt.title('cloth') 121 | plt.draw() 122 | 123 | plt.subplot(1, 4, 3) 124 | plt.imshow(im_bottom) 125 | plt.axis('off') 126 | plt.title('bottom') 127 | plt.draw() 128 | 129 | plt.subplot(1, 4, 4) 130 | plt.imshow(ton_img) 131 | plt.axis('off') 132 | plt.title('ton') 133 | plt.draw() 134 | 135 | plt.show() 136 | 137 | # Save final result 138 | Image.fromarray(ton_img.astype('uint8')).save(os.path.join(save_dir, c_name)) 139 | 140 | 141 | def main(): 142 | save_dir = "D:/Research/Fashion-Project-SeoulTech/9. 3D VTON/Results/SMPL-VTON-v2/TON-align" 143 | if not os.path.exists(save_dir): 144 | os.makedirs(save_dir) 145 | 146 | im_dir = "./images/viton" 147 | c_dir = "./results/viton/c3dw" 148 | im_parse_dir = "./results/viton/segmentation" 149 | im_parse_vis_dir = "./results/viton/segmentation-vis" 150 | 151 | pairs_filepath = "./results/viton/viton_test_pairs.txt" 152 | f = open(pairs_filepath, 'r') 153 | pairs_list = f.readlines() 154 | 155 | for each in pairs_list: 156 | print("processing pair:", each) 157 | pair = each.split(" ") 158 | c_name = pair[1].strip() + "_" + pair[0] + ".png" 159 | c_path = os.path.join(c_dir, c_name) 160 | im_path = os.path.join(im_dir, pair[0] + ".jpg") 161 | im_parse_path = os.path.join(im_parse_dir, pair[0] + ".png") 162 | im_parse_vis_path = os.path.join(im_parse_vis_dir, pair[0] + ".png") 163 | 164 | ton_align(im_path, im_parse_path, im_parse_vis_path, c_name, c_path, save_dir, viz=True) 165 | 166 | 167 | if __name__ == "__main__": 168 | main() 169 | -------------------------------------------------------------------------------- /vis_joints.py: -------------------------------------------------------------------------------- 1 | ## 2 | ## joint visualize 3 | ## 4 | ## (c) 2019 heejune@snut.ac.kr 5 | ## 6 | 7 | import numpy as np 8 | 9 | import numpy as np 10 | import matplotlib.pyplot as plt 11 | from matplotlib.colors import ListedColormap, BoundaryNorm 12 | import cv2 13 | import time 14 | import sys 15 | 16 | 17 | 18 | ############################################################### 19 | # Joint Visualize 20 | ############################################################### 21 | 22 | # This display setting is required to display the label image in matplot 23 | 24 | 25 | ## joint 26 | 27 | 28 | # LSP 29 | viton2lsp_joint = [13, # Head top 30 | 12, # Neck 31 | 8, # Right shoulder 32 | 7, # Right elbow 33 | 6, # Right wrist 34 | 9, # Left shoulder 35 | 10, # Left elbow 36 | 11, # Left wrist 37 | 2, # Right hip 38 | 1, # Right knee 39 | 0, # Right ankle 40 | 3, # Left hip 41 | 4, # Left knee 42 | 5] # Left ankle 43 | 44 | lsp_limbs = [ (13,12), # head to check/neck 45 | (12, 8), (12, 9), # chick to shoulders 46 | (8, 7), (9, 10), # shoulders to elbows 47 | (7, 6), (10,11), # elbows to wrists 48 | (8, 2), (9, 3), # shoulders to hips 49 | (2, 1), (3, 4), # hips to knees 50 | (1, 0), (4, 5)] # knees to ankles 51 | 52 | ## VITON ? 53 | ## 18 joints = 14 + 4 (eyes, nose, ???) 
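## NOTE: this appears to be the OpenPose/COCO 18-keypoint layout: indices
## 0-13 are the body joints remapped to LSP above, and the 4 appended ones
## (indices 14-17 below) are the eyes and ears.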
54 | viton_joint_order = [0, # nose 55 | 1, # Neck 56 | 2, # Right shoulder 57 | 3, # Right elbow 58 | 4, # Right wrist 59 | 5, # Left shoulder 60 | 6, # Left elbow 61 | 7, # Left wrist 62 | 8, # Right hip 63 | 9, # Right knee 64 | 10, # Right ankle 65 | 11, # Left hip 66 | 12, # Left knee 67 | 13, # Left ankle 68 | 14, # Right eye ## 4 more appended, not mixed up with above numebrs ## 69 | 15, # left eye 70 | 16, # left ear 71 | 17] # right ear 72 | 73 | viton_limbs = [ (0,1), # head to check/neck 74 | (1, 2), (1, 5), # chick to shoulders 75 | (2, 3), (5, 6), # shoulders to elbows 76 | (3, 4), (6, 7), # elbows to wrists 77 | (2, 8), (5, 11), # shoulders to hips 78 | (8, 9), (11, 12), # hips to knees 79 | (9, 10), (12, 13), # knees to ankles 80 | (0, 14), (0, 15), # nose to eyes 81 | (14, 16), (15, 17)] # 82 | 83 | 84 | # overlay joints 85 | def drawJoints(img, joints, T = 0.5, jointInPixel = False): 86 | 87 | if len(img.shape) == 2: # gray 88 | color = 0 # black 89 | else: # rgb 90 | color = (0,0,0) # black 91 | 92 | if jointInPixel != True: 93 | height_scale = img.shape[0] 94 | width_scale = img.shape[1] 95 | #print('h:', height, 'w:', width) 96 | else: 97 | height_scale = 1.0 98 | width_scale = 1.0 99 | 100 | for i in range(len(joints)): 101 | if joints[i,2] > T: # when confidence enough 102 | x = int((joints[i,0] +0.499)*width_scale) 103 | y = int((joints[i,1] +0.499)*height_scale) 104 | cv2.circle(img, (x,y), 3, color) 105 | 106 | # overlay limbs 107 | def drawLimbs(img, joints, T = 0.5, jointInPixel = False): 108 | 109 | 110 | if len(img.shape) == 2: # gray 111 | color = 0 # black 112 | else: # rgb 113 | color = (0,0,0) # black 114 | 115 | if jointInPixel != True: 116 | height_scale = img.shape[0] 117 | width_scale = img.shape[1] 118 | #print('h:', height, 'w:', width) 119 | else: 120 | height_scale = 1.0 121 | width_scale = 1.0 122 | 123 | 124 | # wich format 125 | limbs = viton_limbs 126 | 127 | for limb in limbs: 128 | if (joints[limb[0],2] > T) and (joints[limb[1],2] > T): # when confidence enough 129 | x = int((joints[limb[0],0] +0.5)*width_scale) 130 | y = int((joints[limb[0],1] +0.5)*height_scale) 131 | pt1 = (x,y) 132 | x = int((joints[limb[1],0] +0.5)*width_scale) 133 | y = int((joints[limb[1],1] +0.5)*height_scale) 134 | pt2 = (x,y) 135 | 136 | cv2.line(img, pt1, pt2, color, 1) # 0 : black 137 | 138 | 139 | def visualize_joints(imgfname, joints, jointInPixel = False): 140 | 141 | """ 142 | joint format 14x2 143 | 144 | """ 145 | 146 | img = cv2.imread(imgfname) 147 | if img is None: 148 | print('cannot load file:', imgfname) 149 | return 150 | 151 | fig = plt.figure() 152 | 153 | # display normal image with joints 154 | drawJoints(img, joints, 0.1, jointInPixel) 155 | drawLimbs(img, joints, 0.1, jointInPixel) 156 | plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) # becasue OpenCv use BGR not RGB 157 | plt.axis('off') 158 | plt.title('Joints') 159 | plt.show() 160 | 161 | 162 | def check_est_joints_file(fname, imgfile): 163 | 164 | print('checking ', fname) 165 | # 1. 
check joint estimation file format 166 | with np.load(fname) as zipfile: # zip file loading 167 | est = zipfile['est_joints'] 168 | print("shape:", est.shape, ", type:", est.dtype) 169 | for imgidx in range(1): 170 | joints = est[:2, :, imgidx].T # T for joint-wise 171 | conf = est[2, :, imgidx] 172 | print('joints:', joints) 173 | print('conf:', conf) 174 | 175 | # visualize the pose 176 | #joint2d = np.reshape(joints, (-1,2)) 177 | #print('reshaped:', joint2d) 178 | visualize_joints(imgfile, joints, jointInPixel = True) 179 | 180 | -------------------------------------------------------------------------------- /bvh.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | 4 | class BvhNode: 5 | 6 | def __init__(self, value=[], parent=None): 7 | self.value = value 8 | self.children = [] 9 | self.parent = parent 10 | if self.parent: 11 | self.parent.add_child(self) 12 | 13 | def add_child(self, item): 14 | item.parent = self 15 | self.children.append(item) 16 | 17 | def filter(self, key): 18 | for child in self.children: 19 | if child.value[0] == key: 20 | yield child 21 | 22 | def __iter__(self): 23 | for child in self.children: 24 | yield child 25 | 26 | def __getitem__(self, key): 27 | for child in self.children: 28 | for index, item in enumerate(child.value): 29 | if item == key: 30 | if index + 1 >= len(child.value): 31 | return None 32 | else: 33 | return child.value[index + 1:] 34 | raise IndexError('key {} not found'.format(key)) 35 | 36 | def __repr__(self): 37 | return str(' '.join(self.value)) 38 | 39 | @property 40 | def name(self): 41 | return self.value[1] 42 | 43 | 44 | class Bvh: 45 | 46 | def __init__(self, data): 47 | self.data = data 48 | self.root = BvhNode() 49 | self.frames = [] 50 | self.tokenize() 51 | 52 | def tokenize(self): 53 | first_round = [] 54 | accumulator = '' 55 | for char in self.data: 56 | if char not in ('\n', '\r'): 57 | accumulator += char 58 | elif accumulator: 59 | first_round.append(re.split('\\s+', accumulator.strip())) 60 | accumulator = '' 61 | node_stack = [self.root] 62 | frame_time_found = False 63 | node = None 64 | for item in first_round: 65 | if frame_time_found: 66 | self.frames.append(item) 67 | continue 68 | key = item[0] 69 | if key == '{': 70 | node_stack.append(node) 71 | elif key == '}': 72 | node_stack.pop() 73 | else: 74 | node = BvhNode(item) 75 | node_stack[-1].add_child(node) 76 | if item[0] == 'Frame' and item[1] == 'Time:': 77 | frame_time_found = True 78 | 79 | def search(self, *items): 80 | found_nodes = [] 81 | 82 | def check_children(node): 83 | if len(node.value) >= len(items): 84 | failed = False 85 | for index, item in enumerate(items): 86 | if node.value[index] != item: 87 | failed = True 88 | break 89 | if not failed: 90 | found_nodes.append(node) 91 | for child in node: 92 | check_children(child) 93 | check_children(self.root) 94 | return found_nodes 95 | 96 | def get_joints(self): 97 | joints = [] 98 | 99 | def iterate_joints(joint): 100 | joints.append(joint) 101 | for child in joint.filter('JOINT'): 102 | iterate_joints(child) 103 | iterate_joints(next(self.root.filter('ROOT'))) 104 | return joints 105 | 106 | def get_joints_names(self): 107 | joints = [] 108 | 109 | def iterate_joints(joint): 110 | joints.append(joint.value[1]) 111 | for child in joint.filter('JOINT'): 112 | iterate_joints(child) 113 | iterate_joints(next(self.root.filter('ROOT'))) 114 | return joints 115 | 116 | def get_joint_index(self, name): 117 | return 
self.get_joints().index(self.get_joint(name)) 118 | 119 | def get_joint(self, name): 120 | found = self.search('ROOT', name) 121 | if not found: 122 | found = self.search('JOINT', name) 123 | if found: 124 | return found[0] 125 | raise LookupError('joint not found') 126 | 127 | def joint_offset(self, name): 128 | joint = self.get_joint(name) 129 | offset = joint['OFFSET'] 130 | return (float(offset[0]), float(offset[1]), float(offset[2])) 131 | 132 | def joint_channels(self, name): 133 | joint = self.get_joint(name) 134 | return joint['CHANNELS'][1:] 135 | 136 | def get_joint_channels_index(self, joint_name): 137 | index = 0 138 | for joint in self.get_joints(): 139 | if joint.value[1] == joint_name: 140 | return index 141 | index += int(joint['CHANNELS'][0]) 142 | raise LookupError('joint not found') 143 | 144 | def get_joint_channel_index(self, joint, channel): 145 | channels = self.joint_channels(joint) 146 | if channel in channels: 147 | channel_index = channels.index(channel) 148 | else: 149 | channel_index = -1 150 | return channel_index 151 | 152 | def frame_joint_channel(self, frame_index, joint, channel, value=None): 153 | joint_index = self.get_joint_channels_index(joint) 154 | channel_index = self.get_joint_channel_index(joint, channel) 155 | if channel_index == -1 and value is not None: 156 | return value 157 | return float(self.frames[frame_index][joint_index + channel_index]) 158 | 159 | def frame_joint_channels(self, frame_index, joint, channels, value=None): 160 | values = [] 161 | joint_index = self.get_joint_channels_index(joint) 162 | for channel in channels: 163 | channel_index = self.get_joint_channel_index(joint, channel) 164 | if channel_index == -1 and value is not None: 165 | values.append(value) 166 | else: 167 | values.append( 168 | float( 169 | self.frames[frame_index][joint_index + channel_index] 170 | ) 171 | ) 172 | return values 173 | 174 | def frames_joint_channels(self, joint, channels, value=None): 175 | all_frames = [] 176 | joint_index = self.get_joint_channels_index(joint) 177 | for frame in self.frames: 178 | values = [] 179 | for channel in channels: 180 | channel_index = self.get_joint_channel_index(joint, channel) 181 | if channel_index == -1 and value is not None: 182 | values.append(value) 183 | else: 184 | values.append( 185 | float(frame[joint_index + channel_index])) 186 | all_frames.append(values) 187 | return all_frames 188 | 189 | def joint_parent(self, name): 190 | joint = self.get_joint(name) 191 | if joint.parent == self.root: 192 | return None 193 | return joint.parent 194 | 195 | def joint_parent_index(self, name): 196 | joint = self.get_joint(name) 197 | if joint.parent == self.root: 198 | return -1 199 | return self.get_joints().index(joint.parent) 200 | 201 | @property 202 | def nframes(self): 203 | try: 204 | return int(next(self.root.filter('Frames:')).value[1]) 205 | except StopIteration: 206 | raise LookupError('number of frames not found') 207 | 208 | @property 209 | def frame_time(self): 210 | try: 211 | return float(next(self.root.filter('Frame')).value[2]) 212 | except StopIteration: 213 | raise LookupError('frame time not found') 214 | -------------------------------------------------------------------------------- /cvtjoint_viton2smpl.py: -------------------------------------------------------------------------------- 1 | ''' 2 | 3 | conevrt from VITON Half-body model pose to simple npz file 4 | 5 | (c) 2019 matiur Rahman and heejune Ahn @ icomlab.seoutech.ac.kr 6 | 7 | 8 | Description 9 | ============ 10 | 11 | the 
ordering of npz file (is strange and in-efficient.) differ from input format 12 | number of joints for SMPL is 14, input is 18. (@TODO) 13 | the order of joints also different. so that so remapping is needed as same as LSP format 14 | 15 | each json has 18*3 in 1-D format 16 | 17 | 3 x 14 x #ofimages but we used all 18 so 3 x 18 x #ofimages 18 | 19 | 20 | VITON json format 21 | ------------------- 22 | { 23 | 'version': 1.0, 24 | 'people': [ {"face_keypoints": [], 25 | "pose_keypoints": [ x1,y1,p1, x2,y2,p2, ..., x18,y18,p18]}, # 18x3 floats 26 | "hand_right_keypoints":[]}, 27 | "hand_left_keypoints":[]} ] # originally multi-persons now only one assumped 28 | } 29 | 30 | x, y : in pixel unit 31 | p : confidence probability 32 | 33 | 34 | SMPL joint format 35 | -------------------- 36 | 37 | npz of list: index (x/y/p, joint, images) # pretty stupid format 38 | 39 | [ [ [ img 0 ] 40 | [ img 1 ] 41 | ... 42 | [ img last ] 43 | ] # joint-0 44 | ... 45 | ... 46 | ... # joint-18 47 | ] # x-pixel, (integer?) 48 | 49 | [ 50 | 51 | 52 | 53 | ] # y-pixel 54 | [ 55 | 56 | 57 | 58 | ] # confidence 59 | 60 | ''' 61 | import sys 62 | import os 63 | import json 64 | import numpy as np 65 | import pprint 66 | import vis_joints 67 | 68 | bcheck = True 69 | 70 | # append one person's joints into list 71 | def append_bodyjoints(bodyjoints, imgidx, joints, n_joints): 72 | 73 | for j in range(n_joints): 74 | # converting the viton joint ordering to lsp joint order 75 | if j < 14: # LSP joints: mapped 76 | joints[0,vis_joints.viton2lsp_joint[j],imgidx] = bodyjoints[3*j] 77 | joints[1,vis_joints.viton2lsp_joint[j],imgidx] = bodyjoints[3*j+1] 78 | joints[2,vis_joints.viton2lsp_joint[j],imgidx] = bodyjoints[3*j+2] 79 | else: # out of LSP, so same index mapping 80 | joints[0,j,imgidx] = bodyjoints[3*j] 81 | joints[1,j,imgidx] = bodyjoints[3*j+1] 82 | joints[2,j,imgidx] = bodyjoints[3*j+2] 83 | 84 | ''' 85 | joints[0,j,imgidx] = bodyjoints[3*j] 86 | joints[1,j,imgidx] = bodyjoints[3*j+1] 87 | joints[2,j,imgidx] = bodyjoints[3*j+2] 88 | ''' 89 | 90 | 91 | # load all the json files into list of x, y, p 92 | # Note: np is faster than list, pre-allocation is much faster than append 93 | def load_alljointjsons(joint_dir, img_dir): 94 | 95 | # 1. get the list 96 | joint_files = os.listdir(joint_dir) 97 | joint_files.sort() # make the ordering 98 | 99 | if bcheck: 100 | joint_files = joint_files[:5] # for test purpose 101 | 102 | # 1. add joint arrays 103 | n_joints = 18# the joint # of viton json file, 14 # LSP SMPL joint number 104 | smpl_joints = np.zeros((3, n_joints, len(joint_files)), dtype=float) 105 | imgidx = np.zeros((len(joint_files)), dtype=int) # for image index 106 | 107 | # 2. read joints from json files 108 | for count, each in enumerate(joint_files): 109 | print(count, " converting ", each) 110 | with open(joint_dir + each) as json_file: 111 | p = json.load(json_file) 112 | body_joints = p['people'][0]['pose_keypoints'] # needs body joint only 113 | append_bodyjoints(body_joints, count, smpl_joints, n_joints) 114 | 115 | ## file idex 116 | idx = each.replace('_0_keypoints.json', '') 117 | idx = int(idx) 118 | imgidx[count] = idx 119 | 120 | # visualize 121 | if bcheck: 122 | img_f_name = each.replace('_keypoints.json', '.jpg') 123 | img_f_path = img_dir + img_f_name 124 | bj = np.array(body_joints) 125 | bj = np.reshape(bj, (-1,3)) 126 | print(bj) 127 | vis_joints.visualize_joints(img_f_path, bj, True) 128 | 129 | return smpl_joints, imgidx 130 | 131 | 132 | # 1. 
check joint estimation file format
133 | def check_smpl_joint_file(fname):
134 | 
135 |     with np.load(fname) as zipfile:  # zip file loading
136 |         est = zipfile['est_joints']
137 |         print("shape:", est.shape, ", type:", est.dtype)
138 |         for imgidx in range(5):
139 |             joints = est[:2, :, imgidx].T  # T for joint-wise
140 |             conf = est[2, :, imgidx]
141 |             print('joints:', joints)
142 |             print('conf:', conf)
143 | 
144 | '''
145 | def check_idx(fname):
146 | 
147 |     with np.load(fname) as zipfile:  # zip file loading
148 |         idxes = zipfile['idx']
149 |         print("shape:", idxes.shape, ", type:", idxes.dtype)
150 |         for i in range(50):
151 |             print( idxes[i] )
152 | '''
153 | 
154 | #
155 | # viton json files to smpl npz file
156 | #
157 | 
158 | def cvt_viton2smpl():
159 | 
160 |     #viton_joints_dir = "D:/Datasets/viton_resize/test/pose/"
161 |     viton_joints_dir = "/home/heejune/Work/VTON/VITON/Dataset/viton_resize/test/pose/"
162 |     img_dir = "/home/heejune/Work/VTON/VITON/Dataset/viton_resize/test/image/"
163 |     joints, idxes = load_alljointjsons(viton_joints_dir, img_dir)
164 | 
165 |     '''
166 |     pp = pprint.PrettyPrinter(depth=6)
167 |     pp.pprint(joints)
168 |     '''
169 |     np.savez("viton_est_joints2.npz", est_joints=joints)
170 |     #np.savez("viton_est_joints.npz", est_joints=joints, idx=idxes)
171 |     print("\nConversion finished!")
172 | 
173 |     ## validation #####
174 |     print('### reference file ###########################################')
175 |     check_smpl_joint_file('est_joints.npz')
176 |     #check_smpl_joint_file('10k_est_joints.npz')
177 |     print('### my file ###########################################')
178 |     check_smpl_joint_file("viton_est_joints2.npz")
179 |     ''' no more using
180 |     print('### idxes ###########################################')
181 |     check_idx("viton_est_joints.npz")
182 |     '''
183 | 
184 | 
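# ----------------------------------------------------------------------
# NOTE: a minimal read-back sketch for the npz written above (the array
# is indexed est[x/y/conf, joint, image], as described in the header):
#
#   est = np.load("viton_est_joints2.npz")['est_joints']   # (3, 18, N)
#   joints_xy = est[:2, :, 0].T    # (18, 2) pixel coords of image 0
#   conf      = est[2, :, 0]       # (18,)   detection confidences
# ----------------------------------------------------------------------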
# convert numpy to json for a single person joint
185 | def cvt_np2json(joints_np):
186 | 
187 |     # 1. re-ordering
188 |     # same as viton2lsp_joint, with the remaining joints unchanged
189 |     order = [13, 12, 8, 7, 6, 9, 10, 11, 2, 1, 0, 3, 4, 5, 14, 15, 16, 17]
190 | 
191 |     # 2. build dictionary
192 |     oneperson = { "face_keypoints": [],
193 |                   "pose_keypoints": joints_np[order].flatten().tolist(),
194 |                   "hand_right_keypoints": [],
195 |                   "hand_left_keypoints": []}
196 | 
197 |     people = [oneperson]
198 |     joints_json = { "version": 1.0, "people": people }
199 | 
200 |     return joints_json
201 | 
202 | def test_cvt_smpl2viton():
203 | 
204 |     fname = 'viton_est_joints_18.npz'
205 |     num = 1
206 |     with np.load(fname) as zipfile:  # zip file loading
207 |         est = zipfile['est_joints']
208 |         print("shape:", est.shape, ", type:", est.dtype)
209 |         for imgidx in range(num):
210 |             joints_np = est[:3, :, imgidx].T  # T for joint-wise
211 |             print('joints:', joints_np)
212 | 
213 |             joints_json = cvt_np2json(joints_np)
214 | 
215 |             json_file_path = '%06d_0_keypoints.json' % imgidx
216 |             with open(json_file_path, 'w') as json_file:
217 |                 json.dump(joints_json, json_file)
218 | 
219 | if __name__ =='__main__':
220 | 
221 |     if len(sys.argv) < 2:
222 |         print('usage: ' + sys.argv[0] + ' tojson (for viton) or tonp (for smpl)')
223 |         exit()
224 | 
225 |     if sys.argv[1] == 'tonp':
226 |         cvt_viton2smpl()
227 |     elif sys.argv[1] == 'tojson':
228 |         test_cvt_smpl2viton()
229 |     else:
230 |         print('undefined command')
231 | 
232 | 
--------------------------------------------------------------------------------
/README.txt:
--------------------------------------------------------------------------------
1 | 
2 | 
3 | SMPL model based 3D cloth shape recovery and virtual try-on project
4 | ------------------------------------------------------------------------
5 | 
6 | PI: Heejune AHN (SeoulTech)
7 | CoI: Matiur Rahman Minar (SeoulTech), Thai Thanh Tuan (SeoulTech), Paul Rosin (Cardiff U), Yukun Lai (Cardiff U)
8 | 
9 | ------------------------------------------------------------------------
10 | 
11 | 0. directory contents and sub-projects
12 | 
13 | 0. original SMPLify project (for reference purposes)
14 | 1. in-shop cloth VTON implementation (the current main project)
15 | 2. human image cloth swapping (finished first version, but not published work)
16 | 3. fashion show projects (partially published)
17 | 
18 | 
19 | 1. dependency
20 | 
21 | copy the smpl directory under the './code' directory;
22 | otherwise, add the smpl project's 'web_user' directory to PYTHONPATH
23 | 
24 | 
25 | ///////////////////////////////////////////////////////////////////////////////////////////////////
26 | 
27 | sub-project 1: In-shop cloth VTON
28 | 
29 | ///////////////////////////////////////////////////////////////////////////////////////////////////
30 | 
31 | 1) pipeline
32 | 
33 | Note: cnum      : viton original cloth number, e.g. 000010_1
34 |       vitonhnum : viton original human number, e.g. 000010_0
35 |       hnum      : sorted human image number e.g. 
viton_000010 36 | 37 | cloth/.jpg 38 | cloth-mask/.jpg --------------------------------+ 39 | | 40 | v 41 | +--------------+ smplparam.pkl +-----------------+ results/viton/ 42 | | smpltemplate |--------------------> | cloth2smplmask.m|---> /c2dw/.png 43 | | (python) | smpltemlatemask.png | (matlab) | /c2dwmask/.png 44 | +--------------+ smpltmeplate.pkl +-----------------+ | 45 | | | 46 | pose/.json's | | 47 | | image/.jpg | | 48 | | | | | 49 | | +---v---------+ | | 50 | | | sort_unsort |------------------------+ | 51 | | +---|---------+ | | | 52 | | v | | | 53 | | .png -----+ +----------------------+ +--------------+ 54 | | | | | | 55 | | +--v------------+ | +---v--------v---+ results/ 56 | +-v---------+ : img2smplviton :-------v------>: smpl3dclothxfer| /c3dw/_.png 57 | |cvtjoint_ ->est_joints.npz-> (python) : => |---> /c3dwmask/_.png 58 | |viton2smpl*| +---------------+ +--------^-------+ | 59 | +-----------+ | | 60 | +--------------------------------------|-------------------+ 61 | | | 62 | image-parse/.jpg | +------------------------- viton_test_pair.txt 63 | image/.jpg | | 64 | | +--------v---v---+ 65 | ----> | TON+ | -------------> VTON image 66 | | | 67 | +----------------+ 68 | 69 | 70 | 1. Data prepration from viton to SMPL 71 | 72 | viton data location: Work/VTON/Dataset 73 | 74 | 1.1 rename images 75 | test images should be renamed for easy use (because image number is not continous). 76 | sort_and_unsort.py 77 | * FORWARD 78 | images/0000xx_0.jpg => viton_%06d.jpg 79 | viton_list.npy 80 | * BACKWARD 81 | images/viton_%06d.jpg <= 00000n.jpg 82 | 83 | 1.2 generate est_joints.npz 84 | * FORWARD 85 | cvtjint_viton2smpl.py 86 | 1. sort the json files as above 87 | 2. read each file and extract the required joint and make a SMPL style format 88 | 3. save it into npz file 89 | 90 | * BACKWARD 91 | Also back to json format? 92 | 1. read viton.list.npy file 93 | for each 94 | 2. load the joint from pkl file and get the updated joint from SMPL model 95 | 3. load the json file (corresponding) 96 | 4. update the values in json file 97 | 5. save the updated values into another json file 98 | 99 | 1.3. gender.cvs 100 | created manually: all 1 (woman) 101 | 102 | 103 | 1) cvtjoint_viton2smpl.py 104 | 105 | sort and re-format (joint) all viton joint file (json) into a single smplify (LSP) npz format 106 | > python cvtjoint_viton2smpl.py tonp 107 | input: hard-coded for the viton joint directory 108 | output: viton.npz 109 | 110 | Note: already run and distributed 111 | 112 | 1') sort_unsort.py 113 | > python sort_unsort.py 114 | 115 | output> .png 116 | mask needed to be sorted? 117 | 118 | Note: hard-codded for Cardiff Desktop. 119 | already done 120 | 121 | 2) Template SMPL mask, joints (jsonfile), smpl and camera file used for mask and joints 122 | 123 | python smpltemplate.py <1> 124 | 125 | input : smpl model files 126 | output : templatemask.png wxh = 196x(254*3/2) if 0, 196x254 if 1 127 | templatejoints.json (same format as viton ) 128 | templateparam.pkl (smpl parameter, camera file) 129 | 130 | Note 1: in fact , the index (0) is not very important, the params are fixed, not using the specific index 131 | Note 2: you generate the file or you can get it from google drive too 132 | 133 | 3) img2smplviton.py 134 | 135 | python img2smplviton.py .. 
--dataset=viton --viz 136 | 137 | Note: the codes are modification from fit_3d.py for our application 138 | moddifed joint locations (*add detail here) 139 | optimization cost details 140 | 141 | 142 | 4) smpl3dclothxfer.py 143 | 144 | python smpl3dclothxfer.py .. cnum hnum 145 | 146 | input: /cloth2dwarped/.png 147 | /cloth2dwarpedmask/.png 148 | output: /cloth3dwarped/.png 149 | /cloth3dwarpedmask/.png 150 | 151 | 152 | //////////////////////////////////////////////////////////////////////////////////////////// 153 | 154 | subProject 2: Old files for human cloth swapping and Fashion show with 10k dataset 155 | 156 | //////////////////////////////////////////////////////////////////////////////////////////// 157 | 158 | 159 | First verion: model cloth swapping 160 | 161 | 162 | s.mask ---------------------+ 163 | : 164 | s.img --> +----------+ +--> +------------+ 165 | : img2smpl : --------> : smpl2cloth : cloth_v3d, label 166 | s.joint-> : : s.smpl_p : :--------+ 167 | +-> +----------+ s.cam +------------+ : 168 | : +-->+-------------+ 169 | smpl -+ :smplxlothxfer:-> vtoned img 170 | : +-->+-------------+ 171 | +-> +----------+ +------------+ : 172 | : img2smpl : --------> : smpl2body : -------+ 173 | t.joint-> : : t.smpl_p : : body_v3d, label 174 | t.img -> +----------+ t.cam +-> +------------+ 175 | : 176 | t.mask ----------------------+ 177 | 178 | 179 | 180 | 181 | 182 | 1) 2D image with Joint to SMPL model 183 | 184 | for 10k dataset 185 | python img2smpl.py .. --dataset=10k --viz 186 | 187 | for viton dataset 188 | python img2smplviton.py .. --dataset=viton --viz 189 | 190 | Note: 191 | the codes are simple modification from fit_3d.py for our application 192 | 193 | 2) Template SMPL mask, joints (jsonfile), smpl and camera file used for mask and joints 194 | 195 | python smpl2mask.py .. viton 0 196 | 197 | input: smpl parameter file (pkl), SMPL template model files 198 | 199 | output : templatemask.png ( w x h ) : we can turn on full-size mask with size_ext = True in the code. 200 | templatejoints.json (same format as viton ) 201 | templateparam.pkl (parmater file) 202 | 203 | Note 1: in fact , the index (0) is not very important, the params are fixed, not using the specific index 204 | Note 2: you generate the file or you can get it from google drive too 205 | 206 | 207 | 2') SMPL model to shillouette mask 208 | 209 | python smpl2mask.py .. viton 1 210 | 211 | 1. load pkl file 212 | 2. pose it into standard pose 213 | 3. rendering it into (binary) mask 214 | 4. the json joint gneration 215 | 216 | 2'') resorting the image and json joint files 217 | 218 | 1. when saving use the viton_list file for re-naming the images 219 | 220 | 221 | 3) cloth 3d reconstuction 222 | python smpl3dclothrec.py .. smpltemplate.pkl cloth.png clothmask.png 223 | 224 | Note 1: The script can be run for testing purpose as above. 225 | Or can be used other script, i.e., smpl3dclothxfer.py 226 | 227 | 3') SMPL model to Cloth model 228 | python smpl2cloth.py .. 10k 1 229 | 230 | graphuitl.py used for vertices operations 231 | boundary_maching.py used for matching boudnary with TPS algorithm 232 | 233 | 4) 3D warping/transfer cloth 234 | 235 | python smpl3dcothxfer.py .. 236 | 237 | note 1: the script uses smpl3dclothrec.py for reconstruct 3d model of cloth from 2d matching 238 | note 2: 239 | 240 | 241 | 242 | 243 | 4') transfer cloth from src human image to dst human image 244 | python smpl_cloth_xfer.py 1 .. 10k 245 | 246 | 247 | 2. 
Directory and data prepration 248 | -------------- - 249 | 250 | mpips-smplify_public_v2 251 | | 252 | + code # python scripts 253 | | + fid_3d.py, render_model.py, show_humaneva.py # original SMPLify release 254 | | + img2smpl.py, smpl2cloth.py (and graphutil.py, boundary_matching.py), smplclothxfer.py # my work 255 | | + --- model # smpl model files 256 | | +--- basicmodel_f/m_lbs_10_207_0_v1.0.0..pkl 257 | | +--- gmm_08.pkl 258 | | +--- regression_locked_nromalized_female/male/hybrid.npz 259 | | +---- library 260 | | 261 | + results 262 | | +--- <10k> or 263 | | +---smpl 264 | | | + %04d.pkl 265 | | | + %04d.png 266 | | +---segmentation 267 | | | + %04d.png 268 | | +---cloth 269 | | | + %04d.pkl 270 | | +---vton 271 | | + %04d.pkl 272 | | 273 | + images 274 | +--- 10k 275 | +--- dataset10k_%04d.jpg 276 | +--- viton 277 | +--- %06d.jpg 278 | 279 | 280 | 281 | 282 | 283 | 284 | 285 | 286 | //////////////////////////////////////////////////////////////////////////////////////////// 287 | 288 | Git Usage 289 | 290 | //////////////////////////////////////////////////////////////////////////////////////////// 291 | 292 | // setup local reposity 293 | git init 294 | touch *.py 295 | touch *.txt 296 | 297 | // add files to local repository 298 | git status 299 | git add *.py 300 | git add README.txt 301 | 302 | // commit them 303 | 2106 git status 304 | 2107 git commit -m 'first commit for sharing update' 305 | 306 | // push to github site 307 | 2108 git remote add origin https://github.com/ahnHeejune/smplvton01.git 308 | 2109 git push -u origin master 309 | 310 | -------------------------------------------------------------------------------- /cloth2smplmask.m: -------------------------------------------------------------------------------- 1 | % 2 | clear all; % all variable cleared 3 | close all; % all figures closed 4 | 5 | 6 | %{ 7 | This is not for Lips classification 8 | 9 | labels = {"background", # 0 10 | "hat", # 1 11 | "hair", # 2 12 | "sunglass", # 3 13 | "upper-clothes", # 4 14 | "skirt", # 5 15 | "pants", # 6 16 | "dress", # 7 17 | "belt", # 8 18 | "left-shoe", # 9 19 | "right-shoe", # 10 20 | "face", # 11 21 | "left-leg", # 12 22 | "right-leg", # 13 23 | "left-arm",# 14 24 | "right-arm", # 15 25 | "bag", # 16 26 | "scarf" # 17 27 | ] 28 | %} 29 | 30 | addpath('shape_context') 31 | 32 | smpl_model = true; 33 | 34 | %%%%%%% VITON DATASET %%%%%%%%%%%%%%%%%%%% 35 | %%% train -- cloth : cloth images [hxw =256x192] jpg 36 | %%% cloth-mask : FG mask of cloth images [fg: white] %%% Some are not clean, JPG ^^ 37 | %%% image : model image [256x192x3] jpg 38 | %%% image-pare : segmentation label image PNG 39 | %%% pose : joint info JSON 40 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 41 | 42 | backward = true; 43 | scale_down = false; 44 | 45 | 46 | %DATA_TOP ='D:\3.Project\9.Fashion\3.Dataset\VITON_TPS\viton_resize\train'; 47 | DATA_TOP ='D:\3.Project\9.Fashion\3.Dataset\VITON_TPS\viton_resize\test'; 48 | 49 | RESULT_DIR = './scmm_results/'; 50 | 51 | %DATA_ROOT='cloth/woman_top'; % in-shop cloth 52 | DATA_ROOT= [DATA_TOP,'/image-parse/']; 53 | MODEL_ROOT=[DATA_TOP, '/image/']; 54 | 55 | %MASK_DIR='results/stage1/tps/00015000_'; % mask for cloth area in model using NN model 56 | MASK_DIR = [DATA_TOP,'/cloth-mask/']; 57 | CLOTH_DIR = [DATA_TOP,'/cloth/']; 58 | 59 | % Check if using MATLAB or Octave 60 | isOctave = exist('OCTAVE_VERSION', 'builtin') ~= 0; 61 | if(isOctave) 62 | % Load image package for resizing images 63 | pkg load image; 64 | % Turn off warning 65 | warning('off', 
'Octave:possible-matlab-short-circuit-operator'); 66 | end 67 | 68 | %pairs_file = 'data/viton_test_pairs_classified_same.txt'; % 'data/viton_test_pairs.txt' 69 | %pairs_file = 'data/viton_test_pairs_classified_diff.txt'; % 'data/viton_test_pairs.txt' 70 | pairs_file = 'data/viton_smplmatching.txt'; % 'data/viton_test_pairs.txt' 71 | 72 | 73 | [image1, image2, comment] = textread(pairs_file, '%s %s %s'); 74 | 75 | % 76 | % @TODO: improve target area with paramgers of input cloth 77 | % 78 | if smpl_model 79 | 80 | smpl_mask_original = imread('templatemask.png'); %'smplmaskref.png'); 81 | smpl_mask_long = smpl_mask_original; % it is deep copy in matlab 82 | smpl_mask_long(200:end, :) = 0; % hide the hands and legs for maskt 83 | smpl_mask_long(1:55, :) = 0; % hide neck and head 84 | 85 | smpl_mask_short = smpl_mask_long; % it is deep copy in matlab 86 | % get the body bounddary 87 | body_line = zeros(size(smpl_mask_short,1),2); 88 | mid_x = round(size(smpl_mask_short,2)/2); 89 | for y = 1: size(smpl_mask_short,1) 90 | for x= mid_x: size(smpl_mask_short,2) 91 | if smpl_mask_short(y, x) <= 0 % out of body 92 | break; 93 | end 94 | 95 | end 96 | body_line(y,1) = mid_x - (x - mid_x); % start of body 97 | body_line(y, 2) = x; % end of body 98 | end 99 | 100 | for y = 1: size(smpl_mask_short,1) 101 | for x= 1: size(smpl_mask_short,2) 102 | % delete out side of body 103 | if x < body_line(y,1) || x > body_line(y,2) 104 | smpl_mask_short(y,x) = 0; 105 | end 106 | end 107 | end 108 | 109 | 110 | %imshow(smpl_mask_short); 111 | %axis('image'); 112 | end 113 | 114 | 115 | 116 | 117 | % using a smaller height and width for the shape context matching 118 | % can save time without hurting the perform too much. 119 | 120 | % original image size 121 | h_o = 256; 122 | w_o = 192; 123 | 124 | if scale_down 125 | h = h_o/4; 126 | w = w_o/4; 127 | else 128 | h = h_o; 129 | w = w_o; 130 | end 131 | 132 | % we use 10x10 control_points 133 | n_control = 100; 134 | for i = 1:length(image1) % only run over 1 image (for now) 135 | 136 | if i == 1 137 | smpl_mask = smpl_mask_long; 138 | elseif i == 2 || i == 3 139 | smpl_mask = smpl_mask_short; 140 | end 141 | 142 | image_name1 = image1{i}; 143 | image_name2 = image2{i}; 144 | 145 | if exist([MASK_DIR, image_name1, '_', image_name2, '_tps.mat']) 146 | disp('already done'); 147 | %continue; 148 | end 149 | 150 | 151 | TOP_LABEL = 5; 152 | % MASK in model image 153 | V1 = imread([DATA_ROOT, image_name2]); 154 | model_original_mask = imread([DATA_ROOT, image_name2]); % for later use 155 | [h0, w0, ~] = size(V1); 156 | if ~backward 157 | grayImage = imresize(im2double(V1), [h,w]); 158 | orig_im_mask = cat(3, grayImage*255, grayImage*255, grayImage*255); 159 | end 160 | % extract fashion item masks 161 | if false 162 | V1 = V1(:,:,1) ~= 255 & V1(:,:,2) ~= 255 & V1(:,:,3) ~= 255; 163 | V1 = imresize(double(V1), [h,w], 'nearest'); 164 | else 165 | if ~smpl_model 166 | V1 = (V1 == TOP_LABEL); 167 | else 168 | V1 = (smpl_mask > 200); 169 | end 170 | V1 = imresize(double(V1), [h,w], 'nearest'); 171 | 172 | % model 173 | model_img_name = strrep(image_name2,'png','jpg'); 174 | model = imread([MODEL_ROOT, model_img_name]); 175 | model = imresize(im2double(model), [h,w]); 176 | 177 | end 178 | 179 | if ~smpl_model 180 | V1 = imfill(V1); 181 | V1 = medfilt2(V1); 182 | else 183 | se = strel('square',3); 184 | V1 = imdilate(V1, se); 185 | end 186 | % Load product mask of image. 
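    % NOTE: the masks under cloth-mask/ are JPEGs (see the dataset comment in
    % the header), so the '> 128' thresholding below re-binarizes values that
    % JPEG compression has smeared away from pure 0/255.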
187 | % AHN: needs to generate using the 'parsed' image 188 | if false 189 | V2 = load([MASK_DIR, image_name1, '_', image_name2, '_mask.mat']); % stored in mat format, where? 190 | V2 = imresize(double(V2.mask), [h,w]); 191 | else 192 | V2 = imread([MASK_DIR, image_name1]); %% JPEG mask, so it must be re-binarized 193 | 194 | if false 195 | V2 = (V2 > 128); 196 | V2 = imresize(double(V2), [h,w],'nearest'); 197 | else 198 | V2 = (V2 > 128); 199 | V2 = imresize(double(V2), [h,w],'nearest'); 200 | 201 | % modifying the segmentation 202 | % if we do this here, the warping gets worse, so we move it to 203 | % after warping 204 | SE = strel('rectangle',[3,3]); 205 | V2 = imerode(V2, SE); 206 | end 207 | 208 | cloth = imread([CLOTH_DIR, image_name1]); 209 | cloth = imresize(cloth, [h,w]); 210 | 211 | end 212 | 213 | if backward 214 | grayImage = imresize(im2double(V2), [h,w]); 215 | orig_im_mask = cat(3, grayImage*255, grayImage*255, grayImage*255); 216 | end 217 | 218 | 219 | % CHECK the input to Shape Context 220 | fig = figure; 221 | if backward 222 | subplot(2,4,3); 223 | else 224 | subplot(2,4,2); 225 | end 226 | imshow(uint8(V1*255.0)); % instead of imagesc(V1) 227 | axis('image'); 228 | title('mask in model image'); 229 | 230 | 231 | if backward 232 | subplot(2,4,2); 233 | else 234 | subplot(2,4,3); 235 | end 236 | imshow(uint8(V2*255)); 237 | axis('image'); 238 | title('cloth mask'); 239 | 240 | 241 | subplot(2,4,1); 242 | imshow(cloth); 243 | axis('image'); 244 | title(['cloth', image1{i}]); 245 | 246 | subplot(2,4,4); 247 | if ~smpl_model 248 | %imshow(model); 249 | imagesc(model); 250 | axis('image'); 251 | else 252 | imagesc(smpl_mask); 253 | axis('image'); 254 | end 255 | title(['model:', comment{i}]); 256 | 257 | 258 | % TPS transformation 259 | % Parameter estimation (in fact, grid/control points on the in-shop cloth (src)) 260 | % V1 (orig_im) => TPS => V2 (warped_im) 261 | % keypoints1 keypoints2 262 | % 263 | try 264 | if backward 265 | tic;[keypoints1, keypoints2, warp_points0, warped_cloth, warped_mask] = tps_main(V2, V1, n_control, im2double(cloth), orig_im_mask, 0);toc; 266 | warped_mask(isnan(warped_mask)) = 0.0; 267 | 268 | if false 269 | SE = strel('rectangle',[2,2]); 270 | warped_mask = imerode(warped_mask, SE); 271 | end 272 | 273 | %tic;[keypoints1, keypoints2, warp_points0, warped_cloth] = tps_main(V2, V1, n_control, im2double(cloth), 0);toc; 274 | warped_cloth(isnan(warped_cloth)) = 255.0; 275 | 276 | 277 | else 278 | tic;[keypoints1, keypoints2, warp_points0, warp_im] = tps_main(V1, V2, n_control, orig_im, 0);toc; 279 | end 280 | 281 | catch ME 282 | % when there are not enough keypoints for estimating the TPS 283 | % transformation 284 | disp('not enough keypoints') 285 | disp(ME) 286 | continue 287 | end 288 | 289 | % CHECK the warping results 290 | figure(fig); 291 | subplot(2,4,5); 292 | 293 | cloth_w_mask = zeros(size(cloth)); 294 | cloth_w_mask(1:end,1:end,1) = im2double(cloth(1:end, 1:end, 1)) .* V2; 295 | cloth_w_mask(1:end,1:end,2) = im2double(cloth(1:end, 1:end, 2)).* V2; 296 | cloth_w_mask(1:end,1:end,3) = im2double(cloth(1:end, 1:end, 3)).* V2; 297 | 298 | imagesc(cloth_w_mask); %imshow(uint8(orig_im*255.0)); % instead of imagesc(V1) 299 | axis('image'); 300 | title('orig im and mask'); 301 | 302 | subplot(2,4,6); 303 | imagesc(warped_mask); %imshow(uint8(warp_im*255.0)); 304 | axis('image'); 305 | title('warp mask'); 306 | 307 | subplot(2,4,7); 308 | imagesc(warped_cloth); %imshow(uint8(warp_im*255.0)); 309 | axis('image'); 310 | title('warp cloth'); 311 | 312 | subplot(2,4,8); 313 | 
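% (added note) the blending below is plain alpha compositing with the warped
% cloth mask (scaled to [0,1]) as the alpha channel; a minimal sketch with
% hypothetical fg/bg images:
%   alpha = warped_mask / 255.0;
%   out   = im2double(bg) .* (1 - alpha) + im2double(fg) .* alpha;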
314 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 315 | % Blending 316 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 317 | alpha_ch = warped_mask; 318 | alpha_ch = alpha_ch/255.0; % binary alpha 319 | 320 | % clear the cloth area 321 | % simply masking 322 | % model_wo_cloth = model .* ~V1; 323 | 324 | if smpl_model 325 | model_wo_cloth = cat(3, smpl_mask, smpl_mask, smpl_mask); 326 | else 327 | % inpainting 328 | model_wo_cloth = cat(3, regionfill(model(:,:,1),V1), regionfill(model(:,:,2),V1), regionfill(model(:,:,3),V1)); 329 | end 330 | 331 | overlayed = im2double(model_wo_cloth) .* ( 1 - alpha_ch) + warped_cloth .* alpha_ch; 332 | 333 | % restore the face (11), hair (2), and arms (14/15), except the target cloth 334 | if ~smpl_model 335 | face_hairs_arms_mask = model_original_mask == 2 | model_original_mask == 11 | model_original_mask == 14 | model_original_mask == 15; 336 | overlayed = overlayed .* ( 1 - face_hairs_arms_mask) + model .* face_hairs_arms_mask; 337 | end 338 | 339 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 340 | % Evaluate the result 341 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 342 | has_gt = false; 343 | if has_gt 344 | % evaluate the GMM by IoU 345 | % 1. convert to logical type 346 | cloth_mask_gt = V1 > 0; %uint8(V1); 347 | cloth_mask_est = warped_mask(:,:, 1) > 0.0; % 3 channels are same 348 | %iouval = evaluateSemanticSegmentation(cloth_mask_gt, cloth_mask_est); 349 | union_area = cloth_mask_gt | cloth_mask_est; 350 | intersect_area = cloth_mask_gt & cloth_mask_est; 351 | xor_area = xor( cloth_mask_gt, cloth_mask_est); 352 | iouval = sum(intersect_area(:))/sum(union_area(:)); 353 | 354 | % evaluate the TON by SSIM 355 | [ssimval,ssimmap] = ssim(overlayed, model); 356 | 357 | msg = sprintf('IOU=%f, SSIM=%f', iouval, ssimval); 358 | disp(msg); 359 | end 360 | 361 | imagesc(overlayed); %imshow(uint8(warp_im*255.0)) 362 | axis('image'); 363 | if exist('msg') 364 | title(['overlayed(', msg, ')']); 365 | else 366 | title(['overlayed']); 367 | end 368 | drawnow; 369 | 370 | % SAVING 371 | %filename = [RESULT_DIR, image_name2, '_', image_name1, '_result.jpg']; 372 | %saveas(fig, filename) 373 | filename = [RESULT_DIR, image_name2, '_', image_name1, '_overlayed.png']; 374 | imwrite(overlayed, filename); 375 | filename = [RESULT_DIR, image_name2, '_', image_name1, '_2dwarped.png']; 376 | imwrite(warped_cloth, filename); 377 | filename = [RESULT_DIR, image_name2, '_', image_name1, '_2dwarpedmask.png']; 378 | imwrite(warped_mask, filename); 379 | 380 | end 381 | -------------------------------------------------------------------------------- /smpl3dclothxfer.py: -------------------------------------------------------------------------------- 1 | """ 2 | cloth 3d model reconstruction (smpl3dclothrecon.py) and transfer to a human model 3 | ------------------------------------------------------------------------------------------ 4 | 5 | (c) copyright 2019 heejune@seoultech.ac.kr 6 | 7 | In : used in smpl3dclothrec.py 8 | - SMPL template model params file (pkl) 9 | - 2D matched cloth image file and mask (png) 10 | used for transferring 11 | - SMPL target model params file (pkl) 12 | helper file 13 | - list.npy for re-ordering the smpl human image number to viton image number 14 | 15 | Note: re-ordering needed for SMPLify code 16 | 17 | Out: 18 | 3D warped cloth and mask (png) 19 | 20 | Note: the Texture (2D warped cloth) and related 2D vertex and face information is obtained 21 | with original SMPL and camera parameters 22 | 
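(added sketch) at its core the transfer in this file is two calls, shown here
with the file's own functions and hypothetical inputs:

    d_local   = compute_displacement_at_vertex(model, body_src, cloth_src - body_src)
    clothed3d = transfer_body2clothed(model, body_tgt, d_local)

i.e., the cloth-to-body displacement is encoded in each vertex's local frame
on the source body and decoded on the target body.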
23 | For advance testing of part 3, we can change the pose and apply the displacement vectors: 24 | we apply the pose and shape params of the target user but keep the same texture and vertex/face definitions. 25 | 26 | 27 | template (source: pose and shape) target (pose and shape) 28 | -------------------------------------------------------------------------- 29 | SMPL- p smpltemplate.pkl results/viton/smpl/000000.pkl 30 | camera-p smpltemplate.pkl results/viton/smpl/000000.pkl 31 | 3D body-v smpl with template param smpl with target params 32 | 3D cloth-v displacement obtained use displacement obtained at template 33 | texture results/viton/2dwarp/00000_1.png same 34 | texture-v cam projected onto the texture same as template (not new vertices) 35 | texture-f model.f same 36 | lighting only for cloth-related vertices same 37 | 38 | 39 | """ 40 | from __future__ import print_function 41 | import sys 42 | from os.path import join, exists, abspath, dirname 43 | from os import makedirs 44 | import logging 45 | import cPickle as pickle 46 | import time 47 | import cv2 48 | import numpy as np 49 | import chumpy as ch 50 | from opendr.camera import ProjectPoints 51 | from smpl_webuser.serialization import load_model 52 | from smpl_webuser.verts import verts_decorated 53 | from render_model import render_model 54 | import inspect # for debugging 55 | import matplotlib.pyplot as plt 56 | from opendr.lighting import SphericalHarmonics 57 | from opendr.geometry import VertNormals, Rodrigues 58 | from opendr.renderer import TexturedRenderer 59 | import json 60 | from smpl_webuser.lbs import global_rigid_transformation 61 | 62 | _LOGGER = logging.getLogger(__name__) 63 | logging.basicConfig(level=logging.INFO) 64 | 65 | import boundary_matching 66 | import graphutil as graphutil 67 | 68 | import smpl3dclothrec 69 | 70 | # To understand and verify the SMPL model itself 71 | def _examine_smpl_template(model, detail = False): 72 | 73 | print(">> SMPL Template <<<<<<<<<<<<<<<<<<<<<<") 74 | print(type(model)) 75 | print(dir(model)) 76 | #print('kintree_table', model.kintree_table) 77 | print('pose:', model.pose) 78 | if detail: 79 | print('posedirs:', model.posedirs) 80 | print('betas:', model.betas) 81 | print('shape(model):', model.shape) 82 | if detail: 83 | print('shapedirs:', model.shapedirs) 84 | 85 | #print('bs_style:', model.bs_style) # f-m-n 86 | #print('f:', model.f) 87 | print('V template :', type(model.v_template)) 88 | print('V template :', model.v_template.shape) 89 | #print('weights:', model.weights) 90 | print('W type:', type(model.weights)) 91 | print('W shape:', model.weights.r.shape) 92 | if detail: 93 | print('W value:') 94 | print(model.weights.r) 95 | #parts = np.count_nonzero(model.weights.r, axis =1) 96 | parts = np.argmax(model.weights.r, axis=1) 97 | print(" :", parts.shape, parts[:6000]) 98 | 99 | #print('J:', model.J) 100 | #print('v_template:', model.v_template) 101 | #print('J_regressor:', model.J_regressor) 102 | 103 | # To understand and verify the parameters 104 | 105 | def _examine_smpl_params(params): 106 | 107 | print(type(params)) 108 | print(params.keys()) 109 | print('camera params') 110 | camera = params['cam'] 111 | print(" - type:", type(camera)) 112 | #print(" - members:", dir(camera)) 113 | print(" - cam.t:", camera.t.r) # non-zero, likely only nonzero z 114 | print(" - cam.rt:", camera.rt.r) # zero (fixed) 115 | # print(" - cam.camera_mtx:", camera.camera_mtx) # 116 | print(" - cam.k:", camera.k.r) # 117 | print(" - cam.c:", camera.c.r) # 118 | print(" - cam.f:", camera.f.r) # 
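    # (added note) these are standard pinhole-camera parameters: ignoring the
    # distortion k, a 3D point (x, y, z) in camera coordinates projects to
    # u = f_x*x/z + c_x, v = f_y*y/z + c_y.  A minimal sketch, assuming a
    # hypothetical point already expressed in camera coordinates:
    #   X = np.array([0.1, 0.2, 20.0])
    #   uv = camera.f.r * X[:2] / X[2] + camera.c.r   # pixel position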
119 | 120 | # print(params['f'].shape) # 2 121 | print('>> pose') 122 | pose = params['pose'] 123 | print("\t\ttype:", type(pose)) 124 | print('\t\tshape:', pose.shape) # 72 125 | 126 | # convert within 127 | #pose = pose % (2.0*np.pi) 128 | 129 | print('\t\tvalues (in degree):') 130 | print(pose*180.0/np.pi) # degree 131 | print('>> betas') 132 | betas = params['betas'] 133 | print('\ttype:', type(betas)) 134 | print('\tshape:', betas.shape) # 10 135 | # print('\tvalues:', params['betas']) # 10 136 | 137 | 138 | def construct_clothed3d_from_clothed2d_depth(body_sv, cam, clothed2d): 139 | 140 | 141 | # 1. get the dept for body vertex 142 | bodydepth = graphutil.build_depthmap2(body_sv.r, cam) 143 | 144 | 145 | check_depthmap = False 146 | if check_depthmap: 147 | # depth in reverse way 148 | plt.suptitle('depthmap') 149 | plt.subplot(1, 2, 1) 150 | plt.imshow(img[:, :, ::-1]) # , cmap='gray') 151 | plt.subplot(1, 2, 2) 152 | depthmap = graphutil.build_depthimage(body_sv.r, model.f, bodydepth, cam, height=h, width=w) 153 | #plt.imshow(depthmap, cmap='gray') 154 | plt.imshow(depthmap) 155 | plt.draw() 156 | plt.show() 157 | #plt.imshow(depthmap, cmap='gray_r') # the closer to camera, the brighter 158 | _ = raw_input('quit?') 159 | exit() 160 | 161 | 162 | # 2. modify the depth for clothed 163 | # @TODO 164 | 165 | # 3. unproject to 3D 166 | # uv space? pixels coordinated!! 167 | clothuvd = np.zeros(body_sv.r.shape) 168 | clothuvd[:,0] = clothed2d[:,0] 169 | clothuvd[:,1] = clothed2d[:,1] 170 | clothuvd[:,2] = bodydepth # @TODO for now simply use the same depth as body ^^; 171 | cloth3d = cam.unproject_points(clothuvd) 172 | #sv.r = cloth3d # now the model is not body but cloth 173 | 174 | return cloth3d 175 | 176 | 177 | 178 | # calcuated the local coordinates at each vetex. 179 | # 180 | # z : normal to the vertex 181 | # x : the smallest indexed neighbor vertex based unit vector 182 | # y : the remianing axis in right handed way, ie. z x x => y 183 | def setup_vertex_local_coord(faces, vertices): 184 | 185 | # 1.1 normal vectors (1st axis) at each vertex 186 | _, axis_z = graphutil.calc_normal_vectors(vertices, faces) 187 | # 1.2 get 2nd axis 188 | axis_x = graphutil.find2ndaxis(faces, axis_z, vertices) 189 | # 1.3 get 3rd axis 190 | axis_y = np.cross(axis_z[:, :], axis_x[:,:]) # matuir contribution. np.cross support row-vectorization 191 | 192 | return axis_x, axis_y, axis_z 193 | 194 | # 195 | # reporesent the displacement (now in global coord) into local coordinates 196 | # 197 | # model: smpl mesh structure 198 | # v0 : reference vertex surface, ie. the body 199 | # v*****array: vertext index array for interest 200 | # d : displacement, ie. v = v0 + d 201 | # 202 | def compute_displacement_at_vertex(model, v0, d_global): 203 | 204 | debug = False 205 | 206 | # 1.setup local coordinate system to each vertex 207 | axis_x, axis_y, axis_z = setup_vertex_local_coord(model.f, v0) 208 | 209 | # 2. 
express displacement in 3 axises 210 | #dlocal = np.concatenate(np.dot(d, axis_x), np.dot(d, axis_y), np.dot(d, axis_z)) 211 | xl = np.sum(d_global*axis_x, axis=1) 212 | yl = np.sum(d_global*axis_y, axis=1) 213 | zl = np.sum(d_global*axis_z, axis=1) 214 | d_local = np.stack((xl, yl, zl), axis = -1) 215 | print('dlocal shape:', xl.shape, yl.shape, zl.shape, d_local.shape) 216 | 217 | if debug: # verifying d_global = xs * axis_x + ys* axis_y + z*axis_z 218 | # get global coorindate vector 219 | xg = xl[:, None]*axis_x 220 | yg = yl[:, None]*axis_y 221 | zg = zl[:, None]*axis_z 222 | dg = xg + yg + zg 223 | 224 | # check the error 225 | err = np.absolute(dg - d_global) 226 | print('d, e x:', np.amax(d_global[:,0]), np.amax(err[:,0]), np.mean(d_global[:,0]), np.mean(err[:,0])) 227 | print('d, e y:', np.amax(d_global[:,1]), np.amax(err[:,1]), np.mean(d_global[:,1]), np.mean(err[:,1])) 228 | print('d, e z:', np.amax(d_global[:,2]), np.amax(err[:,2]), np.mean(d_global[:,2]), np.mean(err[:,2])) 229 | ''' 230 | print('d 0:', np.amax(d_global[:,0]), np.amin(d_global[:,0])) 231 | print('error0:', np.amax(err[:,0]), np.amin(err[:,0])) 232 | print('d 1:', np.amax(d_global[:,1]), np.amin(d_global[:,1])) 233 | print('error1:', np.amax(err[:,1]), np.amin(err[:,1])) 234 | print('d 2:', np.amax(d_global[:,2]), np.amin(d_global[:,2])) 235 | print('error2:', np.amax(err[:,2]), np.amin(err[:,2])) 236 | ''' 237 | 238 | return d_local 239 | 240 | 241 | # 242 | # @TODO: Do this !! the most key part combining with displacement generatrion 243 | # 244 | # model : the body surface structure 245 | # body : body surface vertices 246 | # vi4cloth: vertex index for the cloth surface 247 | # d : displacement vector in local coordinate 248 | # 249 | #def transfer_body2clothed(cam_tgt, betas_tgt, n_betas_tgt, pose_tgt, v4cloth, d): 250 | def transfer_body2clothed(model, body, d_local): 251 | 252 | # 1.setup local coordinate system to each vertex 253 | axis_x, axis_y, axis_z = setup_vertex_local_coord(model.f, body) 254 | 255 | # 2. express local to global 256 | # 2.1 select vectices under interest 257 | #axis_x, axis_y, axis_z = axis_x[vi4cloth], axis_y[vi4cloth], axis_z[vi4cloth] 258 | # 2.2 displacement in global coordinate 259 | xg = (d_local[:, 0])[:, None]*axis_x 260 | yg = (d_local[:, 1])[:, None]*axis_y 261 | zg = (d_local[:, 2])[:, None]*axis_z 262 | dg = xg + yg + zg 263 | 264 | # 3. adding them to the base/body vertices 265 | clothed = body + dg 266 | 267 | return clothed 268 | 269 | 270 | # display 3d model 271 | def render_cloth(cam, _texture, texture_v2d, faces, imHuman): 272 | 273 | #h, w = imTexture.shape[:2] 274 | h_ext, w = _texture.shape[:2] # full body 275 | h, _= imHuman.shape[:2] # half body 276 | 277 | texture = _texture[:,:, :] 278 | 279 | # 1. texture rendereriing 280 | dist = 20.0 281 | cloth_renderer = smpl3dclothrec.build_texture_renderer(cam, cam.v, faces, texture_v2d, faces, 282 | texture[::-1, :, :], w, h_ext, 1.0, near=0.5, far=20 + dist) 283 | imCloth = (cloth_renderer.r * 255.).astype('uint8') 284 | imCloth = imCloth[:h,:,::-1] 285 | 286 | # 2. mask generation 287 | im3CBlack = np.zeros([h, w, 3], dtype = np.uint8) 288 | imModel = (render_model( 289 | cam.v, faces, w, h, cam, far= 20 + dist, img=im3CBlack) * 255.).astype('uint8') 290 | imMask = cv2.cvtColor(imModel, cv2.COLOR_BGR2GRAY) # gray silhouette 291 | imMask[imMask > 0] = 255 # binary (0, 1) 292 | 293 | # 3. 
image overlay to check result 294 | imBG = imHuman[:,:,::-1].astype('float32')/255.0 295 | overlay_renderer = smpl3dclothrec.build_texture_renderer(cam, cam.v, faces, texture_v2d, faces, 296 | texture[::-1, :, :], w, h, 1.0, near=0.5, far=20 + dist, background_image = imBG) 297 | imOverlayed = overlay_renderer.r.copy() 298 | 299 | 300 | # plt.figure() 301 | plt.subplot(1, 4, 1) 302 | plt.axis('off') 303 | plt.imshow(texture[:h,:,::-1]) 304 | plt.title('texture') 305 | 306 | plt.subplot(1, 4, 2) 307 | plt.imshow(imCloth[:,:,::-1]) 308 | plt.axis('off') 309 | plt.title('transfered') 310 | 311 | plt.subplot(1, 4, 3) 312 | plt.imshow(imMask) # @TODO use color render for mask or all whilte color for the cloth area texture 313 | plt.axis('off') 314 | plt.title('mask') 315 | 316 | plt.subplot(1, 4, 4) 317 | plt.imshow(imOverlayed[:,:,:]) # @overlay with human image 318 | plt.axis('off') 319 | plt.title('target human') 320 | plt.show() 321 | 322 | return imCloth, imMask 323 | 324 | 325 | 326 | def cloth3dxfer_single(smpl_model, src_param_path, tgt_param_path, cloth_path, clothmask_path, human_path, ocloth_path, omask_path): 327 | 328 | # 1. reconstruct 3D cloth from template 329 | params_src, body, diff_cloth_body, texture, texture_v2d, face4cloth = smpl3dclothrec.cloth3drec_single(smpl_model, src_param_path, cloth_path, clothmask_path) 330 | 331 | # 2. express the displacement in vertice specific coordinate. 332 | diff_cloth_body_local = compute_displacement_at_vertex(smpl_model, body, diff_cloth_body) 333 | 334 | # 3. transfer to a new human paramters 335 | # 3.1 load the SMPL params 336 | with open(tgt_param_path, 'rb') as f: 337 | if f is None: 338 | print("cannot open", tgt_param_path), exit() 339 | params_tgt = pickle.load(f) 340 | 341 | # 3.2 construct the model 342 | cam_tgt = ProjectPoints(f = params_tgt['cam_f'], rt=params_tgt['cam_rt'], t=params_tgt['cam_t'], k=params_tgt['cam_k'], c= params_tgt['cam_c']) 343 | betas_tgt = params_tgt['betas'] 344 | n_betas_tgt = betas_tgt.shape[0] #10 345 | pose_tgt = params_tgt['pose'] # angles, 27x3 numpy 346 | 347 | # 3.3 build a new body 348 | body_tgt_sv = smpl3dclothrec.build_smplbody_surface(smpl_model, pose_tgt, betas_tgt, cam_tgt) 349 | 350 | # 3.4 build the corresponding clothed 351 | clothed3d = transfer_body2clothed(smpl_model, body_tgt_sv.r, diff_cloth_body_local) 352 | cam_tgt.v = clothed3d 353 | #cam_tgt.v = body_tgt_sv.r 354 | 355 | # 4.5 check by viewing 356 | imHuman = cv2.imread(human_path) 357 | imCloth3dWarped, imClothMask3dWarped = render_cloth(cam_tgt, texture, texture_v2d, face4cloth, imHuman) # smpl_model.f) # cam_tgt has all the information 358 | #smpl3dclothrec.show_3d_model(cam_tgt, texture, texture_v2d, face4cloth) # smpl_model.f) # cam_tgt has all the information 359 | _ = raw_input("next sample?") 360 | plt.subplot(1, 1, 1) # restore the plot section 361 | #plt.close() # not to draw in subplot() 362 | 363 | # make white background 364 | img_white = np.zeros([imCloth3dWarped.shape[0], imCloth3dWarped.shape[1], 3],dtype=np.uint8) 365 | img_white.fill(255) 366 | # or img_white[:] = 255 367 | imCloth3dWarped = img_white + imCloth3dWarped 368 | 369 | # save result 370 | if ocloth_path is not None: 371 | cv2.imwrite(ocloth_path, imCloth3dWarped) 372 | if omask_path is not None: 373 | cv2.imwrite(omask_path, imClothMask3dWarped) 374 | 375 | if __name__ == '__main__': 376 | 377 | # 1. 
command argument checking 378 | if len(sys.argv) != 3: 379 | print('usage for batch test: %s base_path dataset'% sys.argv[0]) 380 | #print('usage for test: %s base_path smpl_param clothimg maskimg'% sys.argv[0]), exit() 381 | exit() 382 | 383 | base_dir = abspath(sys.argv[1]) 384 | dataset = sys.argv[2] 385 | 386 | # 2. input and output directory check and setting 387 | # 2.1 base dir 388 | base_dir = abspath(sys.argv[1]) 389 | if not exists(base_dir): 390 | print('No such a directory for base', base_path, base_dir), exit() 391 | 392 | # 2.2.1 human image dir 393 | human_dir = base_dir + "/images/" + dataset 394 | if not exists(human_dir): 395 | print('No such a directory for human images', data_set, human_dir), exit() 396 | 397 | data_dir = base_dir + "/results/" + dataset 398 | #print(data_dir) 399 | # 2.2.2 target human info 400 | human_smpl_param_dir = data_dir + "/smpl" 401 | if not exists(human_smpl_param_dir): 402 | print('No such a directory for smpl param', smpl_param_dir), exit() 403 | # 2.2.3 source cloth 404 | cloth_dir = data_dir + "/c2dw" 405 | if not exists(cloth_dir): 406 | print('No such a directory for cloth images', cloth_dir), exit() 407 | # 2.2.4 source cloth mask 408 | cloth_mask_dir = data_dir + "/c2dwmask" 409 | if not exists(cloth_mask_dir): 410 | print('No such a directory for cloth mask', cloth_mask_dir), exit() 411 | 412 | # 2.2.4 test pair file 413 | testpair_filepath = data_dir + "/" + dataset + "_test_pairs.txt" 414 | if not exists(testpair_filepath): 415 | print('No test pair file: ', cloth_mask_dir), exit() 416 | 417 | 418 | # 2. Loading SMPL models (independent from dataset) 419 | use_neutral = False 420 | # Assumes 'models' in the 'code/' directory where this file is in. 421 | MODEL_DIR = join(abspath(dirname(__file__)), 'models') 422 | MODEL_NEUTRAL_PATH = join( 423 | MODEL_DIR, 'basicModel_neutral_lbs_10_207_0_v1.0.0.pkl') 424 | MODEL_FEMALE_PATH = join( 425 | MODEL_DIR, 'basicModel_f_lbs_10_207_0_v1.0.0.pkl') 426 | MODEL_MALE_PATH = join(MODEL_DIR, 427 | 'basicmodel_m_lbs_10_207_0_v1.0.0.pkl') 428 | 429 | if not use_neutral: 430 | # File storing information about gender 431 | #with open(join(data_dir, dataset + '_gender.csv')) as f: 432 | # genders = f.readlines() 433 | model_female = load_model(MODEL_FEMALE_PATH) 434 | model_male = load_model(MODEL_MALE_PATH) 435 | else: 436 | gender = 'neutral' 437 | smpl_model = load_model(MODEL_NEUTRAL_PATH) 438 | 439 | #_examine_smpl(model_female), exit() 440 | 441 | 442 | ''' 443 | # Load joints 444 | estj2d = np.load(join(data_dir, 'est_joints.npz'))['est_joints'] 445 | #print('est_shape:', est.shape) 446 | joints = estj2d[:2, :, idx].T 447 | ''' 448 | 449 | # 2.3. 
Output Directory 450 | ocloth_dir = data_dir + "/c3dw" 451 | if not exists(ocloth_dir): 452 | makedirs(ocloth_dir) 453 | ocloth_mask_dir = data_dir + "/c3dwmask" 454 | if not exists(ocloth_mask_dir): 455 | makedirs(ocloth_mask_dir) 456 | 457 | #smplmask_path = smplmask_dir + '/%06d_0.png'% idx 458 | #jointfile_path = smpljson_dir + '/%06d_0.json'% idx 459 | ''' 460 | smpl_model = model_female 461 | # 3D reconstruction and tranfer it to a define smpl model 462 | cloth3dxfer_single(smpl_model, smplparam_path, cloth_path, clothmask_path) 463 | 464 | ''' 465 | 466 | test_pair_lines = open(testpair_filepath).read().splitlines() 467 | test_pairs = [] 468 | 469 | for i in range(len(test_pair_lines)): 470 | # loading batch data 471 | pair = test_pair_lines[i].split() 472 | #print(pair) 473 | test_pairs.append([pair[0], pair[1]]) # 0: human 1: cloth 474 | 475 | #print(test_pairs), exit() 476 | 477 | template_smpl_param_path = './templateparam1.pkl' #### Might each cloth have different verison of template used 478 | template_jointfile_path = './templatejoints1.json' #### We have to take into account this later 479 | 480 | for i in range(len(test_pairs)): 481 | 482 | # for i in range(1, 2): 483 | # if not use_neutral: 484 | # gender = 'male' if int(genders[i]) == 0 else 'female' 485 | # if gender == 'female': 486 | smpl_model = model_female 487 | human_smpl_param_path = human_smpl_param_dir + '/' + test_pairs[i][0] + '.pkl' 488 | human_image_path = human_dir + '/' + test_pairs[i][0] + '.jpg' 489 | cloth_path = cloth_dir + '/' + test_pairs[i][1] + '.png' 490 | clothmask_path = cloth_mask_dir + '/' + test_pairs[i][1] + '.png' 491 | ocloth_path = ocloth_dir + '/' + test_pairs[i][1] + '_' + test_pairs[i][0] + '.jpg' # '.png' 492 | oclothmask_path = ocloth_mask_dir + '/' + test_pairs[i][1] + '_' + test_pairs[i][0] + '.jpg' # '.png' 493 | cloth3dxfer_single(smpl_model, template_smpl_param_path, human_smpl_param_path, cloth_path, clothmask_path, human_image_path, ocloth_path, oclothmask_path) 494 | 495 | 496 | -------------------------------------------------------------------------------- /smpl3dclothxfer_v2.py: -------------------------------------------------------------------------------- 1 | """ 2 | cloth 3d model reconstruction (smpl3dclothrecon.py) and transfer to a human model 3 | ------------------------------------------------------------------------------------------ 4 | 5 | (c) copyright 2019 heejune@seoultech.ac.kr 6 | 7 | In : used in smpl3dclothrec.py 8 | - SMPL template model params file (pkl) 9 | - 2D matched cloth image file and mask (png) 10 | used for transfering 11 | - SMPL target model params file (pkl) 12 | helper file 13 | - list.npy for re-ordering the smpl human image number to viton image number 14 | 15 | Note: re-ordering needed for SMPLify code 16 | 17 | Out: 18 | 3D warped cloth and mask (png) 19 | 20 | Note: the Texture (2D warped cloth) and related 2D vertex and face information is obtained 21 | with original SMPL and camera parameters 22 | 23 | For in-advance tesrt purpose of part 3. 
we could move the pose and apply the displacement vector 24 | we apply the pose and shape params for target user but with same texture and vertices and faces defitnion 25 | 26 | 27 | template (source: pose and shape) target (pose and shape) 28 | -------------------------------------------------------------------------- 29 | SMPL- p smpltemplate.pkl results/viton/smpl/000000.pkl 30 | camera-p smpltemplate.pkl results/viton/smpl/000000.pkl 31 | 3D body-v smpl with template param smpl with target params 32 | 3D cloth-v displacement obtained use displacemt obtained at template 33 | texture results/viton/2dwarp/00000_1.png same 34 | texture-v cam projected onto the texture same as template (not new vertices) 35 | texture-f model.f same 36 | lightening only for cloth-related vertices same 37 | 38 | 39 | """ 40 | from __future__ import print_function 41 | import smpl3dclothrec_v2 42 | import graphutil as graphutil 43 | import boundary_matching 44 | import sys 45 | from os.path import join, exists, abspath, dirname 46 | from os import makedirs 47 | import logging 48 | import cPickle as pickle 49 | import time 50 | import cv2 51 | import numpy as np 52 | import chumpy as ch 53 | from opendr.camera import ProjectPoints 54 | from smpl_webuser.serialization import load_model 55 | from smpl_webuser.verts import verts_decorated 56 | from render_model import render_model 57 | import inspect # for debugging 58 | import matplotlib.pyplot as plt 59 | from opendr.lighting import SphericalHarmonics 60 | from opendr.geometry import VertNormals, Rodrigues 61 | from opendr.renderer import TexturedRenderer 62 | import json 63 | from smpl_webuser.lbs import global_rigid_transformation 64 | 65 | _LOGGER = logging.getLogger(__name__) 66 | logging.basicConfig(level=logging.INFO) 67 | 68 | 69 | # To understand and verify the SMPL itself 70 | def _examine_smpl_template(model, detail=False): 71 | 72 | print(">> SMPL Template <<<<<<<<<<<<<<<<<<<<<<") 73 | print(type(model)) 74 | print(dir(model)) 75 | #print('kintree_table', model.kintree_table) 76 | print('pose:', model.pose) 77 | if detail: 78 | print('posedirs:', model.posedirs) 79 | print('betas:', model.betas) 80 | print('shape(model):', model.shape) 81 | if detail: 82 | print('shapedirs:', model.shapedirs) 83 | 84 | # print('bs_style:', model.bs_style) # f-m-n 85 | #print('f:', model.f) 86 | print('V template :', type(model.v_template)) 87 | print('V template :', model.v_template.shape) 88 | #print('weights:', model.weoptimize_on_jointsights) 89 | print('W type:', type(model.weights)) 90 | print('W shape:', model.weights.r.shape) 91 | if detail: 92 | print('W value:') 93 | print(model.weights.r) 94 | #parts = np.count_nonzero(model.weights.r, axis =1) 95 | parts = np.argmax(model.weights.r, axis=1) 96 | print(" :", parts.shape, parts[:6000]) 97 | 98 | #print('J:', model.J) 99 | #print('v_template:', model.v_template) 100 | #print('J_regressor:', model.J_regressor) 101 | 102 | # To understand and verify the paramters 103 | 104 | 105 | def _examine_smpl_params(params): 106 | 107 | print(type(params)) 108 | print(params.keys()) 109 | print('camera params') 110 | camera = params['cam'] 111 | print(" - type:", type(camera)) 112 | #print(" - members:", dir(camera)) 113 | print(" - cam.t:", camera.t.r) # none-zero, likely only nonzero z 114 | print(" - cam.rt:", camera.rt.r) # zero (fixed) 115 | # print(" - cam.camera_mtx:", camera.camera_mtx) # 116 | print(" - cam.k:", camera.k.r) # 117 | print(" - cam.c:", camera.c.r) # 118 | print(" - cam.f:", camera.f.r) # 119 | 120 | 
# print(params['f'].shape) # 2 121 | print('>> pose') 122 | pose = params['pose'] 123 | print("\t\ttype:", type(pose)) 124 | print('\t\tshape:', pose.shape) # 72 125 | 126 | # convert within 127 | #pose = pose % (2.0*np.pi) 128 | 129 | print('\t\tvalues (in degree):') 130 | print(pose*180.0/np.pi) # degree 131 | print('>> betas') 132 | betas = params['betas'] 133 | print('\ttype:', type(betas)) 134 | print('\tshape:', betas.shape) # 10 135 | # print('\tvalues:', params['betas']) # 10 136 | 137 | 138 | def construct_clothed3d_from_clothed2d_depth(body_sv, cam, clothed2d): 139 | 140 | # 1. get the dept for body vertex 141 | bodydepth = graphutil.build_depthmap2(body_sv.r, cam) 142 | 143 | check_depthmap = False 144 | if check_depthmap: 145 | # depth in reverse way 146 | plt.suptitle('depthmap') 147 | plt.subplot(1, 2, 1) 148 | plt.imshow(img[:, :, ::-1]) # , cmap='gray') 149 | plt.subplot(1, 2, 2) 150 | depthmap = graphutil.build_depthimage( 151 | body_sv.r, model.f, bodydepth, cam, height=h, width=w) 152 | #plt.imshow(depthmap, cmap='gray') 153 | plt.imshow(depthmap) 154 | plt.draw() 155 | plt.show() 156 | # plt.imshow(depthmap, cmap='gray_r') # the closer to camera, the brighter 157 | _ = raw_input('quit?') 158 | exit() 159 | 160 | # 2. modify the depth for clothed 161 | # @TODO 162 | 163 | # 3. unproject to 3D 164 | # uv space? pixels coordinated!! 165 | clothuvd = np.zeros(body_sv.r.shape) 166 | clothuvd[:, 0] = clothed2d[:, 0] 167 | clothuvd[:, 1] = clothed2d[:, 1] 168 | # @TODO for now simply use the same depth as body ^^; 169 | clothuvd[:, 2] = bodydepth 170 | cloth3d = cam.unproject_points(clothuvd) 171 | # sv.r = cloth3d # now the model is not body but cloth 172 | 173 | return cloth3d 174 | 175 | 176 | # calcuated the local coordinates at each vetex. 177 | # 178 | # z : normal to the vertex 179 | # x : the smallest indexed neighbor vertex based unit vector 180 | # y : the remianing axis in right handed way, ie. z x x => y 181 | def setup_vertex_local_coord(faces, vertices): 182 | 183 | # 1.1 normal vectors (1st axis) at each vertex 184 | _, axis_z = graphutil.calc_normal_vectors(vertices, faces) 185 | # 1.2 get 2nd axis 186 | axis_x = graphutil.find2ndaxis(faces, axis_z, vertices) 187 | # 1.3 get 3rd axis 188 | # matuir contribution. np.cross support row-vectorization 189 | axis_y = np.cross(axis_z[:, :], axis_x[:, :]) 190 | 191 | return axis_x, axis_y, axis_z 192 | 193 | # 194 | # reporesent the displacement (now in global coord) into local coordinates 195 | # 196 | # model: smpl mesh structure 197 | # v0 : reference vertex surface, ie. the body 198 | # v*****array: vertext index array for interest 199 | # d : displacement, ie. v = v0 + d 200 | # 201 | 202 | 203 | def compute_displacement_at_vertex(model, v0, d_global): 204 | 205 | debug = False 206 | 207 | # 1.setup local coordinate system to each vertex 208 | axis_x, axis_y, axis_z = setup_vertex_local_coord(model.f, v0) 209 | 210 | # 2. 
express displacement in 3 axises 211 | #dlocal = np.concatenate(np.dot(d, axis_x), np.dot(d, axis_y), np.dot(d, axis_z)) 212 | xl = np.sum(d_global*axis_x, axis=1) 213 | yl = np.sum(d_global*axis_y, axis=1) 214 | zl = np.sum(d_global*axis_z, axis=1) 215 | d_local = np.stack((xl, yl, zl), axis=-1) 216 | print('dlocal shape:', xl.shape, yl.shape, zl.shape, d_local.shape) 217 | 218 | if debug: # verifying d_global = xs * axis_x + ys* axis_y + z*axis_z 219 | # get global coorindate vector 220 | xg = xl[:, None]*axis_x 221 | yg = yl[:, None]*axis_y 222 | zg = zl[:, None]*axis_z 223 | dg = xg + yg + zg 224 | 225 | # check the error 226 | err = np.absolute(dg - d_global) 227 | print('d, e x:', np.amax(d_global[:, 0]), np.amax( 228 | err[:, 0]), np.mean(d_global[:, 0]), np.mean(err[:, 0])) 229 | print('d, e y:', np.amax(d_global[:, 1]), np.amax( 230 | err[:, 1]), np.mean(d_global[:, 1]), np.mean(err[:, 1])) 231 | print('d, e z:', np.amax(d_global[:, 2]), np.amax( 232 | err[:, 2]), np.mean(d_global[:, 2]), np.mean(err[:, 2])) 233 | ''' 234 | print('d 0:', np.amax(d_global[:,0]), np.amin(d_global[:,0])) 235 | print('error0:', np.amax(err[:,0]), np.amin(err[:,0])) 236 | print('d 1:', np.amax(d_global[:,1]), np.amin(d_global[:,1])) 237 | print('error1:', np.amax(err[:,1]), np.amin(err[:,1])) 238 | print('d 2:', np.amax(d_global[:,2]), np.amin(d_global[:,2])) 239 | print('error2:', np.amax(err[:,2]), np.amin(err[:,2])) 240 | ''' 241 | 242 | return d_local 243 | 244 | 245 | # 246 | # @TODO: Do this !! the most key part combining with displacement generatrion 247 | # 248 | # model : the body surface structure 249 | # body : body surface vertices 250 | # vi4cloth: vertex index for the cloth surface 251 | # d : displacement vector in local coordinate 252 | # 253 | # def transfer_body2clothed(cam_tgt, betas_tgt, n_betas_tgt, pose_tgt, v4cloth, d): 254 | def transfer_body2clothed(model, body, d_local): 255 | 256 | # 1.setup local coordinate system to each vertex 257 | axis_x, axis_y, axis_z = setup_vertex_local_coord(model.f, body) 258 | 259 | # 2. express local to global 260 | # 2.1 select vectices under interest 261 | #axis_x, axis_y, axis_z = axis_x[vi4cloth], axis_y[vi4cloth], axis_z[vi4cloth] 262 | # 2.2 displacement in global coordinate 263 | xg = (d_local[:, 0])[:, None]*axis_x 264 | yg = (d_local[:, 1])[:, None]*axis_y 265 | zg = (d_local[:, 2])[:, None]*axis_z 266 | dg = xg + yg + zg 267 | 268 | # 3. adding them to the base/body vertices 269 | clothed = body + dg 270 | 271 | return clothed 272 | 273 | 274 | # display 3d model 275 | def render_cloth(cam, _texture, texture_v2d, faces, imHuman): 276 | 277 | #h, w = imTexture.shape[:2] 278 | h_ext, w = _texture.shape[:2] # full body 279 | h, _ = imHuman.shape[:2] # half body 280 | 281 | texture = _texture[:, :, :] 282 | 283 | # 1. texture rendering 284 | dist = 20.0 285 | cloth_renderer = smpl3dclothrec_v2.build_texture_renderer(cam, cam.v, faces, texture_v2d, faces, 286 | texture[::-1, :, :], w, h_ext, 1.0, near=0.5, far=20 + dist) 287 | imCloth = (cloth_renderer.r * 255.).astype('uint8') 288 | imCloth = imCloth[:h, :, ::-1] 289 | 290 | # 2. mask generation 291 | im3CBlack = np.zeros([h, w, 3], dtype=np.uint8) 292 | imModel = (render_model( 293 | cam.v, faces, w, h, cam, far=20 + dist, img=im3CBlack) * 255.).astype('uint8') 294 | imMask = cv2.cvtColor(imModel, cv2.COLOR_BGR2GRAY) # gray silhouette 295 | imMask[imMask > 0] = 255 # binary (0, 1) 296 | 297 | # 3. 
image overlay to check result 298 | imBG = imHuman[:, :, ::-1].astype('float32')/255.0 299 | overlay_renderer = smpl3dclothrec_v2.build_texture_renderer(cam, cam.v, faces, texture_v2d, faces, 300 | texture[::-1, :, :], w, h, 1.0, near=0.5, far=20 + dist, background_image=imBG) 301 | imOverlayed = overlay_renderer.r.copy() 302 | 303 | # plt.figure() 304 | plt.subplot(1, 4, 1) 305 | plt.axis('off') 306 | plt.imshow(texture[:h, :, ::-1]) 307 | plt.title('texture') 308 | 309 | plt.subplot(1, 4, 2) 310 | plt.imshow(imCloth[:, :, ::-1]) 311 | plt.axis('off') 312 | plt.title('transfered') 313 | 314 | plt.subplot(1, 4, 3) 315 | # @TODO use color render for mask or all whilte color for the cloth area texture 316 | plt.imshow(imMask) 317 | plt.axis('off') 318 | plt.title('mask') 319 | 320 | plt.subplot(1, 4, 4) 321 | plt.imshow(imOverlayed[:, :, :]) # @overlay with human image 322 | plt.axis('off') 323 | plt.title('target human') 324 | plt.show() 325 | 326 | return imCloth, imMask 327 | 328 | 329 | def cloth3dxfer_single(smpl_model, src_param_path, tgt_param_path, cloth_path, clothmask_path, human_path, human_segm_path, ocloth_path, omask_path): 330 | 331 | # 1. reconstruct 3D cloth from template 332 | params_src, body, diff_cloth_body, texture, texture_v2d, face4cloth = smpl3dclothrec_v2.cloth3drec_single( 333 | smpl_model, src_param_path, cloth_path, clothmask_path, human_path, human_segm_path) 334 | 335 | # 2. express the displacement in vertice specific coordinate. 336 | diff_cloth_body_local = compute_displacement_at_vertex( 337 | smpl_model, body, diff_cloth_body) 338 | 339 | # 3. transfer to a new human paramters 340 | # 3.1 load the SMPL params 341 | with open(tgt_param_path, 'rb') as f: 342 | if f is None: 343 | print("cannot open", tgt_param_path), exit() 344 | params_tgt = pickle.load(f) 345 | 346 | # 3.2 construct the model 347 | cam_tgt = ProjectPoints(f=params_tgt['cam_f'], rt=params_tgt['cam_rt'], 348 | t=params_tgt['cam_t'], k=params_tgt['cam_k'], c=params_tgt['cam_c']) 349 | betas_tgt = params_tgt['betas'] 350 | n_betas_tgt = betas_tgt.shape[0] # 10 351 | pose_tgt = params_tgt['pose'] # angles, 27x3 numpy 352 | 353 | # 3.3 build a new body 354 | body_tgt_sv = smpl3dclothrec_v2.build_smplbody_surface( 355 | smpl_model, pose_tgt, betas_tgt, cam_tgt) 356 | 357 | # 3.4 build the corresponding clothed 358 | clothed3d = transfer_body2clothed( 359 | smpl_model, body_tgt_sv.r, diff_cloth_body_local) 360 | cam_tgt.v = clothed3d 361 | #cam_tgt.v = body_tgt_sv.r 362 | 363 | # 4.5 check by viewing 364 | imHuman = cv2.imread(human_path) 365 | 366 | # smpl_model.f) # cam_tgt has all the information 367 | imCloth3dWarped, imClothMask3dWarped = render_cloth( 368 | cam_tgt, texture, texture_v2d, face4cloth, imHuman) 369 | # smpl3dclothrec.show_3d_model(cam_tgt, texture, texture_v2d, face4cloth) # smpl_model.f) # cam_tgt has all the information 370 | _ = raw_input("next sample?") 371 | plt.subplot(1, 1, 1) # restore the plot section 372 | # plt.close() # not to draw in subplot() 373 | 374 | # make white background 375 | """img_white = np.zeros( 376 | [imCloth3dWarped.shape[0], imCloth3dWarped.shape[1], 3], dtype=np.uint8) 377 | img_white.fill(255) 378 | # or img_white[:] = 255 379 | imCloth3dWarped = img_white + imCloth3dWarped""" 380 | 381 | # save result 382 | if ocloth_path is not None: 383 | cv2.imwrite(ocloth_path, imCloth3dWarped) 384 | if omask_path is not None: 385 | cv2.imwrite(omask_path, imClothMask3dWarped) 386 | 387 | 388 | if __name__ == '__main__': 389 | 390 | # 1. 
command argument checking 391 | if len(sys.argv) != 3: 392 | print('usage for batch test: %s base_path dataset' % sys.argv[0]) 393 | #print('usage for test: %s base_path smpl_param clothimg maskimg'% sys.argv[0]), exit() 394 | exit() 395 | 396 | base_dir = abspath(sys.argv[1]) 397 | dataset = sys.argv[2] 398 | 399 | # 2. input and output directory check and setting 400 | # 2.1 base dir 401 | base_dir = abspath(sys.argv[1]) 402 | if not exists(base_dir): 403 | print('No such a directory for base', base_path, base_dir), exit() 404 | 405 | # 2.2.1 human image dir 406 | human_dir = base_dir + "/images/" + dataset 407 | if not exists(human_dir): 408 | print('No such a directory for human images', 409 | data_set, human_dir), exit() 410 | 411 | data_dir = base_dir + "/results/" + dataset 412 | # print(data_dir) 413 | # 2.2.2 target human info 414 | human_smpl_param_dir = data_dir + "/smpl" 415 | if not exists(human_smpl_param_dir): 416 | print('No such a directory for smpl param', smpl_param_dir), exit() 417 | # 2.2.3 source cloth 418 | cloth_dir = data_dir + "/c2dw" 419 | if not exists(cloth_dir): 420 | print('No such a directory for cloth images', cloth_dir), exit() 421 | # 2.2.4 source cloth mask 422 | cloth_mask_dir = data_dir + "/c2dwmask" 423 | if not exists(cloth_mask_dir): 424 | print('No such a directory for cloth mask', cloth_mask_dir), exit() 425 | 426 | # 2.2.5 human segmentation dir 427 | human_segm_dir = data_dir + "/segmentation" 428 | if not exists(human_segm_dir): 429 | print('No such a directory for human segmentation', 430 | human_segm_dir), exit() 431 | 432 | # 2.2.4 test pair file 433 | testpair_filepath = data_dir + "/" + dataset + "_test_pairs.txt" 434 | if not exists(testpair_filepath): 435 | print('No test pair file: ', cloth_mask_dir), exit() 436 | 437 | # 2. Loading SMPL models (independent from dataset) 438 | use_neutral = False 439 | # Assumes 'models' in the 'code/' directory where this file is in. 440 | MODEL_DIR = join(abspath(dirname(__file__)), 'models') 441 | MODEL_NEUTRAL_PATH = join( 442 | MODEL_DIR, 'basicModel_neutral_lbs_10_207_0_v1.0.0.pkl') 443 | MODEL_FEMALE_PATH = join( 444 | MODEL_DIR, 'basicModel_f_lbs_10_207_0_v1.0.0.pkl') 445 | MODEL_MALE_PATH = join(MODEL_DIR, 446 | 'basicmodel_m_lbs_10_207_0_v1.0.0.pkl') 447 | 448 | if not use_neutral: 449 | # File storing information about gender 450 | # with open(join(data_dir, dataset + '_gender.csv')) as f: 451 | # genders = f.readlines() 452 | model_female = load_model(MODEL_FEMALE_PATH) 453 | model_male = load_model(MODEL_MALE_PATH) 454 | else: 455 | gender = 'neutral' 456 | smpl_model = load_model(MODEL_NEUTRAL_PATH) 457 | 458 | #_examine_smpl(model_female), exit() 459 | 460 | ''' 461 | # Load joints 462 | estj2d = np.load(join(data_dir, 'est_joints.npz'))['est_joints'] 463 | #print('est_shape:', est.shape) 464 | joints = estj2d[:2, :, idx].T 465 | ''' 466 | 467 | # 2.3. 
Output Directory 468 | ocloth_dir = data_dir + "/c3dw" 469 | if not exists(ocloth_dir): 470 | makedirs(ocloth_dir) 471 | ocloth_mask_dir = data_dir + "/c3dwmask" 472 | if not exists(ocloth_mask_dir): 473 | makedirs(ocloth_mask_dir) 474 | 475 | #smplmask_path = smplmask_dir + '/%06d_0.png'% idx 476 | #jointfile_path = smpljson_dir + '/%06d_0.json'% idx 477 | ''' 478 | smpl_model = model_female 479 | # 3D reconstruction and tranfer it to a define smpl model 480 | cloth3dxfer_single(smpl_model, smplparam_path, cloth_path, clothmask_path) 481 | 482 | ''' 483 | 484 | test_pair_lines = open(testpair_filepath).read().splitlines() 485 | test_pairs = [] 486 | 487 | for i in range(len(test_pair_lines)): 488 | # loading batch data 489 | pair = test_pair_lines[i].split() 490 | # print(pair) 491 | test_pairs.append([pair[0], pair[1]]) # 0: human 1: cloth 492 | 493 | #print(test_pairs), exit() 494 | 495 | # Might each cloth have different verison of template used 496 | template_smpl_param_path = './templateparam1.pkl' 497 | # We have to take into account this later 498 | template_jointfile_path = './templatejoints1.json' 499 | 500 | for i in range(len(test_pairs)): 501 | 502 | # for i in range(1, 2): 503 | # if not use_neutral: 504 | # gender = 'male' if int(genders[i]) == 0 else 'female' 505 | # if gender == 'female': 506 | smpl_model = model_female 507 | human_smpl_param_path = human_smpl_param_dir + \ 508 | '/' + test_pairs[i][0] + '.pkl' 509 | human_image_path = human_dir + '/' + test_pairs[i][0] + '.jpg' 510 | human_segm_path = human_segm_dir + '/' + test_pairs[i][0] + '.png' 511 | cloth_path = cloth_dir + '/' + test_pairs[i][1] + '.png' 512 | clothmask_path = cloth_mask_dir + '/' + test_pairs[i][1] + '.png' 513 | ocloth_path = ocloth_dir + '/' + \ 514 | test_pairs[i][1] + '_' + test_pairs[i][0] + '.png' # '.png' 515 | oclothmask_path = ocloth_mask_dir + '/' + \ 516 | test_pairs[i][1] + '_' + test_pairs[i][0] + '.jpg' # '.png' 517 | cloth3dxfer_single(smpl_model, template_smpl_param_path, human_smpl_param_path, 518 | cloth_path, clothmask_path, human_image_path, human_segm_path, ocloth_path, oclothmask_path) 519 | -------------------------------------------------------------------------------- /boundary_matching.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | 1. TPS Transform demo 4 | 2. Correspondences demo 5 | 6 | 7 | """ 8 | from __future__ import print_function 9 | import math 10 | import random 11 | import time 12 | import sys 13 | import os 14 | import numpy as np 15 | import cv2 16 | import matplotlib.pyplot as plt 17 | import cPickle as pickle 18 | #from matplotlib.colors import ListedColormap, BoundaryNorm 19 | 20 | labeldict = {"background": 0, 21 | "hat" :1, 22 | "hair":2, 23 | "sunglass":3, # 3 24 | "upper-clothes":4, # 4 25 | "skirt":5 , # 5 26 | "pants":6 , # 6 27 | "dress":7 , # 7 28 | "belt": 8 , # 8 29 | "left-shoe": 9, # 9 30 | "right-shoe": 10, # 10 31 | "face": 11, # 11 32 | "left-leg": 12, # 12 33 | "right-leg": 13, # 13 34 | "left-arm": 14,# 14 35 | "right-arm": 15, # 15 36 | "bag": 16, # 16 37 | "scarf": 17 # 17 38 | } 39 | 40 | def body2human(): 41 | """ 42 | 1. load SMPL model 43 | 44 | 2. project to 2-D 45 | 46 | 3. matching points (vertices) 47 | 48 | 4. estimated 2D TPS 49 | 50 | 5. apply TPS algorithm 51 | 52 | 6. get vertices positions 53 | 54 | 7. display the vertices onto the images 55 | 56 | 8. get the cloth vertices 57 | 58 | 9. 
59 | """ 60 | pass 61 | 62 | def estimateTPS(srcPts, tgtPts, regulation = 0): 63 | """ 64 | srcPoints: numpy array of 1x-1x2 65 | tgtPoints: numpy array of 1x-1x2 66 | regulation : tolerance? 67 | """ 68 | # 4.1 create TPS warper 69 | tps = cv2.createThinPlateSplineShapeTransformer(regulation) 70 | 71 | # 1. corresponding points 72 | matches = list() 73 | for i in range(srcPts.shape[1]): 74 | matches.append(cv2.DMatch(i,i,0)) 75 | 76 | #print('len of match:', len(matches)) 77 | 78 | # 2. estimate TPS paramters using correspondings 79 | # @TODO: some problem: points mapping is fine, but image not ^^;; 80 | # https://github.com/opencv/opencv/issues/7084 81 | tps.estimateTransformation(srcPts, tgtPts, matches) 82 | regParam= tps.getRegularizationParameter() 83 | print('TPS param: reg', regParam) 84 | #print('TPS param:',tps.tpsParameters) 85 | 86 | return tps 87 | 88 | 89 | def testTPS(): 90 | 91 | h, w = 640, 480 92 | srcimg = np.zeros([h,w], dtype = 'uint8') 93 | tgtimg = np.zeros([h,w], dtype = 'uint8') 94 | # body-like 95 | cv2.ellipse(srcimg, (w//2, h//2), (w//4, h//3), 0.0, 0.0, 96 | 360.0, 128, -1) 97 | # cloth-like 98 | cv2.ellipse(tgtimg, (w//2, h//2), (w//3, h//3), 0.0, 0.0, 99 | 360.0, 128, -1) 100 | 101 | 102 | # 2. get the corresponding points 103 | srcPoints = [] 104 | tgtPoints = [] 105 | 106 | #srcimg2 = srcimg.copy() 107 | #tgtimg2 = tgtimg.copy() 108 | 109 | step = 20 110 | markersize = 5 111 | for y in range(0, h, step): 112 | c = np.count_nonzero(srcimg[y, :]) 113 | if c == 0: 114 | continue 115 | x1 = np.argmax(srcimg[y, :] !=0, axis=0) 116 | x2 = x1 + c 117 | print(x1, x2) 118 | srcPoints.append([y, x1]) 119 | srcPoints.append([y, x2]) 120 | 121 | # target points 122 | c = np.count_nonzero(tgtimg[y, :]) 123 | x1 = np.argmax(tgtimg[y, :] !=0, axis=0) 124 | x2 = x1 + c 125 | print(x1, x2) 126 | tgtPoints.append([y, x1]) 127 | tgtPoints.append([y, x2]) 128 | 129 | 130 | for i in range(len(srcPoints)): 131 | cv2.circle(srcimg, tuple(srcPoints[i][::-1]), markersize, 255, -1) 132 | cv2.circle(tgtimg, tuple(tgtPoints[i][::-1]), markersize, 255, -1) 133 | 134 | # grid 135 | for y in range(step, h, step): 136 | srcimg[y-1:y+1,:] = 255 137 | tgtimg[y-1:y+1,:] = 255 138 | for x in range(step, w, step): 139 | srcimg[:,x-1:x+1] = 255 140 | tgtimg[:,x-1:x+1] = 255 141 | 142 | 143 | plt.subplot(1,3,1) 144 | plt.imshow(srcimg[:,:], cmap='gray') 145 | plt.title('src') 146 | plt.subplot(1,3,2) 147 | plt.imshow(tgtimg[:,:], cmap='gray') 148 | plt.title('target') 149 | #plt.show() 150 | 151 | # 2. create TPS warper 152 | tps = cv2.createThinPlateSplineShapeTransformer() 153 | 154 | # 3. corresponding points 155 | srcPts = np.array(srcPoints,np.float32) 156 | srcPts = srcPts.reshape(1,-1,2) 157 | tgtPts = np.array(tgtPoints,np.float32) 158 | tgtPts = tgtPts.reshape(1,-1,2) 159 | matches = list() 160 | for i in range(len(srcPoints)): 161 | matches.append(cv2.DMatch(i,i,0)) 162 | 163 | # 4. estimate TPS paramters using correspondings 164 | # @TODO: some problem: points mapping is fine, but image not ^^;; 165 | # https://github.com/opencv/opencv/issues/7084 166 | 167 | reverse = False 168 | 169 | if reverse: 170 | param = tps.estimateTransformation(tgtPts, srcPts, matches) 171 | else: 172 | param = tps.estimateTransformation(srcPts, tgtPts, matches) 173 | 174 | 175 | # 5. 
check points 176 | if reverse: 177 | estTgtPts = tps.applyTransformation(srcPts) 178 | else: 179 | estTgtPts = tps.applyTransformation(srcPts) 180 | print('shapes:', srcPts.shape, tgtPts.shape, estTgtPts[1].shape) 181 | 182 | if reverse: 183 | print(' tgt est src') 184 | for i in range(len(srcPoints)): 185 | print(tgtPts[0, i, :], "=>", estTgtPts[1][0, i, :], ":", srcPts[0, i,:]) 186 | else: 187 | print(' src tgt gt') 188 | for i in range(len(srcPoints)): 189 | print(srcPts[0, i, :], "=>", estTgtPts[1][0, i, :], ":", tgtPts[0, i,:]) 190 | 191 | 192 | # 6. warp image 193 | warpedimg = tps.warpImage(srcimg) 194 | for i in range(len(srcPoints)): 195 | cv2.circle(warpedimg, tuple(estTgtPts[1][0, i, ::-1]), markersize, 255, -1) 196 | 197 | plt.subplot(1,3,3) 198 | plt.imshow(warpedimg[:,:], cmap='gray') 199 | plt.title('warped') 200 | plt.show() 201 | # 6. warp points 202 | 203 | # 204 | # find the index of cloest pixel at contours of 2D image from the SMPL boundary vertex 205 | # 206 | def find_nearest_contourpixel(pt, contour): 207 | """ pt = (x,y) source point 208 | contour : list of (x, y) target 209 | """ 210 | """ 211 | import sys 212 | mindist = sys.float_info.max 213 | minidx = -1 214 | for i in range(contour.shape[0]): 215 | dx = (pt[0] - contour[i, 0, 0]) 216 | dy = (pt[1] - contour[i, 0, 1]) 217 | dist = dx*dx + dy*dy 218 | if dist < mindist: 219 | mindist = dist 220 | minidx = i 221 | """ 222 | dists = (contour[:, 0, 0] -pt[0])**2 + (contour[:,0, 1] -pt[1])**2 223 | minidx = np.argmin(dists) 224 | mindist = dists[minidx] 225 | 226 | return minidx, math.sqrt(mindist) 227 | # 228 | # find the index of cloeset vertex 229 | # 230 | def find_nearest_smpl(pt, vertices): 231 | 232 | """ 233 | import sys 234 | mindist = sys.float_info.max 235 | minidx = -1 236 | # @TODO vectorization 237 | for i in range(vertices.shape[0]): 238 | dx = pt[0] - vertices[i][0] 239 | dy = pt[1] - vertices[i][1] 240 | dist = dx*dx + dy*dy 241 | if dist < mindist: 242 | mindist = dist 243 | minidx = i 244 | """ 245 | dists = (vertices[:,0] -pt[0])**2 + (vertices[:,1] -pt[1])**2 246 | minidx = np.argmin(dists) 247 | mindist = dists[minidx] 248 | #print(mindist) 249 | 250 | return minidx, math.sqrt(mindist) 251 | 252 | 253 | # 254 | # extract contour pixels in the mask 255 | # 256 | # mask : binary input (0 or non zeor) 257 | # annotation : flag for showing the boundary or not (mostly debugging purpose) 258 | # 259 | def extractContours(mask, annotation = False): 260 | 261 | bDebug = False 262 | 263 | img_allcontours = None 264 | # 3.1 extract contours 265 | #_, 266 | contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE) 267 | if bDebug: 268 | print(len(contours)) # list 269 | for i in range(len(contours)): 270 | print(i, ";", len(contours[i])) 271 | print(i, ";", type(contours[i]), contours[i].shape) 272 | 273 | print(hierarchy) # 3 dimentional list ( different from manual) 274 | #cv2.drawContours(img, contours, -1, (0,255,0), 3) # To draw all the contours in an image 275 | #cnt = contours[0] But most of the time, below method will be useful: 276 | #cv2.drawContours(img, [cnt], 0, (0,255,0), 3) 277 | 278 | outtest = -1 279 | maxval = -1 280 | for i in range(len(contours)): 281 | if hierarchy[0][i][3] == -1: 282 | if maxval < len(contours[i]): 283 | outtest, maxval = i, len(contours[i]) 284 | if bDebug: 285 | print('the outest contour:', outtest, maxval) 286 | 287 | if annotation == True: 288 | # 3.2. 
draw all contours (outer and interiors with different colors) 289 | img_allcontours = mask.copy() 290 | for i in range(len(contours)): 291 | color = 255 if hierarchy[0][i][3] == -1 else 180 292 | cv2.drawContours(img_allcontours, contours, i, color, 1) # To draw an individual contour, 293 | 294 | return contours, outtest, img_allcontours 295 | 296 | 297 | # 298 | # matching mask boundary to projected edge vertices 299 | # 300 | # mask: bi-level (0: bg, 1: fg) 301 | # edge_vertices: vertex coordinates in numpy [Nx2] 302 | # neck (x,y) is used for cutting the head part 303 | # return : list of (edges, contours, dist) 304 | # 305 | # algorithm 306 | # 307 | # for matching, 308 | # - mask boundary to vertices 309 | # - the reverse is difficult : TODO 310 | # - use the distance as a measure: TODO 311 | # 312 | # To handle the head and body separately, 313 | # 1) separate the mask into above and below the neck 314 | # 2) b_h: smpl body boundary vertices of head 315 | # b_b: smpl body boundary vertices of body 316 | # m_h: 2d segmentation boundary of head 317 | # m_b: 2d segmentation boundary of body 318 | # 319 | # 320 | # 321 | def boundary_match (mask, edge_vertices = None, top_y = None, step = 5) : 322 | 323 | # 1. extract contours from mask 324 | contours, outtest, img_allcontours = extractContours(mask, annotation=True) 325 | 326 | nearest_list = [] 327 | edge2contour = False # direction of matching 328 | ext_contour = contours[outtest] 329 | 330 | 331 | # 2. boundary matching (assuming we have quite good joint matching) 332 | if edge2contour: 333 | # case 1: from body to mask 334 | if edge_vertices is not None: 335 | for i in range(edge_vertices.shape[0]): 336 | pt = (edge_vertices[i,0], edge_vertices[i,1]) 337 | minidx, dist = find_nearest_contourpixel(pt, ext_contour) 338 | nearest_list.append([(int(pt[0]), int(pt[1])), (ext_contour[minidx, 0,0], ext_contour[minidx, 0,1]), dist]) 339 | #if i % 25 == 0: 340 | # print(pt, coord, dist) 341 | else: 342 | print("need edge vertices") 343 | return 344 | 345 | else: 346 | # case 2: from mask contour to body boundary vertices 347 | if edge_vertices is not None: 348 | ext_contour = contours[outtest] 349 | # 1. adding matches for body 350 | for i in range(0, ext_contour.shape[0], step): #47) : 351 | if ext_contour[i, 0, 1] > top_y : # only below the neck, i.e. body 352 | minidx, dist = find_nearest_smpl((ext_contour[i, 0, 0], ext_contour[i, 0, 1]), edge_vertices) 353 | nearest_list.append([(int(edge_vertices[minidx,0]), int(edge_vertices[minidx,1])), (ext_contour[i, 0, 0], ext_contour[i,0, 1]), dist]) 354 | #print('diff:', i, edge_vertices[minidx,:] - ext_contour[i, 0, :]) 355 | 356 | # 2. add head part 357 | head_idxes = np.argwhere(edge_vertices[:,1] < top_y) # head part 358 | for i in range(0, head_idxes.shape[0], step): # all? 359 | idx = head_idxes[i] 360 | pt = (int(edge_vertices[idx,0]), int(edge_vertices[idx,1])) 361 | nearest_list.append([pt, pt, 0]) # same position, i.e., pinned so it does not move 362 | #print('head part:', i, pt) # edge_vertices[i,:]) 363 | 364 | else: # just for testing purpose 365 | deviation = 30 # ! sensitive 366 | for i in range(0, ext_contour.shape[0], step): # sampling the real boundary 367 | dx = random.randint(-deviation, +deviation) 368 | dy = random.randint(-deviation, +deviation) 369 | pt = (ext_contour[i, 0, 0] + dx, ext_contour[i, 0, 1] + dy) # randomly offset sample point 
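                # (added note) this test branch jitters each sampled boundary
                # pixel by up to +/- 'deviation' pixels and then snaps it back
                # to the nearest contour pixel, so the nearest-point matching
                # can be sanity-checked without real SMPL edge vertices.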
370 | minidx, dist = find_nearest_contourpixel(pt, ext_contour) 371 | nearest_list.append([(int(pt[0]), int(pt[1])), (ext_contour[minidx, 0,0], ext_contour[minidx, 0,1]), dist]) 372 | 373 | return nearest_list, img_allcontours 374 | 375 | 376 | # 377 | # head part 378 | # TODO: Can we combine head and body part transform? 379 | # Maybe it is possible we can pin the boundary of region not to move into different region 380 | # 381 | def boundary_match_head (mask, edge_vertices = None, top_y = None, step = 5) : 382 | 383 | nearest_list = [] 384 | 385 | # 1. extract contours from mask 386 | contours, outtest, img_allcontours = extractContours(mask, annotation=True) 387 | ext_contour = contours[outtest] 388 | 389 | # 2. boundary matching (assuming we have quite good joint matching 390 | # case 2: from mask contour to body boundary vertices 391 | # 1. adding matches for body 392 | for i in range(0, ext_contour.shape[0], step): #47) : 393 | if ext_contour[i, 0, 1] <= top_y : # above the neck 394 | minidx, dist = find_nearest_smpl((ext_contour[i, 0, 0], ext_contour[i, 0, 1]), edge_vertices) 395 | nearest_list.append([(int(edge_vertices[minidx,0]), int(edge_vertices[minidx,1])), (ext_contour[i, 0, 0], ext_contour[i,0, 1]), dist]) 396 | #print('diff:', i, edge_vertices[minidx,:] - ext_contour[i, 0, :]) 397 | 398 | # 2. add body part 399 | body_idxes = np.argwhere(edge_vertices[:,1] > top_y) # body part FIXME: USE PARTMAP!! 400 | for i in range(0, body_idxes.shape[0]) :#, step): # all? 401 | idx = body_idxes[i] 402 | pt = (int(edge_vertices[idx,0]), int(edge_vertices[idx,1])) 403 | nearest_list.append([pt, pt, 0]) # same position, i.e., pinning not to move it. 404 | #print('body part:', i, pt) # edge_vertices[i,:]) 405 | 406 | return nearest_list, img_allcontours 407 | 408 | def tpsMorph(img_org, mask, edge_vertices = None): 409 | 410 | """ 411 | img_org: 412 | mask: bi-level (o: bg, 1:fg) 413 | edge_vertices: verticies coordinates in numpy [Nx2] 414 | """ 415 | 416 | numplot = 1 + 1 # rgb, contour 417 | # 1. boudnary matching 418 | nearest_list, img_allcontours = boundary_match(mask, edge_vertices) 419 | 420 | # visualization 421 | img_cor2 = mask.copy() 422 | for i in range(len(nearest_list)): 423 | cv2.drawMarker(img_cor2, nearest_list[i][0], 255, markerType=cv2.MARKER_STAR, markerSize=5) # source 424 | cv2.drawMarker(img_cor2, nearest_list[i][1], 255, markerType=cv2.MARKER_CROSS,markerSize=5) # dest 425 | cv2.line(img_cor2, nearest_list[i][0], nearest_list[i][1], 255) # line from src to dest 426 | #cv2.drawContours(img_cor2, contours, outtest, 255, 1) # To draw an individual contour, say 4th 427 | numplot = numplot +1 428 | 429 | # 2. estimate transform 430 | # 2.1 reformat corresponding points 431 | npts = len(nearest_list) 432 | srcPts = np.zeros([1, npts, 2], dtype ='float32') 433 | tgtPts = np.zeros([1, npts, 2], dtype ='float32') 434 | for i in range(npts): 435 | srcPts[0,i,:] = nearest_list[i][0] 436 | tgtPts[0,i,:] = nearest_list[i][1] 437 | 438 | # 2.2 estimate TPS params 439 | tps = estimateTPS(srcPts, tgtPts, 20) 440 | 441 | # 3. 
apply it to check it works 442 | estTgtPts = tps.applyTransformation(srcPts) 443 | #print('estTgtPts:', estTgtPts) 444 | 445 | # 3.2 check result 446 | print("distance average: ", estTgtPts[0]) 447 | print(' src => result : tgt') 448 | for i in range(srcPts.shape[1]): 449 | print(srcPts[0, i, :], "=>", estTgtPts[1][0, i, :], ":", tgtPts[0, i,:]) 450 | 451 | # 3.3 apply to all vertices 452 | img_cor3 = mask.copy() 453 | if edge_vertices is not None: 454 | npts = edge_vertices.shape[0] 455 | srcPts = edge_vertices.astype('float32') 456 | srcPts = srcPts.reshape(1, -1, 2) 457 | estTgtPts = tps.applyTransformation(srcPts) 458 | ''' 459 | srcPts2 = np.zeros([1, npts, 2], dtype ='float32') 460 | for i in range(npts): 461 | srcPts2[0,i,:] = edge_vertices[i, :] 462 | print("all edge vertices num:", srcPts.shape, srcPts2.shape) 463 | print("all edge vertices type:", srcPts.dtype, srcPts2.dtype) 464 | print("copied ", srcPts) 465 | print("reshaped", srcPts) 466 | print('estTgtPts:', estTgtPts) 467 | ''' 468 | for i in range(srcPts.shape[1]): 469 | cv2.drawMarker(img_cor3, (int(estTgtPts[1][0,i,0]), int(estTgtPts[1][0,i,1])), 255, markerType=cv2.MARKER_STAR, markerSize=5) # source 470 | 471 | else: 472 | for i in range(srcPts.shape[1]): 473 | cv2.drawMarker(img_cor3, (int(estTgtPts[1][0,i,0]), int(estTgtPts[1][0,i,1])), 255, markerType=cv2.MARKER_STAR, markerSize=5) # source 474 | #cv2.drawMarker(img_cor3, (int(tgtPts[0,i,0]), int(tgtPts[0,i,1])), 255, markerType=cv2.MARKER_CROSS,markerSize=1) # dest 475 | 476 | numplot = numplot +1 477 | ''' 478 | plt.imshow(img_cor2) # show correspondings 479 | plt.title('matching') 480 | 481 | #imwrite('contour.png', mask) 482 | ''' 483 | 484 | # 4. display all results 485 | plt.suptitle('correspondence') 486 | plt.subplot(1,numplot,1) 487 | plt.imshow(img_org[:,:,::-1]) # show all masks 488 | plt.title('input') 489 | 490 | plt.subplot(1,numplot,2) 491 | plt.imshow(img_allcontours) # show all masks 492 | plt.title('contours') 493 | 494 | plt.subplot(1,numplot,3) 495 | plt.imshow(img_cor2) # show correspondings 496 | plt.title('matching') 497 | 498 | plt.subplot(1,numplot,4) 499 | plt.imshow(img_cor3) # show correspondings 500 | plt.title('Morphed Points') 501 | plt.show() 502 | 503 | def testMorph(img_idx): 504 | """ 505 | get the contour of segmentaion mask 506 | define a shape and matching the nearest contour pixel 507 | """ 508 | # 1. read images 509 | infile = "../images/10k/dataset10k_%04d.jpg"%img_idx 510 | maskfile = "../results/10k/segmentation/10kgt_%04d.png"%img_idx 511 | img_org = cv2.imread(infile, cv2.IMREAD_UNCHANGED) 512 | mask = cv2.imread(maskfile, cv2.IMREAD_UNCHANGED) 513 | if img_org is None: 514 | print("cannot open", infile) 515 | exit() 516 | if mask is None: 517 | print("cannot open", maskfile) 518 | exit() 519 | 520 | # 2. 
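# (Aside) The edge-vertices pickle read below is produced by an upstream step
# of this pipeline, not by this file. A sketch of the assumed write side, if
# the array is the [Nx2] image-plane projection of SMPL boundary vertices
# (the function name is ours):

import pickle
import numpy as np

def save_edge_vertices_sketch(edge_vertices, img_idx):
    assert edge_vertices.ndim == 2 and edge_vertices.shape[1] == 2
    with open('edge_vertices_%04d.pkl' % img_idx, 'wb') as f:
        pickle.dump(edge_vertices.astype(np.int32), f)

# Resuming step 2: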
edge vertices file (pre-calcuated) 521 | use_edge_vertices = True 522 | edge_vertices = None 523 | if use_edge_vertices: 524 | 525 | edge_vertices_path ='edge_vertices_%04d.pkl'%img_idx 526 | with open(edge_vertices_path, 'rb') as f: 527 | edge_vertices = pickle.load(f) 528 | 529 | print(type(edge_vertices)) 530 | print(edge_vertices.shape) 531 | print(np.amax(edge_vertices[:,0])) 532 | print(np.amax(edge_vertices[:,1])) 533 | 534 | ''' 535 | #img_cor3 = mask.copy() 536 | img_cor3 = img_org.copy() 537 | for i in range(edge_vertices.shape[0]): 538 | #print(edge_vertices[i]) 539 | cv2.drawMarker(img_cor3, (edge_vertices[i,0], edge_vertices[i,1]), 255, markerType=cv2.MARKER_STAR, markerSize=3) # source 540 | plt.imshow(img_cor3[:,:,::-1]) 541 | plt.show() 542 | exit() 543 | ''' 544 | 545 | ''' 546 | img_org = cv2.imread('in_%d.png'%img_idx, cv2.IMREAD_UNCHANGED) 547 | mask = cv2.imread('gt_%d.png'%img_idx, cv2.IMREAD_UNCHANGED) 548 | ''' 549 | # 2. pre-processing 550 | mask[mask == labeldict['bag']] = 0 # remove bag 551 | if True: # proprocessing 552 | if img_idx == 0: 553 | mask[500:,190] = 0 554 | #mask[200:,106] = 0 555 | elif img_idx == 1: 556 | mask[500:,220] = 0 557 | #mask[180:,120] = 0 558 | 559 | tpsMorph(img_org, mask, edge_vertices) 560 | 561 | if __name__ == '__main__': 562 | 563 | testMorph(1) 564 | #testTPS() 565 | 566 | 567 | -------------------------------------------------------------------------------- /smpltemplate.py: -------------------------------------------------------------------------------- 1 | """ 2 | SMPL Standard Image and Mask Generation 3 | ----------------------------- 4 | 5 | (c) copyright 2019 heejune@seoultech.ac.kr 6 | 7 | Prerequisite: SMPL model 8 | In : SMPL params fixed 9 | Out: body mask (binary and labeled) 10 | camera, pose and shape paramters 11 | 12 | camera 13 | - cam.t: [ 0. 0. 20.] # [-3.12641449e-03 4.31656201e-01 2.13035413e+01] 14 | - cam.rt: [0. 0. 0.] 15 | - cam.k: [0. 0. 0. 0. 0.] 16 | - cam.c: [ 96. 128.] # depending on the image size 17 | - cam.f: [5000. 5000.] 
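# (Aside) With these values, opendr's ProjectPoints reduces to a plain
# pinhole model: no rotation (rt = 0) and no distortion (k = 0), so
# u = f*x/z + c_x and v = f*y/z + c_y after translating by t. A minimal
# numpy check (project_sketch and its defaults are ours, taken from above):

import numpy as np

def project_sketch(P, f=(5000., 5000.), c=(96., 128.), t=(0., 0., 20.)):
    """P: [Nx3] model-space points; returns [Nx2] pixel coordinates."""
    Pc = P + np.asarray(t)              # rt is zero, so translation only
    return Pc[:, :2] * np.asarray(f) / Pc[:, 2:3] + np.asarray(c)

# e.g. a point at x = 0.1 m and 20 m deep lands 5000*0.1/20 = 25 px right of
# c_x. Throughout the template, f and c are deliberately kept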
# fixed 18 | 19 | betas type: shape: (10,) 20 | all zeros used (default) 21 | 22 | pose: shape: (72,) 23 | 24 | """ 25 | from __future__ import print_function 26 | import sys 27 | from os.path import join, exists, abspath, dirname 28 | from os import makedirs 29 | import logging 30 | import cPickle as pickle 31 | import time 32 | import cv2 33 | import numpy as np 34 | import chumpy as ch 35 | from opendr.camera import ProjectPoints 36 | from smpl_webuser.serialization import load_model 37 | from smpl_webuser.verts import verts_decorated 38 | from render_model import render_model 39 | import inspect # for debugging 40 | import matplotlib.pyplot as plt 41 | from opendr.lighting import SphericalHarmonics 42 | from opendr.geometry import VertNormals, Rodrigues 43 | from opendr.renderer import TexturedRenderer 44 | 45 | 46 | import json 47 | from smpl_webuser.lbs import global_rigid_transformation 48 | 49 | _LOGGER = logging.getLogger(__name__) 50 | 51 | logging.basicConfig(level=logging.INFO) 52 | 53 | 54 | import boundary_matching 55 | import graphutil as graphutil 56 | 57 | cloth_label_dict = {"background": 0, 58 | "hat" :1, 59 | "hair":2, 60 | "sunglass":3, # 3 61 | "upper-clothes":4, # 4 62 | "skirt":5 , # 5 63 | "pants":6 , # 6 64 | "dress":7 , # 7 65 | "belt": 8 , # 8 66 | "left-shoe": 9, # 9 67 | "right-shoe": 10, # 10 68 | "face": 11, # 11 69 | "left-leg": 12, # 12 70 | "right-leg": 13, # 13 71 | "left-arm": 14,# 14 72 | "right-arm": 15, # 15 73 | "bag": 16, # 16 74 | "scarf": 17, # 17 75 | "skin": 18 # added for skin region from face 76 | } 77 | 78 | # To understand and verify the SMPL itself 79 | def _examine_smpl_template(model, detail = False): 80 | 81 | print(">> SMPL Template <<<<<<<<<<<<<<<<<<<<<<") 82 | print(type(model)) 83 | print(dir(model)) 84 | #print('kintree_table', model.kintree_table) 85 | print('pose:', model.pose) 86 | if detail: 87 | print('posedirs:', model.posedirs) 88 | print('betas:', model.betas) 89 | print('shape(model):', model.shape) 90 | if detail: 91 | print('shapedirs:', model.shapedirs) 92 | 93 | #print('bs_style:', model.bs_style) # f-m-n 94 | #print('f:', model.f) 95 | print('V template :', type(model.v_template)) 96 | print('V template :', model.v_template.shape) 97 | #print('weights:', model.weoptimize_on_jointsights) 98 | print('W type:', type(model.weights)) 99 | print('W shape:', model.weights.r.shape) 100 | if detail: 101 | print('W value:') 102 | print(model.weights.r) 103 | #parts = np.count_nonzero(model.weights.r, axis =1) 104 | parts = np.argmax(model.weights.r, axis=1) 105 | print(" :", parts.shape, parts[:6000]) 106 | 107 | #print('J:', model.J) 108 | #print('v_template:', model.v_template) 109 | #print('J_regressor:', model.J_regressor) 110 | 111 | # To understand and verify the paramters 112 | 113 | def _examine_smpl_params(params): 114 | 115 | print(type(params)) 116 | print(params.keys()) 117 | print('camera params') 118 | camera = params['cam'] 119 | print(" - type:", type(camera)) 120 | #print(" - members:", dir(camera)) 121 | print(" - cam.t:", camera.t.r) # none-zero, likely only nonzero z 122 | print(" - cam.rt:", camera.rt.r) # zero (fixed) 123 | # print(" - cam.camera_mtx:", camera.camera_mtx) # 124 | print(" - cam.k:", camera.k.r) # 125 | print(" - cam.c:", camera.c.r) # 126 | print(" - cam.f:", camera.f.r) # 127 | 128 | # print(params['f'].shape) # 2 129 | print('>> pose') 130 | pose = params['pose'] 131 | print("\t\ttype:", type(pose)) 132 | print('\t\tshape:', pose.shape) # 72 133 | 134 | # convert within 135 | #pose = pose 
% (2.0*np.pi) 136 | 137 | print('\t\tvalues (in degree):') 138 | print(pose*180.0/np.pi) # degree 139 | print('>> betas') 140 | betas = params['betas'] 141 | print('\ttype:', type(betas)) 142 | print('\tshape:', betas.shape) # 10 143 | # print('\tvalues:', params['betas']) # 10 144 | 145 | 146 | # 147 | # 148 | # 149 | def build_body_model(model, pose, betas, cam): 150 | 151 | 152 | n_betas = betas.shape[0] 153 | viz = False 154 | 155 | # 2. build body model 156 | sv = verts_decorated( # surface vertices 157 | trans=ch.zeros(3), 158 | pose=ch.array(pose), 159 | v_template=model.v_template, 160 | J=model.J_regressor, 161 | betas=ch.array(betas), 162 | shapedirs=model.shapedirs[:, :, :n_betas], 163 | weights=model.weights, 164 | kintree_table=model.kintree_table, 165 | bs_style=model.bs_style, 166 | f=model.f, 167 | bs_type=model.bs_type, 168 | posedirs=model.posedirs, 169 | want_Jtr = not viz) # need J_transformed for reposing based on vertices 170 | 171 | return sv 172 | 173 | # convert numpy to json for a single person joint 174 | def cvt_joints_np2json(joints_np): 175 | 176 | # 1. re-ordering 177 | # same as viton2lsp_joint and reamining 178 | order = [13,12,8, 7, 6, 9, 10, 11, 2, 1, 0, 3, 4, 5, 14, 15, 16, 17] 179 | 180 | # 2. build dictionary 181 | oneperson = { "face_keypoints": [], 182 | "pose_keypoints": joints_np[order].flatten().tolist(), 183 | "hand_right_keypoints": [], 184 | "hand_left_keypoints":[]} 185 | 186 | people = [oneperson] 187 | joints_json = { "version": 1.0, "people": people } 188 | 189 | return joints_json 190 | 191 | # 192 | # calculate pixel position of SMPL joints 193 | # 194 | # cam: camera ie. projector 195 | # model: smpl basic mdoel 196 | # sv: surfac vectors (opendr) 197 | # betas : body shape, why needed? 198 | # h: projection image height 199 | # w: projection image width 200 | def calculate_joints(cam, model, sv, betas, h , w): 201 | 202 | # 1. get the joint locations 203 | smpl_ids = [8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20] # , 12 ] # index in Jtr # @TODO correct neck 204 | # lsh,lelb, lwr, neck 205 | 206 | # make the SMPL joints depend on betas 207 | Jdirs = np.dstack([model.J_regressor.dot(model.shapedirs[:, :, i]) for i in range(len(betas))]) 208 | J_onbetas = ch.array(Jdirs).dot(betas) + model.J_regressor.dot( model.v_template.r) 209 | 210 | # get joint positions as a function of model pose, betas and trans 211 | (_, A_global) = global_rigid_transformation( sv.pose, J_onbetas, model.kintree_table, xp=ch) 212 | Jtr = ch.vstack([g[:3, 3] for g in A_global]) + sv.trans 213 | 214 | # add joints, with corresponding to a vertex... 215 | neck_id = 3078 #2951 #3061 # viton's bewtween shoulder 216 | Jtr = ch.vstack((Jtr, sv[neck_id])) 217 | smpl_ids.append(len(Jtr) - 1) 218 | # head_id = 411 219 | nose_id = 331 # nose vertex id 220 | Jtr = ch.vstack((Jtr, sv[nose_id])) 221 | smpl_ids.append(len(Jtr) - 1) 222 | lear_id = 516 223 | Jtr = ch.vstack((Jtr, sv[lear_id])) 224 | smpl_ids.append(len(Jtr) - 1) 225 | rear_id = 3941 # 422# 226 #396 226 | Jtr = ch.vstack((Jtr, sv[rear_id])) 227 | smpl_ids.append(len(Jtr) - 1) 228 | leye_id = 125 #220 # 125 229 | Jtr = ch.vstack((Jtr, sv[leye_id])) 230 | smpl_ids.append(len(Jtr) - 1) 231 | reye_id = 3635 232 | Jtr = ch.vstack((Jtr, sv[reye_id])) 233 | smpl_ids.append(len(Jtr) - 1) 234 | 235 | # 2. 
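# (Aside) Why the Jdirs trick above works: SMPL joints are J(beta) =
# J_regressor @ v_shaped with v_shaped = v_template + shapedirs @ beta,
# which is linear in beta, so
#
#   J(beta) = J_regressor @ v_template + sum_i beta_i * (J_regressor @ S_i)
#
# i.e. exactly J_onbetas = Jdirs.dot(betas) + J_regressor.dot(v_template.r).
# The same computation in plain numpy (chumpy is only needed when J must stay
# differentiable w.r.t. betas):

import numpy as np

def joints_on_betas_sketch(J_regressor, v_template, shapedirs, betas):
    """J_regressor: [24x6890], v_template: [6890x3], shapedirs: [6890x3x10]."""
    Jdirs = np.dstack([J_regressor.dot(shapedirs[:, :, i])
                       for i in range(len(betas))])        # [24x3x10]
    return Jdirs.dot(betas) + J_regressor.dot(v_template)  # [24x3]

# Resuming step 2: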
project SMPL joints on the image plane using the estimated camera 236 | cam.v = Jtr 237 | 238 | joints_np_wo_confidence = cam.r[smpl_ids] # get the projected value 239 | #print(joints_np_wo_confidence) 240 | joints_np = np.zeros([18, 3]) 241 | joints_np[:,:2] = joints_np_wo_confidence 242 | joints_np[:,2] = 1.0 243 | 244 | for i in range(joints_np.shape[0]): 245 | if joints_np[i,0] < 0 or joints_np[i,0] > (w-1) or joints_np[i,1] < 0 or joints_np[i,1] > (h-1): 246 | joints_np[i, 2] = 0.0 247 | 248 | #print(joints_np) 249 | return joints_np 250 | 251 | 252 | 253 | # rendering with color 254 | # 255 | # @TODO: Texture rendering might be better for clearer segmentation 256 | # 257 | # no light setting needed for ColorRenderer 258 | # vertices: 3D position 259 | # faces : triangles 260 | # labelmap : map from vertex to label 261 | # cam, 262 | # height, width: projection size 263 | # near = 0.5, far = 25, 264 | # bDebug = False): 265 | def render_with_label(vertices, faces, labelmap, 266 | cam, height, width, near = 0.5, far = 25, bDebug = False): 267 | 268 | # 1. check labelmap 269 | if bDebug: 270 | print('label :min:', np.amin(labelmap), 'max:', np.amax(labelmap), 'avg:', np.mean(labelmap)) 271 | print('labelshape:', labelmap.shape) 272 | 273 | # 2. setup color renderer 274 | from opendr.renderer import ColoredRenderer 275 | rn = ColoredRenderer() 276 | rn.camera = cam 277 | rn.frustum = {'near': near, 'far': far, 'width': width, 'height': height} 278 | rn.bgcolor = ch.zeros(3) 279 | 280 | # 3. VC become the brightness of vertices 281 | # OpenGL uses float for processing, so convert it to float and then revert it to integer 282 | # in this rendering process, boundary gets blurred, so be carefull if you need clear boundary 283 | vc = np.zeros(vertices.shape) 284 | labelmap_float = labelmap.astype(np.float)/23.0 285 | vc[:,0], vc[:,1],vc[: ,2] = labelmap_float, labelmap_float, labelmap_float # gray color 286 | rn.vc = vc # giving the albera, FIXME: far -> bright? No! so you should use gray_r for display 287 | rn.set(v=vertices, f=faces) 288 | 289 | # get one channel for segmentation 290 | img = (rn.r[:,:,0]*23.0).astype('uint8') 291 | return img 292 | 293 | # 294 | # SMPL => binary mask, part mask, joint location 295 | # 296 | def smpl2maskcore(cam, # camera model, Chv 297 | betas, # shape coef, numpy 298 | n_betas, # num of PCA 299 | pose, # angles, 27x3 numpy 300 | imRGB, # img numpy 301 | model, # SMPL model 302 | viz = False): # visualize or not 303 | 304 | for which in [cam, betas, pose, imRGB, model]: 305 | if which is None: 306 | print( retrieve_name(which) , 'is None') 307 | exit() 308 | 309 | size_ext = False 310 | h, w = imRGB.shape[0:2] 311 | 312 | # 1 build SMPL model 313 | sv = build_body_model(model, pose, betas, cam) 314 | #sv_r = sv.r.copy() 315 | 316 | # 2. render the model with parameter 317 | h_ext = h*3//2 # extended for full body 318 | if size_ext: 319 | print("output size (hxw):", h_ext, w) 320 | else: 321 | print("output size (hxw):", h, w) 322 | 323 | im3CGray = cv2.cvtColor(cv2.cvtColor(imRGB, cv2.COLOR_BGR2GRAY), cv2.COLOR_GRAY2BGR) # 3 channel gray 324 | im3CBlack = np.zeros([h_ext, w, 3], dtype = np.uint8) 325 | imBackground = im3CBlack 326 | dist = np.abs(cam.t.r[2] - np.mean(sv.r, axis=0)[2]) 327 | im = (render_model( 328 | sv.r, model.f, w, h_ext, cam, far= 20 + dist, img=imBackground[:, :, ::-1]) * 255.).astype('uint8') 329 | 330 | # 3. 
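# (Aside) render_with_label() above encodes a part label as gray = label/23.0
# and decodes with *23.0. Rasterization interpolates vertex colors, so pixels
# at part seams can land between two labels (the blurred-boundary effect the
# comment in the renderer warns about). A defensive decode rounds to the
# nearest label rather than truncating (decode_labels_sketch is ours):

import numpy as np

def decode_labels_sketch(rendered_channel, n_labels=24):
    """rendered_channel: float render in [0, 1]; returns uint8 labels."""
    return np.clip(np.rint(rendered_channel * 23.0), 0, n_labels - 1).astype(np.uint8)

# Resuming step 3: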
binary mask 331 | imBinary = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY) # gray silhouette 332 | imBinary[imBinary > 0] = 255 # binary (0, 1) 333 | # @TODO: check any noisy pixels, if yes, filtering or get contours and redrawing with contours 334 | 335 | # 4. segmentation information of human body 336 | bodyparts = np.argmax(model.weights.r, axis=1) 337 | #print(np.unique(bodyparts)) # will list all the joints nmbers 338 | imPart = render_with_label(sv.r, model.f, bodyparts, cam, height=h_ext, width=w, near= 0.5, far= 40, bDebug = True) 339 | #print("impart:", np.amax(imPart), np.amin(imPart), np.mean(imPart)) 340 | #print(" ", np.unique(imPart)) 341 | 342 | # 5. new 2d joints for template 343 | if size_ext: 344 | joints_np = calculate_joints(cam, model, sv, betas, h_ext, w) 345 | else: 346 | joints_np = calculate_joints(cam, model, sv, betas, h, w) 347 | # check the joints 348 | joints_np_int = joints_np.astype(int) 349 | imJoint = im.copy() 350 | for i in range(joints_np_int.shape[0]): 351 | cv2.circle(imJoint, tuple(joints_np_int[i,:2]), 2, (0, 0, 255), -1) # 2D joint White 352 | 353 | # display for check results 354 | if False: 355 | plt.subplot(2,2,1) 356 | plt.imshow(im[:,:,::-1]) # , cmap='gray') 357 | plt.title('rendered') 358 | plt.subplot(2,2,2) 359 | plt.imshow(imBinary) # , cmap='gray') 360 | plt.title('binary mask') 361 | plt.subplot(2,2,3) 362 | plt.imshow(imPart) # , cmap='gray') 363 | plt.title('part mask') 364 | plt.subplot(2,2,4) 365 | plt.imshow(imJoint[:,:,::-1]) # , cmap='gray') 366 | plt.title('joints') 367 | plt.suptitle('SMPL Template Check') 368 | _ = raw_input('next?') 369 | 370 | # 6. convert format 371 | joints_json = cvt_joints_np2json(joints_np) # json joints 372 | #print(joints_json) 373 | #json.dumps(joints_json) 374 | 375 | 376 | if size_ext : 377 | return imBinary, imPart, joints_json 378 | else: 379 | return imBinary[:h,:], imPart[:h,:], joints_json 380 | 381 | 382 | # load dataset dependent files and call the core processing 383 | #--------------------------------------------------------------- 384 | # smpl_mdoel: SMPL 385 | # inmodel_path : smpl param pkl file (by SMPLify) 386 | # inimg_path: input image 387 | # out mask image 388 | # ind : image index 389 | def smpl2mask_single(smpl_model, inmodel_path, inimg_path, outbinimg_path, outpartimg_path, outjoint_path, outparam_path, ind): 390 | 391 | if smpl_model is None or inmodel_path is None or inimg_path is None or outbinimg_path is None or outpartimg_path is None: 392 | print('There is None inputs'), exit() 393 | 394 | plt.ion() 395 | 396 | # model params 397 | with open(inmodel_path, 'rb') as f: 398 | if f is None: 399 | print("cannot open", inmodel_path), exit() 400 | params = pickle.load(f) 401 | 402 | #params['pose'] = params['pose'] % (2.0*np.pi) # modulo 403 | 404 | cam = ProjectPoints(f = params['cam_f'], rt=params['cam_rt'], t=params['cam_t'], k=params['cam_k'], c= params['cam_c']) 405 | params['cam'] = cam 406 | 407 | #_examine_smpl_params(params) 408 | 409 | # 2d rgb image for texture 410 | #inimg_path = img_dir + '/dataset10k_%04d.jpg'%idx 411 | img2D = cv2.imread(inimg_path) 412 | if img2D is None: 413 | print("cannot open", inimg_path), exit() 414 | 415 | # 3. run the SMPL body to cloth processing 416 | #cam = params['cam'] # camera model, Ch 417 | betas = params['betas'] 418 | n_betas = betas.shape[0] #10 419 | pose = params['pose'] # angles, 27x3 numpy 420 | 421 | 422 | # 4. 
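# (Aside) The @TODO in smpl2maskcore step 3 (filter possible noisy mask
# pixels) could be satisfied with a small morphological open/close; a sketch,
# assuming the 0/255 binary masks produced there:

import cv2
import numpy as np

def clean_binary_mask_sketch(mask, ksize=3):
    kernel = np.ones((ksize, ksize), np.uint8)
    out = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)   # drop speckles
    return cv2.morphologyEx(out, cv2.MORPH_CLOSE, kernel)  # fill pinholes

# Resuming step 4: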
set up model params 423 | # 4.1 pose # make standard pose for easier try-on 424 | pose[:] = 0.0 425 | pose[0] = np.pi 426 | # lsh = 16 rsh = 17 67.5 degree rotation around z axis 427 | pose[16*3+2] = -7/16.0*np.pi 428 | pose[17*3+2] = +7/16.0*np.pi 429 | # 4.2 shape 430 | betas[:] = 0.0 # all default 431 | betas[0] = 0.0 # size : + big, - small 432 | betas[1] = 0.0 # fatness: + slim - fat 433 | 434 | # camera postion 435 | #cam.t = [0. , 0., 20.] - cam.t: [ 0. 0. 20.] # [-3.12641449e-03 4.31656201e-01 2.13035413e+01] 436 | cam.t = [0., 0.4, 25.] 437 | cam.rt = [0., 0., 0.] 438 | cam.k = [0., 0., 0., 0., 0.] 439 | cam.f = [5000., 5000.] 440 | cam.c = [ 96., 128.] # depending on the image size 441 | 442 | 443 | 444 | beta_test = True 445 | 446 | if beta_test: 447 | 448 | beta0_list = [-2.0, 0.0, +2.0] 449 | beta1_list = [-3.0, -1.0, 0.0, +1.0, +3.0] 450 | for i in range(len(beta0_list)): 451 | betas[0] = beta0_list[i] 452 | 453 | for j in range(len(beta1_list)): 454 | betas[1] = beta1_list[j] 455 | params = {'cam_t': cam.t, 456 | 'cam_rt': cam.rt, 457 | 'cam_f': cam.f, 458 | 'cam_k': cam.k, 459 | 'cam_c': cam.c, 460 | 'pose': pose, 461 | 'betas': betas} 462 | img_mask, _, _ = smpl2maskcore(cam, # camera model, Ch 463 | betas, # shape coeff, numpy 464 | n_betas, # num of PCA 465 | pose, # angles, 27x3 numpy 466 | img2D, # img numpy 467 | smpl_model, # SMPL 468 | viz = True) # display 469 | 470 | plt.subplot(len(beta0_list), len(beta1_list), len(beta1_list)*i+j+1) 471 | plt.imshow(img_mask) 472 | plt.title('beta=%3.1f,%3.1f'%(beta0_list[i],beta1_list[j])) 473 | plt.show() 474 | 475 | plt.suptitle('shape params template') 476 | plt.show() 477 | _ = raw_input('next?') 478 | 479 | 480 | # final template 481 | betas[1] = 1.0 482 | 483 | print('Final pose and betas ') 484 | print('pose:', pose.reshape([-1,3])) 485 | print('betas:', betas) 486 | 487 | params = {'cam_t': cam.t, 488 | 'cam_rt': cam.rt, 489 | 'cam_f': cam.f, 490 | 'cam_k': cam.k, 491 | 'cam_c': cam.c, 492 | 'pose': pose, 493 | 'betas': betas} 494 | 495 | img_mask, img_part, joints_json = smpl2maskcore(cam, # camera model, Ch 496 | betas, # shape coeff, numpy 497 | n_betas, # num of PCA 498 | pose, # angles, 27x3 numpy 499 | img2D, # img numpy 500 | smpl_model, # SMPL 501 | viz = True) # display 502 | 503 | 504 | # 3.2 save output result 505 | if outbinimg_path is not None: 506 | cv2.imwrite(outbinimg_path, img_mask) 507 | if outpartimg_path is not None: 508 | cv2.imwrite(outpartimg_path, img_part) 509 | if outjoint_path is not None: 510 | with open(outjoint_path, 'w') as joint_file: 511 | json.dump(joints_json, joint_file) 512 | 513 | if outparam_path is not None: 514 | with open(outparam_path, 'w') as outf: 515 | pickle.dump(params, outf) 516 | 517 | 518 | if __name__ == '__main__': 519 | 520 | if len(sys.argv) < 4: 521 | print('usage: %s base_path dataset start_idx'% sys.argv[0]), exit() 522 | 523 | # 1. 
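# (Aside) The pose edits above rely on SMPL's layout: pose is 24 joints x 3
# axis-angle components, and in SMPL's kintree joints 16/17 are the left/right
# shoulders, so pose[16*3+2] is the left shoulder's rotation about its z axis.
# Note that 7/16*pi is 78.75 degrees, not the 67.5 stated in the inline
# comment (67.5 would be 6/16*pi). A tiny helper makes such edits
# self-documenting (helper and constant names are ours):

import numpy as np

SMPL_L_SHOULDER, SMPL_R_SHOULDER = 16, 17

def set_joint_axis_angle_sketch(pose, joint, axis, radians):
    """pose: (72,) axis-angle vector; axis: 0 = x, 1 = y, 2 = z."""
    pose[joint * 3 + axis] = radians

# e.g. set_joint_axis_angle_sketch(pose, SMPL_L_SHOULDER, 2, -7/16. * np.pi)
# Resuming step 1: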
directory check and setting
524 |     base_dir = abspath(sys.argv[1])
525 |     #print(base_dir)
526 |     dataset = sys.argv[2]
527 |     idx_s = int(sys.argv[3])
528 |     #idx_e = int(sys.argv[4])
529 | 
530 |     if not exists(base_dir):
531 |         print('No such a directory for base', base_dir), exit()
532 | 
533 |     # input directory: images
534 |     inp_dir = base_dir + "/images/" + dataset
535 |     if not exists(inp_dir):
536 |         print('No such a directory for dataset', dataset, inp_dir), exit()
537 | 
538 |     # input directory: preprocessed
539 |     data_dir = base_dir + "/results/" + dataset
540 |     print(data_dir)
541 |     smpl_param_dir = data_dir + "/smpl"
542 |     if not exists(smpl_param_dir):
543 |         print('No such a directory for smpl param', smpl_param_dir), exit()
544 |     '''
545 |     mask_dir = data_dir + "/segmentation"
546 |     if not exists(mask_dir):
547 |         print('No such a directory for mask', mask_dir), exit()
548 |     '''
549 | 
550 |     # Output Directory
551 |     smplmask_dir = data_dir + "/smplmask"
552 |     if not exists(smplmask_dir):
553 |         makedirs(smplmask_dir)
554 | 
555 |     smpljson_dir = data_dir + "/smpljson"
556 |     if not exists(smpljson_dir):
557 |         makedirs(smpljson_dir)
558 | 
559 |     # 2. Loading SMPL models (independent from dataset)
560 |     use_neutral = False
561 |     # Assumes 'models' is in the 'code/' directory where this file is.
562 |     MODEL_DIR = join(abspath(dirname(__file__)), 'models')
563 |     MODEL_NEUTRAL_PATH = join(
564 |         MODEL_DIR, 'basicModel_neutral_lbs_10_207_0_v1.0.0.pkl')
565 |     MODEL_FEMALE_PATH = join(
566 |         MODEL_DIR, 'basicModel_f_lbs_10_207_0_v1.0.0.pkl')
567 |     MODEL_MALE_PATH = join(MODEL_DIR,
568 |                            'basicmodel_m_lbs_10_207_0_v1.0.0.pkl')
569 | 
570 |     if not use_neutral:
571 |         # File storing information about gender
572 |         with open(join(data_dir, dataset + '_gender.csv')) as f:
573 |             genders = f.readlines()
574 |         model_female = load_model(MODEL_FEMALE_PATH)
575 |         model_male = load_model(MODEL_MALE_PATH)
576 |     else:
577 |         gender = 'neutral'
578 |         smpl_model = load_model(MODEL_NEUTRAL_PATH)
579 | 
580 |     #_examine_smpl_template(model_female)  #, exit()
581 | 
582 |     # Load joints
583 |     '''
584 |     estj2d = np.load(join(data_dir, 'est_joints.npz'))['est_joints']
585 |     #print('est_shape:', est.shape)
586 |     joints = estj2d[:2, :, idx].T
587 |     '''
588 |     idx = idx_s
589 | 
590 |     # for i in range(1, 2):
591 |     #    if not use_neutral:
592 |     #        gender = 'male' if int(genders[i]) == 0 else 'female'
593 |     #    if gender == 'female':
594 |     smpl_model = model_female
595 |     smpl_param_path = smpl_param_dir + '/%04d.pkl' % idx
596 |     if dataset == '10k':
597 |         inp_path = inp_dir + '/' + 'dataset10k' + '_%04d.jpg' % idx
598 |     else:
599 |         inp_path = inp_dir + '/' + dataset + '_%06d.jpg' % idx
600 | 
601 |     #smplmask_path = smplmask_dir + '/%06d_0.png' % idx
602 |     #jointfile_path = smpljson_dir + '/%06d_0.json' % idx
603 |     smplmask_path = './templatemask.png'
604 |     smplpart_path = './templatepart.png'
605 |     jointfile_path = './templatejoint.json'
606 |     param_path = './templateparam.pkl'
607 |     smpl2mask_single(smpl_model, smpl_param_path, inp_path, smplmask_path, smplpart_path, jointfile_path, param_path, idx)
608 | 
609 | 
610 |     # plt.pause(10)
611 |     _ = raw_input('quit?')
612 | 
--------------------------------------------------------------------------------
/smpl3dclothxfer_v4.py:
--------------------------------------------------------------------------------
1 | """
2 | cloth 3d model reconstruction (smpl3dclothrecon.py) and transfer to a human model
3 | ------------------------------------------------------------------------------------------
4 | 
5
| (c) copyright 2019 heejune@seoultech.ac.kr 6 | 7 | In : used in smpl3dclothrec.py 8 | - SMPL template model params file (pkl) 9 | - 2D matched cloth image file and mask (png) 10 | used for transfering 11 | - SMPL target model params file (pkl) 12 | helper file 13 | - list.npy for re-ordering the smpl human image number to viton image number 14 | 15 | Note: re-ordering needed for SMPLify code 16 | 17 | Out: 18 | 3D warped cloth and mask (png) 19 | 20 | Note: the Texture (2D warped cloth) and related 2D vertex and face information is obtained 21 | with original SMPL and camera parameters 22 | 23 | For in-advance tesrt purpose of part 3. we could move the pose and apply the displacement vector 24 | we apply the pose and shape params for target user but with same texture and vertices and faces defitnion 25 | 26 | 27 | template (source: pose and shape) target (pose and shape) 28 | -------------------------------------------------------------------------- 29 | SMPL- p smpltemplate.pkl results/viton/smpl/000000.pkl 30 | camera-p smpltemplate.pkl results/viton/smpl/000000.pkl 31 | 3D body-v smpl with template param smpl with target params 32 | 3D cloth-v displacement obtained use displacemt obtained at template 33 | texture results/viton/2dwarp/00000_1.png same 34 | texture-v cam projected onto the texture same as template (not new vertices) 35 | texture-f model.f same 36 | lightening only for cloth-related vertices same 37 | 38 | 39 | """ 40 | from __future__ import print_function 41 | import smpl3dclothrec_v4 42 | import graphutil as graphutil 43 | import boundary_matching 44 | import sys 45 | from os.path import join, exists, abspath, dirname 46 | from os import makedirs 47 | import logging 48 | import cPickle as pickle 49 | import time 50 | import cv2 51 | from PIL import Image 52 | import numpy as np 53 | import chumpy as ch 54 | from opendr.camera import ProjectPoints 55 | from smpl_webuser.serialization import load_model 56 | from smpl_webuser.verts import verts_decorated 57 | from render_model import render_model 58 | import inspect # for debugging 59 | import matplotlib.pyplot as plt 60 | from opendr.lighting import SphericalHarmonics 61 | from opendr.geometry import VertNormals, Rodrigues 62 | from opendr.renderer import TexturedRenderer 63 | import json 64 | from smpl_webuser.lbs import global_rigid_transformation 65 | 66 | _LOGGER = logging.getLogger(__name__) 67 | logging.basicConfig(level=logging.INFO) 68 | 69 | 70 | # To understand and verify the SMPL itself 71 | def _examine_smpl_template(model, detail=False): 72 | 73 | print(">> SMPL Template <<<<<<<<<<<<<<<<<<<<<<") 74 | print(type(model)) 75 | print(dir(model)) 76 | #print('kintree_table', model.kintree_table) 77 | print('pose:', model.pose) 78 | if detail: 79 | print('posedirs:', model.posedirs) 80 | print('betas:', model.betas) 81 | print('shape(model):', model.shape) 82 | if detail: 83 | print('shapedirs:', model.shapedirs) 84 | 85 | # print('bs_style:', model.bs_style) # f-m-n 86 | #print('f:', model.f) 87 | print('V template :', type(model.v_template)) 88 | print('V template :', model.v_template.shape) 89 | #print('weights:', model.weoptimize_on_jointsights) 90 | print('W type:', type(model.weights)) 91 | print('W shape:', model.weights.r.shape) 92 | if detail: 93 | print('W value:') 94 | print(model.weights.r) 95 | #parts = np.count_nonzero(model.weights.r, axis =1) 96 | parts = np.argmax(model.weights.r, axis=1) 97 | print(" :", parts.shape, parts[:6000]) 98 | 99 | #print('J:', model.J) 100 | #print('v_template:', 
model.v_template) 101 | #print('J_regressor:', model.J_regressor) 102 | 103 | # To understand and verify the paramters 104 | 105 | 106 | def _examine_smpl_params(params): 107 | 108 | print(type(params)) 109 | print(params.keys()) 110 | print('camera params') 111 | camera = params['cam'] 112 | print(" - type:", type(camera)) 113 | #print(" - members:", dir(camera)) 114 | print(" - cam.t:", camera.t.r) # none-zero, likely only nonzero z 115 | print(" - cam.rt:", camera.rt.r) # zero (fixed) 116 | # print(" - cam.camera_mtx:", camera.camera_mtx) # 117 | print(" - cam.k:", camera.k.r) # 118 | print(" - cam.c:", camera.c.r) # 119 | print(" - cam.f:", camera.f.r) # 120 | 121 | # print(params['f'].shape) # 2 122 | print('>> pose') 123 | pose = params['pose'] 124 | print("\t\ttype:", type(pose)) 125 | print('\t\tshape:', pose.shape) # 72 126 | 127 | # convert within 128 | #pose = pose % (2.0*np.pi) 129 | 130 | print('\t\tvalues (in degree):') 131 | print(pose*180.0/np.pi) # degree 132 | print('>> betas') 133 | betas = params['betas'] 134 | print('\ttype:', type(betas)) 135 | print('\tshape:', betas.shape) # 10 136 | # print('\tvalues:', params['betas']) # 10 137 | 138 | 139 | def construct_clothed3d_from_clothed2d_depth(body_sv, cam, clothed2d): 140 | 141 | # 1. get the dept for body vertex 142 | bodydepth = graphutil.build_depthmap2(body_sv.r, cam) 143 | 144 | check_depthmap = False 145 | if check_depthmap: 146 | # depth in reverse way 147 | plt.suptitle('depthmap') 148 | plt.subplot(1, 2, 1) 149 | plt.imshow(img[:, :, ::-1]) # , cmap='gray') 150 | plt.subplot(1, 2, 2) 151 | depthmap = graphutil.build_depthimage( 152 | body_sv.r, model.f, bodydepth, cam, height=h, width=w) 153 | #plt.imshow(depthmap, cmap='gray') 154 | plt.imshow(depthmap) 155 | plt.draw() 156 | plt.show() 157 | # plt.imshow(depthmap, cmap='gray_r') # the closer to camera, the brighter 158 | _ = raw_input('quit?') 159 | exit() 160 | 161 | # 2. modify the depth for clothed 162 | # @TODO 163 | 164 | # 3. unproject to 3D 165 | # uv space? pixels coordinated!! 166 | clothuvd = np.zeros(body_sv.r.shape) 167 | clothuvd[:, 0] = clothed2d[:, 0] 168 | clothuvd[:, 1] = clothed2d[:, 1] 169 | # @TODO for now simply use the same depth as body ^^; 170 | clothuvd[:, 2] = bodydepth 171 | cloth3d = cam.unproject_points(clothuvd) 172 | # sv.r = cloth3d # now the model is not body but cloth 173 | 174 | return cloth3d 175 | 176 | 177 | # calcuated the local coordinates at each vetex. 178 | # 179 | # z : normal to the vertex 180 | # x : the smallest indexed neighbor vertex based unit vector 181 | # y : the remianing axis in right handed way, ie. z x x => y 182 | def setup_vertex_local_coord(faces, vertices): 183 | 184 | # 1.1 normal vectors (1st axis) at each vertex 185 | _, axis_z = graphutil.calc_normal_vectors(vertices, faces) 186 | # 1.2 get 2nd axis 187 | axis_x = graphutil.find2ndaxis(faces, axis_z, vertices) 188 | # 1.3 get 3rd axis 189 | # matuir contribution. np.cross support row-vectorization 190 | axis_y = np.cross(axis_z[:, :], axis_x[:, :]) 191 | 192 | return axis_x, axis_y, axis_z 193 | 194 | # 195 | # reporesent the displacement (now in global coord) into local coordinates 196 | # 197 | # model: smpl mesh structure 198 | # v0 : reference vertex surface, ie. the body 199 | # v*****array: vertext index array for interest 200 | # d : displacement, ie. 
v = v0 + d 201 | # 202 | 203 | 204 | def compute_displacement_at_vertex(model, v0, d_global): 205 | 206 | debug = False 207 | 208 | # 1.setup local coordinate system to each vertex 209 | axis_x, axis_y, axis_z = setup_vertex_local_coord(model.f, v0) 210 | 211 | # 2. express displacement in 3 axises 212 | #dlocal = np.concatenate(np.dot(d, axis_x), np.dot(d, axis_y), np.dot(d, axis_z)) 213 | xl = np.sum(d_global*axis_x, axis=1) 214 | yl = np.sum(d_global*axis_y, axis=1) 215 | zl = np.sum(d_global*axis_z, axis=1) 216 | d_local = np.stack((xl, yl, zl), axis=-1) 217 | print('dlocal shape:', xl.shape, yl.shape, zl.shape, d_local.shape) 218 | 219 | if debug: # verifying d_global = xs * axis_x + ys* axis_y + z*axis_z 220 | # get global coorindate vector 221 | xg = xl[:, None]*axis_x 222 | yg = yl[:, None]*axis_y 223 | zg = zl[:, None]*axis_z 224 | dg = xg + yg + zg 225 | 226 | # check the error 227 | err = np.absolute(dg - d_global) 228 | print('d, e x:', np.amax(d_global[:, 0]), np.amax( 229 | err[:, 0]), np.mean(d_global[:, 0]), np.mean(err[:, 0])) 230 | print('d, e y:', np.amax(d_global[:, 1]), np.amax( 231 | err[:, 1]), np.mean(d_global[:, 1]), np.mean(err[:, 1])) 232 | print('d, e z:', np.amax(d_global[:, 2]), np.amax( 233 | err[:, 2]), np.mean(d_global[:, 2]), np.mean(err[:, 2])) 234 | ''' 235 | print('d 0:', np.amax(d_global[:,0]), np.amin(d_global[:,0])) 236 | print('error0:', np.amax(err[:,0]), np.amin(err[:,0])) 237 | print('d 1:', np.amax(d_global[:,1]), np.amin(d_global[:,1])) 238 | print('error1:', np.amax(err[:,1]), np.amin(err[:,1])) 239 | print('d 2:', np.amax(d_global[:,2]), np.amin(d_global[:,2])) 240 | print('error2:', np.amax(err[:,2]), np.amin(err[:,2])) 241 | ''' 242 | 243 | return d_local 244 | 245 | 246 | # 247 | # @TODO: Do this !! the most key part combining with displacement generatrion 248 | # 249 | # model : the body surface structure 250 | # body : body surface vertices 251 | # vi4cloth: vertex index for the cloth surface 252 | # d : displacement vector in local coordinate 253 | # 254 | # def transfer_body2clothed(cam_tgt, betas_tgt, n_betas_tgt, pose_tgt, v4cloth, d): 255 | def transfer_body2clothed(model, body, d_local): 256 | 257 | # 1.setup local coordinate system to each vertex 258 | axis_x, axis_y, axis_z = setup_vertex_local_coord(model.f, body) 259 | 260 | # 2. express local to global 261 | # 2.1 select vectices under interest 262 | #axis_x, axis_y, axis_z = axis_x[vi4cloth], axis_y[vi4cloth], axis_z[vi4cloth] 263 | # 2.2 displacement in global coordinate 264 | xg = (d_local[:, 0])[:, None]*axis_x 265 | yg = (d_local[:, 1])[:, None]*axis_y 266 | zg = (d_local[:, 2])[:, None]*axis_z 267 | dg = xg + yg + zg 268 | 269 | # 3. adding them to the base/body vertices 270 | clothed = body + dg 271 | 272 | return clothed 273 | 274 | 275 | # display 3d model 276 | def render_cloth(cam, _texture, texture_v2d, faces, imHuman): 277 | 278 | #h, w = imTexture.shape[:2] 279 | h_ext, w = _texture.shape[:2] # full body 280 | h, _ = imHuman.shape[:2] # half body 281 | 282 | texture = _texture[:, :, :] 283 | 284 | # 1. texture rendering 285 | dist = 20.0 286 | cloth_renderer = smpl3dclothrec_v4.build_texture_renderer(cam, cam.v, faces, texture_v2d, faces, 287 | texture[::-1, :, :], w, h_ext, 1.0, near=0.5, far=20 + dist) 288 | imCloth = (cloth_renderer.r * 255.).astype('uint8') 289 | imCloth = imCloth[:h, :, ::-1] 290 | 291 | # 2. 
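# (Aside) compute_displacement_at_vertex and transfer_body2clothed above are
# exact inverses whenever each vertex frame is orthonormal: with the per-vertex
# matrix R whose rows are (axis_x, axis_y, axis_z), d_local = R d_global and
# d_global = R^T d_local. A compact einsum restatement with a round-trip check
# (equivalent to the debug block in compute_displacement_at_vertex):

import numpy as np

def displacement_roundtrip_sketch(axis_x, axis_y, axis_z, d_global):
    R = np.stack((axis_x, axis_y, axis_z), axis=1)      # [Nx3x3], rows = axes
    d_local = np.einsum('nij,nj->ni', R, d_global)      # the three sums above
    d_back = np.einsum('nji,nj->ni', R, d_local)        # xg + yg + zg
    return d_local, np.max(np.abs(d_back - d_global))   # error ~ 0 if orthonormal

# This is the whole point of the local frames: only R changes between the
# template body and the target body, so the same d_local can be replayed on a
# differently posed and shaped person.
# Resuming step 2: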
mask generation 292 | im3CBlack = np.zeros([h, w, 3], dtype=np.uint8) 293 | imModel = (render_model( 294 | cam.v, faces, w, h, cam, far=20 + dist, img=im3CBlack) * 255.).astype('uint8') 295 | imMask = cv2.cvtColor(imModel, cv2.COLOR_BGR2GRAY) # gray silhouette 296 | imMask[imMask > 0] = 255 # binary (0, 1) 297 | 298 | # 3. image overlay to check result 299 | imBG = imHuman[:, :, ::-1].astype('float32')/255.0 300 | overlay_renderer = smpl3dclothrec_v4.build_texture_renderer(cam, cam.v, faces, texture_v2d, faces, texture[::-1, :, :], w, h, 1.0, near=0.5, far=20 + dist, background_image=imBG) 301 | imOverlayed = overlay_renderer.r.copy() 302 | 303 | # plt.figure() 304 | plt.subplot(1, 4, 1) 305 | plt.axis('off') 306 | plt.imshow(texture[:h, :, ::-1]) 307 | plt.title('texture') 308 | 309 | plt.subplot(1, 4, 2) 310 | plt.imshow(imCloth[:, :, ::-1]) 311 | plt.axis('off') 312 | plt.title('transfered') 313 | 314 | plt.subplot(1, 4, 3) 315 | # @TODO use color render for mask or all whilte color for the cloth area texture 316 | plt.imshow(imMask) 317 | plt.axis('off') 318 | plt.title('mask') 319 | 320 | plt.subplot(1, 4, 4) 321 | plt.imshow(imOverlayed[:, :, :]) # @overlay with human image 322 | plt.axis('off') 323 | plt.title('target human') 324 | plt.show() 325 | 326 | return imCloth, imMask 327 | 328 | 329 | def save_rendered_textures(imCloth3dWarped, imClothMask3dWarped, human_segm_path, ocloth_path, oclothfull_path, omask_path): 330 | 331 | """ 332 | LIP labels 333 | 334 | [(0, 0, 0), # 0=Background 335 | (128, 0, 0), # 1=Hat 336 | (255, 0, 0), # 2=Hair 337 | (0, 85, 0), # 3=Glove 338 | (170, 0, 51), # 4=Sunglasses 339 | (255, 85, 0), # 5=UpperClothes 340 | (0, 0, 85), # 6=Dress 341 | (0, 119, 221), # 7=Coat 342 | (85, 85, 0), # 8=Socks 343 | (0, 85, 85), # 9=Pants 344 | (85, 51, 0), # 10=Jumpsuits 345 | (52, 86, 128), # 11=Scarf 346 | (0, 128, 0), # 12=Skirt 347 | (0, 0, 255), # 13=Face 348 | (51, 170, 221), # 14=LeftArm 349 | (0, 255, 255), # 15=RightArm 350 | (85, 255, 170), # 16=LeftLeg 351 | (170, 255, 85), # 17=RightLeg 352 | (255, 255, 0), # 18=LeftShoe 353 | (255, 170, 0) # 19=RightShoe 354 | (189, 170, 160) # 20=Skin/Neck 355 | ] 356 | """ 357 | 358 | im_parse = Image.open(human_segm_path) 359 | im_parse_2d = Image.open(human_segm_path).convert('L') 360 | parse_array = np.array(im_parse) 361 | parse_array_2d = np.array(im_parse_2d) 362 | 363 | parse_cloth = (parse_array == 0) + \ 364 | (parse_array == 5) + \ 365 | (parse_array == 6) + \ 366 | (parse_array == 7) + \ 367 | (parse_array == 14) + \ 368 | (parse_array == 15) + \ 369 | (parse_array == 20) 370 | 371 | parse_mask = (parse_array_2d == 0) + \ 372 | (parse_array_2d == 5) + \ 373 | (parse_array_2d == 6) + \ 374 | (parse_array_2d == 7) + \ 375 | (parse_array_2d == 14) + \ 376 | (parse_array_2d == 15) + \ 377 | (parse_array_2d == 20) 378 | 379 | im_cloth = imCloth3dWarped * parse_cloth - (1 - parse_cloth) # [-1,1], fill 0 for other parts 380 | im_cloth_mask = imClothMask3dWarped * parse_mask - (1 - parse_mask) # [-1,1], fill 0 for other parts 381 | 382 | # make white bg 383 | im_cloth[im_cloth <= 0] = 255 384 | 385 | plt.subplot(1, 2, 1) 386 | plt.axis('off') 387 | plt.imshow(imCloth3dWarped[:, :, ::-1]) 388 | plt.title('raw') 389 | 390 | plt.subplot(1, 2, 2) 391 | plt.axis('off') 392 | plt.imshow(im_cloth[:, :, ::-1]) 393 | plt.title('final') 394 | 395 | plt.show() 396 | _ = raw_input("Save?") 397 | 398 | # save result 399 | if oclothfull_path is not None: 400 | cv2.imwrite(oclothfull_path, imCloth3dWarped) 401 | if ocloth_path is 
not None: 402 | cv2.imwrite(ocloth_path, im_cloth) 403 | if omask_path is not None: 404 | cv2.imwrite(omask_path, im_cloth_mask) 405 | 406 | 407 | def cloth3dxfer_single(smpl_model, src_param_path, tgt_param_path, cloth_path, clothmask_path, human_path, human_segm_path, ocloth_path, oclothfull_path, omask_path): 408 | 409 | # 1. reconstruct 3D cloth from template 410 | params_src, body, diff_cloth_body, texture, texture_v2d, face4cloth = smpl3dclothrec_v4.cloth3drec_single( 411 | smpl_model, src_param_path, cloth_path, clothmask_path, human_path, human_segm_path) 412 | 413 | # 2. express the displacement in vertices specific coordinate. 414 | diff_cloth_body_local = compute_displacement_at_vertex( 415 | smpl_model, body, diff_cloth_body) 416 | 417 | # 3. transfer to a new human parameters 418 | # 3.1 load the SMPL params 419 | with open(tgt_param_path, 'rb') as f: 420 | if f is None: 421 | print("cannot open", tgt_param_path), exit() 422 | params_tgt = pickle.load(f) 423 | 424 | # 3.2 construct the model 425 | cam_tgt = ProjectPoints(f=params_tgt['cam_f'], rt=params_tgt['cam_rt'], 426 | t=params_tgt['cam_t'], k=params_tgt['cam_k'], c=params_tgt['cam_c']) 427 | betas_tgt = params_tgt['betas'] 428 | n_betas_tgt = betas_tgt.shape[0] # 10 429 | pose_tgt = params_tgt['pose'] # angles, 27x3 numpy 430 | 431 | # 3.3 build a new body 432 | body_tgt_sv = smpl3dclothrec_v4.build_smplbody_surface( 433 | smpl_model, pose_tgt, betas_tgt, cam_tgt) 434 | 435 | # 3.4 build the corresponding clothed 436 | clothed3d = transfer_body2clothed( 437 | smpl_model, body_tgt_sv.r, diff_cloth_body_local) 438 | cam_tgt.v = clothed3d 439 | #cam_tgt.v = body_tgt_sv.r 440 | 441 | # 4.5 check by viewing 442 | imHuman = cv2.imread(human_path) 443 | 444 | # smpl_model.f) # cam_tgt has all the information 445 | imCloth3dWarped, imClothMask3dWarped = render_cloth( 446 | cam_tgt, texture, texture_v2d, face4cloth, imHuman) 447 | _ = raw_input("next?") 448 | 449 | # save combined warped rendered texture 450 | save_rendered_textures(imCloth3dWarped, imClothMask3dWarped, human_segm_path, ocloth_path, oclothfull_path, omask_path) 451 | 452 | # smpl3dclothrec.show_3d_model(cam_tgt, texture, texture_v2d, face4cloth) # smpl_model.f) # cam_tgt has all the information 453 | _ = raw_input("next sample?") 454 | plt.subplot(1, 1, 1) # restore the plot section 455 | # plt.close() # not to draw in subplot() 456 | 457 | 458 | if __name__ == '__main__': 459 | 460 | # 1. command argument checking 461 | if len(sys.argv) != 3: 462 | print('usage for batch test: %s base_path dataset' % sys.argv[0]) 463 | #print('usage for test: %s base_path smpl_param clothimg maskimg'% sys.argv[0]), exit() 464 | exit() 465 | 466 | base_dir = abspath(sys.argv[1]) 467 | dataset = sys.argv[2] 468 | 469 | # 2. 
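# (Aside) The .pkl files exchanged between these scripts hold the camera as
# separate cam_f/rt/t/k/c entries plus pose and betas, so the chumpy camera
# must be rebuilt on load, as cloth3dxfer_single does above. The contract,
# sketched (the function name is ours; smpltemplate.py writes such a dict as
# its final step):

import cPickle as pickle
from opendr.camera import ProjectPoints

def load_smpl_params_sketch(path):
    with open(path, 'rb') as f:
        p = pickle.load(f)
    cam = ProjectPoints(f=p['cam_f'], rt=p['cam_rt'], t=p['cam_t'],
                        k=p['cam_k'], c=p['cam_c'])
    return cam, p['pose'], p['betas']

# Resuming step 2: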
input and output directory check and setting
470 |     # 2.1 base dir
471 |     base_dir = abspath(sys.argv[1])
472 |     if not exists(base_dir):
473 |         print('No such a directory for base', base_dir), exit()
474 | 
475 |     # 2.2.1 human image dir
476 |     human_dir = base_dir + "/images/" + dataset
477 |     if not exists(human_dir):
478 |         print('No such a directory for human images',
479 |               dataset, human_dir), exit()
480 | 
481 |     data_dir = base_dir + "/results/" + dataset
482 |     # print(data_dir)
483 |     # 2.2.2 target human info
484 |     human_smpl_param_dir = data_dir + "/smpl"
485 |     if not exists(human_smpl_param_dir):
486 |         print('No such a directory for smpl param', human_smpl_param_dir), exit()
487 |     # 2.2.3 source cloth
488 |     cloth_dir = data_dir + "/c2dw"
489 |     if not exists(cloth_dir):
490 |         print('No such a directory for cloth images', cloth_dir), exit()
491 |     # 2.2.4 source cloth mask
492 |     cloth_mask_dir = data_dir + "/c2dwmask"
493 |     if not exists(cloth_mask_dir):
494 |         print('No such a directory for cloth mask', cloth_mask_dir), exit()
495 | 
496 |     # 2.2.5 human segmentation dir
497 |     human_segm_dir = data_dir + "/segmentation"
498 |     if not exists(human_segm_dir):
499 |         print('No such a directory for human segmentation',
500 |               human_segm_dir), exit()
501 | 
502 |     # 2.2.6 test pair file
503 |     testpair_filepath = data_dir + "/" + dataset + "_test_pairs.txt"
504 |     if not exists(testpair_filepath):
505 |         print('No test pair file: ', testpair_filepath), exit()
506 | 
507 |     # 2. Loading SMPL models (independent from dataset)
508 |     use_neutral = False
509 |     # Assumes 'models' is in the 'code/' directory where this file is.
510 |     MODEL_DIR = join(abspath(dirname(__file__)), 'models')
511 |     MODEL_NEUTRAL_PATH = join(
512 |         MODEL_DIR, 'basicModel_neutral_lbs_10_207_0_v1.0.0.pkl')
513 |     MODEL_FEMALE_PATH = join(
514 |         MODEL_DIR, 'basicModel_f_lbs_10_207_0_v1.0.0.pkl')
515 |     MODEL_MALE_PATH = join(MODEL_DIR,
516 |                            'basicmodel_m_lbs_10_207_0_v1.0.0.pkl')
517 | 
518 |     if not use_neutral:
519 |         # File storing information about gender
520 |         # with open(join(data_dir, dataset + '_gender.csv')) as f:
521 |         #     genders = f.readlines()
522 |         model_female = load_model(MODEL_FEMALE_PATH)
523 |         model_male = load_model(MODEL_MALE_PATH)
524 |     else:
525 |         gender = 'neutral'
526 |         smpl_model = load_model(MODEL_NEUTRAL_PATH)
527 | 
528 |     #_examine_smpl(model_female), exit()
529 | 
530 |     '''
531 |     # Load joints
532 |     estj2d = np.load(join(data_dir, 'est_joints.npz'))['est_joints']
533 |     #print('est_shape:', est.shape)
534 |     joints = estj2d[:2, :, idx].T
535 |     '''
536 | 
537 |     # 2.3.
Output Directory 538 | 539 | oclothfull_dir = data_dir + "/c3dwfull" 540 | if not exists(oclothfull_dir): 541 | makedirs(oclothfull_dir) 542 | ocloth_dir = data_dir + "/c3dw" 543 | if not exists(ocloth_dir): 544 | makedirs(ocloth_dir) 545 | ocloth_mask_dir = data_dir + "/c3dwmask" 546 | if not exists(ocloth_mask_dir): 547 | makedirs(ocloth_mask_dir) 548 | 549 | #smplmask_path = smplmask_dir + '/%06d_0.png'% idx 550 | #jointfile_path = smpljson_dir + '/%06d_0.json'% idx 551 | ''' 552 | smpl_model = model_female 553 | # 3D reconstruction and tranfer it to a define smpl model 554 | cloth3dxfer_single(smpl_model, smplparam_path, cloth_path, clothmask_path) 555 | 556 | ''' 557 | 558 | test_pair_lines = open(testpair_filepath).read().splitlines() 559 | test_pairs = [] 560 | 561 | for i in range(len(test_pair_lines)): 562 | # loading batch data 563 | pair = test_pair_lines[i].split() 564 | # print(pair) 565 | test_pairs.append([pair[0], pair[1]]) # 0: human 1: cloth 566 | 567 | #print(test_pairs), exit() 568 | 569 | # Might each cloth have different verison of template used 570 | template_smpl_param_path = './templateparam1.pkl' 571 | # We have to take into account this later 572 | template_jointfile_path = './templatejoints1.json' 573 | 574 | for i in range(len(test_pairs)): 575 | 576 | # for i in range(1, 2): 577 | # if not use_neutral: 578 | # gender = 'male' if int(genders[i]) == 0 else 'female' 579 | # if gender == 'female': 580 | smpl_model = model_female 581 | human_smpl_param_path = human_smpl_param_dir + \ 582 | '/' + test_pairs[i][0] + '.pkl' 583 | human_image_path = human_dir + '/' + test_pairs[i][0] + '.jpg' 584 | human_segm_path = human_segm_dir + '/' + test_pairs[i][0] + '.png' 585 | cloth_path = cloth_dir + '/' + test_pairs[i][1] + '.png' 586 | clothmask_path = cloth_mask_dir + '/' + test_pairs[i][1] + '.png' 587 | oclothfull_path = oclothfull_dir + '/' + \ 588 | test_pairs[i][1] + '_' + test_pairs[i][0] + '.png' # '.png' 589 | ocloth_path = ocloth_dir + '/' + \ 590 | test_pairs[i][1] + '_' + test_pairs[i][0] + '.jpg' # '.png' 591 | oclothmask_path = ocloth_mask_dir + '/' + \ 592 | test_pairs[i][1] + '_' + test_pairs[i][0] + '.jpg' # '.png' 593 | cloth3dxfer_single(smpl_model, template_smpl_param_path, human_smpl_param_path, 594 | cloth_path, clothmask_path, human_image_path, human_segm_path, ocloth_path, oclothfull_path, oclothmask_path) 595 | -------------------------------------------------------------------------------- /smpl3dclothxfer_v7.py: -------------------------------------------------------------------------------- 1 | """ 2 | cloth 3d model reconstruction (smpl3dclothrecon.py) and transfer to a human model 3 | ------------------------------------------------------------------------------------------ 4 | 5 | (c) copyright 2019 heejune@seoultech.ac.kr 6 | 7 | In : used in smpl3dclothrec.py 8 | - SMPL template model params file (pkl) 9 | - 2D matched cloth image file and mask (png) 10 | used for transfering 11 | - SMPL target model params file (pkl) 12 | helper file 13 | - list.npy for re-ordering the smpl human image number to viton image number 14 | 15 | Note: re-ordering needed for SMPLify code 16 | 17 | Out: 18 | 3D warped cloth and mask (png) 19 | 20 | Note: the Texture (2D warped cloth) and related 2D vertex and face information is obtained 21 | with original SMPL and camera parameters 22 | 23 | For in-advance tesrt purpose of part 3. 
we could move the pose and apply the displacement vector 24 | we apply the pose and shape params for target user but with same texture and vertices and faces defitnion 25 | 26 | 27 | template (source: pose and shape) target (pose and shape) 28 | -------------------------------------------------------------------------- 29 | SMPL- p smpltemplate.pkl results/viton/smpl/000000.pkl 30 | camera-p smpltemplate.pkl results/viton/smpl/000000.pkl 31 | 3D body-v smpl with template param smpl with target params 32 | 3D cloth-v displacement obtained use displacemt obtained at template 33 | texture results/viton/2dwarp/00000_1.png same 34 | texture-v cam projected onto the texture same as template (not new vertices) 35 | texture-f model.f same 36 | lightening only for cloth-related vertices same 37 | 38 | 39 | """ 40 | from __future__ import print_function 41 | import smpl3dclothrec_v7 42 | import graphutil as graphutil 43 | import boundary_matching 44 | import sys 45 | from os.path import join, exists, abspath, dirname 46 | from os import makedirs 47 | import logging 48 | import cPickle as pickle 49 | import time 50 | import cv2 51 | from PIL import Image 52 | import numpy as np 53 | import chumpy as ch 54 | from opendr.camera import ProjectPoints 55 | from smpl_webuser.serialization import load_model 56 | from smpl_webuser.verts import verts_decorated 57 | from render_model import render_model 58 | import inspect # for debugging 59 | import matplotlib.pyplot as plt 60 | from opendr.lighting import SphericalHarmonics 61 | from opendr.geometry import VertNormals, Rodrigues 62 | from opendr.renderer import TexturedRenderer 63 | import json 64 | from smpl_webuser.lbs import global_rigid_transformation 65 | 66 | _LOGGER = logging.getLogger(__name__) 67 | logging.basicConfig(level=logging.INFO) 68 | 69 | 70 | # To understand and verify the SMPL itself 71 | def _examine_smpl_template(model, detail=False): 72 | 73 | print(">> SMPL Template <<<<<<<<<<<<<<<<<<<<<<") 74 | print(type(model)) 75 | print(dir(model)) 76 | #print('kintree_table', model.kintree_table) 77 | print('pose:', model.pose) 78 | if detail: 79 | print('posedirs:', model.posedirs) 80 | print('betas:', model.betas) 81 | print('shape(model):', model.shape) 82 | if detail: 83 | print('shapedirs:', model.shapedirs) 84 | 85 | # print('bs_style:', model.bs_style) # f-m-n 86 | #print('f:', model.f) 87 | print('V template :', type(model.v_template)) 88 | print('V template :', model.v_template.shape) 89 | #print('weights:', model.weoptimize_on_jointsights) 90 | print('W type:', type(model.weights)) 91 | print('W shape:', model.weights.r.shape) 92 | if detail: 93 | print('W value:') 94 | print(model.weights.r) 95 | #parts = np.count_nonzero(model.weights.r, axis =1) 96 | parts = np.argmax(model.weights.r, axis=1) 97 | print(" :", parts.shape, parts[:6000]) 98 | 99 | #print('J:', model.J) 100 | #print('v_template:', model.v_template) 101 | #print('J_regressor:', model.J_regressor) 102 | 103 | # To understand and verify the paramters 104 | 105 | 106 | def _examine_smpl_params(params): 107 | 108 | print(type(params)) 109 | print(params.keys()) 110 | print('camera params') 111 | camera = params['cam'] 112 | print(" - type:", type(camera)) 113 | #print(" - members:", dir(camera)) 114 | print(" - cam.t:", camera.t.r) # none-zero, likely only nonzero z 115 | print(" - cam.rt:", camera.rt.r) # zero (fixed) 116 | # print(" - cam.camera_mtx:", camera.camera_mtx) # 117 | print(" - cam.k:", camera.k.r) # 118 | print(" - cam.c:", camera.c.r) # 119 | print(" - 
cam.f:", camera.f.r) # 120 | 121 | # print(params['f'].shape) # 2 122 | print('>> pose') 123 | pose = params['pose'] 124 | print("\t\ttype:", type(pose)) 125 | print('\t\tshape:', pose.shape) # 72 126 | 127 | # convert within 128 | #pose = pose % (2.0*np.pi) 129 | 130 | print('\t\tvalues (in degree):') 131 | print(pose*180.0/np.pi) # degree 132 | print('>> betas') 133 | betas = params['betas'] 134 | print('\ttype:', type(betas)) 135 | print('\tshape:', betas.shape) # 10 136 | # print('\tvalues:', params['betas']) # 10 137 | 138 | 139 | def construct_clothed3d_from_clothed2d_depth(body_sv, cam, clothed2d): 140 | 141 | # 1. get the dept for body vertex 142 | bodydepth = graphutil.build_depthmap2(body_sv.r, cam) 143 | 144 | check_depthmap = False 145 | if check_depthmap: 146 | # depth in reverse way 147 | plt.suptitle('depthmap') 148 | plt.subplot(1, 2, 1) 149 | plt.imshow(img[:, :, ::-1]) # , cmap='gray') 150 | plt.subplot(1, 2, 2) 151 | depthmap = graphutil.build_depthimage( 152 | body_sv.r, model.f, bodydepth, cam, height=h, width=w) 153 | #plt.imshow(depthmap, cmap='gray') 154 | plt.imshow(depthmap) 155 | plt.draw() 156 | plt.show() 157 | # plt.imshow(depthmap, cmap='gray_r') # the closer to camera, the brighter 158 | _ = raw_input('quit?') 159 | exit() 160 | 161 | # 2. modify the depth for clothed 162 | # @TODO 163 | 164 | # 3. unproject to 3D 165 | # uv space? pixels coordinated!! 166 | clothuvd = np.zeros(body_sv.r.shape) 167 | clothuvd[:, 0] = clothed2d[:, 0] 168 | clothuvd[:, 1] = clothed2d[:, 1] 169 | # @TODO for now simply use the same depth as body ^^; 170 | clothuvd[:, 2] = bodydepth 171 | cloth3d = cam.unproject_points(clothuvd) 172 | # sv.r = cloth3d # now the model is not body but cloth 173 | 174 | return cloth3d 175 | 176 | 177 | # calcuated the local coordinates at each vetex. 178 | # 179 | # z : normal to the vertex 180 | # x : the smallest indexed neighbor vertex based unit vector 181 | # y : the remianing axis in right handed way, ie. z x x => y 182 | def setup_vertex_local_coord(faces, vertices): 183 | 184 | # 1.1 normal vectors (1st axis) at each vertex 185 | _, axis_z = graphutil.calc_normal_vectors(vertices, faces) 186 | # 1.2 get 2nd axis 187 | axis_x = graphutil.find2ndaxis(faces, axis_z, vertices) 188 | # 1.3 get 3rd axis 189 | # matuir contribution. np.cross support row-vectorization 190 | axis_y = np.cross(axis_z[:, :], axis_x[:, :]) 191 | 192 | return axis_x, axis_y, axis_z 193 | 194 | # 195 | # reporesent the displacement (now in global coord) into local coordinates 196 | # 197 | # model: smpl mesh structure 198 | # v0 : reference vertex surface, ie. the body 199 | # v*****array: vertext index array for interest 200 | # d : displacement, ie. v = v0 + d 201 | # 202 | 203 | 204 | def compute_displacement_at_vertex(model, v0, d_global): 205 | 206 | debug = False 207 | 208 | # 1.setup local coordinate system to each vertex 209 | axis_x, axis_y, axis_z = setup_vertex_local_coord(model.f, v0) 210 | 211 | # 2. 
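# (Aside) setup_vertex_local_coord and the displacement helpers in this file
# are essentially identical to the _v4 copies. Factoring them into a shared
# module would keep the versions from drifting, e.g. (module name is ours):
#
#     # clothxfer_common.py -- shared between smpl3dclothxfer_v4/_v7
#     from smpl3dclothxfer_v4 import (setup_vertex_local_coord,
#                                     compute_displacement_at_vertex,
#                                     transfer_body2clothed)
#
# leaving each versioned script to override only what actually changed (the
# reconstruction module and the mask generation in save_rendered_textures).
# Resuming step 2: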
express displacement in 3 axises 212 | #dlocal = np.concatenate(np.dot(d, axis_x), np.dot(d, axis_y), np.dot(d, axis_z)) 213 | xl = np.sum(d_global*axis_x, axis=1) 214 | yl = np.sum(d_global*axis_y, axis=1) 215 | zl = np.sum(d_global*axis_z, axis=1) 216 | d_local = np.stack((xl, yl, zl), axis=-1) 217 | print('dlocal shape:', xl.shape, yl.shape, zl.shape, d_local.shape) 218 | 219 | if debug: # verifying d_global = xs * axis_x + ys* axis_y + z*axis_z 220 | # get global coorindate vector 221 | xg = xl[:, None]*axis_x 222 | yg = yl[:, None]*axis_y 223 | zg = zl[:, None]*axis_z 224 | dg = xg + yg + zg 225 | 226 | # check the error 227 | err = np.absolute(dg - d_global) 228 | print('d, e x:', np.amax(d_global[:, 0]), np.amax( 229 | err[:, 0]), np.mean(d_global[:, 0]), np.mean(err[:, 0])) 230 | print('d, e y:', np.amax(d_global[:, 1]), np.amax( 231 | err[:, 1]), np.mean(d_global[:, 1]), np.mean(err[:, 1])) 232 | print('d, e z:', np.amax(d_global[:, 2]), np.amax( 233 | err[:, 2]), np.mean(d_global[:, 2]), np.mean(err[:, 2])) 234 | ''' 235 | print('d 0:', np.amax(d_global[:,0]), np.amin(d_global[:,0])) 236 | print('error0:', np.amax(err[:,0]), np.amin(err[:,0])) 237 | print('d 1:', np.amax(d_global[:,1]), np.amin(d_global[:,1])) 238 | print('error1:', np.amax(err[:,1]), np.amin(err[:,1])) 239 | print('d 2:', np.amax(d_global[:,2]), np.amin(d_global[:,2])) 240 | print('error2:', np.amax(err[:,2]), np.amin(err[:,2])) 241 | ''' 242 | 243 | return d_local 244 | 245 | 246 | # 247 | # @TODO: Do this !! the most key part combining with displacement generatrion 248 | # 249 | # model : the body surface structure 250 | # body : body surface vertices 251 | # vi4cloth: vertex index for the cloth surface 252 | # d : displacement vector in local coordinate 253 | # 254 | # def transfer_body2clothed(cam_tgt, betas_tgt, n_betas_tgt, pose_tgt, v4cloth, d): 255 | def transfer_body2clothed(model, body, d_local): 256 | 257 | # 1.setup local coordinate system to each vertex 258 | axis_x, axis_y, axis_z = setup_vertex_local_coord(model.f, body) 259 | 260 | # 2. express local to global 261 | # 2.1 select vectices under interest 262 | #axis_x, axis_y, axis_z = axis_x[vi4cloth], axis_y[vi4cloth], axis_z[vi4cloth] 263 | # 2.2 displacement in global coordinate 264 | xg = (d_local[:, 0])[:, None]*axis_x 265 | yg = (d_local[:, 1])[:, None]*axis_y 266 | zg = (d_local[:, 2])[:, None]*axis_z 267 | dg = xg + yg + zg 268 | 269 | # 3. adding them to the base/body vertices 270 | clothed = body + dg 271 | 272 | return clothed 273 | 274 | 275 | # display 3d model 276 | def render_cloth(cam, _texture, texture_v2d, faces, imHuman): 277 | 278 | #h, w = imTexture.shape[:2] 279 | h_ext, w = _texture.shape[:2] # full body 280 | h, _ = imHuman.shape[:2] # half body 281 | 282 | texture = _texture[:, :, :] 283 | 284 | # 1. texture rendering 285 | dist = 20.0 286 | cloth_renderer = smpl3dclothrec_v7.build_texture_renderer(cam, cam.v, faces, texture_v2d, faces, 287 | texture[::-1, :, :], w, h_ext, 1.0, near=0.5, far=20 + dist) 288 | imCloth = (cloth_renderer.r * 255.).astype('uint8') 289 | imCloth = imCloth[:h, :, ::-1] 290 | 291 | # 2. mask generation 292 | im3CBlack = np.zeros([h, w, 3], dtype=np.uint8) 293 | imModel = (render_model( 294 | cam.v, faces, w, h, cam, far=20 + dist, img=im3CBlack) * 255.).astype('uint8') 295 | imMask = cv2.cvtColor(imModel, cv2.COLOR_BGR2GRAY) # gray silhouette 296 | imMask[imMask > 0] = 255 # binary (0, 1) 297 | 298 | # 3. 
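# (Aside) The background_image overlay below re-renders the whole mesh; a
# cheap approximation of the same check is plain numpy compositing of the
# cloth render from step 1 over the human photo using the mask from step 2
# (overlay_sketch is ours; lighting differences aside, it shows the same
# alignment):

import numpy as np

def overlay_sketch(imCloth, imMask, imHuman):
    """imCloth/imHuman: HxWx3 uint8, same color order; imMask: HxW, 0/255."""
    return np.where((imMask > 0)[:, :, None], imCloth, imHuman)

# Resuming step 3: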
# display the 3D model
def render_cloth(cam, _texture, texture_v2d, faces, imHuman):

    #h, w = imTexture.shape[:2]
    h_ext, w = _texture.shape[:2]  # full body
    h, _ = imHuman.shape[:2]       # half body

    texture = _texture[:, :, :]

    # 1. texture rendering
    dist = 20.0
    cloth_renderer = smpl3dclothrec_v7.build_texture_renderer(cam, cam.v, faces, texture_v2d, faces,
                                                              texture[::-1, :, :], w, h_ext, 1.0, near=0.5, far=20 + dist)
    imCloth = (cloth_renderer.r * 255.).astype('uint8')
    imCloth = imCloth[:h, :, ::-1]

    # 2. mask generation
    im3CBlack = np.zeros([h, w, 3], dtype=np.uint8)
    imModel = (render_model(
        cam.v, faces, w, h, cam, far=20 + dist, img=im3CBlack) * 255.).astype('uint8')
    imMask = cv2.cvtColor(imModel, cv2.COLOR_BGR2GRAY)  # gray silhouette
    imMask[imMask > 0] = 255  # binary (0, 255)

    # 3. overlay on the human image to check the result
    imBG = imHuman[:, :, ::-1].astype('float32')/255.0
    overlay_renderer = smpl3dclothrec_v7.build_texture_renderer(cam, cam.v, faces, texture_v2d, faces,
                                                                texture[::-1, :, :], w, h, 1.0, near=0.5, far=20 + dist, background_image=imBG)
    imOverlayed = overlay_renderer.r.copy()

    # plt.figure()
    plt.subplot(1, 4, 1)
    plt.axis('off')
    plt.imshow(texture[:h, :, ::-1])
    plt.title('texture')

    plt.subplot(1, 4, 2)
    plt.imshow(imCloth[:, :, ::-1])
    plt.axis('off')
    plt.title('transferred')

    plt.subplot(1, 4, 3)
    # @TODO use a color render for the mask, or an all-white texture for the cloth area
    plt.imshow(imMask)
    plt.axis('off')
    plt.title('mask')

    plt.subplot(1, 4, 4)
    plt.imshow(imOverlayed[:, :, :])  # overlay with the human image
    plt.axis('off')
    plt.title('target human')
    plt.show()

    return imCloth, imMask


def save_rendered_textures(imCloth3dWarped, imClothMask3dWarped, human_segm_path, ocloth_path, oclothfull_path, omask_path):

    """
    LIP labels

    [(0, 0, 0),       # 0=Background
     (128, 0, 0),     # 1=Hat
     (255, 0, 0),     # 2=Hair
     (0, 85, 0),      # 3=Glove
     (170, 0, 51),    # 4=Sunglasses
     (255, 85, 0),    # 5=UpperClothes
     (0, 0, 85),      # 6=Dress
     (0, 119, 221),   # 7=Coat
     (85, 85, 0),     # 8=Socks
     (0, 85, 85),     # 9=Pants
     (85, 51, 0),     # 10=Jumpsuits
     (52, 86, 128),   # 11=Scarf
     (0, 128, 0),     # 12=Skirt
     (0, 0, 255),     # 13=Face
     (51, 170, 221),  # 14=LeftArm
     (0, 255, 255),   # 15=RightArm
     (85, 255, 170),  # 16=LeftLeg
     (170, 255, 85),  # 17=RightLeg
     (255, 255, 0),   # 18=LeftShoe
     (255, 170, 0),   # 19=RightShoe
     (189, 170, 160)  # 20=Skin/Neck
    ]
    """

    im_parse = Image.open(human_segm_path)
    # im_parse_2d = Image.open(human_segm_path).convert('L')
    parse_array = np.array(im_parse)
    # parse_array_2d = np.array(im_parse_2d)

    parse_cloth = (parse_array == 0) + \
                  (parse_array == 5) + \
                  (parse_array == 6) + \
                  (parse_array == 7) + \
                  (parse_array == 14) + \
                  (parse_array == 15) + \
                  (parse_array == 20)

    """parse_mask = (parse_array_2d == 0) + \
                    (parse_array_2d == 5) + \
                    (parse_array_2d == 6) + \
                    (parse_array_2d == 7) + \
                    (parse_array_2d == 14) + \
                    (parse_array_2d == 15) + \
                    (parse_array_2d == 20)"""

    im_cloth = imCloth3dWarped * parse_cloth - (1 - parse_cloth)  # fill -1 for the other parts
    # im_cloth_mask = imClothMask3dWarped * parse_mask - (1 - parse_mask)  # fill -1 for the other parts
    im_cloth_mask = np.zeros_like(im_cloth)
    im_cloth_mask[im_cloth > 0] = 255

    # make a white background
    im_cloth[im_cloth <= 0] = 255

    plt.subplot(1, 3, 1)
    plt.axis('off')
    plt.imshow(imCloth3dWarped[:, :, ::-1])
    plt.title('raw')

    plt.subplot(1, 3, 2)
    plt.axis('off')
    plt.imshow(im_cloth[:, :, ::-1])
    plt.title('final')

    plt.subplot(1, 3, 3)
    plt.axis('off')
    plt.imshow(im_cloth_mask[:, :, ::-1])
    plt.title('mask')

    plt.show()
    _ = raw_input("Save?")

    # save the results
    if oclothfull_path is not None:
        cv2.imwrite(oclothfull_path, imCloth3dWarped)
    if ocloth_path is not None:
        cv2.imwrite(ocloth_path, im_cloth)
    if omask_path is not None:
        cv2.imwrite(omask_path, im_cloth_mask)
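
# Note: the chained '==' sums above implement a label-set membership test on
# the LIP labels. With integer labels the same selection can be written with
# np.isin; a minimal sketch (the label tuple mirrors the list used above):
CLOTH_LABELS = (0, 5, 6, 7, 14, 15, 20)  # bg, upper-clothes, dress, coat, arms, skin/neck

def make_cloth_mask(parse_array):
    return np.isin(parse_array, CLOTH_LABELS)
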

def cloth3dxfer_single(smpl_model, src_param_path, tgt_param_path, cloth_path, clothmask_path, human_path, human_segm_path, ocloth_path, oclothfull_path, omask_path):

    # 1. reconstruct the 3D cloth from the template
    params_src, body, diff_cloth_body, texture, texture_v2d, face4cloth = smpl3dclothrec_v7.cloth3drec_single(
        smpl_model, src_param_path, cloth_path, clothmask_path, human_path, human_segm_path)

    # 2. express the displacement in each vertex's local coordinates
    diff_cloth_body_local = compute_displacement_at_vertex(
        smpl_model, body, diff_cloth_body)

    # 3. transfer to the new human's parameters
    # 3.1 load the SMPL params
    with open(tgt_param_path, 'rb') as f:
        if f is None:
            print("cannot open", tgt_param_path), exit()
        params_tgt = pickle.load(f)

    # 3.2 construct the camera model
    cam_tgt = ProjectPoints(f=params_tgt['cam_f'], rt=params_tgt['cam_rt'],
                            t=params_tgt['cam_t'], k=params_tgt['cam_k'], c=params_tgt['cam_c'])
    betas_tgt = params_tgt['betas']
    n_betas_tgt = betas_tgt.shape[0]  # 10
    pose_tgt = params_tgt['pose']     # angles, 24x3 numpy

    # 3.3 build the new body
    body_tgt_sv = smpl3dclothrec_v7.build_smplbody_surface(
        smpl_model, pose_tgt, betas_tgt, cam_tgt)

    # 3.4 build the corresponding clothed surface
    clothed3d = transfer_body2clothed(
        smpl_model, body_tgt_sv.r, diff_cloth_body_local)
    cam_tgt.v = clothed3d
    #cam_tgt.v = body_tgt_sv.r

    # 3.5 check by viewing
    imHuman = cv2.imread(human_path)

    # cam_tgt has all the information
    # imCloth3dWarped, imClothMask3dWarped = render_cloth(cam_tgt, texture, texture_v2d, smpl_model.f, imHuman)  # all faces
    imCloth3dWarped, imClothMask3dWarped = render_cloth(cam_tgt, texture, texture_v2d, face4cloth, imHuman)  # cloth faces only
    # _, imClothMask3dWarped = render_cloth(cam_tgt, texture, texture_v2d, face4cloth, imHuman)  # mask from the cloth vertices only
    _ = raw_input("next?")

    # save the combined, warped, rendered texture
    save_rendered_textures(imCloth3dWarped, imClothMask3dWarped, human_segm_path, ocloth_path, oclothfull_path, omask_path)

    # smpl3dclothrec.show_3d_model(cam_tgt, texture, texture_v2d, face4cloth)  # smpl_model.f)  # cam_tgt has all the information
    _ = raw_input("next sample?")
    plt.subplot(1, 1, 1)  # restore the plot section
    # plt.close()  # not to draw in subplot()

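
# Example invocation with hypothetical paths, shown only to document the
# argument order (the real driver below builds these paths from the
# test-pairs file):
#
# cloth3dxfer_single(smpl_model,
#                    './templateparam1.pkl',                       # source: template SMPL params
#                    './results/viton/smpl/000001_0.pkl',          # target human SMPL params
#                    './results/viton/c2dw/000001_1.png',          # 2D-warped cloth
#                    './results/viton/c2dwmask/000001_1.png',      # its mask
#                    './images/viton/000001_0.jpg',                # target human image
#                    './results/viton/segmentation/000001_0.png',  # human segmentation
#                    './results/viton/c3dw/000001_1_000001_0.jpg',
#                    './results/viton/c3dwfull/000001_1_000001_0.png',
#                    './results/viton/c3dwmask/000001_1_000001_0.jpg')
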
if __name__ == '__main__':

    # 1. command argument checking
    if len(sys.argv) != 3:
        print('usage for batch test: %s base_path dataset' % sys.argv[0])
        #print('usage for test: %s base_path smpl_param clothimg maskimg' % sys.argv[0]), exit()
        exit()

    # 2. input and output directory check and setting
    # 2.1 base dir
    base_dir = abspath(sys.argv[1])
    dataset = sys.argv[2]
    if not exists(base_dir):
        print('No such a directory for base', base_dir), exit()

    # 2.2.1 human image dir
    human_dir = base_dir + "/images/" + dataset
    if not exists(human_dir):
        print('No such a directory for human images',
              dataset, human_dir), exit()

    data_dir = base_dir + "/results/" + dataset
    # print(data_dir)
    # 2.2.2 target human info
    human_smpl_param_dir = data_dir + "/smpl"
    if not exists(human_smpl_param_dir):
        print('No such a directory for smpl param', human_smpl_param_dir), exit()
    # 2.2.3 source cloth
    cloth_dir = data_dir + "/c2dw"
    if not exists(cloth_dir):
        print('No such a directory for cloth images', cloth_dir), exit()
    # 2.2.4 source cloth mask
    cloth_mask_dir = data_dir + "/c2dwmask"
    if not exists(cloth_mask_dir):
        print('No such a directory for cloth mask', cloth_mask_dir), exit()

    # 2.2.5 human segmentation dir
    human_segm_dir = data_dir + "/segmentation"
    if not exists(human_segm_dir):
        print('No such a directory for human segmentation',
              human_segm_dir), exit()

    # 2.2.6 test pair file
    testpair_filepath = data_dir + "/" + dataset + "_test_pairs.txt"
    if not exists(testpair_filepath):
        print('No test pair file: ', testpair_filepath), exit()

    # 3. load the SMPL models (independent from the dataset)
    use_neutral = False
    # Assumes 'models' is in the 'code/' directory where this file is.
    MODEL_DIR = join(abspath(dirname(__file__)), 'models')
    MODEL_NEUTRAL_PATH = join(
        MODEL_DIR, 'basicModel_neutral_lbs_10_207_0_v1.0.0.pkl')
    MODEL_FEMALE_PATH = join(
        MODEL_DIR, 'basicModel_f_lbs_10_207_0_v1.0.0.pkl')
    MODEL_MALE_PATH = join(MODEL_DIR,
                           'basicmodel_m_lbs_10_207_0_v1.0.0.pkl')

    if not use_neutral:
        # File storing information about gender
        # with open(join(data_dir, dataset + '_gender.csv')) as f:
        #     genders = f.readlines()
        model_female = load_model(MODEL_FEMALE_PATH)
        model_male = load_model(MODEL_MALE_PATH)
    else:
        gender = 'neutral'
        smpl_model = load_model(MODEL_NEUTRAL_PATH)

    #_examine_smpl(model_female), exit()

    '''
    # Load joints
    estj2d = np.load(join(data_dir, 'est_joints.npz'))['est_joints']
    #print('est_shape:', est.shape)
    joints = estj2d[:2, :, idx].T
    '''

    # 4. output directories

    oclothfull_dir = data_dir + "/c3dwfull"
    if not exists(oclothfull_dir):
        makedirs(oclothfull_dir)
    ocloth_dir = data_dir + "/c3dw"
    if not exists(ocloth_dir):
        makedirs(ocloth_dir)
    ocloth_mask_dir = data_dir + "/c3dwmask"
    if not exists(ocloth_mask_dir):
        makedirs(ocloth_mask_dir)

    #smplmask_path = smplmask_dir + '/%06d_0.png' % idx
    #jointfile_path = smpljson_dir + '/%06d_0.json' % idx
    '''
    smpl_model = model_female
    # 3D reconstruction, then transfer onto a given SMPL model
    cloth3dxfer_single(smpl_model, smplparam_path, cloth_path, clothmask_path)
    '''

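    # The test-pairs file is plain text with one "human cloth" name pair per
    # line (no extensions), e.g. (illustrative names, as produced by
    # make_pairs.py):
    #
    #   000001_0 000001_1
    #   000002_0 000002_1
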
    test_pair_lines = open(testpair_filepath).read().splitlines()
    test_pairs = []

    for i in range(len(test_pair_lines)):
        # loading batch data
        pair = test_pair_lines[i].split()
        # print(pair)
        test_pairs.append([pair[0], pair[1]])  # 0: human, 1: cloth

    #print(test_pairs), exit()

    # Each cloth might use a different version of the template;
    template_smpl_param_path = './templateparam1.pkl'
    # we should take this into account later.
    template_jointfile_path = './templatejoints1.json'

    for i in range(len(test_pairs)):

        # for i in range(1, 2):
        # if not use_neutral:
        #     gender = 'male' if int(genders[i]) == 0 else 'female'
        # if gender == 'female':
        smpl_model = model_female
        human_smpl_param_path = human_smpl_param_dir + '/' + test_pairs[i][0] + '.pkl'
        human_image_path = human_dir + '/' + test_pairs[i][0] + '.jpg'
        human_segm_path = human_segm_dir + '/' + test_pairs[i][0] + '.png'
        cloth_path = cloth_dir + '/' + test_pairs[i][1] + '.png'
        clothmask_path = cloth_mask_dir + '/' + test_pairs[i][1] + '.png'
        oclothfull_path = oclothfull_dir + '/' + test_pairs[i][1] + '_' + test_pairs[i][0] + '.png'
        ocloth_path = ocloth_dir + '/' + test_pairs[i][1] + '_' + test_pairs[i][0] + '.jpg'
        oclothmask_path = ocloth_mask_dir + '/' + test_pairs[i][1] + '_' + test_pairs[i][0] + '.jpg'
        cloth3dxfer_single(smpl_model, template_smpl_param_path, human_smpl_param_path,
                           cloth_path, clothmask_path, human_image_path, human_segm_path, ocloth_path, oclothfull_path, oclothmask_path)
--------------------------------------------------------------------------------
/smpl2mask.py:
--------------------------------------------------------------------------------
"""
SMPL model to body mask
-----------------------------

(c) copyright 2019 heejune@seoultech.ac.kr

Prerequisite: SMPL model
In : SMPL parameters (cam, shape, pose) for an image
Out: body mask (binary or labeled),
     updated joint json file,
     [optionally the validation images]

1.1 pre-calculated fit data (camera, pose, shape)

"""
from __future__ import print_function
import sys
from os.path import join, exists, abspath, dirname
from os import makedirs
import logging
import cPickle as pickle
import time
import cv2
import numpy as np
import chumpy as ch
from opendr.camera import ProjectPoints
from smpl_webuser.serialization import load_model
from smpl_webuser.verts import verts_decorated
from render_model import render_model
import inspect  # for debugging
import matplotlib.pyplot as plt
from opendr.lighting import SphericalHarmonics
from opendr.geometry import VertNormals, Rodrigues
from opendr.renderer import TexturedRenderer


import json
from smpl_webuser.lbs import global_rigid_transformation

_LOGGER = logging.getLogger(__name__)

logging.basicConfig(level=logging.INFO)


import boundary_matching
import graphutil as graphutil

cloth_label_dict = {"background": 0,
                    "hat": 1,
                    "hair": 2,
                    "sunglass": 3,
                    "upper-clothes": 4,
                    "skirt": 5,
                    "pants": 6,
                    "dress": 7,
                    "belt": 8,
                    "left-shoe": 9,
                    "right-shoe": 10,
                    "face": 11,
                    "left-leg": 12,
                    "right-leg": 13,
                    "left-arm": 14,
                    "right-arm": 15,
                    "bag": 16,
                    "scarf": 17,
                    "skin": 18  # added for the skin region from face
                    }


# To understand and verify the SMPL model itself
def _examine_smpl(model):

    print(">>>> SMPL <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
    print(type(model))
    print(dir(model))
    #print('kintree_table', model.kintree_table)
    #print('J:', model.J)
    #print('v_template:', model.v_template)
    #print('J_regressor:', model.J_regressor)
    #print('shapedirs:', model.shapedirs)
    #print('weights:', model.weights)
    #print('bs_style:', model.bs_style)
    #print('f:', model.f)
    print('V template :', type(model.v_template))
    print('V template :', model.v_template.shape)
    print('W type:', type(model.weights))
    print('W value:', type(model.weights.r))
    print('W shape:', model.weights.r.shape)
    #parts = np.count_nonzero(model.weights.r, axis=1)
    parts = np.argmax(model.weights.r, axis=1)
    print(" :", parts.shape, parts[:6000])


# To understand and verify the parameters
def _examine_smpl_params(params):

    print(type(params))
    print(params.keys())
    print('camera params')
    camera = params['cam']
    print(" - type:", type(camera))
    #print(" - members:", dir(camera))
    print(" - cam.t:", camera.t.r)    # non-zero, likely only nonzero z
    print(" - cam.rt:", camera.rt.r)  # zero (fixed)
    # print(" - cam.camera_mtx:", camera.camera_mtx)
    print(" - cam.k:", camera.k.r)
    print(" - cam.c:", camera.c.r)
    print(" - cam.f:", camera.f.r)

    # print(params['f'].shape)  # 2
    print('>> pose')
    pose = params['pose']
    print("\t\ttype:", type(pose))
    print('\t\tshape:', pose.shape)  # 72

    # convert within
    #pose = pose % (2.0*np.pi)

    print('\t\tvalues:', pose*180.0/np.pi)  # degrees
    print('>> betas')
    betas = params['betas']
    print('\ttype:', type(betas))
    print('\tshape:', betas.shape)  # 10
    # print('\tvalues:', params['betas'])  # 10


# Convert
# 1) a uint8 image into a float texture image
# 2) normalize the vertices
# and optionally,
# 3) color the backside if face_visibility is not None
# ***note***: the texture coordinates are upside down, and x-y normalized
def prepare_texture(pjt_v, pjt_f, img, face_visibility=None):

    # texture = overlaid images of 2d and projected
    pjt_texture = img.astype(float)/255.0  # uint8 to float
    #pjt_texture[:, :, :] = pjt_texture[:, :, :]/255.0
    #print('dtype of img:', img.dtype)
    #print('dtype of pjt_texture:', pjt_texture.dtype)
    th, tw = pjt_texture.shape[0:2]
    '''
    pjt_texture[:,:,:] = (1.0, .0, .0)
    #pjt_texture[:,:int(tw/2),:] = (1.0, 0., 0.)  # B, G, R
    pjt_texture[:,int(tw/4):int(3*tw/4),:] = (1.0, 1.0, 1.0)  # B, G, R
    '''
    #print("th, tw:", th, tw)
    # vt
    #pjt_v = cam.r.copy()
    pjt_v[:, 0] = pjt_v[:, 0]/tw  # uv normalize
    pjt_v[:, 1] = pjt_v[:, 1]/th  # uv normalize
    # ft
    #pjt_ft = model.f.copy()
    #print("ft:", pjt_ft.shape)

    # 5. project the body model with the texture renderer
    # 3. reprojection
    #print(type(cam.v))
    #print(cam.v.r.shape)

    #print("textured:", type(pjt_texture), 'dtype:', pjt_texture.dtype, "shape:", pjt_texture.shape)
    #print('max:', np.amax(pjt_texture[:, :, 0]), np.amax(pjt_texture[:, :, 1]), np.amax(pjt_texture[:, :, 2]))
    #print('mean:', np.mean(pjt_texture[:, :, 0]), np.mean(pjt_texture[:, :, 1]), np.mean(pjt_texture[:, :, 2]))

    # apply the visibility map for texturing
    # NOTE: this branch is unfinished; it references pjt_vt, pjt_ft, f_normal and
    # cam, which are not defined here, and it is never taken in this file
    # (face_visibility is always None).
    if face_visibility is not None:
        v_end = cam.r.shape[0]
        pjt_vt = np.append(
            pjt_vt, [[0./tw, 0./th], [1.0/tw, 0./th], [0./tw, 1.0/th]], axis=0)
        pjt_texture[th-50:th, 0:50] = (1.0, 1.0, 1.0)
        pjt_texture[0:50, 0:50] = (1.0, 1.0, 1.0)
        for i in range(f_normal.shape[0]):
            if face_visibility[i] < 0:
                pjt_ft[i] = (v_end, v_end+1, v_end+2)  # (0, 1, 2)

    return pjt_texture


#
# texture processing with alpha blending
def prepare_texture_with_alpha(pjt_v, pjt_f, img, mask, target_label):

    alpha = np.zeros(mask.shape)
    alpha[mask == target_label] = 1.0  # 1.0 for fully opaque, 0.0 for transparent

    rgb = img.astype(float)/255.0  # uint8 to float
    rgba = cv2.merge((rgb, alpha))
    print('shapes:', img.shape, rgb.shape, alpha.shape, rgba.shape)

    th, tw = rgba.shape[0:2]
    pjt_v[:, 0] = pjt_v[:, 0]/tw  # uv normalize
    pjt_v[:, 1] = pjt_v[:, 1]/th  # uv normalize

    return rgba  # [:,:,:3]


# create V, A, U, f: geometry, brightness, camera, renderer
def build_texture_renderer(U, V, f, vt, ft, texture, w, h, ambient=0.0, near=0.5, far=20000, background_image=None):

    A = SphericalHarmonics(vn=VertNormals(v=V, f=f),
                           components=[0., 0., 0., 0., 0., 0., 0., 0., 0.],
                           light_color=ch.ones(3)) + ambient

    if background_image is not None:
        R = TexturedRenderer(vc=A, camera=U, f=f, bgcolor=[0.0, 0.0, 0.0],
                             texture_image=texture, vt=vt, ft=ft,
                             frustum={'width': w, 'height': h, 'near': near, 'far': far}, background_image=background_image)
    else:
        R = TexturedRenderer(vc=A, camera=U, f=f, bgcolor=[0.0, 0.0, 0.0],
                             texture_image=texture, vt=vt, ft=ft,
                             frustum={'width': w, 'height': h, 'near': near, 'far': far})

    return R

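
# Typical use of the renderer above, for reference (a sketch: 'cam' is a
# fitted ProjectPoints camera, 'tex' a float RGB texture in [0, 1], and
# 'vt' the normalized texture coordinates from prepare_texture()):
#
#   cam.v = sv.r  # vertices to render
#   R = build_texture_renderer(cam, cam.v, model.f, vt, model.f,
#                              tex[::-1, :, :], w, h, ambient=1.0)
#   imRendered = (R.r * 255.).astype('uint8')  # h x w x 3, values in [0, 255]
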
# convert a single person's joints from numpy to json
def cvt_joints_np2json(joints_np):

    # 1. re-ordering
    # same as viton2lsp_joint, plus the remaining joints
    order = [13, 12, 8, 7, 6, 9, 10, 11, 2, 1, 0, 3, 4, 5, 14, 15, 16, 17]

    # 2. build the dictionary
    oneperson = {"face_keypoints": [],
                 "pose_keypoints": joints_np[order].flatten().tolist(),
                 "hand_right_keypoints": [],
                 "hand_left_keypoints": []}

    people = [oneperson]
    joints_json = {"version": 1.0, "people": people}

    return joints_json

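
# The inverse mapping, for reading a saved json back into an (18, 3) array
# (a sketch; 'order' must match the list used above):
def cvt_joints_json2np(joints_json):
    kp = np.array(joints_json["people"][0]["pose_keypoints"]).reshape(-1, 3)
    order = [13, 12, 8, 7, 6, 9, 10, 11, 2, 1, 0, 3, 4, 5, 14, 15, 16, 17]
    joints_np = np.zeros_like(kp)
    joints_np[order] = kp  # undo the re-ordering
    return joints_np
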
#
# SMPL => mask
#
def smpl2maskcore(cam,      # camera model, Ch
                  betas,    # shape coef, numpy
                  n_betas,  # num of PCA
                  pose,     # angles, 24x3 numpy
                  imRGB,    # img numpy
                  model,    # SMPL model
                  viz=False):  # visualize or not

    for which in [cam, betas, pose, imRGB, model]:
        if which is None:
            print('one of [cam, betas, pose, imRGB, model] is None')
            exit()

    h, w = imRGB.shape[0:2]

    print('betas:', type(betas), betas)
    print('pose:', type(pose), pose)

    # 1. build the body model
    sv = verts_decorated(  # surface vertices
        trans=ch.zeros(3),
        pose=ch.array(pose),
        v_template=model.v_template,
        J=model.J_regressor,
        betas=ch.array(betas),
        shapedirs=model.shapedirs[:, :, :n_betas],
        weights=model.weights,
        kintree_table=model.kintree_table,
        bs_style=model.bs_style,
        f=model.f,
        bs_type=model.bs_type,
        posedirs=model.posedirs,
        want_Jtr=not viz)  # need J_transformed for reposing based on vertices

    #sv_r = sv.r.copy()

    # 2. re-pose to the standard pose
    if True:  # make a standard pose for easier try-on
        sv.pose[:] = 0.0
        sv.pose[0] = np.pi
        # lsh = 16, rsh = 17: 78.75-degree (7/16*pi) rotation around the z axis
        sv.pose[16*3+2] = -7/16.0*np.pi
        sv.pose[17*3+2] = +7/16.0*np.pi

    # 3. render the model with the parameters
    im3CGray = cv2.cvtColor(cv2.cvtColor(imRGB, cv2.COLOR_BGR2GRAY), cv2.COLOR_GRAY2BGR)  # 3-channel gray
    im3CBlack = np.zeros_like(imRGB)  # uint8, same shape as the input
    imBackground = im3CBlack

    dist = np.abs(cam.t.r[2] - np.mean(sv.r, axis=0)[2])
    im = (render_model(
        sv.r, model.f, w, h, cam, far=20 + dist, img=imBackground[:, :, ::-1]) * 255.).astype('uint8')
    if False:
        plt.imshow(im[:, :, ::-1])  # , cmap='gray')
        plt.suptitle('rendered')
        _ = raw_input('next?')

    # 4. binary mask
    imBinary = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)  # gray silhouette
    imBinary[imBinary > 0] = 255  # binary (0, 255)
    if False:
        plt.imshow(imBinary)  # , cmap='gray')
        plt.suptitle('binary mask')
        _ = raw_input('next?')

    ###############################
    # 5. new 2d joints
    smpl_ids = [8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20]  # , 12]  # index in Jtr  # @TODO correct neck
    # lsh, lelb, lwr, neck

    # make the SMPL joints depend on betas
    Jdirs = np.dstack([model.J_regressor.dot(model.shapedirs[:, :, i]) for i in range(len(betas))])
    J_onbetas = ch.array(Jdirs).dot(betas) + model.J_regressor.dot(model.v_template.r)

    # get the joint positions as a function of model pose, betas and trans
    (_, A_global) = global_rigid_transformation(sv.pose, J_onbetas, model.kintree_table, xp=ch)
    Jtr = ch.vstack([g[:3, 3] for g in A_global]) + sv.trans

    # add extra joints, each anchored to a specific vertex
    neck_id = 3078  # 2951 #3061  # viton's between-shoulder point
    Jtr = ch.vstack((Jtr, sv[neck_id]))
    smpl_ids.append(len(Jtr) - 1)
    # head_id = 411
    nose_id = 331  # nose vertex id
    Jtr = ch.vstack((Jtr, sv[nose_id]))
    smpl_ids.append(len(Jtr) - 1)
    lear_id = 516
    Jtr = ch.vstack((Jtr, sv[lear_id]))
    smpl_ids.append(len(Jtr) - 1)
    rear_id = 3941  # 422 #226 #396
    Jtr = ch.vstack((Jtr, sv[rear_id]))
    smpl_ids.append(len(Jtr) - 1)
    leye_id = 125  # 220 #125
    Jtr = ch.vstack((Jtr, sv[leye_id]))
    smpl_ids.append(len(Jtr) - 1)
    reye_id = 3635
    Jtr = ch.vstack((Jtr, sv[reye_id]))
    smpl_ids.append(len(Jtr) - 1)

    # project the SMPL joints onto the image plane using the estimated camera
    cam.v = Jtr

    joints_np_wo_confidence = cam.r[smpl_ids]  # get the projected values
    print(joints_np_wo_confidence)
    joints_np = np.zeros([18, 3])
    joints_np[:, :2] = joints_np_wo_confidence
    joints_np[:, 2] = 1.0

    # zero confidence for joints that fall outside the image
    for i in range(joints_np.shape[0]):
        if joints_np[i, 0] < 0 or joints_np[i, 0] > (w-1) or joints_np[i, 1] < 0 or joints_np[i, 1] > (h-1):
            joints_np[i, 2] = 0.0

    #print(joints_np)

    # check the joints
    joints_np_int = joints_np.astype(int)
    for i in range(joints_np_int.shape[0]):
        cv2.circle(im, tuple(joints_np_int[i, :2]), 2, (0, 0, 255), -1)  # 2D joint in red (BGR)

    plt.imshow(im[:, :, ::-1])  # , cmap='gray')
    plt.suptitle('joint check')
    #_ = raw_input('next?')

    # 6. convert the format
    joints_json = cvt_joints_np2json(joints_np)  # json joints
    #print(joints_json)
    #json.dumps(joints_json)

    return imBinary, joints_json

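
# The projection idiom used above, isolated for reference: assigning to cam.v
# re-targets the fitted opendr ProjectPoints camera, and reading cam.r returns
# the projected 2D pixel coordinates (a sketch, for any (N, 3) array pts3d):
#
#   cam.v = pts3d  # 3D points
#   uv = cam.r     # (N, 2) pixel positions under the fitted camera
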
    # Dead code below: checks the rendering result. Unreachable (after the
    # return above) and unused, but kept for reference.
    # we could draw the points on it
    #print('th:', th, ' tw:', tw)
    # plt.figure()
    img2 = im3CGray.copy()
    '''
    plt.imshow(img2)
    plt.hold(True)
    # now the camera uses only joints
    plt.plot(cam.r[:,0], cam.r[:, 1], 'r+', markersize=10)  # projected pts
    '''
    # project all vertices using the camera
    cam.v = sv.r
    #print('>>>>SV.R:', type(sv.r))
    '''
    print('sv.r:', sv.r.shape)
    plt.plot(cam.r[:,0], cam.r[:, 1], 'b.', markersize=1)  # projected pts
    plt.show()
    plt.hold(False)
    plt.pause(3)
    '''
    # 1.2 vertices
    vertices = np.around(cam.r).astype(int)
    for idx, coord in enumerate(vertices):
        cv2.drawMarker(img2, tuple(coord), [0, 255, 0], markerSize=1)
        # cv2.circle(im, (int(round(uv[0])), int(round(uv[1]))), 1, [0, 255, 0])  # Green

    # Part map for vertices
    ###################################################
    body_colormap = {0: (255, 0, 0),
                     1: (0, 255, 0),   # lhip
                     2: (0, 0, 255),   # rhip
                     3: (255, 0, 0),
                     4: (0, 0, 255),
                     5: (0, 255, 0),   # lknee
                     6: (0, 255, 0),   # rknee
                     7: (0, 255, 0),   # lankle
                     8: (0, 0, 255),   # rankle
                     9: (255, 0, 0),
                     10: (0, 255, 0),  # lfoot
                     11: (0, 0, 255),  # rfoot
                     12: (0, 255, 0),  # neck ******
                     # arms
                     13: (0, 255, 0),  # shoulder
                     14: (0, 255, 0),  # shoulder
                     15: (255, 0, 0),  # head *****
                     16: (0, 0, 255),  # back arm
                     17: (0, 0, 255),
                     18: (0, 255, 0),  # fore-arm
                     19: (0, 255, 0),
                     20: (0, 0, 255),  # wrist
                     21: (0, 0, 255),
                     22: (0, 255, 0),  # hands
                     23: (0, 255, 0)}

    use_partmap = True
    check_partmap = True
    if use_partmap:
        bodyparts = np.argmax(model.weights.r, axis=1)
        if check_partmap:
            #bodypartmap = graphutil.build_bodypartmap(sv.r, cam, bodyparts, h, w, False)
            bodypartmap = graphutil.build_bodypartmap_2d(im3CGray, cam.r, bodyparts, body_colormap, h, w, False)
            print('part-max:', np.amax(bodyparts))
            plt.suptitle('body partmap')
            plt.subplot(1, 2, 1)
            plt.imshow(im3CGray[:, :, ::-1])  # , cmap='gray')
            plt.subplot(1, 2, 2)
            plt.imshow(bodypartmap[:, :, ::-1])  # , cmap='gray')
            _ = raw_input('quit?')
            #exit()


    '''
    # DEPTH MAP

    use_depthmap = True
    check_depthmap = True
    if use_depthmap:
        bodydepth = graphutil.build_depthmap2(sv.r, cam)
        if check_depthmap:
            # depth shown in reverse
            plt.suptitle('depthmap')
            plt.subplot(1, 2, 1)
            plt.imshow(img[:, :, ::-1])  # , cmap='gray')
            plt.subplot(1, 2, 2)
            depthmap = graphutil.build_depthimage(sv.r, model.f, bodydepth, cam, height=h, width=w)  # , near=0.5, far=400)
            #plt.imshow(depthmap, cmap='gray')
            plt.imshow(depthmap)
            plt.draw()
            plt.show()

            #plt.imshow(depthmap, cmap='gray_r')  # the closer to the camera, the brighter
            _ = raw_input('quit?')
            exit()
    '''

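
# A self-contained version of the vertex-to-body-part labeling used in the
# disabled block above: each vertex is assigned to the joint with the largest
# LBS skinning weight ('weights' is the (N, 24) matrix model.weights.r):
def vertex_body_parts(weights, colormap):
    parts = np.argmax(weights, axis=1)               # dominant joint per vertex
    colors = np.array([colormap[p] for p in parts])  # (N, 3) per-vertex colors
    return parts, colors
# e.g.: parts, colors = vertex_body_parts(model.weights.r, body_colormap)
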
#######################################################################################
# Load the dataset-dependent files and call the core processing.
# ---------------------------------------------------------------
# smpl_model    : SMPL
# inmodel_path  : smpl param pkl file (from SMPLify)
# inimg_path    : input image
# outimg_path   : output mask image
# outjoint_path : output joint json file
# ind           : image index
#######################################################################################
def smpl2mask_single(smpl_model, inmodel_path, inimg_path, outimg_path, outjoint_path, ind):

    if smpl_model is None or inmodel_path is None or inimg_path is None or outimg_path is None:
        print('some of the inputs are None'), exit()

    plt.ion()

    # model params
    with open(inmodel_path, 'rb') as f:
        if f is None:
            print("cannot open", inmodel_path), exit()
        params = pickle.load(f)

    #params['pose'] = params['pose'] % (2.0*np.pi)  # modulo

    cam = ProjectPoints(f=params['cam_f'], rt=params['cam_rt'], t=params['cam_t'], k=params['cam_k'], c=params['cam_c'])
    params['cam'] = cam

    _examine_smpl_params(params)

    # 2d rgb image for texture
    #inimg_path = img_dir + '/dataset10k_%04d.jpg' % idx
    img2D = cv2.imread(inimg_path)
    if img2D is None:
        print("cannot open", inimg_path), exit()

    # segmentation mask
    '''
    mask = cv2.imread(mask_path, cv2.IMREAD_UNCHANGED)
    if mask is None:
        print("cannot open", mask_path), exit()

    # pre-processing for boundary matching
    mask[mask == cloth_label_dict['bag']] = 0  # remove bag, FIXME: occlusion recovery

    # re-mark skin (not face)
    neck_y = j2d[12,1]
    lsh_y = j2d[9,1]
    rsh_y = j2d[8,1]
    cut_y = int((neck_y + lsh_y + rsh_y)/3.0)
    #print(type(neck_y), neck_y)
    mask_skin = mask.copy()
    mask_skin[mask == cloth_label_dict['face']] = cloth_label_dict['skin']  # 18
    mask[cut_y:,:] = mask_skin[cut_y:, :]  # replace only the non-face region below the neck

    # cut the connected legs
    if idx == 0:
        mask[500:,190] = 0
    elif idx == 1:
        mask[500:,220] = 0
    else:
        print('Not prepared yet for this idx!')
        # exit()
    '''

    # 3. run the SMPL body-to-cloth processing
    cam = params['cam']       # camera model, Ch
    betas = params['betas']
    n_betas = betas.shape[0]  # 10
    pose = params['pose']     # angles, 24x3 numpy
    img_mask, joints_json = smpl2maskcore(params['cam'],  # camera model, Ch
                                          betas,          # shape coeff, numpy
                                          n_betas,        # num of PCA
                                          pose,           # angles, 24x3 numpy
                                          img2D,          # img numpy
                                          smpl_model,     # SMPL
                                          viz=True)       # display

    # 3.2 save the results for checking
    if outimg_path is not None:
        cv2.imwrite(outimg_path, img_mask)
    if outjoint_path is not None:
        with open(outjoint_path, 'w') as joint_file:
            json.dump(joints_json, joint_file)


if __name__ == '__main__':

    if len(sys.argv) < 5:
        print('usage: %s base_path dataset start_idx end_idx(exclusive)' % sys.argv[0]), exit()

    # 1. directory check and setting
    base_dir = abspath(sys.argv[1])
    #print(base_dir)
    dataset = sys.argv[2]
    idx_s = int(sys.argv[3])
    idx_e = int(sys.argv[4])

    if not exists(base_dir):
        print('No such a directory for base', base_dir), exit()

    # input directory: images
    inp_dir = base_dir + "/images/" + dataset
    if not exists(inp_dir):
        print('No such a directory for dataset', dataset, inp_dir), exit()

    # input directory: preprocessed
    data_dir = base_dir + "/results/" + dataset
    print(data_dir)
    smpl_param_dir = data_dir + "/smpl"
    if not exists(smpl_param_dir):
        print('No such a directory for smpl param', smpl_param_dir), exit()
    '''
    mask_dir = data_dir + "/segmentation"
    if not exists(mask_dir):
        print('No such a directory for mask', mask_dir), exit()
    '''

    # output directories
    smplmask_dir = data_dir + "/smplmask"
    if not exists(smplmask_dir):
        makedirs(smplmask_dir)

    smpljson_dir = data_dir + "/smpljson"
    if not exists(smpljson_dir):
        makedirs(smpljson_dir)

    # 2. load the SMPL models (independent from the dataset)
    use_neutral = False
    # Assumes 'models' is in the 'code/' directory where this file is.
    MODEL_DIR = join(abspath(dirname(__file__)), 'models')
    MODEL_NEUTRAL_PATH = join(
        MODEL_DIR, 'basicModel_neutral_lbs_10_207_0_v1.0.0.pkl')
    MODEL_FEMALE_PATH = join(
        MODEL_DIR, 'basicModel_f_lbs_10_207_0_v1.0.0.pkl')
    MODEL_MALE_PATH = join(MODEL_DIR,
                           'basicmodel_m_lbs_10_207_0_v1.0.0.pkl')

    if not use_neutral:
        # File storing information about gender
        with open(join(data_dir, dataset + '_gender.csv')) as f:
            genders = f.readlines()
        model_female = load_model(MODEL_FEMALE_PATH)
        model_male = load_model(MODEL_MALE_PATH)
    else:
        gender = 'neutral'
        smpl_model = load_model(MODEL_NEUTRAL_PATH)

    #_examine_smpl(model_female), exit()

    # Load joints
    '''
    estj2d = np.load(join(data_dir, 'est_joints.npz'))['est_joints']
    #print('est_shape:', est.shape)
    joints = estj2d[:2, :, idx].T
    '''
    for idx in range(idx_s, idx_e):

        # for i in range(1, 2):
        # if not use_neutral:
        #     gender = 'male' if int(genders[i]) == 0 else 'female'
        # if gender == 'female':
        smpl_model = model_female
        smpl_param_path = smpl_param_dir + '/%04d.pkl' % idx
        if dataset == '10k':
            inp_path = inp_dir + '/' + 'dataset10k' + '_%04d.jpg' % idx
        else:
            inp_path = inp_dir + '/' + dataset + '_%06d.jpg' % idx

        '''
        mask_path = mask_dir + '/10kgt_%04d.png' % idx
        cloth_path = cloth_dir + '/%04d.png' % idx
        #print(smpl_model, smpl_param_path, inp_path, mask_path, cloth_path, idx)
        '''
        smplmask_path = smplmask_dir + '/%06d_0.png' % idx
        jointfile_path = smpljson_dir + '/%06d_0.json' % idx
        smpl2mask_single(smpl_model, smpl_param_path, inp_path, smplmask_path, jointfile_path, idx)


    # plt.pause(10)
    _ = raw_input('quit?')
--------------------------------------------------------------------------------