├── README.md
├── assets
├── dataset.png
├── page1.gif
├── result2.gif
├── result3.gif
└── teasor.png
└── render_code
├── camera_in_ex_para.py
├── data_tutorial.ipynb
├── render.py
├── shape.txt
├── smpl
├── data
│ └── smpl_data
│ │ ├── J_regressor_extra.npy
│ │ └── smpl_mean_params.npz
├── smpl.obj
└── smpl.py
└── utlis.py
/README.md:
--------------------------------------------------------------------------------
1 | # LaserHuman: Language-guided Scene-aware Human Motion Generation in Free Environment.
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
15 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 | Language-guided scene-aware human motion generation is of great significance for entertainment and robotics. In response to the limitations of existing datasets, we introduce LaserHuman, which stands out for its genuine human motions captured in 3D environments, unbounded free-form natural language descriptions, blend of indoor and outdoor scenarios, and dynamic, ever-changing scenes.
27 | The diverse modalities of captured data and rich annotations offer great opportunities for research on Scene-Text-to-Motion generation and can also facilitate the development of real-life applications.
28 | Moreover, to generate semantically consistent and physically plausible human motions, we propose a simple but effective multi-conditional diffusion model that achieves state-of-the-art performance on existing datasets.
29 |
30 |
31 | ## 💻 Train your own models
32 | 1. Prepare the datasets
33 |
34 |
35 |
36 | We release a sample of our dataset on Google Drive.
37 |
38 | The annotations for each motion sequence are saved in ```pub_datas.pkl```.
39 |
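You can load and inspect the annotation file directly, as walked through in ```data_tutorial.ipynb``` (a minimal sketch; the keys shown are those stored for each sequence):
```
import numpy as np

datas = np.load('pub_datas.pkl', allow_pickle=True)   # list of per-sequence dicts
data = datas[0]
print(data.keys())            # 'name', 'pc_data', 'description', 'smpl', 'dense', 'smpl_op', 'index', ...
print(data['description'])    # free-form language descriptions of the motion
print(data['smpl_op'].shape)  # (M, 75): per-frame global R, global T and body pose
```
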
40 | Please prepare the dataset in the following folder structure:
41 | ```
42 | ./LaserHuman_path
43 | ├── data/
44 | │   ├── process/          # the dense point clouds
45 | │   │   ├── spst.ply
46 | │   │   ├── sistout.ply
47 | │   │   └── ...
48 | │   ├── pub_datas.pkl     # annotations for each motion sequence
49 | │   └── pc_img_folder/
50 | │       ├── 1/
51 | │       │   └── pc/       # the LiDAR point clouds
52 | │       ├── 2/
53 | │       │   └── pc/
54 | │       └── ...
55 | └── render_code/
56 | ```
57 | * Data rendering
58 |
59 | You can render a motion sequence in its scene:
60 | ```
61 | python render.py --id 10 --pkl_path 'data/pub_datas.pkl' --path_root 'data/'
62 | ```
63 | A window will first appear so that you can adjust the camera view; adjust it and close the window to display the motion result. The rendered frames are compiled into a GIF saved under ```./gif/```.
64 |
65 | * Data tutorial
66 |
67 | We provide ```data_tutorial.ipynb``` with detailed instructions on how to use our data. A minimal sketch of reconstructing the body meshes from the annotations is shown below.
68 |
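For reference, the per-frame SMPL bodies can be recovered from ```smpl_op``` as in the tutorial (a minimal sketch; it assumes you run it inside ```render_code/``` so that ```shape.txt``` and the SMPL model files are found):
```
import sys
import numpy as np
import torch

sys.path.append('./smpl')
from smpl import SMPL, SMPL_MODEL_DIR

smpl_model = SMPL(SMPL_MODEL_DIR, create_transl=False)
betas = torch.Tensor(np.loadtxt('./shape.txt')).reshape(1, -1)

data = np.load('../data/pub_datas.pkl', allow_pickle=True)[0]
smpl_op = np.array(data['smpl_op'])            # (M, 75)
global_orient = torch.Tensor(smpl_op[:, :3])   # global rotation R
transl = torch.Tensor(smpl_op[:, 3:6])         # global translation T
body_pose = torch.Tensor(smpl_op[:, 6:])       # 69-dim body pose

output = smpl_model(betas=betas, body_pose=body_pose,
                    global_orient=global_orient, transl=transl)
vertices = output.vertices                     # (M, 6890, 3) body meshes
```
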
69 | ## License
70 | All datasets are published under the [Creative Commons Attribution-NonCommercial-ShareAlike 4.0 license](https://creativecommons.org/licenses/by-nc-sa/4.0/).
71 | This means that you must attribute the work in the manner specified by the authors, you may not use this work for commercial purposes, and if you alter, transform, or build upon this work, you may distribute the resulting work only under the same license.
72 |
73 | ## Citation
74 | ```
75 | @article{cong2024laserhuman,
76 | title={Laserhuman: language-guided scene-aware human motion generation in free environment},
77 | author={Cong, Peishan and Wang, Ziyi and Dou, Zhiyang and Ren, Yiming and Yin, Wei and Cheng, Kai and Sun, Yujing and Long, Xiaoxiao and Zhu, Xinge and Ma, Yuexin},
78 | journal={arXiv preprint arXiv:2403.13307},
79 | year={2024}
80 | }
81 | ```
82 |
83 |
--------------------------------------------------------------------------------
/assets/dataset.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/4DVLab/LaserHuman/acff98b095b1771a5ba4abe901f31baff975f20a/assets/dataset.png
--------------------------------------------------------------------------------
/assets/page1.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/4DVLab/LaserHuman/acff98b095b1771a5ba4abe901f31baff975f20a/assets/page1.gif
--------------------------------------------------------------------------------
/assets/result2.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/4DVLab/LaserHuman/acff98b095b1771a5ba4abe901f31baff975f20a/assets/result2.gif
--------------------------------------------------------------------------------
/assets/result3.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/4DVLab/LaserHuman/acff98b095b1771a5ba4abe901f31baff975f20a/assets/result3.gif
--------------------------------------------------------------------------------
/assets/teasor.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/4DVLab/LaserHuman/acff98b095b1771a5ba4abe901f31baff975f20a/assets/teasor.png
--------------------------------------------------------------------------------
/render_code/camera_in_ex_para.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2
3 |
4 | distortion_coefficients1 = np.array([-0.145834, 0.197798, -0.001340, -0.000277, 0.000000])
5 | distortion_coefficients2 = np.array([-0.1377, 0.1915, -0.002118, -0.000510, 0.000000])
6 | distortion_coefficients6 = np.array([-0.148392, 0.208062, -0.000187, 0.000458, 0.000000])
7 | distortion_coefficients = np.array([0,0,0,0,0])
8 | '''
9 | These parameters can be used to project the point cloud onto the images. We only use the three front cameras
10 | (cam1, cam2, cam6; cam1 is the center camera).
11 | '''
12 | h, w = 1200, 1920
13 | ex_matrix_cam1 = np.array([ # cam1
14 | [0.017452406437283574, -0.999847695156391, -5.551115123125783e-17, -0.024703597383298997],
15 | [-0.012215140126845492, -0.00021321606402130433, -0.999925369660452, 0.03975440225788578],
16 | [0.9997730761834054, 0.01745110395826527, -0.012217000835247127, -0.09080308692722944]
17 | ])
18 | ex_matrix_cam2 = np.array([ # cam2
19 | [-0.8598522715968737, -0.5105429179116056, 0.0, -0.009863877519666886],
20 | [0.004455270896599028, -0.0075035313714476234, -0.9999619230641715, -0.06082809641205974],
21 | [0.5105234780016824, -0.8598195310571061, 0.008726535498373912, -0.08629482217741014]
22 | ])
23 | ex_matrix_cam6 = np.array([ # cam6
24 | [0.8720692724321204, -0.4893824517488462, 0.0, -0.00012454852885848515],
25 | [8.326672684688674e-17, 1.6653345369377348e-16, -0.9999999999999998, -0.04931984031870221],
26 | [0.48938245174884615, 0.8720692724321204, 1.6653345369377348e-16, -0.08243496651085405]
27 | ])
28 | in_matrix_cam1 = np.array([
29 | [1288.27043, 0. , 944.73479],
30 | [0. , 1288.57055, 617.01932],
31 | [0. , 0. , 1. ]
32 | ])
33 | in_matrix_cam2 = np.array([[1293.11391, 0. , 974.14537],
34 | [0. , 1295.19251, 644.25513],
35 | [0. , 0. , 1. ]
36 | ])
37 | in_matrix_cam6 = np.array([
38 | [1294.81375, 0. , 926.57962],
39 | [0. , 1295.83987, 624.76414],
40 | [0. , 0. , 1. ]
41 | ])
42 |
43 | # in_matrix
44 | newcameramtx_cam1, _ = cv2.getOptimalNewCameraMatrix(in_matrix_cam1, distortion_coefficients1, (w,h), 0, (w,h))
45 | newcameramtx_cam2, _ = cv2.getOptimalNewCameraMatrix(in_matrix_cam2, distortion_coefficients2, (w,h), 0, (w,h))
46 | newcameramtx_cam6, _ = cv2.getOptimalNewCameraMatrix(in_matrix_cam6, distortion_coefficients6, (w,h), 0, (w,h))
47 |
48 |
49 |
50 | ''' sample code
51 | # data_path and image_path are placeholders; pick one camera's matrices, e.g. ex_matrix_cam = ex_matrix_cam1 and in_matrix_cam = in_matrix_cam1.
52 | # lidar2camera
53 | points = np.fromfile(data_path, dtype=np.float32).reshape([-1,3])
54 | points = np.concatenate((points,np.ones((points.shape[0],1))),1)
55 | points_T = np.transpose(points)
56 |
57 | points_T_camera = np.dot(ex_matrix_cam, points_T)
58 | # camera2pixel
59 | pixel = np.dot(in_matrix_cam, points_T_camera).T
60 | pixel_xy = np.array([x / x[2] for x in pixel])[:, 0:2]
61 | pixel_xy = np.around(pixel_xy).astype(int)
62 |
63 | image = np.array(cv2.imread(image_path))
64 | mask = (pixel_xy[:, 0] >= 0) & (pixel_xy[:, 0] < 1920) & \
65 | (pixel_xy[:, 1] >= 0) & (pixel_xy[:, 1] < 1200) & \
66 | (points_T_camera[2, :] > 0)
67 |
68 | filtered_coords = pixel_xy[mask]
69 | '''
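
# Example (a sketch, not part of the original pipeline): the optimal new camera
# matrices above can be used to undistort an image before projecting points onto
# it; 'image_path' is a placeholder.
# img = cv2.imread(image_path)
# img_undist = cv2.undistort(img, in_matrix_cam1, distortion_coefficients1, None, newcameramtx_cam1)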
--------------------------------------------------------------------------------
/render_code/data_tutorial.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 111,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "from camera_in_ex_para import *\n",
10 | "from utlis import *\n",
11 | "import shutil\n",
12 | "import open3d as o3d\n",
13 | "import numpy as np\n",
14 | "import sys, os\n",
15 | "sys.path.append(\"./smpl\")\n",
16 | "from smpl import SMPL,SMPL_MODEL_DIR\n"
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": 120,
22 | "metadata": {},
23 | "outputs": [],
24 | "source": [
25 | "pkl_path = './pub_datas.pkl'"
26 | ]
27 | },
28 | {
29 | "cell_type": "code",
30 | "execution_count": 121,
31 | "metadata": {},
32 | "outputs": [],
33 | "source": [
34 | "datas = np.load(pkl_path,allow_pickle=True)\n",
35 | "data = datas[0]"
36 | ]
37 | },
38 | {
39 | "cell_type": "code",
40 | "execution_count": 122,
41 | "metadata": {},
42 | "outputs": [
43 | {
44 | "data": {
45 | "text/plain": [
46 | "dict_keys(['name', 'pc_data', 'description', 'smpl', 'dense', 'smpl_op', 'index', 'livehps_map', 'livehps_pc'])"
47 | ]
48 | },
49 | "execution_count": 122,
50 | "metadata": {},
51 | "output_type": "execute_result"
52 | }
53 | ],
54 | "source": [
55 | "datas[0].keys()"
56 | ]
57 | },
58 | {
59 | "cell_type": "code",
60 | "execution_count": 123,
61 | "metadata": {},
62 | "outputs": [
63 | {
64 | "data": {
65 | "text/plain": [
66 | "['This person use his left hand to brace the pillar , then swing his right leg forward to the right back , end with his left foot behind .',\n",
67 | " 'A person brace the pillar with his left hand , swing his right leg forward and place his left foot behind .',\n",
68 | " 'Person brace pillar with left hand , swung right leg forward , left foot behind .',\n",
69 | " 'The person rest his left hand on the pillar and stand on one foot to the left , then kick his right leg straight downward and sway it backward behind his body , slide his left hand forward on the pillar and bring his right foot to the left rear side of his left foot , lightly turn his body to the right .',\n",
70 | " 'A person rest one hand on a pillar and kick one leg back , slide his hand forward and turn his body right .',\n",
71 | " 'Person rest left hand on pillar , stand on one foot , kick right leg down , sway it back , slid left hand forward , bring right foot to left rear , lightly turn body right .']"
72 | ]
73 | },
74 | "execution_count": 123,
75 | "metadata": {},
76 | "output_type": "execute_result"
77 | }
78 | ],
79 | "source": [
80 | "data['description'] # store the text annotations"
81 | ]
82 | },
83 | {
84 | "cell_type": "code",
85 | "execution_count": 124,
86 | "metadata": {},
87 | "outputs": [
88 | {
89 | "data": {
90 | "text/plain": [
91 | "(12, 75)"
92 | ]
93 | },
94 | "execution_count": 124,
95 | "metadata": {},
96 | "output_type": "execute_result"
97 | }
98 | ],
99 | "source": [
100 | "data['smpl_op'].shape # global R, T, pose "
101 | ]
102 | },
103 | {
104 | "cell_type": "markdown",
105 | "metadata": {},
106 | "source": [
107 | "* obtain the body mesh\n"
108 | ]
109 | },
110 | {
111 | "cell_type": "code",
112 | "execution_count": 125,
113 | "metadata": {},
114 | "outputs": [
115 | {
116 | "name": "stdout",
117 | "output_type": "stream",
118 | "text": [
119 | "WARNING: You are using a SMPL model, with only 10 shape coefficients.\n"
120 | ]
121 | }
122 | ],
123 | "source": [
124 | "smpl_model = SMPL(SMPL_MODEL_DIR, create_transl=False)\n",
125 | "shape_blob = torch.Tensor(np.loadtxt(\"./shape.txt\"))\n",
126 | "globalR,trans,pose_blob=np.array(data[\"smpl_op\"][:,:3]),np.array(data[\"smpl_op\"][:,3:6]),np.array(data[\"smpl_op\"][:,6:])\n",
127 | "output = smpl_model(betas=shape_blob.reshape(1,-1), \n",
128 | " body_pose=torch.Tensor(pose_blob), # M*69 (pose[:,3:])\n",
129 | " global_orient=torch.Tensor(globalR), # M*3 (pose[:,:3])\n",
130 | " transl=torch.Tensor(trans)) # M*3 global T\n"
131 | ]
132 | },
133 | {
134 | "cell_type": "code",
135 | "execution_count": 129,
136 | "metadata": {},
137 | "outputs": [],
138 | "source": [
139 | "data['pc_data'] # store the sparse point cloud path, length--M\n",
140 | "pc_path = '/'.join(['../data','pc_img_data',str(data['index']),data['pc_data'][0]])\n",
141 | "pc_data = np.fromfile(pc_path,dtype = np.float32).reshape(-1,5)\n",
142 | "pc_data.shape # (262144, 5), four dimension is x,y,z,r. this is for cropping the dynamic interaction person or dynamic environment input\n"
143 | ]
144 | },
145 | {
146 | "cell_type": "code",
147 | "execution_count": null,
148 | "metadata": {},
149 | "outputs": [],
150 | "source": []
151 | },
152 | {
153 | "cell_type": "markdown",
154 | "metadata": {},
155 | "source": [
156 | "* dense point cloud infomation\n",
157 | "\n",
158 | "When collecting data, we use collection sparse LiDAR coordinate system. In the algorithm, we use dense LiDAR coordinate system. R & T is the rotation and translation from the dense point cloud to LiDAR coordinate system."
159 | ]
160 | },
161 | {
162 | "cell_type": "code",
163 | "execution_count": 130,
164 | "metadata": {},
165 | "outputs": [],
166 | "source": [
167 | "R,T = np.array(data['dense']['R']).reshape(3,3),np.array(data['dense']['T'])"
168 | ]
169 | },
170 | {
171 | "cell_type": "markdown",
172 | "metadata": {},
173 | "source": [
174 | "dense_name recorded the sequence is from which scene"
175 | ]
176 | },
177 | {
178 | "cell_type": "code",
179 | "execution_count": 131,
180 | "metadata": {},
181 | "outputs": [],
182 | "source": [
183 | "dense_name = data['dense']['dense_name']\n",
184 | "ply_file_path = f\"../data/process/{dense_name}\" # ply path\n",
185 | "point_cloud = o3d.io.read_point_cloud(ply_file_path)\n",
186 | "point_cloud_t = trans_crop(point_cloud,R,T,trans[0]) # transform to LiDAR coordinate system for rendering\n",
187 | "M = 1000000 # downsample point cloud number\n",
188 | "downsample = np.random.choice(point_cloud_t.shape[0], M)\n",
189 | "point_cloud_t = point_cloud_t[downsample]"
190 | ]
191 | },
192 | {
193 | "cell_type": "code",
194 | "execution_count": null,
195 | "metadata": {},
196 | "outputs": [],
197 | "source": []
198 | }
199 | ],
200 | "metadata": {
201 | "kernelspec": {
202 | "display_name": "3d",
203 | "language": "python",
204 | "name": "python3"
205 | },
206 | "language_info": {
207 | "codemirror_mode": {
208 | "name": "ipython",
209 | "version": 3
210 | },
211 | "file_extension": ".py",
212 | "mimetype": "text/x-python",
213 | "name": "python",
214 | "nbconvert_exporter": "python",
215 | "pygments_lexer": "ipython3",
216 | "version": "3.8.18"
217 | }
218 | },
219 | "nbformat": 4,
220 | "nbformat_minor": 2
221 | }
222 |
--------------------------------------------------------------------------------
/render_code/render.py:
--------------------------------------------------------------------------------
1 | import open3d as o3d
2 | import numpy as np
3 | import os
4 | from tqdm import tqdm
5 | import torch
6 | from PIL import Image
7 | import os
8 | import sys
9 | from camera_in_ex_para import *
10 | from utlis import *
11 | import shutil
12 |
13 | import matplotlib.pyplot as plt
14 | import sys, os
15 | sys.path.append("./smpl")
16 | from smpl import SMPL,SMPL_MODEL_DIR
17 | import argparse
18 |
19 | def options():
20 | parser = argparse.ArgumentParser(description='rendering ...')
21 |     parser.add_argument('--pkl_path', type=str, help='path to pub_datas.pkl', default='../data/pub_datas.pkl')
22 |     parser.add_argument('--path_root', type=str, help='root of the data folder', default='../data/')
23 |     parser.add_argument('--id', type=int, help='index of the motion sequence to render', default=100)
24 | args = parser.parse_args()
25 | return args
26 |
27 | other_path_list = ["other_1","other_2","other_3","other_4"]
28 | color_list = [
29 | [1, 0.75, 0.2],
30 | [1, 0.75, 0.8],
31 | [0, 1.0, 0.2],
32 | [0, 0.2, 1.0]
33 | ]
34 | save_obj_path = './save_obj/'
35 | save_gif_path = './gif/'
36 | os.makedirs('./frames/',exist_ok=True)
37 | smpl_model = SMPL(SMPL_MODEL_DIR, create_transl=False)
38 | shape_blob = torch.Tensor(np.loadtxt("./shape.txt"))
39 |
40 |
41 | def main():
42 | args = options()
43 | datas = np.load(args.pkl_path,allow_pickle=True)
44 | # datas = np.load('./pub_datas.pkl',allow_pickle=True)
45 |
46 | index = int(args.id)
47 | data = datas[index]
48 | print(data.keys())
49 |
50 |
51 | name_id = data['index']
52 | dense_name = data['dense']['dense_name']
53 | R,T = np.array(data['dense']['R']).reshape(3,3),np.array(data['dense']['T'])
54 |
55 | # prepare for body OBJ saving
56 | os.makedirs(save_obj_path,exist_ok=True)
57 | os.makedirs(save_gif_path,exist_ok=True)
58 | obj_path = f'{save_obj_path}{name_id}_target'
59 | os.makedirs(obj_path,exist_ok=True)
60 |
61 | for other_id in other_path_list:
62 | if other_id in data.keys():
63 | globalR_mul,trans_mul,pose_blob=np.array(data[other_id][:,:3]),np.array(data[other_id][:,3:6]),np.array(data[other_id][:,6:])
64 | output = smpl_model(betas=shape_blob.reshape(1,-1),
65 | body_pose=torch.Tensor(pose_blob), # M*69 (pose[:,3:])
66 | global_orient=torch.Tensor(globalR_mul), # M*3 (pose[:,:3])
67 | transl=torch.Tensor(trans_mul)) # M*3 global T'''
68 | obj_path_instance = f'{save_obj_path}{name_id}_{other_id}'
69 | verts = output.vertices
70 | if not os.path.exists(obj_path_instance):
71 | os.makedirs(obj_path_instance,exist_ok=True)
72 | for j in range(len(verts)):
73 | smpl2obj(verts[j], f"{obj_path_instance}/{j}.obj")
74 |
75 | globalR,trans,pose_blob=np.array(data["smpl_op"][:,:3]),np.array(data["smpl_op"][:,3:6]),np.array(data["smpl_op"][:,6:])
76 | output = smpl_model(betas=shape_blob.reshape(1,-1),
77 | body_pose=torch.Tensor(pose_blob), # M*69 (pose[:,3:])
78 | global_orient=torch.Tensor(globalR), # M*3 (pose[:,:3])
79 | transl=torch.Tensor(trans)) # M*3 global T'''
80 |
81 | for j in range(len(output.vertices)):
82 | smpl2obj(output.vertices[j], f"{obj_path}/{j}.obj")
83 |
84 | # dense scene
85 | M = 1000000
86 | ply_file_path = f"{args.path_root}/process/{dense_name}" # ply path
87 | point_cloud = o3d.io.read_point_cloud(ply_file_path)
88 | point_cloud_t = trans_crop(point_cloud,R,T,trans[0])
89 | downsample = np.random.choice(point_cloud_t.shape[0], M)
90 | point_cloud_t = point_cloud_t[downsample]
91 | point_cloud_crop = o3d.geometry.PointCloud()
92 | point_cloud_crop.points = o3d.utility.Vector3dVector(point_cloud_t[:,:3])
93 | point_cloud_crop.colors = o3d.utility.Vector3dVector(point_cloud_t[:,3:])
94 |     save_view_point(point_cloud_crop, "camera.json")  # adjust the view in the pop-up window, then close it to save the camera parameters
95 | vis = o3d.visualization.Visualizer()
96 | vis.create_window(window_name='visual', width=800, height=600)
97 | cam_params = o3d.io.read_pinhole_camera_parameters("camera.json")
98 | mesh_normal = o3d.geometry.TriangleMesh()
99 | mesh_other_list = []
100 | for i in range(4):
101 | mesh_other_list.append(o3d.geometry.TriangleMesh())
102 |
103 | other_count = 0
104 |
105 | ## begin rendering
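    # For each frame: load this frame's target mesh (and any interacting persons'
    # meshes), recompute vertex normals/colors, update the visualizer with the
    # fixed camera saved in camera.json, and capture the frame to ./frames/.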
106 | for i in range(trans.shape[0]):
107 | mesh = o3d.io.read_triangle_mesh(f"{obj_path}/{i}.obj")
108 | mesh_normal.triangles = o3d.utility.Vector3iVector(mesh.triangles)
109 | mesh_normal.vertices = o3d.utility.Vector3dVector(mesh.vertices)
110 | mesh_normal.compute_vertex_normals()
111 | for k in range(4):
112 | mesh_path = f'{save_obj_path}{name_id}_other_{k + 1}/{i}.obj'
113 | if os.path.exists(f'{save_obj_path}{name_id}_other_{k + 1}/{i}.obj'):
114 | mesh_mul = o3d.io.read_triangle_mesh(mesh_path)
115 | mesh_other_list[k].triangles = o3d.utility.Vector3iVector(mesh_mul.triangles)
116 | mesh_other_list[k].vertices = o3d.utility.Vector3dVector(mesh_mul.vertices)
117 | mesh_other_list[k].compute_vertex_normals()
118 | num_vertices = np.asarray(mesh_other_list[k].vertices).shape[0]
119 | color = color_list[k]
120 | colors = np.repeat([color], num_vertices, axis=0)
121 | mesh_other_list[k].vertex_colors = o3d.utility.Vector3dVector(colors)
122 | other_count += 1
123 | if i==0:
124 | vis.add_geometry(point_cloud_crop)
125 | vis.add_geometry(mesh_normal)
126 | for k in range(other_count):
127 | vis.add_geometry(mesh_other_list[k])
128 | view_ctl = vis.get_view_control()
129 | view_ctl.convert_from_pinhole_camera_parameters(cam_params)
130 | else:
131 | vis.update_geometry(point_cloud_crop)
132 | vis.update_geometry(mesh_normal)
133 | for k in range(other_count):
134 | try:
135 | vis.update_geometry(mesh_other_list[k])
136 | except:
137 | pass
138 | view_ctl = vis.get_view_control()
139 | view_ctl.convert_from_pinhole_camera_parameters(cam_params)
140 |
141 | vis.poll_events()
142 | vis.update_renderer()
143 | image = vis.capture_screen_float_buffer(False)
144 | image = np.asarray(image)
145 |         plt.imsave(f"frames/frame_{i:03d}.png", image, dpi=1)
146 |
147 | shutil.rmtree(save_obj_path)
148 |
149 | images = [Image.open(f"frames/frame_{i:03d}.png") for i in range(trans.shape[0])]
150 | gif_filename = f'{save_gif_path}{name_id}.gif'
151 | images[0].save(gif_filename, save_all=True, append_images=images[1:], optimize=False, duration=3)
152 | shutil.rmtree('./frames/')
153 |
154 | if __name__ == '__main__':
155 | main()
--------------------------------------------------------------------------------
/render_code/shape.txt:
--------------------------------------------------------------------------------
1 | -0.567482 0.494625 -0.0794629 -0.328839 -0.0323936 -0.0600556 0.021654 0.0548773 -0.0163508 -0.01156
--------------------------------------------------------------------------------
/render_code/smpl/data/smpl_data/J_regressor_extra.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/4DVLab/LaserHuman/acff98b095b1771a5ba4abe901f31baff975f20a/render_code/smpl/data/smpl_data/J_regressor_extra.npy
--------------------------------------------------------------------------------
/render_code/smpl/data/smpl_data/smpl_mean_params.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/4DVLab/LaserHuman/acff98b095b1771a5ba4abe901f31baff975f20a/render_code/smpl/data/smpl_data/smpl_mean_params.npz
--------------------------------------------------------------------------------
/render_code/smpl/smpl.py:
--------------------------------------------------------------------------------
1 | # This script is borrowed and extended from https://github.com/nkolot/SPIN/blob/master/models/hmr.py
2 | # Adhere to their licence to use this script
3 |
4 | import torch
5 | import numpy as np
6 | import os.path as osp
7 | from smplx import SMPL as _SMPL
8 | from smplx.utils import ModelOutput, SMPLOutput
9 | import warnings
10 | warnings.filterwarnings("ignore", category=DeprecationWarning)
11 |
12 | # Map joints to SMPL joints
13 | JOINT_MAP = {
14 | 'OP Nose': 24, 'OP Neck': 12, 'OP RShoulder': 17,
15 | 'OP RElbow': 19, 'OP RWrist': 21, 'OP LShoulder': 16,
16 | 'OP LElbow': 18, 'OP LWrist': 20, 'OP MidHip': 0,
17 | 'OP RHip': 2, 'OP RKnee': 5, 'OP RAnkle': 8,
18 | 'OP LHip': 1, 'OP LKnee': 4, 'OP LAnkle': 7,
19 | 'OP REye': 25, 'OP LEye': 26, 'OP REar': 27,
20 | 'OP LEar': 28, 'OP LBigToe': 29, 'OP LSmallToe': 30,
21 | 'OP LHeel': 31, 'OP RBigToe': 32, 'OP RSmallToe': 33, 'OP RHeel': 34,
22 | 'Right Ankle': 8, 'Right Knee': 5, 'Right Hip': 45,
23 | 'Left Hip': 46, 'Left Knee': 4, 'Left Ankle': 7,
24 | 'Right Wrist': 21, 'Right Elbow': 19, 'Right Shoulder': 17,
25 | 'Left Shoulder': 16, 'Left Elbow': 18, 'Left Wrist': 20,
26 | 'Neck (LSP)': 47, 'Top of Head (LSP)': 48,
27 | 'Pelvis (MPII)': 49, 'Thorax (MPII)': 50,
28 | 'Spine (H36M)': 51, 'Jaw (H36M)': 52,
29 | 'Head (H36M)': 53, 'Nose': 24, 'Left Eye': 26,
30 | 'Right Eye': 25, 'Left Ear': 28, 'Right Ear': 27
31 | }
32 |
33 | JOINT_PAIRS = [('OP Neck', 'OP MidHip'), ('OP RHip', 'OP RKnee'), ('OP RKnee', 'OP RAnkle'), ('OP MidHip', 'OP RHip'), ('OP MidHip', 'OP LHip'), ('OP LHip', 'OP LKnee'),
34 | ('OP LKnee', 'OP LAnkle'), ('OP Neck', 'OP RShoulder'), ('OP RShoulder', 'OP RElbow'), ('OP RElbow', 'OP RWrist'), ('OP Neck', 'OP LShoulder'),
35 | ('OP LShoulder', 'OP LElbow'), ('OP LElbow', 'OP LWrist'), ('OP Neck', 'OP Nose'), ('OP Nose', 'OP REye'), ('OP Nose', 'OP LEye'),
36 | ('OP REye', 'OP REar'), ('OP LEye', 'OP LEar'), ('OP LAnkle', 'OP LBigToe'), ('OP RAnkle', 'OP RBigToe')]
37 |
38 |
39 | OP_21_JOINTS = [
40 | 'OP LWrist', 'OP RWrist', 'OP LAnkle', 'OP RAnkle',
41 | 'OP LElbow', 'OP RElbow', 'OP LKnee', 'OP RKnee',
42 | 'OP LShoulder','OP RShoulder',
43 | 'OP LHip', 'OP MidHip','OP RHip',
44 | 'OP Nose', 'OP Neck',
45 | 'OP LEye', 'OP REye', 'OP LEar', 'OP REar',
46 | 'OP LBigToe', 'OP RBigToe',
47 |
48 | ]
49 |
50 |
51 | JOINT_IDS = {OP_21_JOINTS[i]: i for i in range(len(OP_21_JOINTS))}
52 |
53 | JOINT_ID_PAIRS = [(JOINT_IDS[first], JOINT_IDS[second]) for first, second in JOINT_PAIRS]
54 |
55 | # JOINT_REGRESSOR_TRAIN_EXTRA = osp.join(VIBE_DATA_DIR, 'J_regressor_extra.npy')
56 | # SMPL_MEAN_PARAMS = osp.join(VIBE_DATA_DIR, 'smpl_mean_params.npz')
57 | SMPL_MODEL_DIR = 'smpl/data/smpl_data'
58 | H36M_TO_J17 = [6, 5, 4, 1, 2, 3, 16, 15, 14, 11, 12, 13, 8, 10, 0, 7, 9]
59 | H36M_TO_J14 = H36M_TO_J17[:14]
60 |
61 |
62 | class SMPL(_SMPL):
63 | """ Extension of the official SMPL implementation to support more joints """
64 |
65 | def __init__(self, *args, **kwargs):
66 | super(SMPL, self).__init__(*args, **kwargs)
67 | joints = [JOINT_MAP[i] for i in OP_21_JOINTS]
68 | # J_regressor_extra = np.load(JOINT_REGRESSOR_TRAIN_EXTRA)
69 | # self.register_buffer('J_regressor_extra', torch.tensor(J_regressor_extra, dtype=torch.float32))
70 | self.joint_map = torch.tensor(joints, dtype=torch.long)
71 |
72 | def forward(self, *args, **kwargs):
73 | kwargs['get_skin'] = True
74 | smpl_output = super(SMPL, self).forward(*args, **kwargs)
75 | # extra_joints = vertices2joints(self.J_regressor_extra, smpl_output.vertices)
76 | # joints = torch.cat([smpl_output.joints, extra_joints], dim=1)
77 | # joints = joints[:, self.joint_map, :]
78 | joints = smpl_output.joints
79 | output = SMPLOutput(vertices=smpl_output.vertices,
80 | global_orient=smpl_output.global_orient,
81 | body_pose=smpl_output.body_pose,
82 | joints=joints,
83 | betas=smpl_output.betas,
84 | full_pose=smpl_output.full_pose)
85 | return output
86 |
87 |
88 | def get_smpl_faces():
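    """Return the face (triangle) indices of the SMPL template mesh."""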
89 | smpl = SMPL(SMPL_MODEL_DIR, batch_size=1, create_transl=False)
90 | return smpl.faces
--------------------------------------------------------------------------------
/render_code/utlis.py:
--------------------------------------------------------------------------------
1 | import open3d as o3d
2 | import numpy as np
3 | import os
4 | from tqdm import tqdm
5 | import torch
6 | from PIL import Image
7 | import os
8 | import sys
9 | import warnings
10 | def smpl2obj(vertices, path):
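    """Write SMPL `vertices` to an OBJ file at `path`, reusing the face topology from smpl/smpl.obj."""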
11 | fs = list()
12 | with open("smpl/smpl.obj") as f:
13 | lines = f.readlines()
14 | for line in lines:
15 | l = line.split(" ")
16 | if "f" in l:
17 | fs.append((int(l[1]), int(l[2]), int(l[3])))
18 |
19 | with open(path, "w") as f:
20 | for v in vertices:
21 | f.write(( 'v %f %f %f\n' % ( v[0], v[1], v[2]) ))
22 | for face in fs:
23 | f.write(( 'f %d %d %d\n' % ( face[0], face[1], face[2]) ))
24 |
25 |
26 | def save_view_point(ply, filename):
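    """Display `ply` in an interactive window; adjust the view, then press 'q' to save the camera parameters to `filename`."""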
27 | vis = o3d.visualization.Visualizer()
28 | vis.create_window(window_name='visual', width=800, height=600)
29 | vis.add_geometry(ply)
30 | vis.run() # user changes the view and press "q" to terminate
31 | param = vis.get_view_control().convert_to_pinhole_camera_parameters()
32 | o3d.io.write_pinhole_camera_parameters(filename, param)
33 | vis.destroy_window()
34 |
35 | def load_view_point(ply, filename):
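    """Display `ply` using the camera parameters previously saved to `filename`."""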
36 | vis = o3d.visualization.Visualizer()
37 | vis.create_window(window_name='visual',width=800, height=600)
38 | ctr = vis.get_view_control()
39 | param = o3d.io.read_pinhole_camera_parameters(filename)
40 | vis.add_geometry(ply)
41 | ctr.convert_from_pinhole_camera_parameters(param)
42 | vis.run()
43 | vis.destroy_window()
44 |
45 | def trans_crop(point_cloud,R,T,trans):
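    """Transform the dense point cloud into the LiDAR coordinate system with (R, T), then crop a
    box around the person's translation `trans`; returns points concatenated with colors, shape (N, 6)."""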
46 | pc = np.array(point_cloud.points)
47 | c = np.array(point_cloud.colors)
48 | T_ = T[:3].dot(R)
49 | # downsample = np.random.choice(self.vertices.shape[0], self.N)
50 | # vertices_downsample = self.vertices[downsample]
51 | # pc = pc[:,:3].dot(R)- T_[:3]
52 | pc = pc[:,:3] - T
53 | pc = np.matmul(R.T,pc.T).T
54 |
55 | L = 10
56 | mask = (pc[:, 0] < trans[0]+L) & (pc[:, 0] > trans[0]-L) & \
57 | (pc[:, 1] < trans[1]+L) & (pc[:, 1] > trans[1]-L) & \
58 | (pc[:, 2] < trans[2]+2) & (pc[:, 2] > trans[2]-4)
59 | crop_downsample = pc[mask]
60 | c = c[mask]
61 | return np.concatenate((crop_downsample,c),1)
62 |
63 |
--------------------------------------------------------------------------------