├── .gitignore
├── LICENSE.md
├── README.md
├── cloudrender
│   ├── __init__.py
│   ├── camera
│   │   ├── __init__.py
│   │   ├── models.py
│   │   └── trajectory.py
│   ├── capturing.py
│   ├── libegl
│   │   ├── __init__.py
│   │   ├── context.py
│   │   ├── devices
│   │   │   ├── __init__.py
│   │   │   ├── gbm.py
│   │   │   └── generic.py
│   │   ├── libgbm.py
│   │   └── tools.py
│   ├── render
│   │   ├── __init__.py
│   │   ├── depthmap.py
│   │   ├── lights.py
│   │   ├── mesh.py
│   │   ├── pointcloud.py
│   │   ├── renderable.py
│   │   ├── rigid.py
│   │   ├── shaders
│   │   │   ├── __init__.py
│   │   │   ├── avgcolor_pointcloud_with_normals
│   │   │   │   ├── depthmap
│   │   │   │   │   ├── fragment.glsl
│   │   │   │   │   ├── geometry.glsl
│   │   │   │   │   └── vertex_perspective.glsl
│   │   │   │   ├── fragment.glsl
│   │   │   │   ├── geometry.glsl
│   │   │   │   ├── normalization
│   │   │   │   │   ├── fragment.glsl
│   │   │   │   │   └── vertex.glsl
│   │   │   │   └── vertex_perspective.glsl
│   │   │   ├── shader_loader.py
│   │   │   ├── simple_mesh
│   │   │   │   ├── fragment.glsl
│   │   │   │   ├── shadowdraw
│   │   │   │   │   ├── fragment.glsl
│   │   │   │   │   ├── vertex_opencv.glsl
│   │   │   │   │   └── vertex_perspective.glsl
│   │   │   │   ├── shadowgen
│   │   │   │   │   ├── fragment.glsl
│   │   │   │   │   └── vertex_perspective.glsl
│   │   │   │   ├── vertex_opencv.glsl
│   │   │   │   └── vertex_perspective.glsl
│   │   │   ├── simple_pointcloud
│   │   │   │   ├── fragment.glsl
│   │   │   │   ├── geometry.glsl
│   │   │   │   ├── shadowdraw
│   │   │   │   │   ├── fragment.glsl
│   │   │   │   │   ├── geometry.glsl
│   │   │   │   │   ├── vertex_opencv.glsl
│   │   │   │   │   └── vertex_perspective.glsl
│   │   │   │   ├── shadowgen
│   │   │   │   │   ├── fragment.glsl
│   │   │   │   │   ├── geometry.glsl
│   │   │   │   │   └── vertex_perspective.glsl
│   │   │   │   ├── vertex_opencv.glsl
│   │   │   │   └── vertex_perspective.glsl
│   │   │   ├── simple_pointcloud_with_normals
│   │   │   │   ├── fragment.glsl
│   │   │   │   ├── geometry.glsl
│   │   │   │   ├── shadowdraw
│   │   │   │   │   ├── fragment.glsl
│   │   │   │   │   ├── geometry.glsl
│   │   │   │   │   └── vertex_perspective.glsl
│   │   │   │   ├── shadowgen
│   │   │   │   │   ├── fragment.glsl
│   │   │   │   │   ├── geometry.glsl
│   │   │   │   │   └── vertex_perspective.glsl
│   │   │   │   └── vertex_perspective.glsl
│   │   │   └── textured_mesh
│   │   │       ├── fragment.glsl
│   │   │       ├── shadowdraw
│   │   │       │   ├── fragment.glsl
│   │   │       │   ├── vertex_opencv.glsl
│   │   │       │   └── vertex_perspective.glsl
│   │   │       ├── vertex_opencv.glsl
│   │   │       └── vertex_perspective.glsl
│   │   ├── shadowmap.py
│   │   ├── smpl.py
│   │   ├── smpl_legacy.py
│   │   └── utils.py
│   ├── scene.py
│   └── utils.py
├── download_test_assets.sh
├── images
│   └── test_scene_video_output_example.gif
├── requirements.txt
├── setup.py
└── test_scene_video.py
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__/
2 | *.pyc
3 | test_assets/
4 | *.egg-info/
5 | build/
6 | dist/
7 |
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | License Copyright (c) 2021 Vladimir Guzov, Aymen Mir, Max-Planck-Gesellschaft
2 |
3 | **Please read carefully the following terms and conditions and any accompanying documentation before you download and/or use this software and associated documentation files (the "Software").**
4 |
5 | The authors hereby grant you a non-exclusive, non-transferable, free of charge right to copy, modify, merge, publish, distribute, and sublicense the Software for the sole purpose of performing non-commercial scientific research, non-commercial education, or non-commercial artistic projects.
6 |
7 | Any other use, in particular any use for commercial purposes, is prohibited. This includes, without limitation, incorporation in a commercial product, use in a commercial service, or production of other artefacts for commercial purposes.
8 |
9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
10 |
11 | You understand and agree that the authors are under no obligation to provide either maintenance services, update services, notices of latent defects, or corrections of defects with regard to the Software. The authors nevertheless reserve the right to update, modify, or discontinue the Software at any time.
12 |
13 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. You agree to cite **Human POSEitioning System (HPS): 3D Human Pose Estimation and Self-localization in Large Scenes from Body-Mounted Sensors** paper in documents and papers that report on research using this Software.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # cloudrender: an OpenGL framework for pointcloud and mesh rendering
2 | A visualization framework capable of rendering large pointclouds, dynamic SMPL models and more. Used to visualize results in our Human POSEitioning System (HPS) project: http://virtualhumans.mpi-inf.mpg.de/hps/
3 |
4 | ## Requirements
5 | - GPU with OpenGL 4.0
6 |
7 | Optionally, if you want to run the included test script:
8 | - EGL support (for headless rendering)
9 | - ffmpeg>=2.1 with libx264 enabled and ffprobe installed (for saving to video)
10 | - SMPL model files (see below)
11 |
12 | ## Installation
13 | ### Step 1. Get the code
14 | Get the code without installing it as a package:
15 | ```bash
16 | git clone https://github.com/vguzov/cloudrender
17 | pip install -r requirements.txt
18 | ```
19 | or install it as a package with
20 | ```
21 | pip install cloudrender
22 | ```
23 | ### Step 2. Get the SMPL model (optional, required for human body visualization)
24 | - Follow the installation instructions at https://github.com/vchoutas/smplx
25 | - For legacy version, follow the installation instructions at https://github.com/gulvarol/smplpytorch
26 |
27 | ## Running test script
28 | ### test_scene_video.py
29 | Run `bash download_test_assets.sh` – it will create a `test_assets` folder and download the assets for the test script
30 | (human shape and motion files, a camera trajectory file).
31 | Additionally, you need to download the 3D scene scan from here: https://edmond.mpg.de/file.xhtml?fileId=274762
32 | and put it as `MPI_Etage6-pc.zip` in the `test_assets` folder without unpacking.
33 |
34 | Run `test_scene_video.py`; make sure to change the `SMPLX_ROOT` path in the script if needed.
35 |
36 |
37 | The script will write a short video to `test_assets/output.mp4`, which should look similar to this:
38 |
39 | ![test scene video output example](images/test_scene_video_output_example.gif)
40 |
41 |
42 | ## More data
43 | Please check our HPS project page for more 3D scans and motion data: http://virtualhumans.mpi-inf.mpg.de/hps/
44 |
45 | Camera trajectory is created using [CloudVis interactive viewer](https://github.com/vguzov/cloudvis).
46 |
47 | ## Citation
48 |
49 | If you find the code or data useful, please cite:
50 |
51 | ```
52 | @inproceedings{HPS,
53 | title = {Human POSEitioning System (HPS): 3D Human Pose Estimation and Self-localization in Large Scenes from Body-Mounted Sensors },
54 | author = {Guzov, Vladimir and Mir, Aymen and Sattler, Torsten and Pons-Moll, Gerard},
55 | booktitle = {{IEEE} Conference on Computer Vision and Pattern Recognition (CVPR)},
56 | month = {jun},
57 | organization = {{IEEE}},
58 | year = {2021},
59 | }
60 | ```
61 |
--------------------------------------------------------------------------------
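A minimal headless-rendering sketch (an illustration, not a file from this repo; `test_scene_video.py` is the complete example) tying together `EGLContext`, `PerspectiveCameraModel`, and `DirectCapture` from the files below:

```python
import numpy as np
from cloudrender.libegl import EGLContext  # must be imported first: sets PYOPENGL_PLATFORM=egl
from OpenGL import GL as gl
from cloudrender.camera import PerspectiveCameraModel
from cloudrender.capturing import DirectCapture

resolution = (1280, 720)
context = EGLContext()
if not context.initialize(*resolution):
    raise RuntimeError("Failed to initialize an EGL context")

camera = PerspectiveCameraModel()
camera.init_intrinsics(resolution, fov=75., far=50., near=0.05)
camera.init_extrinsics(quat=np.array([1., 0., 0., 0.]), pose=np.zeros(3))

gl.glClearColor(1.0, 1.0, 1.0, 0.0)
gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
# ... create renderables, upload the camera uniforms, and draw here ...

with DirectCapture(resolution) as capture:
    color = capture.request_color()  # (720, 1280, 3) uint8 image, top row first
```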
/cloudrender/__init__.py:
--------------------------------------------------------------------------------
1 | __version__ = '1.3.6'
--------------------------------------------------------------------------------
/cloudrender/camera/__init__.py:
--------------------------------------------------------------------------------
1 | from .models import *
2 |
--------------------------------------------------------------------------------
/cloudrender/camera/models.py:
--------------------------------------------------------------------------------
1 | from typing import Sequence, Union, Optional
2 |
3 | import numpy as np
4 | from abc import ABC, abstractmethod
5 | from OpenGL import GL as gl
6 | import glm
7 | import os
8 | from scipy.spatial.transform import Rotation
9 |
10 |
11 | class BaseCameraModel(ABC):
12 | uniforms_names = ["M", "V"]
13 |
14 | class CameraContext(object):
15 | pass
16 |
17 | def __init__(self, camera_model):
18 | self.context = self.CameraContext()
19 | self.model = camera_model
20 | self.context.View = glm.mat4(1.0)
21 | self.quat = np.array([1., 0, 0, 0])
22 | self.pose = np.zeros(3)
23 | self.world2cam = False
24 |
25 | def init_extrinsics(self, quat=None, pose=None, world2cam=False):
26 | quat = self.quat if quat is None else quat
27 | pose = self.pose if pose is None else pose
28 | self.quat = quat
29 | self.pose = pose
30 | self.world2cam = world2cam
31 | R = Rotation.from_quat(np.roll(quat, -1)).as_matrix()
32 | t = np.array([pose]).T
33 | if world2cam:
34 | RT = np.vstack([np.hstack([R, t]), [[0, 0, 0, 1]]])
35 | else:
36 | # otherwise invert cam2world to world2cam
37 | RT = np.vstack([np.hstack([R.T, -np.matmul(R.T, t)]), [[0, 0, 0, 1]]])
38 | self.context.View = glm.mat4(*(RT.T.astype(np.float32).copy().flatten()))
39 |
40 | @abstractmethod
41 | def init_intrinsics(self, **kwargs):
42 | pass
43 |
44 | def upload_extrinsics(self, shader_ids):
45 | gl.glUniformMatrix4fv(shader_ids['V'], 1, gl.GL_FALSE, glm.value_ptr(self.context.View))
46 |
47 | @abstractmethod
48 | def upload_intrinsics(self, shader_ids):
49 | pass
50 |
51 | def upload(self, shader_ids):
52 | self.upload_extrinsics(shader_ids)
53 | self.upload_intrinsics(shader_ids)
54 |
55 | def locate_uniforms(self, shader, keys=None):
56 | keys = self.uniforms_names if keys is None else keys
57 | return {k: gl.glGetUniformLocation(shader.program, k) for k in keys}
58 |
59 | def project(self, points: np.ndarray, model_mtx: Optional[Union[glm.mat4x4, np.ndarray]] = None) -> np.ndarray:
60 | """
61 | Projects the points according to the camera model and returns the projected points in NDC (normalized device coordinates)
62 | Args:
63 |             points (np.ndarray): points in the world coordinates, shape (-1, 3)
64 |             model_mtx (glm.mat4x4 or np.ndarray, optional): model matrix applied to the points before projection
65 | Returns:
66 | np.ndarray: projected points in NDC, shape (-1,3)
67 | """
68 | raise NotImplementedError("Projection is not implemented for this camera type")
69 |
70 |
71 | class OcamCameraModel(BaseCameraModel):
72 | uniforms_names = BaseCameraModel.uniforms_names + ['ocam_invpol', 'ocam_affine', 'ocam_center_off',
73 | 'ocam_theta_thresh', 'far', 'width_mul']
74 |
75 | def __init__(self):
76 | super().__init__("ocam")
77 |
78 | def init_intrinsics(self, cameramodel_dict, fov=360, far=20.):
79 | ocammodel_dict = cameramodel_dict['OCamModel']
80 | # polynomial coefficients for the DIRECT mapping function
81 | ocam_pol = [float(x) for x in ocammodel_dict['cam2world']['coeff']]
82 | # polynomial coefficients for the inverse mapping function
83 | ocam_invpol = np.array([float(x) for x in ocammodel_dict['world2cam']['coeff']])
84 | # center: "row" and "column", starting from 0 (C convention)
85 | ocam_xy_center = np.array((float(ocammodel_dict['cx']), float(ocammodel_dict['cy'])))
86 | # _affine parameters "c", "d", "e"
87 | ocam_affine = np.array([float(ocammodel_dict[x]) for x in ['c', 'd', 'e']])
88 | # image size: "height" and "width"
89 | ocam_imsize = cameramodel_dict['ImageSize']
90 | ocam_img_size = np.array((int(ocam_imsize['Width']), int(ocam_imsize['Height'])))
91 |
92 | self.context.ocam_invpol = ocam_invpol / ocam_img_size[0] * 2
93 |
94 | self.context.ocam_center_off = ocam_xy_center / ocam_img_size[::-1] * 2 - 1
95 | # self.context.ocam_center_off = (ocam_xy_center - ocam_img_size[::-1] / 2) / ocam_img_size * 2
96 | self.context.ocam_theta_thresh = np.deg2rad(fov / 2) - np.pi / 2
97 | self.context.ocam_affine = ocam_affine.copy()
98 | self.context.ocam_affine[:2] *= ocam_img_size[0] / ocam_img_size[1]
99 | self.context.far = far
100 | self.context.width_mul = ocam_img_size[1] / ocam_img_size[0]
101 |
102 | def upload_intrinsics(self, shader_ids):
103 | gl.glUniform1dv(shader_ids['ocam_invpol'], 18, self.context.ocam_invpol.astype(np.float64).copy())
104 | gl.glUniform3dv(shader_ids['ocam_affine'], 1, self.context.ocam_affine.astype(np.float64).copy())
105 | gl.glUniform2dv(shader_ids['ocam_center_off'], 1,
106 | self.context.ocam_center_off.astype(np.float64).copy())
107 | gl.glUniform1f(shader_ids['ocam_theta_thresh'], float(self.context.ocam_theta_thresh))
108 | gl.glUniform1f(shader_ids['far'], float(self.context.far))
109 | gl.glUniform1f(shader_ids['width_mul'], self.context.width_mul)
110 |
111 |
112 | class OpenCVCameraModel(BaseCameraModel):
113 | uniforms_names = BaseCameraModel.uniforms_names + ['distortion_coeff', 'center_off',
114 | 'focal_dist', 'far', 'width_mul']
115 |
116 | def __init__(self):
117 | super().__init__("opencv")
118 |
119 | def init_intrinsics(self, image_size, focal_dist, center, distortion_coeffs, far=20.):
120 | assert len(distortion_coeffs) == 5
121 | image_size = np.array(image_size)
122 | focal_dist = np.array(focal_dist)
123 | center = np.array(center)
124 | distortion_coeffs = np.array(distortion_coeffs)
125 | self.context.focal_dist = (focal_dist / image_size * 2).astype(np.float32).copy()
126 | self.context.center_off = (center / image_size * 2 - 1).astype(np.float32).copy()
127 | self.context.distortion_coeffs = distortion_coeffs.astype(np.float32).copy()
128 | self.context.far = np.array(far).astype(np.float32).copy()
129 | self.context.width_mul = image_size[1] / image_size[0]
130 |
131 | def upload_intrinsics(self, shader_ids):
132 | gl.glUniform1fv(shader_ids['distortion_coeff'], 5, self.context.distortion_coeffs)
133 | gl.glUniform2fv(shader_ids['center_off'], 1, self.context.center_off)
134 | gl.glUniform2fv(shader_ids['focal_dist'], 1, self.context.focal_dist)
135 | gl.glUniform1f(shader_ids['far'], self.context.far)
136 | gl.glUniform1f(shader_ids['width_mul'], self.context.width_mul)
137 |
138 |
139 | class StandardProjectionCameraModel(BaseCameraModel, ABC):
140 | uniforms_names = BaseCameraModel.uniforms_names + ['P', 'width_mul']
141 |
142 | def __init__(self, name):
143 | super().__init__(name)
144 |
145 | def upload_intrinsics(self, shader_ids):
146 | gl.glUniformMatrix4fv(shader_ids['P'], 1, gl.GL_FALSE, glm.value_ptr(self.context.Projection))
147 | gl.glUniform1f(shader_ids['width_mul'], self.context.width_mul)
148 |
149 | def project(self, points: np.ndarray, model_mtx: Optional[Union[glm.mat4x4, np.ndarray]] = None):
150 | VP = np.asarray(self.context.Projection * self.context.View)
151 | if model_mtx is not None:
152 | VP = np.matmul(VP, np.asarray(model_mtx))
153 |         points = np.concatenate([points, np.ones((points.shape[0], 1), dtype=points.dtype)], axis=1)
154 |         points_gl_projected = np.matmul(points, VP.T)
155 |         points_gl_projected = points_gl_projected[:, :3] / points_gl_projected[:, 3:4]  # perspective division to NDC
156 | return points_gl_projected
157 |
158 |
159 | class PerspectiveCameraModel(StandardProjectionCameraModel):
160 | def __init__(self):
161 | super().__init__("perspective")
162 |
163 | def init_intrinsics(self, image_size, fov=45., far=20., near=0.05):
164 | width, height = image_size
165 | self.context.Projection = glm.perspective(glm.radians(fov), float(width) / float(height), near, far)
166 | self.context.width_mul = image_size[1] / image_size[0]
167 |
168 |
169 | class OrthogonalCameraModel(StandardProjectionCameraModel):
170 | def __init__(self):
171 | super().__init__("orthogonal")
172 |
173 | def init_intrinsics(self, image_size, left, right, bottom, top, far=20., near=0.05):
174 | width, height = image_size
175 | self.context.Projection = glm.orthoLH(left, right, bottom, top, near, far)
176 | self.context.width_mul = image_size[1] / image_size[0]
177 |
178 |
179 | camera_models = {'ocam': OcamCameraModel, 'opencv': OpenCVCameraModel, 'perspective': PerspectiveCameraModel}
180 |
--------------------------------------------------------------------------------
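A short sketch (an illustration, not from the repo) of the conventions used in `models.py`: quaternions are stored scalar-first (w, x, y, z) and rolled into SciPy's (x, y, z, w) order internally, extrinsics default to the cam2world direction, and `project()` maps world points to OpenGL NDC:

```python
import numpy as np
from cloudrender.camera import PerspectiveCameraModel

camera = PerspectiveCameraModel()
camera.init_intrinsics(image_size=(640, 480), fov=60., far=100., near=0.1)
# identity rotation, camera placed 5 m up the +z axis, looking down -z (cam2world)
camera.init_extrinsics(quat=np.array([1., 0., 0., 0.]), pose=np.array([0., 0., 5.]))

world_points = np.array([[0., 0., 0.], [1., 1., 1.]])
ndc = camera.project(world_points)  # (2, 3); x/y fall in [-1, 1] for points inside the frustum
```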
/cloudrender/camera/trajectory.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import logging
3 | from typing import Sequence
4 | from scipy.spatial.transform import Rotation, Slerp
5 | from scipy.interpolate import interp1d
6 | from scipy.interpolate import splev, splrep
7 | from scipy.ndimage import gaussian_filter1d
8 | from scipy.signal.windows import gaussian
9 |
10 | from .models import BaseCameraModel
11 |
12 | logger = logging.getLogger("trajectory")
13 |
14 | class Trajectory:
15 | def __init__(self):
16 | self.trajectory = []
17 |
18 | def set_trajectory(self, keypoints):
19 | self.trajectory = [{k: np.array(v) for k, v in x.items()} for x in keypoints]
20 |
21 | def find_closest_kp_in_traj(self, time:float):
22 | times = np.array([x['time'] for x in self.trajectory])
23 | times_diff = times-time
24 | times_mask = times_diff>0
25 | if times_mask.sum() == 0:
26 | return self.trajectory[-1], self.trajectory[-1]
27 | times_inds = np.flatnonzero(times_mask)
28 | curr_ind = times_inds[0]
29 | if curr_ind == 0:
30 | return self.trajectory[0], self.trajectory[0]
31 | return self.trajectory[curr_ind], self.trajectory[curr_ind-1]
32 |
33 | def apply(self, camera: BaseCameraModel, time: float, interpolate_closest=False):
34 | point1, point2 = self.find_closest_kp_in_traj(time)
35 | if interpolate_closest:
36 | pose = (point1['position'] + point2['position'])/2.
37 | quat = (point1['quaternion'] + point2['quaternion'])/2.
38 | else:
39 | pose = point1['position']
40 | quat = point1['quaternion']
41 | camera.init_extrinsics(quat, pose)
42 |
43 | def serialize_trajectory(self):
44 | """
45 | Make trajectory json-serializable
46 | Returns:
47 | List[dict]: trajectory keypoints - each keypoint has "time", "position" and "quaternion"
48 | """
49 | s_traj = [{k: v.tolist() if isinstance(v, np.ndarray) else v for k, v in x.items()} for x in self.trajectory]
50 | return s_traj
51 |
52 | def sort_trajectory(self):
53 | times = [x['time'] for x in self.trajectory]
54 | ind_sorted = np.argsort(times)
55 | self.trajectory = [self.trajectory[k] for k in ind_sorted]
56 | return ind_sorted
57 |
58 | def add_keypoint(self, quaternion: Sequence[float], position: Sequence[float],
59 | time: float, check_time: bool = True):
60 | keypoint = {"position": np.array(position), "quaternion": np.array(quaternion), "time": time}
61 | if check_time:
62 |             times = np.array([x['time'] for x in self.trajectory])
63 | diff = times - keypoint['time']
64 | if np.any(np.abs(diff)<1e-4):
65 | return
66 | self.trajectory.append(keypoint)
67 | self.sort_trajectory()
68 |
69 | def rot_gaussian_smoothing(self, rots, sigma=5.):
70 | def get_rot_ind(ind):
71 | while ind>=len(rots) or ind <0:
72 | if ind >= len(rots):
73 | ind=2*len(rots)-1-ind
74 | if ind < 0:
75 | ind = -ind
76 | return ind
77 |
78 | winradius = round(2*3*sigma)
79 | if winradius<1:
80 | return rots
81 | weights = gaussian(winradius*2+1, sigma)
82 | res = []
83 | for ind in range(len(rots)):
84 | window_inds = [get_rot_ind(i) for i in range(ind-winradius,ind+winradius+1)]
85 | res.append(rots[window_inds].mean(weights))
86 | return res
87 |
88 | def refine_trajectory(self, time_step:float = 1/60., interp_type:str = "quadratic", smoothness:float = 5.0):
89 | """
90 |         Refines the trajectory by creating keypoints in between the existing ones via interpolation
91 |         Args:
92 |             time_step: how often to create new points
93 |             interp_type: interpolation type, one of "linear", "quadratic", "cubic"
94 |             smoothness: how hard to smooth the position and rotation trajectories
95 | """
96 | min_pts_for_interp = {"linear": 2, "quadratic": 3, "cubic": 4}
97 | assert interp_type in min_pts_for_interp.keys(), \
98 | f"Available interpolations are: {list(min_pts_for_interp.keys())}"
99 | if len(self.trajectory) < min_pts_for_interp[interp_type]:
100 | logger.warning(f'Not enough points for interpolation with "{interp_type}", returning unchanged')
101 | return
102 | start_time = self.trajectory[0]['time']
103 | end_time = self.trajectory[-1]['time']
104 | cam_times = [x['time'] for x in self.trajectory]
105 | cam_rots = Rotation.from_quat([np.roll(x['quaternion'],-1) for x in self.trajectory])
106 | cam_poses = [x['position'] for x in self.trajectory]
107 | rot_slerp = Slerp(cam_times, cam_rots)
108 | interp_times = np.concatenate([np.arange(start_time,end_time, time_step), [end_time]])
109 | interp_rots = rot_slerp(interp_times)
110 | pos_intrp = interp1d(cam_times, cam_poses, axis=0, kind=interp_type)
111 | interp_poses = pos_intrp(interp_times)
112 | interp_poses = np.array(list(zip(*[gaussian_filter1d(x, smoothness) for x in zip(*interp_poses)])))
113 | interp_rots = self.rot_gaussian_smoothing(interp_rots, smoothness)
114 | interp_quats = [np.roll(x.as_quat(),1) for x in interp_rots]
115 | interp_traj = [{'position':interp_poses[i], 'quaternion':interp_quats[i], 'time':interp_times[i]}
116 | for i in range(len(interp_times))]
117 | self.trajectory = interp_traj
118 |
--------------------------------------------------------------------------------
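A hypothetical usage sketch for `Trajectory` (names and values are illustrative): keypoints may arrive out of order, `refine_trajectory` densifies them to the target frame rate, and `apply` writes the interpolated extrinsics into a camera:

```python
import numpy as np
from cloudrender.camera import PerspectiveCameraModel
from cloudrender.camera.trajectory import Trajectory

camera = PerspectiveCameraModel()
camera.init_intrinsics((1280, 720))

traj = Trajectory()
traj.add_keypoint(quaternion=[1., 0., 0., 0.], position=[0., 0., 0.], time=0.)
traj.add_keypoint(quaternion=[1., 0., 0., 0.], position=[1., 0., 0.], time=2.)
traj.add_keypoint(quaternion=[1., 0., 0., 0.], position=[1., 1., 0.], time=1.)  # sorted in automatically
traj.refine_trajectory(time_step=1 / 60., interp_type="quadratic", smoothness=3.0)
traj.apply(camera, time=0.5)  # sets the camera extrinsics for t = 0.5 s
```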
/cloudrender/capturing.py:
--------------------------------------------------------------------------------
1 | import ctypes
2 | import numpy as np
3 | from OpenGL import GL as gl
4 |
5 | class DirectCapture:
6 | """A helper capturing class. Gets the color or depth data from the current FBO"""
7 | def __init__(self, resolution):
8 | self.resolution = resolution
9 |
10 | def request_color(self):
11 | width, height = self.resolution
12 | color_buf = gl.glReadPixels(0, 0, width, height, gl.GL_RGB, gl.GL_UNSIGNED_BYTE)
13 | color = np.frombuffer(color_buf, np.uint8).reshape(height, width, 3)
14 | color = color[::-1].copy()
15 | return color
16 |
17 | def request_depth(self):
18 | width, height = self.resolution
19 | depth_buf = gl.glReadPixels(0, 0, width, height, gl.GL_DEPTH_COMPONENT, gl.GL_FLOAT)
20 | depth = np.frombuffer(depth_buf, np.float32).reshape(height, width)
21 | depth = depth[::-1].copy()
22 | return depth
23 |
24 | def request_inds(self):
25 | width, height = self.resolution
26 | gl.glReadBuffer(gl.GL_COLOR_ATTACHMENT1)
27 | ind_buf = gl.glReadPixels(0, 0, width, height, gl.GL_RED_INTEGER, gl.GL_INT)
28 | indices = np.frombuffer(ind_buf, np.int32).reshape(height, width)[::-1]
29 | gl.glReadBuffer(gl.GL_COLOR_ATTACHMENT0)
30 | return indices
31 |
32 | def __enter__(self):
33 | return self
34 |
35 | def __exit__(self, exc_type, exc_val, exc_tb):
36 | pass
37 |
38 |
39 | class AsyncPBOCapture:
40 | """A helper capturing class. Gets the color from the current FBO.
41 | Submits the task to the OpenGL driver to be executed asynchronously.
42 |     Doesn't wait on the CPU<->GPU transfer, which improves speed in most cases"""
43 | def __init__(self, resolution, queue_size):
44 | self.queue_size = queue_size
45 | self.resolution = resolution
46 | self.qstart = 0
47 | self.qlen = 0
48 | self.pbos = None
49 |
50 | def create_pbos(self):
51 | pbos = gl.glGenBuffers(self.queue_size)
52 | for pbo in pbos:
53 | gl.glBindBuffer(gl.GL_PIXEL_PACK_BUFFER, pbo)
54 | gl.glBufferData(gl.GL_PIXEL_PACK_BUFFER, (3 * self.resolution[0] * self.resolution[1]), None,
55 | gl.GL_STREAM_READ)
56 | gl.glBindBuffer(gl.GL_PIXEL_PACK_BUFFER, 0)
57 | self.pbos = pbos
58 |
59 | def delete_pbos(self):
60 | if self.pbos is not None:
61 | gl.glDeleteBuffers(self.queue_size, self.pbos)
62 | self.pbos = None
63 |
64 | def __len__(self):
65 | return self.qlen
66 |
67 | def __enter__(self):
68 | self.create_pbos()
69 | return self
70 |
71 | def __exit__(self, exc_type, exc_val, exc_tb):
72 | self.delete_pbos()
73 |
74 | def get_first_requested_color(self):
75 | if self.qlen == 0:
76 | return None
77 | width, height = self.resolution
78 | pbo = self.pbos[self.qstart]
79 | gl.glBindBuffer(gl.GL_PIXEL_PACK_BUFFER, pbo)
80 | bufferdata = gl.glMapBuffer(gl.GL_PIXEL_PACK_BUFFER, gl.GL_READ_ONLY)
81 | data = np.frombuffer(ctypes.string_at(bufferdata, (3 * width * height)), np.uint8).reshape(height, width, 3)
82 | data = data[::-1].copy()
83 | gl.glUnmapBuffer(gl.GL_PIXEL_PACK_BUFFER)
84 | gl.glBindBuffer(gl.GL_PIXEL_PACK_BUFFER, 0)
85 | self.qlen -= 1
86 | self.qstart += 1
87 | if self.qstart >= self.queue_size:
88 | self.qstart = 0
89 | return data
90 |
91 | @property
92 | def qend(self):
93 | return (self.qstart+self.qlen) % self.queue_size
94 |
95 | def request_color_async(self):
96 | if self.qlen >= self.queue_size:
97 | res = self.get_first_requested_color()
98 | else:
99 | res = None
100 | pbo = self.pbos[self.qend]
101 | self.qlen += 1
102 | width, height = self.resolution
103 | gl.glBindBuffer(gl.GL_PIXEL_PACK_BUFFER, pbo)
104 | gl.glReadPixels(0, 0, width, height, gl.GL_RGB, gl.GL_UNSIGNED_BYTE, 0)
105 | gl.glBindBuffer(gl.GL_PIXEL_PACK_BUFFER, 0)
106 | return res
107 |
108 |
109 |
--------------------------------------------------------------------------------
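A hypothetical render-loop sketch for `AsyncPBOCapture` (the `draw_frame`/`consume` callbacks are placeholders): frames come back with a few frames of latency, so `request_color_async()` returns `None` until the queue fills up, and the queue must be drained after the loop:

```python
from cloudrender.capturing import AsyncPBOCapture

def capture_frames(draw_frame, consume, n_frames, resolution):
    with AsyncPBOCapture(resolution, queue_size=5) as capture:
        for _ in range(n_frames):
            draw_frame()  # issue the GL draw calls for one frame
            color = capture.request_color_async()
            if color is not None:
                consume(color)
        # drain the frames that are still in flight
        while len(capture) > 0:
            consume(capture.get_first_requested_color())
```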
/cloudrender/libegl/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | # BSD 3-Clause License
4 | #
5 | # Copyright (c) 2018, Centre National de la Recherche Scientifique
6 | # All rights reserved.
7 | #
8 | # Redistribution and use in source and binary forms, with or without
9 | # modification, are permitted provided that the following conditions are met:
10 | #
11 | # * Redistributions of source code must retain the above copyright notice, this
12 | # list of conditions and the following disclaimer.
13 | #
14 | # * Redistributions in binary form must reproduce the above copyright notice,
15 | # this list of conditions and the following disclaimer in the documentation
16 | # and/or other materials provided with the distribution.
17 | #
18 | # * Neither the name of the copyright holder nor the names of its
19 | # contributors may be used to endorse or promote products derived from
20 | # this software without specific prior written permission.
21 | #
22 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
26 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
28 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 |
33 | import os, sys
34 | # if OpenGL was already loaded, we have to reload it after the
35 | # PYOPENGL_PLATFORM variable is set...
36 | ogl_module_names = list(k for k in sys.modules.keys() if k.startswith('OpenGL'))
37 | for mod_name in ogl_module_names:
38 | del sys.modules[mod_name]
39 | os.environ['PYOPENGL_PLATFORM'] = 'egl'
40 | import OpenGL.EGL as egl
41 | import ctypes
42 |
43 | # we have to define a few missing objects in PyOpenGL implementation
44 | # (as of version 3.1.0)
45 |
46 | def define_egl_ext_function(func_name, res_type, *arg_types):
47 | if hasattr(egl, func_name):
48 | return # function already exists
49 | addr = egl.eglGetProcAddress(func_name)
50 | if addr is None:
51 | return # function is not available
52 | else:
53 | proto = ctypes.CFUNCTYPE(res_type)
54 | proto.argtypes = arg_types
55 | globals()['proto__' + func_name] = proto # avoid garbage collection
56 | func = proto(addr)
57 | setattr(egl, func_name, func)
58 |
59 | def define_egl_ext_structure(struct_name):
60 | if hasattr(egl, struct_name):
61 | return # structure already exists
62 | from OpenGL._opaque import opaque_pointer_cls
63 | setattr(egl, struct_name, opaque_pointer_cls(struct_name))
64 |
65 | define_egl_ext_structure('EGLDeviceEXT')
66 | define_egl_ext_function('eglGetPlatformDisplayEXT', egl.EGLDisplay)
67 | define_egl_ext_function('eglQueryDevicesEXT', egl.EGLBoolean)
68 | define_egl_ext_function('eglQueryDeviceStringEXT', ctypes.c_char_p)
69 |
70 | EGL_PLATFORM_DEVICE_EXT = 0x313F
71 | EGL_DRM_DEVICE_FILE_EXT = 0x3233
72 |
73 | # utility function for egl attributes management
74 | def egl_convert_to_int_array(dict_attrs):
75 | # convert to EGL_NONE terminated list
76 | attrs = sum(([ k, v] for k, v in dict_attrs.items()), []) + [ egl.EGL_NONE ]
77 | # convert to ctypes array
78 | return (egl.EGLint * len(attrs))(*attrs)
79 |
80 | # expose objects at this level
81 | from .context import EGLContext
82 |
--------------------------------------------------------------------------------
/cloudrender/libegl/context.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | # BSD 3-Clause License
4 | #
5 | # Copyright (c) 2018, Centre National de la Recherche Scientifique
6 | # All rights reserved.
7 | #
8 | # Redistribution and use in source and binary forms, with or without
9 | # modification, are permitted provided that the following conditions are met:
10 | #
11 | # * Redistributions of source code must retain the above copyright notice, this
12 | # list of conditions and the following disclaimer.
13 | #
14 | # * Redistributions in binary form must reproduce the above copyright notice,
15 | # this list of conditions and the following disclaimer in the documentation
16 | # and/or other materials provided with the distribution.
17 | #
18 | # * Neither the name of the copyright holder nor the names of its
19 | # contributors may be used to endorse or promote products derived from
20 | # this software without specific prior written permission.
21 | #
22 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
26 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
28 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 |
33 | import OpenGL.EGL as egl
34 | from ctypes import pointer
35 | from . import devices, egl_convert_to_int_array
36 | from .tools import TransactionMixin
37 | import logging
38 |
39 | class EGLContext(TransactionMixin):
40 | def initialize(self, width, height):
41 | for device in devices.probe():
42 | if not self.initialize_on_device(device, width, height):
43 | continue
44 | return True
45 | logging.error("Failed to initialize OpenGL context.")
46 | return False
47 | def initialize_on_device(self, device, width, height):
48 |         logging.info("Selected device: %s", device.name)
49 | # step 1
50 | if device.initialize():
51 | self.add_rollback_cb(lambda: device.release())
52 | else:
53 | self.rollback(); return False
54 | # step 2
55 | egl_dpy = device.get_egl_display()
56 | if egl_dpy != egl.EGL_NO_DISPLAY:
57 | self.add_rollback_cb(lambda: egl.eglTerminate(egl_dpy))
58 | else:
59 | self.rollback(); return False
60 | # step 3
61 | major, minor = egl.EGLint(), egl.EGLint()
62 | if not egl.eglInitialize(egl_dpy, pointer(major), pointer(minor)):
63 | self.rollback(); return False
64 | logging.info("EGL version %d.%d" % (major.value, minor.value))
65 | # step 4
66 | egl_config = self.get_config(egl_dpy, device.compatible_surface_type())
67 | if egl_config is None:
68 | self.rollback(); return False
69 | # step 5
70 | egl_surface = device.create_surface(egl_dpy, egl_config)
71 | if egl_surface.initialize(width, height):
72 | self.add_rollback_cb(lambda: egl_surface.release())
73 | else:
74 | self.rollback(); return False
75 | # step 6
76 | egl_context = self.get_context(egl_dpy, egl_config)
77 | if egl_context is not None:
78 | self.add_rollback_cb(lambda: egl.eglDestroyContext(egl_dpy, egl_context))
79 | else:
80 | self.rollback(); return False
81 | # step 7
82 | if not egl_surface.make_current(egl_context):
83 | self.rollback(); return False
84 | # device seems to be working
85 | return True
86 | def get_config(self, egl_dpy, surface_type):
87 | egl_config_attribs = {
88 | egl.EGL_RED_SIZE: 8,
89 | egl.EGL_GREEN_SIZE: 8,
90 | egl.EGL_BLUE_SIZE: 8,
91 | egl.EGL_ALPHA_SIZE: 8,
92 | egl.EGL_DEPTH_SIZE: egl.EGL_DONT_CARE,
93 | egl.EGL_STENCIL_SIZE: egl.EGL_DONT_CARE,
94 | egl.EGL_RENDERABLE_TYPE: egl.EGL_OPENGL_BIT,
95 | egl.EGL_SURFACE_TYPE: surface_type,
96 | }
97 | egl_config_attribs = egl_convert_to_int_array(egl_config_attribs)
98 | egl_config = egl.EGLConfig()
99 | num_configs = egl.EGLint()
100 | if not egl.eglChooseConfig(egl_dpy, egl_config_attribs,
101 | pointer(egl_config), 1, pointer(num_configs)):
102 | return None
103 | if num_configs.value == 0:
104 | return None
105 | return egl_config
106 | def get_context(self, egl_dpy, egl_config):
107 | if not egl.eglBindAPI(egl.EGL_OPENGL_API):
108 | return None
109 | egl_context = egl.eglCreateContext(egl_dpy, egl_config, egl.EGL_NO_CONTEXT, None)
110 | if egl_context == egl.EGL_NO_CONTEXT:
111 | return None
112 | return egl_context
113 | def release(self):
114 | self.rollback()
115 |
--------------------------------------------------------------------------------
/cloudrender/libegl/devices/__init__.py:
--------------------------------------------------------------------------------
1 | # BSD 3-Clause License
2 | #
3 | # Copyright (c) 2018, Centre National de la Recherche Scientifique
4 | # All rights reserved.
5 | #
6 | # Redistribution and use in source and binary forms, with or without
7 | # modification, are permitted provided that the following conditions are met:
8 | #
9 | # * Redistributions of source code must retain the above copyright notice, this
10 | # list of conditions and the following disclaimer.
11 | #
12 | # * Redistributions in binary form must reproduce the above copyright notice,
13 | # this list of conditions and the following disclaimer in the documentation
14 | # and/or other materials provided with the distribution.
15 | #
16 | # * Neither the name of the copyright holder nor the names of its
17 | # contributors may be used to endorse or promote products derived from
18 | # this software without specific prior written permission.
19 | #
20 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
31 | from .generic import GenericEGLDevice
32 | from .gbm import GBMDevice
33 |
34 | def probe():
35 | for cls in (GenericEGLDevice, GBMDevice):
36 | for device in cls.probe():
37 | yield device
38 |
--------------------------------------------------------------------------------
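A quick sketch (not from the repo) of the probing order: `GenericEGLDevice` enumerates EGL devices first, then `GBMDevice` falls back to the DRM render nodes in `/dev/dri`:

```python
from cloudrender.libegl import devices

# device names look like "EGL device /dev/dri/card0" or "GBM device /dev/dri/renderD128"
for device in devices.probe():
    print(device.name)
```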
/cloudrender/libegl/devices/gbm.py:
--------------------------------------------------------------------------------
1 | # BSD 3-Clause License
2 | #
3 | # Copyright (c) 2018, Centre National de la Recherche Scientifique
4 | # All rights reserved.
5 | #
6 | # Redistribution and use in source and binary forms, with or without
7 | # modification, are permitted provided that the following conditions are met:
8 | #
9 | # * Redistributions of source code must retain the above copyright notice, this
10 | # list of conditions and the following disclaimer.
11 | #
12 | # * Redistributions in binary form must reproduce the above copyright notice,
13 | # this list of conditions and the following disclaimer in the documentation
14 | # and/or other materials provided with the distribution.
15 | #
16 | # * Neither the name of the copyright holder nor the names of its
17 | # contributors may be used to endorse or promote products derived from
18 | # this software without specific prior written permission.
19 | #
20 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
31 | import os, glob
32 | from .. import libgbm
33 | import OpenGL.EGL as egl
34 | from ctypes import pointer
35 |
36 | class GBMSurface:
37 | def __init__(self, gbm_dev, egl_dpy, egl_config):
38 | self.gbm_dev, self.egl_dpy, self.egl_config = gbm_dev, egl_dpy, egl_config
39 | self.gbm_surf = None
40 | self.egl_surface = None
41 | def initialize(self, width, height):
42 | gbm_format = egl.EGLint()
43 | if not egl.eglGetConfigAttrib(self.egl_dpy, self.egl_config,
44 | egl.EGL_NATIVE_VISUAL_ID, pointer(gbm_format)):
45 | return False
46 | self.gbm_surf = libgbm.gbm_surface_create(
47 | self.gbm_dev,
48 | width, height,
49 | gbm_format,
50 | libgbm.GBM_BO_USE_RENDERING)
51 | if not self.gbm_surf:
52 | self.gbm_surf = None
53 | return False
54 | self.egl_surface = egl.eglCreateWindowSurface(
55 | self.egl_dpy, self.egl_config, self.gbm_surf, None)
56 | if self.egl_surface == egl.EGL_NO_SURFACE:
57 | self.egl_surface = None
58 | self.release()
59 | return False
60 | return True
61 | def release(self):
62 | if self.gbm_surf is not None:
63 | libgbm.gbm_surface_destroy(self.gbm_surf)
64 | if self.egl_surface is not None:
65 | egl.eglDestroySurface(self.egl_dpy, self.egl_surface)
66 | def make_current(self, egl_context):
67 | return egl.eglMakeCurrent(self.egl_dpy, self.egl_surface, self.egl_surface, egl_context)
68 |
69 | class GBMDevice:
70 | @staticmethod
71 | def probe():
72 | cards = sorted(glob.glob("/dev/dri/renderD*"))
73 | return [ GBMDevice(card) for card in cards ]
74 | def __init__(self, dev_path):
75 | self.dev_path = dev_path
76 | self.name = "GBM device " + dev_path
77 | def initialize(self):
78 | self.gbm_fd = os.open(self.dev_path, os.O_RDWR|os.O_CLOEXEC)
79 | if self.gbm_fd < 0:
80 | return False
81 | self.gbm_dev = libgbm.gbm_create_device(self.gbm_fd)
82 | if self.gbm_dev is None:
83 | os.close(self.gbm_fd)
84 | return False
85 | return True
86 | def release(self):
87 | libgbm.gbm_device_destroy(self.gbm_dev)
88 | os.close(self.gbm_fd)
89 | def compatible_surface_type(self):
90 | return egl.EGL_WINDOW_BIT
91 | def get_egl_display(self):
92 | return egl.eglGetDisplay(self.gbm_dev)
93 | def create_surface(self, egl_dpy, egl_config):
94 | return GBMSurface(self.gbm_dev, egl_dpy, egl_config)
95 |
--------------------------------------------------------------------------------
/cloudrender/libegl/devices/generic.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | # BSD 3-Clause License
4 | #
5 | # Copyright (c) 2018, Centre National de la Recherche Scientifique
6 | # All rights reserved.
7 | #
8 | # Redistribution and use in source and binary forms, with or without
9 | # modification, are permitted provided that the following conditions are met:
10 | #
11 | # * Redistributions of source code must retain the above copyright notice, this
12 | # list of conditions and the following disclaimer.
13 | #
14 | # * Redistributions in binary form must reproduce the above copyright notice,
15 | # this list of conditions and the following disclaimer in the documentation
16 | # and/or other materials provided with the distribution.
17 | #
18 | # * Neither the name of the copyright holder nor the names of its
19 | # contributors may be used to endorse or promote products derived from
20 | # this software without specific prior written permission.
21 | #
22 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
26 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
28 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 |
33 | # Modified by Vladimir Guzov, 2021
34 |
35 | import OpenGL.EGL as egl
36 | from .. import EGL_PLATFORM_DEVICE_EXT, EGL_DRM_DEVICE_FILE_EXT, egl_convert_to_int_array
37 | from ctypes import pointer
38 |
39 | class GenericEGLSurface:
40 | def __init__(self, egl_dpy, egl_config):
41 | self.egl_dpy, self.egl_config = egl_dpy, egl_config
42 | def initialize(self, width, height):
43 | pb_surf_attribs = egl_convert_to_int_array({
44 | egl.EGL_WIDTH: width,
45 | egl.EGL_HEIGHT: height,
46 | })
47 | self.egl_surface = egl.eglCreatePbufferSurface(
48 | self.egl_dpy, self.egl_config, pb_surf_attribs)
49 | if self.egl_surface == egl.EGL_NO_SURFACE:
50 | return False
51 | return True
52 | def release(self):
53 | egl.eglDestroySurface(self.egl_dpy, self.egl_surface)
54 | def make_current(self, egl_context):
55 | return egl.eglMakeCurrent(self.egl_dpy, self.egl_surface, self.egl_surface, egl_context)
56 |
57 | class GenericEGLDevice:
58 | @staticmethod
59 | def probe():
60 | if not hasattr(egl, 'eglQueryDevicesEXT'):
61 | # if no enumeration support in EGL, return empty list
62 | return []
63 | num_devices = egl.EGLint()
64 | if not egl.eglQueryDevicesEXT(0, None, pointer(num_devices)) or num_devices.value < 1:
65 | return []
66 | devices = (egl.EGLDeviceEXT * num_devices.value)() # array of size num_devices
67 | if not egl.eglQueryDevicesEXT(num_devices.value, devices, pointer(num_devices)) or num_devices.value < 1:
68 | return []
69 | return [ GenericEGLDevice(devices[i]) for i in range(num_devices.value) ]
70 | def __init__(self, egl_dev):
71 | self.egl_dev = egl_dev
72 | def get_egl_display(self):
73 | return egl.eglGetPlatformDisplayEXT(EGL_PLATFORM_DEVICE_EXT, self.egl_dev, None)
74 | def initialize(self):
75 | return True
76 | def release(self):
77 | pass
78 | def compatible_surface_type(self):
79 | return egl.EGL_PBUFFER_BIT
80 | @property
81 | def name(self):
82 | if not hasattr(egl, 'eglQueryDeviceStringEXT'):
83 | return ""
84 | devstr = egl.eglQueryDeviceStringEXT(self.egl_dev, EGL_DRM_DEVICE_FILE_EXT)
85 | if devstr is None:
86 | return ""
87 | return "EGL device " + devstr.decode('ASCII')
88 | def create_surface(self, egl_dpy, egl_config):
89 | return GenericEGLSurface(egl_dpy, egl_config)
90 |
--------------------------------------------------------------------------------
/cloudrender/libegl/libgbm.py:
--------------------------------------------------------------------------------
1 | # BSD 3-Clause License
2 | #
3 | # Copyright (c) 2018, Centre National de la Recherche Scientifique
4 | # All rights reserved.
5 | #
6 | # Redistribution and use in source and binary forms, with or without
7 | # modification, are permitted provided that the following conditions are met:
8 | #
9 | # * Redistributions of source code must retain the above copyright notice, this
10 | # list of conditions and the following disclaimer.
11 | #
12 | # * Redistributions in binary form must reproduce the above copyright notice,
13 | # this list of conditions and the following disclaimer in the documentation
14 | # and/or other materials provided with the distribution.
15 | #
16 | # * Neither the name of the copyright holder nor the names of its
17 | # contributors may be used to endorse or promote products derived from
18 | # this software without specific prior written permission.
19 | #
20 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
31 | from .tools import LazyFuncCaller
32 |
33 | LIB_NAME = "libgbm.so.1" # On Ubuntu, package libgbm1
34 |
35 | # calls to compiled library
36 | gbm_create_device = LazyFuncCaller(LIB_NAME, 'gbm_create_device')
37 | gbm_device_destroy = LazyFuncCaller(LIB_NAME, 'gbm_device_destroy')
38 | gbm_surface_create = LazyFuncCaller(LIB_NAME, 'gbm_surface_create')
39 | gbm_surface_destroy = LazyFuncCaller(LIB_NAME, 'gbm_surface_destroy')
40 |
41 | # found in gbm.h
42 | GBM_BO_USE_RENDERING = 4
43 |
44 |
45 |
--------------------------------------------------------------------------------
/cloudrender/libegl/tools.py:
--------------------------------------------------------------------------------
1 | # BSD 3-Clause License
2 | #
3 | # Copyright (c) 2018, Centre National de la Recherche Scientifique
4 | # All rights reserved.
5 | #
6 | # Redistribution and use in source and binary forms, with or without
7 | # modification, are permitted provided that the following conditions are met:
8 | #
9 | # * Redistributions of source code must retain the above copyright notice, this
10 | # list of conditions and the following disclaimer.
11 | #
12 | # * Redistributions in binary form must reproduce the above copyright notice,
13 | # this list of conditions and the following disclaimer in the documentation
14 | # and/or other materials provided with the distribution.
15 | #
16 | # * Neither the name of the copyright holder nor the names of its
17 | # contributors may be used to endorse or promote products derived from
18 | # this software without specific prior written permission.
19 | #
20 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
31 | import ctypes
32 |
33 | # only load libraries if they are needed
34 | class LazyFuncCaller:
35 | libs = {}
36 | def __init__(self, lib_name, func_name):
37 | self.lib_name = lib_name
38 | self.func_name = func_name
39 | def __call__(self, *args, **kwargs):
40 | if self.lib_name not in LazyFuncCaller.libs:
41 | LazyFuncCaller.libs[self.lib_name] = ctypes.CDLL(self.lib_name)
42 | func = getattr(LazyFuncCaller.libs[self.lib_name], self.func_name)
43 | return func(*args, **kwargs)
44 |
45 | # provide rollback capability to classes
46 | class TransactionMixin:
47 | def __init__(self):
48 | self.rollback_cbs = []
49 | def rollback(self):
50 | for cb in reversed(self.rollback_cbs):
51 | cb()
52 | self.rollback_cbs = []
53 | def add_rollback_cb(self, cb):
54 | self.rollback_cbs += [ cb ]
55 | def __enter__(self):
56 | return self
57 | def __exit__(self, type, value, traceback):
58 | self.rollback()
59 |
60 |
--------------------------------------------------------------------------------
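A toy sketch (not from the repo) of the `TransactionMixin` pattern: every successful step registers an undo callback, and `rollback()` unwinds them in reverse order when a later step fails, which is exactly how `EGLContext.initialize_on_device` uses it:

```python
from cloudrender.libegl.tools import TransactionMixin

class Pipeline(TransactionMixin):
    def build(self):
        self.add_rollback_cb(lambda: print("undo step 1"))
        self.add_rollback_cb(lambda: print("undo step 2"))
        raise RuntimeError("step 3 failed")

try:
    with Pipeline() as pipeline:
        pipeline.build()
except RuntimeError:
    pass  # __exit__ already ran rollback(): prints "undo step 2", then "undo step 1"
```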
/cloudrender/render/__init__.py:
--------------------------------------------------------------------------------
1 | from .mesh import SimpleMesh
2 | from .pointcloud import SimplePointcloud, SimplePointcloudProgressive, SimplePointcloudWithNormals, AvgcolorPointcloudWithNormals
3 | from .shadowmap import ShadowMap
4 | from .lights import *
5 |
--------------------------------------------------------------------------------
/cloudrender/render/depthmap.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from videoio import Uint16Reader, VideoReader
3 |
4 | from .pointcloud import SimplePointcloud
5 | from .renderable import DynamicTimedRenderable
6 |
7 |
8 | class DepthVideo(SimplePointcloud, DynamicTimedRenderable):
9 | VIDEO_RELOAD_THRESH = 100
10 | DEFAULT_COLOR = (255, 255, 0, 255)
11 |
12 | def __init__(self, pc_table, color = None, *args, **kwargs):
13 | super().__init__(*args, **kwargs)
14 | self.pc_table_ext = np.dstack([pc_table, np.ones(pc_table.shape[:2] + (1,), dtype=pc_table.dtype)])
15 | self.color_override = False
16 | self.start_ind = 0
17 | self.current_sequence_frame_ind = -1
18 | self.seqlen = 0
19 | self.current_frame_cloud = None
20 | if color is None:
21 | self.color = self.DEFAULT_COLOR
22 | else:
23 | self.color = color
24 |
25 | def _set_sequence(self, depthvideo_path, colorvideo_path = None):
26 | self.depthvideo_path = depthvideo_path
27 | self.colorvideo_path = colorvideo_path
28 | self.dmaps_reader = Uint16Reader(depthvideo_path)
29 | self.dmaps_iter = iter(self.dmaps_reader)
30 | self.sequence_len = len(self.dmaps_reader)
31 | if colorvideo_path is not None:
32 | self.colors_reader = VideoReader(colorvideo_path)
33 | self.colors_iter = iter(self.colors_reader)
34 | self.sequence_len = min(self.sequence_len, len(self.colors_reader))
35 | else:
36 | self.colors_reader = None
37 | self.colors_iter = None
38 | self.next_frame_ind = 0
39 | self.reset_current_frame()
40 |
41 | def switch_color_override(self):
42 | self.color_override = not self.color_override
43 | if self.sequence_len != 0:
44 | self.load_current_frame()
45 |
46 | def unset_video(self):
47 | self.depthvideo_path = None
48 | self.dmaps_reader = None
49 | self.dmaps_iter = None
50 | self.colorvideo_path = None
51 | self.colors_reader = None
52 | self.colors_iter = None
53 | self.sequence_len = 0
54 | self.times = None
55 | self.current_frame_cloud = None
56 | self.current_sequence_frame_ind = self.start_ind
57 | self.delete_buffers()
58 |
59 | def _load_current_frame(self):
60 | pc = self.get_curr_pointcloud()
61 | self.update_buffers(pc)
62 |
63 | def load_depth_color(self, frame_ind):
64 | diff = frame_ind - self.next_frame_ind
65 | if diff < self.VIDEO_RELOAD_THRESH and diff>=0:
66 | for _ in range(diff):
67 | next(self.dmaps_iter)
68 | if self.colorvideo_path is not None:
69 | next(self.colors_iter)
70 | else:
71 | self.dmaps_reader = Uint16Reader(self.depthvideo_path, start_frame=frame_ind)
72 | self.dmaps_iter = iter(self.dmaps_reader)
73 | if self.colorvideo_path is not None:
74 | self.colors_reader = VideoReader(self.colorvideo_path, start_frame=frame_ind)
75 | self.colors_iter = iter(self.colors_reader)
76 | dmap_frame = next(self.dmaps_iter)
77 | self.next_frame_ind = frame_ind+1
78 | if self.colorvideo_path is not None:
79 | color_frame = next(self.colors_iter)
80 | color_frame = np.concatenate([color_frame, np.full(color_frame.shape[:-1] + (1,), 255, np.uint8)], axis=2)
81 | return dmap_frame, color_frame
82 | else:
83 | return dmap_frame, None
84 |
85 | def get_curr_pointcloud(self):
86 | if self.current_frame_cloud is not None and self.current_frame_cloud[1] == self.current_sequence_frame_ind:
87 | return self.current_frame_cloud[0]
88 | depth, colors = self.load_depth_color(self.current_sequence_frame_ind)
89 | nanmask = depth == 0
90 | d = depth.copy().astype(np.float64) / 1000.
91 | d[nanmask] = np.nan
92 | pc = self.pc_table_ext * d[..., np.newaxis]
93 | pc_validmask = np.isfinite(pc[:, :, 0])
94 | pc = pc[pc_validmask]
95 | if colors is None or self.color_override:
96 | colors = np.tile(np.array(self.color).astype(np.uint8).reshape(1, 4), (len(pc), 1))
97 | else:
98 | colors = colors[pc_validmask, :]
99 | cloud = self.PointcloudContainer(pc, colors)
100 | self.current_frame_cloud = (cloud, self.current_sequence_frame_ind)
101 | return cloud
--------------------------------------------------------------------------------
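`pc_table` is not documented in this file; a plausible construction (an assumption, not from the repo) is a per-pixel back-projection table for a pinhole camera, so that `pc_table[v, u] * depth[v, u]` is the camera-space 3D point seen at pixel `(u, v)`:

```python
import numpy as np

def make_pinhole_pc_table(width, height, fx, fy, cx, cy):
    """Hypothetical helper: one unit-depth ray (x/z, y/z, 1) per pixel, shape (H, W, 3)."""
    u, v = np.meshgrid(np.arange(width), np.arange(height))
    return np.dstack([(u - cx) / fx, (v - cy) / fy, np.ones_like(u, dtype=np.float64)])
```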
/cloudrender/render/lights.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | class Light:
4 | def __init__(self, light_type):
5 | self.type = light_type
6 |
7 | @staticmethod
8 | def normalize(v):
9 | vnorm = np.linalg.norm(v)
10 | if vnorm > 0:
11 | return v/vnorm
12 | else:
13 | return v
14 |
15 |
16 | class DirectionalLight(Light):
17 | def __init__(self, direction: np.ndarray, intensity: np.ndarray):
18 | super().__init__('directional')
19 | self.direction = self.normalize(direction)
20 | self.intensity = intensity
21 |
--------------------------------------------------------------------------------
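A tiny sketch (not from the repo) of constructing the light consumed by `SimpleMesh` below; treating `intensity` as a per-channel RGB vector is an assumption:

```python
import numpy as np
from cloudrender.render.lights import DirectionalLight

light = DirectionalLight(direction=np.array([0., -1., -1.]),  # normalized in __init__
                         intensity=np.array([0.8, 0.8, 0.8]))
```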
/cloudrender/render/mesh.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import logging
4 | import numpy as np
5 | from abc import ABC
6 | from typing import List, Union, Optional
7 | from dataclasses import dataclass
8 |
9 | import trimesh
10 | from OpenGL import GL as gl
11 | from .shaders.shader_loader import Shader
12 | from .renderable import Renderable
13 | from .lights import Light, DirectionalLight
14 | from .shadowmap import ShadowMap
15 | from ..camera.models import StandardProjectionCameraModel
16 |
17 |
18 | class Mesh(Renderable, ABC):
19 | @dataclass
20 | class MeshContainer:
21 | vertices: np.ndarray
22 | faces: np.ndarray
23 | colors: Optional[np.ndarray] = None
24 | vertex_normals: Optional[np.ndarray] = None
25 | texture: Optional[np.ndarray] = None
26 | face_uv_map: Optional[np.ndarray] = None # face-wise UV map
27 |
28 | def __init__(self, *args, draw_shadows: bool = True, generate_shadows: bool = True, **kwargs):
29 | super().__init__(*args, draw_shadows=draw_shadows, generate_shadows=generate_shadows, **kwargs)
30 |
31 |
32 | class SimpleMesh(Mesh):
33 |     """
34 |     Vertex-colored mesh with directional lighting support
35 |     """
36 |
37 |     @dataclass
38 |     class MaterialProps:
39 |         ambient: float = 1.
40 |         diffuse: float = 0.
41 |         specular: float = 0.
42 |         shininess: float = 0.
43 |
44 | def __init__(self, *args, **kwargs):
45 | super().__init__(*args, **kwargs)
46 | self.material = SimpleMesh.MaterialProps()
47 | self.set_overlay_color()
48 |
49 | def _init_shaders(self, camera_model, shader_mode):
50 | self.shader = shader = Shader()
51 | dirname = os.path.dirname(os.path.abspath(__file__))
52 |
53 | if self.draw_shadows:
54 | shader.initShaderFromGLSL([os.path.join(dirname, f"shaders/simple_mesh/shadowdraw/vertex_{camera_model}.glsl")],
55 | [os.path.join(dirname, "shaders/simple_mesh/shadowdraw/fragment.glsl")])
56 | self.context.shader_ids.update(self.locate_uniforms(self.shader, ['shadowmap_MVP', 'shadowmap_enabled',
57 | 'shadowmaps', 'shadow_color']))
58 | else:
59 | shader.initShaderFromGLSL([os.path.join(dirname, f"shaders/simple_mesh/vertex_{camera_model}.glsl")],
60 | [os.path.join(dirname, "shaders/simple_mesh/fragment.glsl")])
61 | self.context.shader_ids.update(self.locate_uniforms(self.shader, ['dirlight.direction', 'dirlight.intensity',
62 | 'specular', 'shininess',
63 | 'ambient', 'diffuse', 'overlay_color']))
64 |
65 | if self.generate_shadows:
66 | self.shadowgen_shader = Shader()
67 | self.shadowgen_shader.initShaderFromGLSL([
68 |                 os.path.join(dirname, "shaders/simple_mesh/shadowgen/vertex_perspective.glsl")],
69 | [os.path.join(dirname, "shaders/simple_mesh/shadowgen/fragment.glsl")])
70 |
71 | def _delete_buffers(self):
72 | gl.glDeleteBuffers(3, [self.context.vertexbuffer, self.context.colorbuffer, self.context.normalbuffer])
73 | gl.glDeleteVertexArrays(1, [self.context.vao])
74 |
75 | def set_material(self, ambient=1., diffuse=0., specular=0., shininess=0.):
76 | self.material = self.MaterialProps(ambient, diffuse, specular, shininess)
77 |
78 | def set_overlay_color(self, color=(200, 200, 200, 0)):
79 | self.overlay_color = np.asarray(color, dtype=np.uint8)
80 |
81 | def _set_buffers(self, mesh: Union[Mesh.MeshContainer, trimesh.Trimesh]):
82 | faces = mesh.faces
83 | glverts = np.copy(mesh.vertices.astype(np.float32)[faces.reshape(-1), :], order='C')
84 | if hasattr(mesh, "colors"):
85 | glcolors = np.copy(mesh.colors.astype(np.float32)[faces.reshape(-1), :] / 255., order='C')
86 | else:
87 | glcolors = np.copy(mesh.visual.vertex_colors.astype(np.float32)[faces.reshape(-1), :] / 255., order='C')
88 | assert glcolors.shape[1] == 4
89 | glnorms = np.copy(mesh.vertex_normals.astype(np.float32)[faces.reshape(-1), :], order='C')
90 |
91 | self.nglverts = len(glverts)
92 |
93 | self.context.vao = gl.glGenVertexArrays(1)
94 | gl.glBindVertexArray(self.context.vao)
95 |
96 | self.context.vertexbuffer = gl.glGenBuffers(1)
97 | gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.context.vertexbuffer)
98 | gl.glBufferData(gl.GL_ARRAY_BUFFER, glverts.nbytes, glverts, gl.GL_DYNAMIC_DRAW)
99 |
100 | self.context.colorbuffer = gl.glGenBuffers(1)
101 | gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.context.colorbuffer)
102 | gl.glBufferData(gl.GL_ARRAY_BUFFER, glcolors.nbytes, glcolors, gl.GL_DYNAMIC_DRAW)
103 |
104 | self.context.normalbuffer = gl.glGenBuffers(1)
105 | gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.context.normalbuffer)
106 | gl.glBufferData(gl.GL_ARRAY_BUFFER, glnorms.nbytes, glnorms, gl.GL_DYNAMIC_DRAW)
107 |
108 | def _update_buffers(self, mesh: Union[Mesh.MeshContainer, trimesh.Trimesh]):
109 | faces = mesh.faces
110 | glverts = np.copy(mesh.vertices.astype(np.float32)[faces.reshape(-1), :], order='C')
111 | if hasattr(mesh, "colors"):
112 | glcolors = np.copy(mesh.colors.astype(np.float32)[faces.reshape(-1), :] / 255., order='C')
113 | else:
114 | glcolors = np.copy(mesh.visual.vertex_colors.astype(np.float32)[faces.reshape(-1), :] / 255., order='C')
115 | assert glcolors.shape[1] == 4
116 | glnorms = np.copy(mesh.vertex_normals.astype(np.float32)[faces.reshape(-1), :], order='C')
117 | gl.glBindVertexArray(self.context.vao)
118 | gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.context.vertexbuffer)
119 | gl.glBufferData(gl.GL_ARRAY_BUFFER, glverts.nbytes, glverts, gl.GL_DYNAMIC_DRAW)
120 | gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.context.colorbuffer)
121 | gl.glBufferData(gl.GL_ARRAY_BUFFER, glcolors.nbytes, glcolors, gl.GL_DYNAMIC_DRAW)
122 | gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.context.normalbuffer)
123 | gl.glBufferData(gl.GL_ARRAY_BUFFER, glnorms.nbytes, glnorms, gl.GL_DYNAMIC_DRAW)
124 |
125 | def _upload_uniforms(self, shader_ids, lights=(), shadowmaps=()):
126 | shadowmaps_enabled = np.zeros(self.SHADOWMAPS_MAX, dtype=np.int32)
127 | shadowmaps_enabled[:len(shadowmaps)] = 1
128 | M = self.context.Model
129 | shadowmaps_lightMVP = [np.array(s.light_VP * M) for s in shadowmaps]
130 | shadowmaps_lightMVP = np.array(shadowmaps_lightMVP, dtype='f4')
131 | if self.draw_shadows:
132 | gl.glUniform1iv(self.context.shader_ids['shadowmap_enabled'], self.SHADOWMAPS_MAX, shadowmaps_enabled)
133 | gl.glUniformMatrix4fv(self.context.shader_ids['shadowmap_MVP'], len(shadowmaps), gl.GL_TRUE, shadowmaps_lightMVP)
134 | gl.glUniform4f(self.context.shader_ids['shadow_color'], *self.shadowcolor)
135 | for shadow_ind, shadowmap in enumerate(shadowmaps):
136 | gl.glActiveTexture(gl.GL_TEXTURE0 + shadow_ind)
137 | gl.glBindTexture(gl.GL_TEXTURE_2D, shadowmap.texture)
138 | if len(lights) > 0:
139 | # currently only 1 directional light is supported
140 | light = lights[0]
141 | material = self.material
142 | else:
143 | # if no light is supplied, make the object fully ambient
144 | light = DirectionalLight(np.ones(3), np.ones(3))
145 | material = self.MaterialProps()
146 | gl.glUniform3f(self.context.shader_ids['dirlight.direction'], *light.direction)
147 | gl.glUniform3f(self.context.shader_ids['dirlight.intensity'], *light.intensity)
148 | gl.glUniform1f(self.context.shader_ids['ambient'], material.ambient)
149 | gl.glUniform1f(self.context.shader_ids['diffuse'], material.diffuse)
150 | gl.glUniform1f(self.context.shader_ids['specular'], material.specular)
151 | gl.glUniform1f(self.context.shader_ids['shininess'], material.shininess)
152 | gl.glUniform4f(self.context.shader_ids['overlay_color'], *(self.overlay_color.astype(np.float32)/255.))
153 |
154 | def _draw(self, reset: bool, lights: List[Light], shadowmaps: List[ShadowMap]) -> bool:
155 | """
156 | Internal draw pass
157 | Args:
158 | reset (bool): Reset drawing progress (for progressive drawing)
159 | lights (List[Light]): All light objects that influence the current object
160 | Returns:
161 |             bool: True if the drawing buffer was changed (i.e. something was actually drawn)
162 | """
163 | if not reset:
164 | return False
165 | self.shader.begin()
166 | self.upload_uniforms(self.context.shader_ids, lights, shadowmaps)
167 |
168 | gl.glBindVertexArray(self.context.vao)
169 |
170 | gl.glEnableVertexAttribArray(0)
171 | gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.context.vertexbuffer)
172 | gl.glVertexAttribPointer(0, 3, gl.GL_FLOAT, gl.GL_FALSE, 0, None)
173 |
174 | gl.glEnableVertexAttribArray(1)
175 | gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.context.colorbuffer)
176 | gl.glVertexAttribPointer(1, 4, gl.GL_FLOAT, gl.GL_FALSE, 0, None)
177 |
178 | gl.glEnableVertexAttribArray(2)
179 | gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.context.normalbuffer)
180 | gl.glVertexAttribPointer(2, 3, gl.GL_FLOAT, gl.GL_FALSE, 0, None)
181 |
182 | gl.glDrawArrays(gl.GL_TRIANGLES, 0, self.nglverts)
183 |
184 | gl.glDisableVertexAttribArray(0)
185 | gl.glDisableVertexAttribArray(1)
186 | gl.glDisableVertexAttribArray(2)
187 | self.shader.end()
188 | return True
189 |
190 | def _draw_shadowmap(self, shadowmap_camera: StandardProjectionCameraModel) -> bool:
191 | """
192 |         Shadow map draw pass, rendering depth values only
193 | Args:
194 | shadowmap_camera (StandardProjectionCameraModel): perspective/ortho camera for shadow calculation
195 | Returns:
196 |             bool: True if the drawing buffer was changed (i.e. something was actually drawn)
197 | """
198 | self.shadowgen_shader.begin()
199 | self.upload_shadowgen_uniforms(shadowmap_camera, self.shadowgen_context.shader_ids)
200 |
201 | gl.glBindVertexArray(self.context.vao)
202 |
203 | gl.glEnableVertexAttribArray(0)
204 | gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.context.vertexbuffer)
205 | gl.glVertexAttribPointer(0, 3, gl.GL_FLOAT, gl.GL_FALSE, 0, None)
206 |
207 | gl.glDrawArrays(gl.GL_TRIANGLES, 0, self.nglverts)
208 |
209 | gl.glDisableVertexAttribArray(0)
210 | self.shadowgen_shader.end()
211 | return True
212 |
213 |
214 | class TexturedMesh(SimpleMesh):
215 | def __init__(self, *args, **kwargs):
216 | super().__init__(*args, **kwargs)
217 | self.material = SimpleMesh.MaterialProps()
218 |
219 | def _init_shaders(self, camera_model, shader_mode):
220 | self.shader = shader = Shader()
221 | dirname = os.path.dirname(os.path.abspath(__file__))
222 |
223 | if self.draw_shadows:
224 | shader.initShaderFromGLSL([os.path.join(dirname, f"shaders/textured_mesh/shadowdraw/vertex_{camera_model}.glsl")],
225 | [os.path.join(dirname, "shaders/textured_mesh/shadowdraw/fragment.glsl")])
226 | self.context.shader_ids.update(self.locate_uniforms(self.shader, ['shadowmap_MVP', 'shadowmap_enabled',
227 | 'shadowmaps', 'shadow_color']))
228 | else:
229 | shader.initShaderFromGLSL([os.path.join(dirname, f"shaders/textured_mesh/vertex_{camera_model}.glsl")],
230 | [os.path.join(dirname, "shaders/textured_mesh/fragment.glsl")])
231 | self.context.shader_ids.update(self.locate_uniforms(self.shader, ['dirlight.direction', 'dirlight.intensity',
232 | 'specular', 'shininess',
233 | 'ambient', 'diffuse', 'overlay_color']))
234 |
235 | if self.generate_shadows:
236 | self.shadowgen_shader = Shader()
237 | self.shadowgen_shader.initShaderFromGLSL([
238 |                 os.path.join(dirname, "shaders/simple_mesh/shadowgen/vertex_perspective.glsl")],
239 | [os.path.join(dirname, "shaders/simple_mesh/shadowgen/fragment.glsl")])
240 |
241 | def _delete_buffers(self):
242 | gl.glDeleteBuffers(4, [self.context.vertexbuffer, self.context.uvbuffer, self.context.normalbuffer, self.context.texturebuffer])
243 | gl.glDeleteVertexArrays(1, [self.context.vao])
244 |
245 | def _set_buffers(self, mesh: Union[Mesh.MeshContainer, trimesh.Trimesh]):
246 | faces = mesh.faces
247 | glverts = np.copy(mesh.vertices.astype(np.float32)[faces.reshape(-1), :], order='C')
248 | if hasattr(mesh, "face_uv_map"):
249 | gluvmap = np.copy(mesh.face_uv_map.astype(np.float32), order='C')
250 | else:
251 | # Trimesh stores vertex-wise UV map
252 | gluvmap = np.copy(mesh.visual.uv.astype(np.float32)[faces.reshape(-1), :], order='C')
253 | assert gluvmap.shape[1] == 2
254 | glnorms = np.copy(mesh.vertex_normals.astype(np.float32)[faces.reshape(-1), :], order='C')
255 |
256 | self.nglverts = len(glverts)
257 |
258 | self.context.vao = gl.glGenVertexArrays(1)
259 | gl.glBindVertexArray(self.context.vao)
260 |
261 | self.context.vertexbuffer = gl.glGenBuffers(1)
262 | gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.context.vertexbuffer)
263 | gl.glBufferData(gl.GL_ARRAY_BUFFER, glverts.nbytes, glverts, gl.GL_DYNAMIC_DRAW)
264 |
265 | self.context.uvbuffer = gl.glGenBuffers(1)
266 | gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.context.uvbuffer)
267 | gl.glBufferData(gl.GL_ARRAY_BUFFER, gluvmap.nbytes, gluvmap, gl.GL_DYNAMIC_DRAW)
268 |
269 | self.context.normalbuffer = gl.glGenBuffers(1)
270 | gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.context.normalbuffer)
271 | gl.glBufferData(gl.GL_ARRAY_BUFFER, glnorms.nbytes, glnorms, gl.GL_DYNAMIC_DRAW)
272 |
273 | if hasattr(mesh, "texture"):
274 | texture_data = mesh.texture
275 | else:
276 | texture_data = np.array(mesh.visual.image, order='C')
277 |
278 | self.context.texturebuffer = gl.glGenTextures(1)
279 | gl.glBindTexture(gl.GL_TEXTURE_2D, self.context.texturebuffer)
280 |
281 |         if texture_data.dtype != np.uint8:
282 | texture_data = (texture_data * 255.).astype(np.uint8)
283 | # TODO: add support for the alpha channel
284 | # if texture_data.shape[2] == 3:
285 | # texture_data = np.concatenate([texture_data, np.full(texture_data.shape[:2] + (1,), 255, dtype=np.uint8)], axis=2)
286 | texture_data = texture_data[:, :, :3]
287 | texture_data = 255 - texture_data[::-1, :, :]
288 | gltexture = np.copy(texture_data.reshape(-1), order='C')
289 | gl.glTexParameterf(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_S, gl.GL_REPEAT)
290 | gl.glTexParameterf(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_T, gl.GL_REPEAT)
291 | gl.glTexParameterf(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)
292 | gl.glTexParameterf(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)
293 | gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1)
294 | gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGB, texture_data.shape[1], texture_data.shape[0], 0, gl.GL_RGB, gl.GL_UNSIGNED_BYTE,
295 | gltexture)
296 |
297 | def _update_buffers(self, mesh: Union[Mesh.MeshContainer, trimesh.Trimesh]):
298 | faces = mesh.faces
299 | glverts = np.copy(mesh.vertices.astype(np.float32)[faces.reshape(-1), :], order='C')
300 | gluvmap = None
301 | if hasattr(mesh, "face_uv_map"):
302 | if mesh.face_uv_map is not None:
303 | gluvmap = np.copy(mesh.face_uv_map.astype(np.float32), order='C')
304 | else:
305 | if mesh.visual is not None and mesh.visual.uv is not None:
306 | # Trimesh stores vertex-wise UV map
307 | gluvmap = np.copy(mesh.visual.uv.astype(np.float32)[faces.reshape(-1), :], order='C')
308 | glnorms = np.copy(mesh.vertex_normals.astype(np.float32)[faces.reshape(-1), :], order='C')
309 | gl.glBindVertexArray(self.context.vao)
310 | gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.context.vertexbuffer)
311 | gl.glBufferData(gl.GL_ARRAY_BUFFER, glverts.nbytes, glverts, gl.GL_DYNAMIC_DRAW)
312 | if gluvmap is not None:
313 | assert gluvmap.shape[1] == 2
314 | gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.context.uvbuffer)
315 | gl.glBufferData(gl.GL_ARRAY_BUFFER, gluvmap.nbytes, gluvmap, gl.GL_DYNAMIC_DRAW)
316 | gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.context.normalbuffer)
317 | gl.glBufferData(gl.GL_ARRAY_BUFFER, glnorms.nbytes, glnorms, gl.GL_DYNAMIC_DRAW)
318 |
319 | if hasattr(mesh, "texture"):
320 | texture_data = mesh.texture
321 | else:
322 | texture_data = np.array(mesh.visual.image, order='C')
323 | if texture_data is not None:
324 | gl.glBindTexture(gl.GL_TEXTURE_2D, self.context.texturebuffer)
325 |             if texture_data.dtype != np.uint8:
326 | texture_data = (texture_data * 255.).astype(np.uint8)
327 | # if texture_data.shape[2] == 3:
328 | # texture_data = np.concatenate([texture_data, np.full(texture_data.shape[:2] + (1,), 255, dtype=np.uint8)], axis=2)
329 | gl.glTexParameterf(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_S, gl.GL_REPEAT)
330 | gl.glTexParameterf(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_T, gl.GL_REPEAT)
331 | gl.glTexParameterf(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)
332 | gl.glTexParameterf(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)
333 | texture_data = texture_data[:, :, :3]
334 | texture_data = 255 - texture_data[::-1, :, :]
335 | gltexture = np.copy(texture_data.reshape(-1), order='C')
336 | gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGB, texture_data.shape[1], texture_data.shape[0], 0, gl.GL_RGB, gl.GL_UNSIGNED_BYTE,
337 | gltexture)
338 |
339 | def _draw(self, reset: bool, lights: List[Light], shadowmaps: List[ShadowMap]) -> bool:
340 | """
341 | Internal draw pass
342 | Args:
343 | reset (bool): Reset drawing progress (for progressive drawing)
344 | lights (List[Light]): All light objects that influence the current object
345 | Returns:
346 |             bool: True if the drawing buffer was changed (i.e. something was actually drawn)
347 | """
348 | if not reset:
349 | return False
350 | self.shader.begin()
351 | self.upload_uniforms(self.context.shader_ids, lights, shadowmaps)
352 |
353 | gl.glBindVertexArray(self.context.vao)
354 |
355 | gl.glEnableVertexAttribArray(0)
356 | gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.context.vertexbuffer)
357 | gl.glVertexAttribPointer(0, 3, gl.GL_FLOAT, gl.GL_FALSE, 0, None)
358 |
359 | gl.glEnableVertexAttribArray(1)
360 | gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.context.uvbuffer)
361 | gl.glVertexAttribPointer(1, 2, gl.GL_FLOAT, gl.GL_FALSE, 0, None)
362 |
363 | gl.glEnableVertexAttribArray(2)
364 | gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.context.normalbuffer)
365 | gl.glVertexAttribPointer(2, 3, gl.GL_FLOAT, gl.GL_FALSE, 0, None)
366 |
367 | gl.glActiveTexture(gl.GL_TEXTURE0)
368 | gl.glBindTexture(gl.GL_TEXTURE_2D, self.context.texturebuffer)
369 |
370 | gl.glDrawArrays(gl.GL_TRIANGLES, 0, self.nglverts)
371 |
372 | gl.glDisableVertexAttribArray(0)
373 | gl.glDisableVertexAttribArray(1)
374 | gl.glDisableVertexAttribArray(2)
375 | self.shader.end()
376 | return True
377 |
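To show how the pieces above fit together, here is a minimal usage sketch, assuming an OpenGL context is already current and `camera` is a configured camera model (from cloudrender/camera/models.py, not shown in this file); the mesh file name is hypothetical.

import trimesh
from cloudrender.render.mesh import SimpleMesh

# `camera` is an assumption: a configured BaseCameraModel whose construction
# lives in cloudrender.camera.models; a GL context must be current.
mesh_obj = SimpleMesh(camera=camera, draw_shadows=False, generate_shadows=False)
mesh_obj.init_context()                    # compiles shaders, locates uniforms
mesh_obj.set_material(ambient=0.6, diffuse=0.4)
# any trimesh.Trimesh with per-vertex colors and normals works here
mesh_obj.set_buffers(trimesh.load("scene_mesh.ply"))   # hypothetical file name
mesh_obj.draw(lights=[sun])                # `sun` as in the lights.py sketch above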
--------------------------------------------------------------------------------
/cloudrender/render/renderable.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import glm
3 | from typing import List, Dict
4 | from collections import defaultdict
5 | from scipy.spatial.transform import Rotation
6 | from OpenGL import GL as gl
7 | from ..camera.models import BaseCameraModel, StandardProjectionCameraModel
8 | from .lights import Light
9 | from .shadowmap import ShadowMap
10 | from .shaders.shader_loader import Shader
11 |
12 |
13 | class Renderable:
14 | SHADOWMAPS_MAX = 6
15 | LIGHTS_MAX = {"directional": 1}
16 |
17 | class GLContext(object):
18 | pass
19 |
20 | @staticmethod
21 | def locate_uniforms(shader: Shader, keys: List[str]) -> Dict[str, int]:
22 | """
23 | Locates uniforms inside the supplied shader
24 | Args:
25 | shader (Shader): shader to seek uniforms in
26 | keys: list of uniforms names
27 | Returns:
28 | Dict[str, int]: dict with uniforms locations
29 | """
30 | shader_ids = {k: gl.glGetUniformLocation(shader.program, k) for k in keys}
31 | return shader_ids
32 |
33 | def __init__(self, camera: BaseCameraModel = None, draw_shadows: bool = False, generate_shadows: bool = False):
34 | self.camera = camera
35 | # whether to draw this object during the next draw() pass
36 | self.visible = True
37 | # defines overall initialization progress
38 | self.initialized = False
39 | # defines whether OpenGL buffers and shaders are loaded
40 | self.context_initialized = False
41 |         # additional features that require shader replacement
42 | self.shader_mode = ''
43 | # defines whether object supports drawing shadow during rendering
44 | self.draw_shadows = draw_shadows
45 | # defines whether object supports shadowmap update
46 | self.generate_shadows = generate_shadows
47 | # defines whether several draw() calls are needed to fully draw the object
48 | self.is_progressive = False
49 | # defines color of the shadow
50 | self.shadowcolor = np.array([0, 0, 0, 0.5])
51 |
52 | def check_lights(self, lights: List[Light]):
53 | lights_count = defaultdict(int)
54 | for light in lights:
55 | lights_count[light.type] += 1
56 | for light_type, count in lights_count.items():
57 | if light_type not in self.LIGHTS_MAX:
58 | raise NotImplementedError(f"Light '{light_type}' is not supported for this object")
59 | if self.LIGHTS_MAX[light_type] < count:
60 |                 raise NotImplementedError(f"No more than {self.LIGHTS_MAX[light_type]} lights of type '{light_type}' "
61 |                                           f"are supported, got {count}")
62 |
63 | def check_shadowmaps(self, shadowmaps):
64 | assert len(shadowmaps) == 0 or self.draw_shadows, "Shadow drawing is disabled for that object"
65 |         assert len(shadowmaps) <= self.SHADOWMAPS_MAX, f"No more than {self.SHADOWMAPS_MAX} shadowmaps are supported"
66 |
67 | def draw(self, reset: bool = True, lights: List[Light] = None, shadowmaps: List[ShadowMap] = None) -> bool:
68 | """
69 | Main draw pass
70 | Args:
71 | reset (bool): Reset drawing progress (for progressive drawing)
72 |             lights (List[Light]): All light objects that influence the current object
73 | shadowmaps (List[ShadowMap]): List of shadowmaps to draw shadows from
74 | Returns:
75 |             bool: True if the drawing buffer was changed (i.e. something was actually drawn)
76 | """
77 | if not self.visible:
78 | return False
79 | lights = [] if lights is None else lights
80 | shadowmaps = [] if shadowmaps is None else shadowmaps
81 | self.check_lights(lights)
82 | self.check_shadowmaps(shadowmaps)
83 | return self._draw(reset, lights, shadowmaps)
84 |
85 | def _draw(self, reset, lights, shadowmaps) -> bool:
86 | """
87 | Internal draw pass
88 | Args:
89 | reset (bool): Reset drawing progress (for progressive drawing)
90 |             lights (List[Light]): All light objects that influence the current object
91 | shadowmaps (List[ShadowMap]): List of shadowmaps to draw shadows from
92 | Returns:
93 |             bool: True if the drawing buffer was changed (i.e. something was actually drawn)
94 | """
95 | return False
96 |
97 | def set_buffers(self, *args, **kwargs):
98 | """
99 | Sets the content and prepares render buffers
100 | """
101 | if self.context_initialized:
102 | if self.initialized:
103 | self._delete_buffers()
104 | self._set_buffers(*args, **kwargs)
105 | self.initialized = True
106 |
107 | def update_buffers(self, *args, **kwargs):
108 | """
109 | Updates the content and render buffers
110 | """
111 | if not self.initialized:
112 | self.set_buffers(*args, **kwargs)
113 | else:
114 | self._update_buffers(*args, **kwargs)
115 |
116 | def delete_buffers(self):
117 | """
118 | Deletes the content and render buffers
119 | """
120 | if self.initialized:
121 | self._delete_buffers()
122 | self.initialized = False
123 |
124 | def _set_buffers(self, *args, **kwargs):
125 | pass
126 |
127 | def _update_buffers(self, *args, **kwargs):
128 | self.set_buffers(*args, **kwargs)
129 |
130 | def _delete_buffers(self):
131 | pass
132 |
133 | def _finalize_init(self):
134 | pass
135 |
136 | def _init_shaders(self, camera_model, shader_mode):
137 | self.shader = None
138 | self.shadowgen_shader = None
139 |
140 | def _reload_shaders(self, shader_mode: str = None):
141 | self._init_shaders(self.camera.model, shader_mode if shader_mode is not None else self.shader_mode)
142 | self.context.shader_ids.update(self.camera.locate_uniforms(self.shader))
143 | self.context.shader_ids.update(self.locate_uniforms(self.shader, ["M"]))
144 | if self.generate_shadows:
145 | self.shadowgen_context.shader_ids.update(self.locate_uniforms(self.shadowgen_shader,
146 | StandardProjectionCameraModel.uniforms_names))
147 | self.shadowgen_context.shader_ids.update(self.locate_uniforms(self.shadowgen_shader, ["M"]))
148 |
149 |
150 | def init_context(self, shader_mode: str = ''):
151 | """
152 | Inits some OpenGL buffers and loads shaders
153 | Args:
154 |             shader_mode (str): additional features that require shader replacement
155 | """
156 | assert self.camera is not None, "Camera must be set before context initialization"
157 | self.shader_mode = shader_mode
158 | self.context = self.GLContext()
159 | self.context.shader_ids = {}
160 | if self.generate_shadows:
161 | self.shadowgen_context = self.GLContext()
162 | self.shadowgen_context.shader_ids = {}
163 | self._reload_shaders()
164 | self.init_model_extrinsics(np.array([1.,0,0,0]), np.zeros(3))
165 | self._finalize_init()
166 | self.context_initialized = True
167 |
168 | def init_model_extrinsics(self, quat: np.ndarray, pose: np.ndarray):
169 | """
170 | Positions the object in the scene
171 | Args:
172 | quat: quaternion in WXYZ format stored in np array of shape (4,)
173 | pose: translation offset vector of shape (3,)
174 | """
175 | self.model_quat = quat
176 | self.model_pose = pose
177 | # Only cam/local2world supported here
178 | R = Rotation.from_quat(np.roll(quat, -1)).as_matrix()
179 | t = np.array([pose]).T
180 | RT = np.vstack([np.hstack([R, t]), [[0, 0, 0, 1]]])
181 | self.context.Model = glm.mat4(*(RT.T.astype(np.float32).copy()))
182 |
183 | def set_camera(self, camera: BaseCameraModel):
184 | """
185 | Sets the main rendering camera
186 | Args:
187 | camera (BaseCameraModel): the rendering camera
188 | """
189 | self.camera = camera
190 | if self.context_initialized:
191 | self._reload_shaders()
192 |
193 | def upload_uniforms(self, shader_ids: Dict[str, int], lights: List[Light], shadowmaps: List[ShadowMap]):
194 | """
195 | Upload all uniform variables for the main drawing pass
196 | Args:
197 | shader_ids: dictionary containing uniforms locations
198 | lights: list of lights affecting current draw pass
199 | shadowmaps: List of shadowmaps affecting current draw pass
200 | """
201 | self.camera.upload(shader_ids)
202 | gl.glUniformMatrix4fv(shader_ids['M'], 1, gl.GL_FALSE, glm.value_ptr(self.context.Model))
203 | self._upload_uniforms(shader_ids, lights, shadowmaps)
204 |
205 | def _upload_uniforms(self, shader_ids: Dict[str, int], lights: List[Light] = (), shadowmaps = ()):
206 | pass
207 |
208 | def upload_shadowgen_uniforms(self, shadowmap_camera: StandardProjectionCameraModel, shader_ids: dict):
209 | """
210 | Upload all uniform variables for the shadowmap update drawing pass
211 | Args:
212 | shadowmap_camera: perspective camera for shadow calculation
213 | shader_ids: dictionary containing uniforms locations
214 | """
215 | shadowmap_camera.upload(shader_ids)
216 | gl.glUniformMatrix4fv(shader_ids['M'], 1, gl.GL_FALSE, glm.value_ptr(self.context.Model))
217 | self._upload_shadowngen_uniforms(shader_ids)
218 |
219 | def _upload_shadowngen_uniforms(self, shader_ids):
220 | pass
221 |
222 | def draw_shadowmap(self, shadowmap_camera: StandardProjectionCameraModel):
223 | self._draw_shadowmap(shadowmap_camera)
224 |
225 | def _draw_shadowmap(self, shadowmap_camera: StandardProjectionCameraModel):
226 | pass
227 |
228 |
229 | class DynamicRenderable(Renderable):
230 | def __init__(self, camera: BaseCameraModel = None, draw_shadows: bool = False, generate_shadows: bool = False,
231 | *args, **kwargs):
232 | super().__init__(camera, draw_shadows, generate_shadows)
233 | self.sequence_initialized = False
234 | self.current_sequence_frame_ind = 0
235 | self.sequence_len = 0
236 | self.loaded_frame_ind = -1
237 |
238 | def set_sequence(self, *args, **kwargs):
239 | self._set_sequence(*args, **kwargs)
240 | assert self.sequence_len > 0, "Sequence length must be positive, make sure to set it during _set_sequence()"
241 | self.sequence_initialized = True
242 |
243 | def _set_sequence(self, *args, **kwargs):
244 | pass
245 |
246 | def unset_sequence(self):
247 | self._unset_sequence()
248 | self.sequence_initialized = False
249 | self.current_sequence_frame_ind = 0
250 | self.sequence_len = 0
251 | self.loaded_frame_ind = -1
252 |
253 | def _unset_sequence(self):
254 | pass
255 |
256 | def load_current_frame(self):
257 | if self.loaded_frame_ind != self.current_sequence_frame_ind:
258 | self._load_current_frame()
259 | self.loaded_frame_ind = self.current_sequence_frame_ind
260 |
261 | def reload_current_frame(self):
262 | self._load_current_frame()
263 | self.loaded_frame_ind = self.current_sequence_frame_ind
264 |
265 | def _load_current_frame(self):
266 | pass
267 |
268 | def reset_current_frame(self):
269 | self.current_sequence_frame_ind = 0
270 | self.load_current_frame()
271 |
272 | def set_current_frame(self, frame_index: int) -> bool:
273 | """
274 | Set the current frame for the next draw cycle
275 | Args:
276 | frame_index: frame index
277 | Returns:
278 | bool: whether the index was set successfully
279 | """
280 |         if self.sequence_initialized and frame_index>=0 and frame_index<self.sequence_len:
318 |         if self.sequence_len > 0 and self.sequence_frame_times is not None:
319 | times_diff = (self.current_time - self.sequence_frame_times)
320 | mask = times_diff >= 0
321 | if mask.sum() == 0:
322 | index = 0
323 | else:
324 | masked_argmin = np.argmin(times_diff[mask])
325 | index = np.arange(times_diff.shape[0])[mask][masked_argmin]
326 | self.set_current_frame(index)
327 |
328 | def set_time(self, time):
329 | self.current_time = time+self.time_offset
330 | self.load_timed()
331 |
332 | def set_time_offset(self, offset):
333 | time_diff = offset-self.time_offset
334 | self.time_offset = offset
335 | self.advance_time(time_diff)
336 |
337 | def advance_time(self, time_delta):
338 | self.current_time += time_delta
339 | self.load_timed()
340 |
341 | def reset_time(self):
342 | self.current_time = self.time_offset
343 | self.load_timed()
344 |
345 |
346 |
347 |
348 |
349 |
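The timed-playback logic above selects, for a given playback time, the latest frame whose timestamp has already passed. A standalone numpy sketch mirroring that index selection (the body of load_timed):

import numpy as np

def closest_frame_before(current_time, frame_times):
    # latest frame whose timestamp does not exceed current_time;
    # falls back to frame 0 when current_time precedes the sequence
    times_diff = current_time - frame_times
    mask = times_diff >= 0
    if mask.sum() == 0:
        return 0
    masked_argmin = np.argmin(times_diff[mask])
    return int(np.arange(times_diff.shape[0])[mask][masked_argmin])

print(closest_frame_before(1.25, np.array([0.0, 0.5, 1.0, 1.5])))  # -> 2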
--------------------------------------------------------------------------------
/cloudrender/render/rigid.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | from torch.nn import Module
4 | import smplx
5 | from loguru import logger
6 | from typing import Dict, List
7 |
8 | from .mesh import TexturedMesh, SimpleMesh, Mesh
9 | from .pointcloud import SimplePointcloud
10 | from .renderable import DynamicTimedRenderable
11 | from .utils import MeshNorms, centrify_smplx_root_joint
12 | from ..utils import get_closest_ind_after, get_closest_ind_before, ObjectTrajectory
13 |
14 |
15 |
16 | class RigidObject(DynamicTimedRenderable):
17 | def __init__(self, *args, **kwargs):
18 | super().__init__(*args, **kwargs)
19 |
20 | def _set_sequence(self, params_seq: ObjectTrajectory):
21 | self.params_sequence = params_seq
22 | self.sequence_len = len(params_seq)
23 |
24 | def _load_current_frame(self):
25 | obj_location = self.params_sequence[self.current_sequence_frame_ind]
26 | obj_model_position = obj_location["position"]
27 | obj_model_quat = obj_location["quaternion"]
28 | self.init_model_extrinsics(obj_model_quat, obj_model_position)
29 |
30 | class RigidObjectSimpleMesh(SimpleMesh, RigidObject):
31 | def __init__(self, *args, **kwargs):
32 | super().__init__(*args, **kwargs)
33 |
34 | class RigidObjectSimplePointcloud(SimplePointcloud, RigidObject):
35 | def __init__(self, *args, **kwargs):
36 | super().__init__(*args, **kwargs)
37 |
38 |
39 |
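A short usage sketch (not from the repo): _load_current_frame only indexes the sequence and reads its "position" and "quaternion" (WXYZ) keys, so a plain list of dicts stands in for ObjectTrajectory here; `camera` and `some_mesh` are assumptions carried over from the earlier sketches.

import numpy as np
from cloudrender.render.rigid import RigidObjectSimpleMesh

trajectory = [{"position": np.array([0.0, 0.0, 0.1 * i]),
               "quaternion": np.array([1.0, 0.0, 0.0, 0.0])}   # identity rotation
              for i in range(30)]

obj = RigidObjectSimpleMesh(camera=camera)   # `camera` assumed configured, as before
obj.init_context()
obj.set_buffers(some_mesh)        # hypothetical trimesh, see the mesh.py sketch
obj.set_sequence(trajectory)      # sequence_len becomes len(trajectory)
obj.set_current_frame(10)         # repositions the model via init_model_extrinsics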
--------------------------------------------------------------------------------
/cloudrender/render/shaders/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vguzov/cloudrender/7fb4ce6847bb00fea5eb8207fcb983552334932e/cloudrender/render/shaders/__init__.py
--------------------------------------------------------------------------------
/cloudrender/render/shaders/avgcolor_pointcloud_with_normals/depthmap/fragment.glsl:
--------------------------------------------------------------------------------
1 | #version 330 core
2 |
3 | layout(location = 0) out vec4 color;
4 | uniform float depth_offset;
5 |
6 | void main() {
7 | // color = vec4(gl_FragDepth,gl_FragDepth,gl_FragDepth,1);
8 | gl_FragDepth = gl_FragCoord.z + depth_offset;
9 | color = vec4(1,0,0,1);
10 | }
--------------------------------------------------------------------------------
/cloudrender/render/shaders/avgcolor_pointcloud_with_normals/depthmap/geometry.glsl:
--------------------------------------------------------------------------------
1 | #version 330 core
2 | layout (points) in;
3 | layout (triangle_strip, max_vertices = 4) out;
4 |
5 | uniform float width_mul;
6 | uniform float splat_size;
7 | uniform mat4 M;
8 | uniform mat4 V;
9 | uniform mat4 P;
10 | //uniform float depth_offset;
11 |
12 | in VS_OUT {
13 | vec3 norm;
14 | vec4 poseMV;
15 | } gs_in[];
16 |
17 | void main() {
18 | mat4 MVP = P*V*M;
19 | // vec4 depth_offset_perspective = P*vec4(0,0,depth_offset,0);
20 | vec4 vertexPosMV = gs_in[0].poseMV;
21 | vec3 norm = normalize(gs_in[0].norm);
22 | vec4 position = gl_in[0].gl_Position;
23 | float base_size = splat_size*0.015;
24 | vec3 starting_cross_vct = abs(norm[0]-1)<1e-5 ? vec3(0,1,0) : vec3(1,0,0);
25 | vec3 splat_plane_vct1 = cross(norm, starting_cross_vct);
26 | vec3 splat_plane_vct2 = cross(norm, splat_plane_vct1);
27 | vec4 width_offset = MVP*vec4(base_size*splat_plane_vct1,0);
28 | vec4 height_offset = MVP*vec4(base_size*splat_plane_vct2,0);
29 |
30 | gl_Position = position - width_offset - height_offset; //+ depth_offset_perspective;
31 | EmitVertex();
32 |
33 | gl_Position = position + width_offset - height_offset; //+ depth_offset_perspective;
34 | EmitVertex();
35 |
36 | gl_Position = position - width_offset + height_offset; //+ depth_offset_perspective;
37 | EmitVertex();
38 |
39 | gl_Position = position + width_offset + height_offset; //+ depth_offset_perspective;
40 | EmitVertex();
41 |
42 | EndPrimitive();
43 | }
44 |
--------------------------------------------------------------------------------
/cloudrender/render/shaders/avgcolor_pointcloud_with_normals/depthmap/vertex_perspective.glsl:
--------------------------------------------------------------------------------
1 | #version 330 core
2 |
3 | layout(location = 0) in vec3 vertexPos;
4 | layout(location = 1) in vec3 vertexNorm;
5 |
6 | out VS_OUT {
7 | vec3 norm;
8 | vec4 poseMV;
9 | } vs_out;
10 |
11 | uniform mat4 M;
12 | uniform mat4 V;
13 | uniform mat4 P;
14 | void main(){
15 | mat4 MV = V*M;
16 | vec4 vertexPosMV = MV * vec4(vertexPos, 1);
17 | gl_Position = P * vertexPosMV;
18 | vs_out.poseMV = vertexPosMV;
19 | vs_out.norm = vertexNorm;
20 | }
--------------------------------------------------------------------------------
/cloudrender/render/shaders/avgcolor_pointcloud_with_normals/fragment.glsl:
--------------------------------------------------------------------------------
1 | #version 330 core
2 |
3 | in vec4 vcolor;
4 |
5 | layout(location = 0) out vec4 color;
6 | //layout(location = 1) out float inst_flag;
7 |
8 | void main() {
9 | // color = vcolor;
10 | // inst_flag = 1.;
11 | color = vec4(vcolor.rgb,1.);
12 | }
--------------------------------------------------------------------------------
/cloudrender/render/shaders/avgcolor_pointcloud_with_normals/geometry.glsl:
--------------------------------------------------------------------------------
1 | #version 330 core
2 | layout (points) in;
3 | layout (triangle_strip, max_vertices = 4) out;
4 |
5 | uniform float width_mul;
6 | uniform float splat_size;
7 | uniform mat4 M;
8 | uniform mat4 V;
9 | uniform mat4 P;
10 |
11 | in VS_OUT {
12 | vec4 color;
13 | vec3 norm;
14 | vec4 poseMV;
15 | } gs_in[];
16 |
17 | out vec4 vcolor;
18 |
19 | void main() {
20 | mat4 MVP = P*V*M;
21 | vec4 vertexPosMV = gs_in[0].poseMV;
22 | vec3 norm = normalize(gs_in[0].norm);
23 | vec4 position = gl_in[0].gl_Position;
24 | float base_size = splat_size*0.015;
25 | vec3 starting_cross_vct = abs(norm[0]-1)<1e-5 ? vec3(0,1,0) : vec3(1,0,0);
26 | vec3 splat_plane_vct1 = cross(norm, starting_cross_vct);
27 | vec3 splat_plane_vct2 = cross(norm, splat_plane_vct1);
28 | vec4 width_offset = MVP*vec4(base_size*splat_plane_vct1,0);
29 | vec4 height_offset = MVP*vec4(base_size*splat_plane_vct2,0);
30 |
31 | float color_mul = 1;
32 | vcolor = vec4(gs_in[0].color.rgb*color_mul+(1-color_mul), gs_in[0].color.a);
33 |
34 | gl_Position = position - width_offset - height_offset;
35 | EmitVertex();
36 |
37 | vcolor = vec4(gs_in[0].color.rgb*color_mul+(1-color_mul), gs_in[0].color.a);
38 | gl_Position = position + width_offset - height_offset;
39 | EmitVertex();
40 |
41 | vcolor = vec4(gs_in[0].color.rgb*color_mul+(1-color_mul), gs_in[0].color.a);
42 | gl_Position = position - width_offset + height_offset;
43 | EmitVertex();
44 |
45 | vcolor = vec4(gs_in[0].color.rgb*color_mul+(1-color_mul), gs_in[0].color.a);
46 | gl_Position = position + width_offset + height_offset;
47 | EmitVertex();
48 |
49 | EndPrimitive();
50 | }
51 |
--------------------------------------------------------------------------------
/cloudrender/render/shaders/avgcolor_pointcloud_with_normals/normalization/fragment.glsl:
--------------------------------------------------------------------------------
1 | #version 330 core
2 |
3 | layout(location = 0) out vec4 color;
4 |
5 | uniform sampler2D pixColors;
6 | uniform vec2 resolution;
7 | //uniform sampler2D pixWeights;
8 |
9 | void main() {
10 | vec4 sumcolor = texture(pixColors, gl_FragCoord.xy/resolution);
11 | color = vec4(sumcolor.rgb/max(1., sumcolor.a), 1.);
12 | // float cval = sumcolor.a/10.;
13 | // color = vec4(cval,cval,cval,1.);
14 | // color = texture(pixColors, gl_FragCoord.xy)/texture(pixWeights, gl_FragCoord.xy);
15 | }
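For clarity, the arithmetic this pass performs per pixel, assuming the earlier splat pass accumulated colors additively so that the alpha channel counts contributing splats (a numpy sketch, not repo code):

import numpy as np

accum = np.array([[3.0, 1.5, 0.0, 3.0],    # three splats summed into one pixel
                  [0.2, 0.2, 0.2, 0.0]])   # pixel no splat touched (a == 0)
# out = sumcolor.rgb / max(1, sumcolor.a), exactly as in the shader above
out_rgb = accum[:, :3] / np.maximum(1.0, accum[:, 3:4])
print(out_rgb)   # [[1.0, 0.5, 0.0], [0.2, 0.2, 0.2]]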
--------------------------------------------------------------------------------
/cloudrender/render/shaders/avgcolor_pointcloud_with_normals/normalization/vertex.glsl:
--------------------------------------------------------------------------------
1 | #version 330 core
2 |
3 | layout(location = 0) in vec2 pixelCoord;
4 |
5 | uniform vec2 resolution;
6 |
7 | void main(){
8 | gl_Position = vec4(2*pixelCoord/resolution-1, 0.5, 1);
9 | }
--------------------------------------------------------------------------------
/cloudrender/render/shaders/avgcolor_pointcloud_with_normals/vertex_perspective.glsl:
--------------------------------------------------------------------------------
1 | #version 330 core
2 |
3 | layout(location = 0) in vec3 vertexPos;
4 | layout(location = 1) in vec4 vertexColor;
5 | layout(location = 2) in vec3 vertexNorm;
6 |
7 | out VS_OUT {
8 | vec4 color;
9 | vec3 norm;
10 | vec4 poseMV;
11 | } vs_out;
12 |
13 | uniform mat4 M;
14 | uniform mat4 V;
15 | uniform mat4 P;
16 | void main(){
17 | mat4 MV = V*M;
18 | vec4 vertexPosMV = MV * vec4(vertexPos, 1);
19 | gl_Position = P * vertexPosMV;
20 | vs_out.color = vertexColor;
21 | vs_out.poseMV = vertexPosMV;
22 | vs_out.norm = vertexNorm;
23 | }
--------------------------------------------------------------------------------
/cloudrender/render/shaders/shader_loader.py:
--------------------------------------------------------------------------------
1 | import os
2 | from OpenGL import GL as gl
3 |
4 |
5 | def printOpenGLError():
6 | err = gl.glGetError() # pylint: disable=E1111
7 | if (err != gl.GL_NO_ERROR):
8 |         print('GLERROR: ', err)  # note: gluErrorString lives in OpenGL.GLU, not GL
9 |
10 |
11 | # sys.path.append(os.path.abspath(os.path.dirname(__file__)))
12 | class Shader(object):
13 |
14 | def initShaderFromGLSL(self, vertex_shader_paths, fragment_shader_paths, geometry_shader_paths = None):
15 | vertex_shader_source_list = []
16 | fragment_shader_source_list = []
17 | geometry_shader_source_list = []
18 | if (isinstance(vertex_shader_paths, list)):
19 |
20 | for GLSL in vertex_shader_paths:
21 | absDIR = os.path.abspath(os.path.join(os.path.join(os.path.dirname(__file__), "../.."), GLSL))
22 | f = open(absDIR, 'rb')
23 | vertex_shader_source_list.append(f.read())
24 | f.close()
25 | for GLSL in fragment_shader_paths:
26 | absDIR = os.path.abspath(os.path.join(os.path.join(os.path.dirname(__file__), "../.."), GLSL))
27 | f = open(absDIR, 'rb')
28 | fragment_shader_source_list.append(f.read())
29 | f.close()
30 | if geometry_shader_paths is not None:
31 | for GLSL in geometry_shader_paths:
32 | absDIR = os.path.abspath(os.path.join(os.path.join(os.path.dirname(__file__), "../.."), GLSL))
33 | f = open(absDIR, 'rb')
34 | geometry_shader_source_list.append(f.read())
35 | f.close()
36 | self.initShader(vertex_shader_source_list, fragment_shader_source_list, geometry_shader_source_list)
37 |
38 | def initShader(self, vertex_shader_source_list, fragment_shader_source_list, geometry_shader_source_list):
39 | # create program
40 | self.program = gl.glCreateProgram() # pylint: disable=E1111
41 | # print('create program ',self.program)
42 | printOpenGLError()
43 |
44 | # vertex shader
45 | # print('compile vertex shader...')
46 | self.vs = gl.glCreateShader(gl.GL_VERTEX_SHADER) # pylint: disable=E1111
47 | gl.glShaderSource(self.vs, vertex_shader_source_list)
48 | gl.glCompileShader(self.vs)
49 | if (gl.GL_TRUE != gl.glGetShaderiv(self.vs, gl.GL_COMPILE_STATUS)):
50 | err = gl.glGetShaderInfoLog(self.vs)
51 | raise Exception(err)
52 | gl.glAttachShader(self.program, self.vs)
53 | printOpenGLError()
54 |
55 | # fragment shader
56 | # print('compile fragment shader...')
57 | self.fs = gl.glCreateShader(gl.GL_FRAGMENT_SHADER) # pylint: disable=E1111
58 | gl.glShaderSource(self.fs, fragment_shader_source_list)
59 | gl.glCompileShader(self.fs)
60 | if (gl.GL_TRUE != gl.glGetShaderiv(self.fs, gl.GL_COMPILE_STATUS)):
61 | err = gl.glGetShaderInfoLog(self.fs)
62 | raise Exception(err)
63 | gl.glAttachShader(self.program, self.fs)
64 | printOpenGLError()
65 |
66 | if len(geometry_shader_source_list)>0:
67 | self.gs = gl.glCreateShader(gl.GL_GEOMETRY_SHADER)
68 | gl.glShaderSource(self.gs, geometry_shader_source_list)
69 | gl.glCompileShader(self.gs)
70 | if (gl.GL_TRUE != gl.glGetShaderiv(self.gs, gl.GL_COMPILE_STATUS)):
71 | err = gl.glGetShaderInfoLog(self.gs)
72 | raise Exception(err)
73 | gl.glAttachShader(self.program, self.gs)
74 | printOpenGLError()
75 |
76 | # print('link...')
77 | gl.glLinkProgram(self.program)
78 | if (gl.GL_TRUE != gl.glGetProgramiv(self.program, gl.GL_LINK_STATUS)):
79 | err = gl.glGetProgramInfoLog(self.program)
80 | raise Exception(err)
81 | printOpenGLError()
82 |
83 | def begin(self):
84 |         gl.glUseProgram(self.program)
85 |         printOpenGLError()
86 |
87 | def end(self):
88 | gl.glUseProgram(0)
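A minimal usage sketch for the loader above (a GL context must be current; the path construction mirrors how SimpleMesh._init_shaders builds absolute paths, which pass through the os.path.join in initShaderFromGLSL unchanged):

import os
import cloudrender.render as render_pkg
from cloudrender.render.shaders.shader_loader import Shader

shaders_root = os.path.join(os.path.dirname(os.path.abspath(render_pkg.__file__)), "shaders")
shader = Shader()
shader.initShaderFromGLSL(
    [os.path.join(shaders_root, "simple_mesh/vertex_perspective.glsl")],
    [os.path.join(shaders_root, "simple_mesh/fragment.glsl")])
shader.begin()    # glUseProgram(shader.program)
# ... bind buffers, upload uniforms, issue draw calls ...
shader.end()      # glUseProgram(0)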
--------------------------------------------------------------------------------
/cloudrender/render/shaders/simple_mesh/fragment.glsl:
--------------------------------------------------------------------------------
1 | #version 330 core
2 |
3 | struct DirLight {
4 | vec3 direction;
5 | vec3 intensity;
6 | };
7 |
8 | uniform DirLight dirlight;
9 | uniform float diffuse;
10 | uniform float ambient;
11 | uniform float specular;
12 | uniform float shininess;
13 | uniform vec4 shadow_color;
14 | uniform mat4 V;
15 | uniform vec4 overlay_color;
16 |
17 | in VS_OUT {
18 | vec3 pose;
19 | vec4 color;
20 | float depth;
21 | vec3 normal;
22 | vec3 MVnormal;
23 | } fs_in;
24 |
25 | layout(location = 0) out vec4 out_color;
26 |
27 | vec4 dirlight_calculation(DirLight light, vec4 color, vec3 normal, vec3 view_dir)
28 | {
29 | vec3 light_dir = normalize(-light.direction);
30 | // diffuse shading
31 | float diff = max(dot(normal, light_dir), 0.0);
32 | // specular shading
33 | vec3 reflect_dir = reflect(-light_dir, normal);
34 | float spec = shininess > 0 ? pow(max(dot(view_dir, reflect_dir), 0.0), shininess) : 1.0;
35 | // combine results
36 | vec3 frag_ambient = vec3(ambient);
37 | vec3 frag_diffuse = vec3(light.intensity * diff * diffuse);
38 | vec3 frag_specular = vec3(light.intensity * spec * specular);
39 | vec3 color_sum = frag_ambient + frag_diffuse;
40 | return vec4(color_sum, 1.)*color + vec4(frag_specular, 0.);
41 | }
42 |
43 | vec4 alpha_blending(vec4 orig_color, vec4 overlay_color)
44 | {
45 | float res_alpha = overlay_color.a + orig_color.a*(1-overlay_color.a);
46 | return vec4((res_alpha==0.0)?orig_color.rgb:((overlay_color.rgb*overlay_color.a+orig_color.rgb*(1-overlay_color.a))/res_alpha), res_alpha);
47 | }
48 |
49 | void main() {
50 | vec3 camera_position = transpose(V)[3].xyz;
51 | vec3 view_dir = normalize(camera_position - fs_in.pose);
52 | vec4 input_color = alpha_blending(fs_in.color, overlay_color);
53 | // vec4 input_color = vec4(overlay_color.rgb*overlay_color.a+fs_in.color.rgb*(1-overlay_color.a), overlay_color.a + fs_in.color.a*(1-overlay_color.a));
54 | vec4 color = dirlight_calculation(dirlight, input_color, fs_in.normal, view_dir);
55 | out_color = color;
56 | }
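The lighting model above is a standard ambient + diffuse + specular (Phong) computation; a numpy transcription of dirlight_calculation for reference (standalone sketch, all vectors unit-length, color RGBA in [0, 1]):

import numpy as np

def dirlight_calculation(light_direction, intensity, color, normal, view_dir,
                         ambient, diffuse, specular, shininess):
    light_dir = -light_direction / np.linalg.norm(light_direction)
    diff = max(np.dot(normal, light_dir), 0.0)
    # GLSL reflect(I, N) = I - 2*dot(N, I)*N, with I = -light_dir
    reflect_dir = -light_dir - 2.0 * np.dot(normal, -light_dir) * normal
    spec = max(np.dot(view_dir, reflect_dir), 0.0) ** shininess if shininess > 0 else 1.0
    color_sum = ambient + intensity * diff * diffuse          # per-channel RGB
    rgb = color_sum * color[:3] + intensity * spec * specular
    return np.append(rgb, color[3])

# fully lit red surface facing the light: ambient 0.3 + diffuse 0.7 -> (1, 0, 0, 1)
print(dirlight_calculation(np.array([0.0, -1.0, 0.0]), np.ones(3),
                           np.array([1.0, 0.0, 0.0, 1.0]),
                           np.array([0.0, 1.0, 0.0]), np.array([0.0, 1.0, 0.0]),
                           ambient=0.3, diffuse=0.7, specular=0.0, shininess=0.0))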
--------------------------------------------------------------------------------
/cloudrender/render/shaders/simple_mesh/shadowdraw/fragment.glsl:
--------------------------------------------------------------------------------
1 | #version 400
2 | #define SHADOWMAPS_MAX 6
3 | #define SHADOWDEPTH_EPS 5e-3
4 |
5 | struct DirLight {
6 | vec3 direction;
7 | vec3 intensity;
8 | };
9 |
10 | uniform DirLight dirlight;
11 | uniform float diffuse;
12 | uniform float ambient;
13 | uniform float specular;
14 | uniform float shininess;
15 | uniform sampler2D shadowmaps[SHADOWMAPS_MAX];
16 | uniform bool shadowmap_enabled[SHADOWMAPS_MAX];
17 | uniform vec4 shadow_color;
18 | uniform mat4 V;
19 | uniform vec4 overlay_color;
20 |
21 | in VS_OUT {
22 | vec3 pose;
23 | vec4 color;
24 | float depth;
25 | vec3 normal;
26 | vec3 MVnormal;
27 | vec4 pose_shadow[SHADOWMAPS_MAX];
28 | } fs_in;
29 |
30 | layout(location = 0) out vec4 out_color;
31 |
32 | vec4 dirlight_calculation(DirLight light, vec4 color, vec3 normal, vec3 view_dir)
33 | {
34 | vec3 light_dir = normalize(-light.direction);
35 | // diffuse shading
36 | float diff = max(dot(normal, light_dir), 0.0);
37 | // specular shading
38 | vec3 reflect_dir = reflect(-light_dir, normal);
39 | float spec = shininess > 0 ? pow(max(dot(view_dir, reflect_dir), 0.0), shininess) : 1.0;
40 | // combine results
41 | vec3 frag_ambient = vec3(ambient);
42 | vec3 frag_diffuse = vec3(light.intensity * diff * diffuse);
43 | vec3 frag_specular = vec3(light.intensity * spec * specular);
44 | vec3 color_sum = frag_ambient + frag_diffuse;
45 | return vec4(color_sum, 1.)*color + vec4(frag_specular, 0.);
46 | }
47 |
48 | bool shadow_calculation(sampler2D shadowmap, vec4 pose_shadow) {
49 | vec3 projected_shadow = pose_shadow.xyz/pose_shadow.w;
50 | projected_shadow = projected_shadow*0.5+0.5;
51 | float closest_depth = texture(shadowmap, projected_shadow.xy).r;
52 | float current_depth = projected_shadow.z;
53 | bool shadow = (projected_shadow.x>=0 && projected_shadow.x<=1 &&projected_shadow.y >=0 && projected_shadow.y<=1 &&
54 | projected_shadow.z<1) && (current_depth-SHADOWDEPTH_EPS > closest_depth) && (current_depth < closest_depth+0.5);
55 | return shadow;
56 | }
57 |
58 | vec4 alpha_blending(vec4 orig_color, vec4 overlay_color)
59 | {
60 | float res_alpha = overlay_color.a + orig_color.a*(1-overlay_color.a);
61 | return vec4((res_alpha==0.0)?orig_color.rgb:((overlay_color.rgb*overlay_color.a+orig_color.rgb*(1-overlay_color.a))/res_alpha), res_alpha);
62 | }
63 |
64 | void main() {
65 | vec3 camera_position = transpose(V)[3].xyz;
66 | vec3 view_dir = normalize(camera_position - fs_in.pose);
67 | vec4 input_color = alpha_blending(fs_in.color, overlay_color);
68 | // vec4 input_color = vec4(overlay_color.rgb*(1-orig_color.a)+orig_color.rgb*orig_color.a, orig_color.a + overlay_color.a*(1-orig_color.a));
69 | vec4 color = dirlight_calculation(dirlight, input_color, fs_in.normal, view_dir);
70 |
71 |     for (int i=0; i<SHADOWMAPS_MAX; i++) {
--------------------------------------------------------------------------------
/cloudrender/render/shaders/simple_mesh/shadowdraw/vertex_opencv.glsl:
--------------------------------------------------------------------------------
37 |     if (res.x>1||res.x<-1||res.y>1||res.y<-1)
38 | {
39 | gl_Position = vec4(-res,
40 | -2.,
41 | 1.0);
42 | }
43 | else
44 | {
45 | gl_Position = vec4(res,
46 | length(vertexPosMV)*(sign(vertexPosMV.z))/far*2-1,
47 | 1.0);
48 | }
49 |
50 | // float pos_d = sign(vertexPosMV.z)*(far-length(vertexPosMV));
51 | // pos_d = (pos_d>0.05)?pos_d:-1;
52 |
53 |
54 |
55 | vs_out.color = vertexColor;
56 | vs_out.depth = abs(vertexPosMV.z);
57 | vs_out.normal = mat3(M) * vertexNorm;
58 |     for (int i=0; i<SHADOWMAPS_MAX; i++) {
--------------------------------------------------------------------------------
/cloudrender/render/shaders/simple_mesh/vertex_opencv.glsl:
--------------------------------------------------------------------------------
33 |     if (res.x>1||res.x<-1||res.y>1||res.y<-1)
34 | {
35 | gl_Position = vec4(-res,
36 | -2.,
37 | 1.0);
38 | }
39 | else
40 | {
41 | gl_Position = vec4(res,
42 | length(vertexPosMV)*(sign(vertexPosMV.z))/far*2-1,
43 | 1.0);
44 | }
45 |
46 | // float pos_d = sign(vertexPosMV.z)*(far-length(vertexPosMV));
47 | // pos_d = (pos_d>0.05)?pos_d:-1;
48 |
49 |
50 |
51 | vs_out.color = vertexColor;
52 | vs_out.depth = abs(vertexPosMV.z);
53 | vs_out.normal = mat3(M) * vertexNorm;
54 | }
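The shadowdraw fragment shaders in this tree all share the same shadow_calculation test; a numpy sketch of it, with nearest-neighbour lookup standing in for GLSL texture() (`shadowmap` is an (H, W) depth image in [0, 1], `pose_shadow` the fragment position in the light's clip space):

import numpy as np

SHADOWDEPTH_EPS = 5e-3

def shadow_calculation(shadowmap, pose_shadow):
    p = pose_shadow[:3] / pose_shadow[3]        # perspective divide -> NDC
    p = p * 0.5 + 0.5                           # NDC -> [0, 1] texcoords / depth
    if not (0.0 <= p[0] <= 1.0 and 0.0 <= p[1] <= 1.0 and p[2] < 1.0):
        return False                            # outside the light frustum
    h, w = shadowmap.shape
    closest = shadowmap[min(int(p[1] * h), h - 1), min(int(p[0] * w), w - 1)]
    # shadowed when strictly behind the stored occluder (with a bias against
    # shadow acne) but not further than 0.5 depth units behind it
    return (p[2] - SHADOWDEPTH_EPS > closest) and (p[2] < closest + 0.5)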
--------------------------------------------------------------------------------
/cloudrender/render/shaders/simple_mesh/vertex_perspective.glsl:
--------------------------------------------------------------------------------
1 | #version 330 core
2 |
3 | // Input vertex data, different for all executions of this shader.
4 | layout(location = 0) in vec3 vertexPos;
5 | layout(location = 1) in vec4 vertexColor;
6 | layout(location = 2) in vec3 vertexNorm;
7 |
8 | // Output data ; will be interpolated for each fragment.
9 | out VS_OUT {
10 | vec3 pose;
11 | vec4 color;
12 | float depth;
13 | vec3 normal;
14 | vec3 MVnormal;
15 | } vs_out;
16 |
17 | // Values that stay constant for the whole mesh.
18 | uniform mat4 M;
19 | uniform mat4 V;
20 | uniform mat4 P;
21 | void main(){
22 | mat4 MV = V*M;
23 | vec4 vertexPosMV = MV * vec4(vertexPos, 1);
24 | gl_Position = P * vertexPosMV;
25 | vs_out.pose = vec3(M * vec4(vertexPos, 1.0));
26 | vs_out.color = vertexColor;
27 | vs_out.normal = mat3(M) * vertexNorm;
28 | // vs_out.MVnormal = -mat3(MV) * vertexNorm;
29 | vs_out.depth = abs(vertexPosMV.z);
30 | }
--------------------------------------------------------------------------------
/cloudrender/render/shaders/simple_pointcloud/fragment.glsl:
--------------------------------------------------------------------------------
1 | #version 330 core
2 |
3 | in vec4 vcolor;
4 | flat in int frag_inst_id;
5 |
6 | layout(location = 0) out vec4 color;
7 | layout(location = 1) out int inst_id;
8 |
9 | uniform vec4 overlay_color;
10 | uniform vec3 hsv_multiplier;
11 |
12 | vec4 alpha_blending(vec4 orig_color, vec4 overlay_color)
13 | {
14 | float res_alpha = overlay_color.a + orig_color.a*(1-overlay_color.a);
15 | return vec4((res_alpha==0.0)?orig_color.rgb:((overlay_color.rgb*overlay_color.a+orig_color.rgb*(1-overlay_color.a))/res_alpha), res_alpha);
16 | }
17 |
18 | vec3 rgb2hsv(vec3 c)
19 | {
20 | vec4 K = vec4(0.0, -1.0 / 3.0, 2.0 / 3.0, -1.0);
21 | vec4 p = mix(vec4(c.bg, K.wz), vec4(c.gb, K.xy), step(c.b, c.g));
22 | vec4 q = mix(vec4(p.xyw, c.r), vec4(c.r, p.yzx), step(p.x, c.r));
23 |
24 | float d = q.x - min(q.w, q.y);
25 | float e = 1.0e-10;
26 | return vec3(abs(q.z + (q.w - q.y) / (6.0 * d + e)), d / (q.x + e), q.x);
27 | }
28 |
29 | vec3 hsv2rgb(vec3 c)
30 | {
31 | vec4 K = vec4(1.0, 2.0 / 3.0, 1.0 / 3.0, 3.0);
32 | vec3 p = abs(fract(c.xxx + K.xyz) * 6.0 - K.www);
33 | return c.z * mix(K.xxx, clamp(p - K.xxx, 0.0, 1.0), c.y);
34 | }
35 |
36 | void main() {
37 | color = vcolor;
38 | color = vec4(hsv2rgb(rgb2hsv(color.rgb)*hsv_multiplier), color.a);
39 | color = alpha_blending(color, overlay_color);
40 | inst_id = frag_inst_id;
41 | }
--------------------------------------------------------------------------------
/cloudrender/render/shaders/simple_pointcloud/geometry.glsl:
--------------------------------------------------------------------------------
1 | #version 330 core
2 | layout (points) in;
3 | layout (triangle_strip, max_vertices = 4) out;
4 |
5 | uniform float width_mul;
6 | uniform float splat_size;
7 |
8 | in VS_OUT {
9 | vec4 color;
10 | int inst_id;
11 | float depth;
12 | } gs_in[];
13 |
14 | out vec4 vcolor;
15 | flat out int frag_inst_id;
16 |
17 | void main() {
18 | vec4 position = gl_in[0].gl_Position;
19 | float size_mul = splat_size/(0.1+0.4*gs_in[0].depth)*position.w;
20 | float color_mul = 1;
21 | vcolor = vec4(gs_in[0].color.rgb*color_mul+(1-color_mul), gs_in[0].color.a);
22 | frag_inst_id = gs_in[0].inst_id;
23 |
24 | gl_Position = position + vec4(-0.01*width_mul, -0.01, 0.0, 0.0)*size_mul;
25 | EmitVertex();
26 |
27 | gl_Position = position + vec4(0.01*width_mul, -0.01, 0.0, 0.0)*size_mul;
28 | EmitVertex();
29 |
30 | gl_Position = position + vec4(-0.01*width_mul, 0.01, 0.0, 0.0)*size_mul;
31 | EmitVertex();
32 |
33 | gl_Position = position + vec4(0.01*width_mul, 0.01, 0.0, 0.0)*size_mul;
34 | EmitVertex();
35 |
36 | EndPrimitive();
37 | }
38 |
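The sizing line above makes splats shrink with eye-space depth; multiplying the clip-space offset by position.w cancels the later perspective divide, so the on-screen half-extent reduces to 0.01*splat_size/(0.1+0.4*depth). A quick standalone check of that claim:

def splat_half_extent_ndc(splat_size, depth, width_mul, w):
    # clip-space offset from the shader, followed by the perspective divide by w
    size_mul = splat_size / (0.1 + 0.4 * depth) * w
    return 0.01 * width_mul * size_mul / w, 0.01 * size_mul / w

# identical for any w, confirming the w factor cancels
print(splat_half_extent_ndc(1.0, depth=2.0, width_mul=1.0, w=1.0))
print(splat_half_extent_ndc(1.0, depth=2.0, width_mul=1.0, w=5.0))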
--------------------------------------------------------------------------------
/cloudrender/render/shaders/simple_pointcloud/shadowdraw/fragment.glsl:
--------------------------------------------------------------------------------
1 | #version 400
2 | #define SHADOWMAPS_MAX 6
3 | #define SHADOWDEPTH_EPS 5e-3
4 |
5 | in vec4 vcolor;
6 | flat in int frag_inst_id;
7 | in vec4 pose_shadow[SHADOWMAPS_MAX];
8 | in vec3 vnormal;
9 |
10 | uniform bool shadowmap_enabled[SHADOWMAPS_MAX];
11 | uniform sampler2D shadowmaps[SHADOWMAPS_MAX];
12 | uniform vec4 shadow_color;
13 | uniform vec4 overlay_color;
14 | uniform vec3 hsv_multiplier;
15 |
16 |
17 | layout(location = 0) out vec4 color;
18 |
19 | bool shadow_calculation(sampler2D shadowmap, vec4 pose_shadow)
20 | {
21 | vec3 projected_shadow = pose_shadow.xyz/pose_shadow.w;
22 | projected_shadow = projected_shadow*0.5+0.5;
23 | float closest_depth = texture(shadowmap, projected_shadow.xy).r;
24 | float current_depth = projected_shadow.z;
25 | bool shadow = (projected_shadow.x>=0 && projected_shadow.x<=1 &&projected_shadow.y >=0 && projected_shadow.y<=1 &&
26 | projected_shadow.z<1) && (current_depth-SHADOWDEPTH_EPS > closest_depth) && (current_depth < closest_depth+0.5);
27 | return shadow;
28 | }
29 |
30 | vec4 alpha_blending(vec4 orig_color, vec4 overlay_color)
31 | {
32 | float res_alpha = overlay_color.a + orig_color.a*(1-overlay_color.a);
33 | return vec4((res_alpha==0.0)?orig_color.rgb:((overlay_color.rgb*overlay_color.a+orig_color.rgb*(1-overlay_color.a))/res_alpha), res_alpha);
34 | }
35 |
36 | vec3 rgb2hsv(vec3 c)
37 | {
38 | vec4 K = vec4(0.0, -1.0 / 3.0, 2.0 / 3.0, -1.0);
39 | vec4 p = mix(vec4(c.bg, K.wz), vec4(c.gb, K.xy), step(c.b, c.g));
40 | vec4 q = mix(vec4(p.xyw, c.r), vec4(c.r, p.yzx), step(p.x, c.r));
41 |
42 | float d = q.x - min(q.w, q.y);
43 | float e = 1.0e-10;
44 | return vec3(abs(q.z + (q.w - q.y) / (6.0 * d + e)), d / (q.x + e), q.x);
45 | }
46 |
47 | vec3 hsv2rgb(vec3 c)
48 | {
49 | vec4 K = vec4(1.0, 2.0 / 3.0, 1.0 / 3.0, 3.0);
50 | vec3 p = abs(fract(c.xxx + K.xyz) * 6.0 - K.www);
51 | return c.z * mix(K.xxx, clamp(p - K.xxx, 0.0, 1.0), c.y);
52 | }
53 |
54 | void main() {
55 | color = vcolor;
56 |     for (int i=0; i<SHADOWMAPS_MAX; i++) {
--------------------------------------------------------------------------------
/cloudrender/render/shaders/simple_pointcloud_with_normals/shadowdraw/fragment.glsl:
--------------------------------------------------------------------------------
23 |     bool shadow = (projected_shadow.x>=0 && projected_shadow.x<=1 &&projected_shadow.y >=0 && projected_shadow.y<=1 &&
24 | projected_shadow.z<1) && (current_depth-SHADOWDEPTH_EPS > closest_depth) && (current_depth < closest_depth+0.5);
25 | return shadow;
26 | }
27 |
28 | void main() {
29 | color = vcolor;
30 |     for (int i=0; i<SHADOWMAPS_MAX; i++) {
--------------------------------------------------------------------------------
/cloudrender/render/shaders/textured_mesh/fragment.glsl:
--------------------------------------------------------------------------------
36 |     float spec = shininess > 0 ? pow(max(dot(view_dir, reflect_dir), 0.0), shininess) : 1.0;
37 | // combine results
38 | vec3 frag_ambient = vec3(ambient);
39 | vec3 frag_diffuse = vec3(light.intensity * diff * diffuse);
40 | vec3 frag_specular = vec3(light.intensity * spec * specular);
41 | vec3 color_sum = frag_ambient + frag_diffuse;
42 | return vec4(color_sum, 1.)*color + vec4(frag_specular, 0.);
43 | }
44 |
45 | vec4 alpha_blending(vec4 orig_color, vec4 overlay_color)
46 | {
47 | float res_alpha = overlay_color.a + orig_color.a*(1-overlay_color.a);
48 | return vec4((res_alpha==0.0)?orig_color.rgb:((overlay_color.rgb*overlay_color.a+orig_color.rgb*(1-overlay_color.a))/res_alpha), res_alpha);
49 | }
50 |
51 | void main() {
52 | vec3 camera_position = transpose(V)[3].xyz;
53 | vec3 view_dir = normalize(camera_position - fs_in.pose);
54 | vec4 tex_color = texture(meshTexture, fs_in.texUV);
55 | tex_color = alpha_blending(tex_color, overlay_color);
56 | vec4 color = dirlight_calculation(dirlight, tex_color, fs_in.normal, view_dir);
57 | out_color = color;
58 | }
--------------------------------------------------------------------------------
/cloudrender/render/shaders/textured_mesh/shadowdraw/fragment.glsl:
--------------------------------------------------------------------------------
1 | #version 400
2 | #define SHADOWMAPS_MAX 6
3 | #define SHADOWDEPTH_EPS 5e-3
4 |
5 | struct DirLight {
6 | vec3 direction;
7 | vec3 intensity;
8 | };
9 |
10 | uniform DirLight dirlight;
11 | uniform float diffuse;
12 | uniform float ambient;
13 | uniform float specular;
14 | uniform float shininess;
15 | uniform sampler2D shadowmaps[SHADOWMAPS_MAX];
16 | uniform bool shadowmap_enabled[SHADOWMAPS_MAX];
17 | uniform vec4 shadow_color;
18 | uniform mat4 V;
19 | uniform vec4 overlay_color;
20 |
21 | uniform sampler2D meshTexture;
22 |
23 | in VS_OUT {
24 | vec3 pose;
25 | float depth;
26 | vec3 normal;
27 | vec3 MVnormal;
28 | vec4 pose_shadow[SHADOWMAPS_MAX];
29 | vec2 texUV;
30 | } fs_in;
31 |
32 | layout(location = 0) out vec4 out_color;
33 |
34 | vec4 dirlight_calculation(DirLight light, vec4 color, vec3 normal, vec3 view_dir)
35 | {
36 | vec3 light_dir = normalize(-light.direction);
37 | // diffuse shading
38 | float diff = max(dot(normal, light_dir), 0.0);
39 | // specular shading
40 | vec3 reflect_dir = reflect(-light_dir, normal);
41 | float spec = shininess > 0 ? pow(max(dot(view_dir, reflect_dir), 0.0), shininess) : 1.0;
42 | // combine results
43 | vec3 frag_ambient = vec3(ambient);
44 | vec3 frag_diffuse = vec3(light.intensity * diff * diffuse);
45 | vec3 frag_specular = vec3(light.intensity * spec * specular);
46 | vec3 color_sum = frag_ambient + frag_diffuse;
47 | return vec4(color_sum, 1.)*color + vec4(frag_specular, 0.);
48 | }
49 |
50 | bool shadow_calculation(sampler2D shadowmap, vec4 pose_shadow) {
51 | vec3 projected_shadow = pose_shadow.xyz/pose_shadow.w;
52 | projected_shadow = projected_shadow*0.5+0.5;
53 | float closest_depth = texture(shadowmap, projected_shadow.xy).r;
54 | float current_depth = projected_shadow.z;
55 | bool shadow = (projected_shadow.x>=0 && projected_shadow.x<=1 &&projected_shadow.y >=0 && projected_shadow.y<=1 &&
56 | projected_shadow.z<1) && (current_depth-SHADOWDEPTH_EPS > closest_depth) && (current_depth < closest_depth+0.5);
57 | return shadow;
58 | }
59 |
60 | vec4 alpha_blending(vec4 orig_color, vec4 overlay_color)
61 | {
62 | float res_alpha = overlay_color.a + orig_color.a*(1-overlay_color.a);
63 | return vec4((res_alpha==0.0)?orig_color.rgb:((overlay_color.rgb*overlay_color.a+orig_color.rgb*(1-overlay_color.a))/res_alpha), res_alpha);
64 | }
65 |
66 | void main() {
67 | vec3 camera_position = transpose(V)[3].xyz;
68 | vec3 view_dir = normalize(camera_position - fs_in.pose);
69 | vec4 tex_color = texture(meshTexture, fs_in.texUV);
70 | tex_color = alpha_blending(tex_color, overlay_color);
71 | vec4 color = dirlight_calculation(dirlight, tex_color, fs_in.normal, view_dir);
72 |
73 |     for (int i=0; i<SHADOWMAPS_MAX; i++) {
--------------------------------------------------------------------------------
/cloudrender/render/shaders/textured_mesh/shadowdraw/vertex_opencv.glsl:
--------------------------------------------------------------------------------
37 |     if (res.x>1||res.x<-1||res.y>1||res.y<-1)
38 | {
39 | gl_Position = vec4(-res,
40 | -2.,
41 | 1.0);
42 | }
43 | else
44 | {
45 | gl_Position = vec4(res,
46 | length(vertexPosMV)*(sign(vertexPosMV.z))/far*2-1,
47 | 1.0);
48 | }
49 |
50 | // float pos_d = sign(vertexPosMV.z)*(far-length(vertexPosMV));
51 | // pos_d = (pos_d>0.05)?pos_d:-1;
52 |
53 |
54 |
55 | vs_out.color = vertexColor;
56 | vs_out.depth = abs(vertexPosMV.z);
57 | vs_out.normal = mat3(M) * vertexNorm;
58 |     for (int i=0; i<SHADOWMAPS_MAX; i++) {
--------------------------------------------------------------------------------
/cloudrender/render/shaders/textured_mesh/vertex_opencv.glsl:
--------------------------------------------------------------------------------
34 |     if (res.x>1||res.x<-1||res.y>1||res.y<-1)
35 | {
36 | gl_Position = vec4(-res,
37 | -2.,
38 | 1.0);
39 | }
40 | else
41 | {
42 | gl_Position = vec4(res,
43 | length(vertexPosMV)*(sign(vertexPosMV.z))/far*2-1,
44 | 1.0);
45 | }
46 |
47 | // float pos_d = sign(vertexPosMV.z)*(far-length(vertexPosMV));
48 | // pos_d = (pos_d>0.05)?pos_d:-1;
49 |
50 |
51 |
52 | vs_out.depth = abs(vertexPosMV.z);
53 | vs_out.normal = mat3(M) * vertexNorm;
54 | vs_out.texUV = vertexTexUV;
55 | }
--------------------------------------------------------------------------------
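The surviving tail of the two OpenCV-model vertex shaders above shows their depth convention: `gl_Position.w` stays at 1 (so there is no perspective divide), out-of-frame vertices are culled by being pushed behind the clip volume (`z = -2.`), and depth is written as the signed radial distance to the camera rescaled into NDC by `far`. A small NumPy sanity check of that mapping, not part of the repo; `far = 50` follows the test script's camera setup:

```python
import numpy as np

far = 50.0  # matches camera.init_intrinsics(..., far=50) in test_scene_video.py
for dist in (0.1, 1.0, 10.0, 50.0):
    pos_cam = np.array([0.0, 0.0, dist])  # point in front of an OpenCV-style camera (+z forward)
    ndc_z = np.linalg.norm(pos_cam) * np.sign(pos_cam[2]) / far * 2 - 1
    print(f"{dist:5.1f} m -> ndc_z = {ndc_z:+.3f}")  # grows monotonically from -1 toward +1
```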
/cloudrender/render/shaders/textured_mesh/vertex_perspective.glsl:
--------------------------------------------------------------------------------
1 | #version 330 core
2 |
3 | // Input vertex data, different for all executions of this shader.
4 | layout(location = 0) in vec3 vertexPos;
5 | layout(location = 1) in vec2 vertexTexUV;
6 | layout(location = 2) in vec3 vertexNorm;
7 |
8 |
9 | // Output data ; will be interpolated for each fragment.
10 | out VS_OUT {
11 | vec3 pose;
12 | float depth;
13 | vec3 normal;
14 | vec3 MVnormal;
15 | vec2 texUV;
16 | } vs_out;
17 |
18 | // Values that stay constant for the whole mesh.
19 | uniform mat4 M;
20 | uniform mat4 V;
21 | uniform mat4 P;
22 | void main(){
23 | mat4 MV = V*M;
24 | vec4 vertexPosMV = MV * vec4(vertexPos, 1);
25 | gl_Position = P * vertexPosMV;
26 | vs_out.pose = vec3(M * vec4(vertexPos, 1.0));
27 | vs_out.normal = mat3(M) * vertexNorm;
28 | // vs_out.MVnormal = -mat3(MV) * vertexNorm;
29 | vs_out.depth = abs(vertexPosMV.z);
30 | vs_out.texUV = vertexTexUV;
31 | }
--------------------------------------------------------------------------------
/cloudrender/render/shadowmap.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from OpenGL import GL as gl
3 | from typing import List, Sequence
4 | from ..camera.models import StandardProjectionCameraModel
5 |
6 | class ShadowMap():
7 | def __init__(self, camera: StandardProjectionCameraModel,
8 | shadowmap_size: Sequence[int]):
9 | self.camera = camera
10 | self.shadowmap_size = shadowmap_size
11 | self._init()
12 |
13 | def _remember_fbo(self):
14 | vport_params = np.zeros(4, dtype=np.int32)
15 | gl.glGetIntegerv(gl.GL_VIEWPORT, vport_params)
16 | self.original_viewport_size = vport_params[2:]
17 | fbo = np.zeros(1, dtype=np.int32)
18 | gl.glGetIntegerv(gl.GL_DRAW_FRAMEBUFFER_BINDING, fbo)
19 |         self.prev_draw_fbo = int(fbo[0])
20 | fbo = np.zeros(1, dtype=np.int32)
21 | gl.glGetIntegerv(gl.GL_READ_FRAMEBUFFER_BINDING, fbo)
22 |         self.prev_read_fbo = int(fbo[0])
23 |
24 | def _restore_fbo(self):
25 | gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, self.prev_draw_fbo)
26 | gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, self.prev_read_fbo)
27 | gl.glViewport(0, 0, *self.original_viewport_size)
28 |
29 | def _init(self):
30 | self._remember_fbo()
31 | gl.glViewport(0, 0, *self.shadowmap_size)
32 | self.fbo = gl.glGenFramebuffers(1)
33 | gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.fbo)
34 | self.texture = gl.glGenTextures(1)
35 | gl.glBindTexture(gl.GL_TEXTURE_2D, self.texture)
36 | gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_DEPTH_COMPONENT, *self.shadowmap_size,
37 | 0, gl.GL_DEPTH_COMPONENT, gl.GL_FLOAT, None)
38 | gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)
39 | gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
40 | gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_S, gl.GL_REPEAT)
41 | gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_T, gl.GL_REPEAT)
42 | gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_TEXTURE_2D, self.texture, 0)
43 | gl.glDrawBuffer(gl.GL_NONE)
44 | gl.glReadBuffer(gl.GL_NONE)
45 | self._restore_fbo()
46 |
47 | # def upload(self, tracked_object: Renderable):
48 | # model_mtx = tracked_object.context.Model
49 | # light_MVP = self.camera.context.Projection * self.camera.context.View * model_mtx
50 | # shader_ids = tracked_object.shadowgen_context.shader_ids
51 | # gl.glUniformMatrix4fv(shader_ids['light_MVP'], 1, gl.GL_FALSE,
52 | # glm.value_ptr(light_MVP))
53 | # tracked_object._upload_uniforms(shader_ids)
54 |
55 | def update_shadowmap(self, tracked_objects):
56 | self._remember_fbo()
57 | gl.glViewport(0, 0, *self.shadowmap_size)
58 | gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.fbo)
59 | gl.glClear(gl.GL_DEPTH_BUFFER_BIT)
60 | for tracked_object in tracked_objects:
61 | tracked_object.draw_shadowmap(self.camera)
62 | self._restore_fbo()
63 |
64 | @property
65 | def light_VP(self):
66 | return self.camera.context.Projection * self.camera.context.View
67 |
--------------------------------------------------------------------------------
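A minimal wiring sketch for `ShadowMap`, assuming an OpenGL context is already current; the orthographic-camera arguments mirror the call site in `Scene.add_dirlight_with_shadow` (see `scene.py` below):

```python
import numpy as np
from cloudrender.camera import OrthogonalCameraModel
from cloudrender.render.shadowmap import ShadowMap

# Orthographic "light camera" covering a 4x4x10 m box; argument order follows scene.py:
# texture size, then x/y/z extents of the frustum.
light_camera = OrthogonalCameraModel()
light_camera.init_intrinsics((1024, 1024), -2., 2., -2., 2., -5., 5.)
light_camera.init_extrinsics(np.array([1., 0., 0., 0.]), np.array([0., 0., 3.]))  # wxyz quaternion, position

shadowmap = ShadowMap(light_camera, (1024, 1024))
# Each frame, re-render depth from the light's viewpoint for all shadow casters:
# shadowmap.update_shadowmap([o for o in scene.objects if o.generate_shadows])
```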
/cloudrender/render/smpl.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | from torch.nn import Module
4 | import smplx
5 | from loguru import logger
6 | from typing import Dict
7 |
8 | from .mesh import TexturedMesh, SimpleMesh, Mesh
9 | from .renderable import DynamicTimedRenderable
10 | from .utils import MeshNorms
11 |
12 |
13 | class SMPLXModelBase(DynamicTimedRenderable):
14 | MODEL_PARAM_NAMES = {
15 | "smpl": ["betas", "body_pose", "global_orient", "transl"],
16 | "smplh": ["betas", "body_pose", "global_orient", "transl", "left_hand_pose", "right_hand_pose"],
17 | "smplx": ["betas", "body_pose", "global_orient", "transl", "left_hand_pose", "right_hand_pose", "expression", "jaw_pose", "leye_pose",
18 | "reye_pose"],
19 | }
20 |
21 |     def __init__(self, device=None, smpl_root=None, template=None, gender="neutral", flat_hand_mean=True, model_type="smpl",
22 |                  center_root_joint=True, global_offset=None, use_hand_pca=False, *args, **kwargs):
23 | super().__init__(*args, **kwargs)
24 | self.color = None
25 |         self.smpl_root = str(smpl_root) if smpl_root is not None else None  # keep None so the "./models" default below applies
26 | self.device = torch.device(device if device is not None else "cpu")
27 | self.template = template
28 | self.set_global_offset(global_offset)
29 | self.center_root_joint = center_root_joint
30 | self.use_hand_pca = use_hand_pca
31 | self.model_type = model_type
32 | smpl_compatible = False
33 | if self.smpl_root is None:
34 | self.smpl_root = "./models"
35 | if "compat" in self.model_type:
36 | self.model_type = self.model_type.split("_")[0]
37 | smpl_compatible = True
38 | self.available_params = SMPLXModelBase.MODEL_PARAM_NAMES[self.model_type]
39 | self._init_model(gender, smpl_compatible, flat_hand_mean)
40 | self.nglverts = len(self.get_vertices()[0])
41 |
42 | def _init_model(self, gender='neutral', smpl_compatible=False, flat_hand_mean=True):
43 | self.model_layer = smplx.create(self.smpl_root, model_type=self.model_type, gender=gender, use_pca=self.use_hand_pca, flat_hand_mean=flat_hand_mean).to(
44 | self.device)
45 | self.model_layer.requires_grad_(False)
46 | if smpl_compatible:
47 | smpl_model = smplx.create(self.smpl_root, model_type="smpl", gender=gender)
48 | self.model_layer.shapedirs[:] = smpl_model.shapedirs.detach().to(self.device)
49 | if self.template is not None:
50 | self.model_layer.v_template[:] = torch.tensor(self.template, dtype=self.model_layer.v_template.dtype,
51 | device=self.device)
52 | self.normals_layer = MeshNorms(self.model_layer.faces_tensor)
53 | self.gender = gender
54 | self.smpl_compatible = smpl_compatible
55 | self._current_params = {x: getattr(self.model_layer, x).squeeze(0).clone() for x in self.available_params}
56 |
57 | def _preprocess_param(self, param):
58 | if not isinstance(param, torch.Tensor):
59 | param = torch.tensor(param, dtype=torch.float32)
60 | param = param.to(self.device)
61 | return param
62 |
63 | def _finalize_init(self):
64 | self.faces_numpy = self.model_layer.faces.astype(int)
65 | self.faces = self.model_layer.faces_tensor
66 | self.flat_faces = self.faces.view(-1)
67 |
68 | def set_body_template(self, template):
69 | self.template = template
70 | self.model_layer.v_template[:] = torch.tensor(self.template, dtype=self.model_layer.v_template.dtype,
71 | device=self.device)
72 |
73 | def update_params(self, **model_params):
74 | for param_name, param_val in model_params.items():
75 | if param_name in self.available_params:
76 | param_val = self._preprocess_param(param_val)
77 | self._current_params[param_name] = param_val
78 |
79 | @staticmethod
80 | def center_output(smpl_model, params, smpl_output):
81 | if 'transl' in params and params['transl'] is not None:
82 | transl = params['transl']
83 | else:
84 | transl = None
85 | apply_trans = transl is not None or hasattr(smpl_model, 'transl')
86 | if transl is None and hasattr(smpl_model, 'transl'):
87 | transl = smpl_model.transl
88 | diff = -smpl_output.joints[:, 0, :]
89 | if apply_trans:
90 | diff = diff + transl
91 | smpl_output.joints = smpl_output.joints + diff.view(1, 1, 3)
92 | smpl_output.vertices = smpl_output.vertices + diff.view(1, 1, 3)
93 | return smpl_output
94 |
95 | def process_output(self, smpl_output, batch_params):
96 | if self.center_root_joint:
97 | return self.center_output(self.model_layer, batch_params, smpl_output)
98 | else:
99 | return smpl_output
100 |
101 | def get_vertices(self, return_normals=True, **model_params):
102 | self.update_params(**model_params)
103 | batch_params = {x: self._current_params[x].unsqueeze(0) for x in self.available_params}
104 | if self.global_offset is not None:
105 | batch_params["transl"] = batch_params["transl"] + self.global_offset_torch
106 | output = self.model_layer(**batch_params)
107 | output = self.process_output(output, batch_params)
108 | verts = output.vertices.squeeze(0)
109 | if return_normals:
110 | normals = self.normals_layer.vertices_norms(verts)
111 | return verts, normals
112 | else:
113 | return verts
114 |
115 | def get_mesh(self, **model_params):
116 | verts, normals = self.get_vertices(**model_params)
117 | mesh = Mesh.MeshContainer(verts.cpu().numpy(), self.faces_numpy, vertex_normals=normals.cpu().numpy())
118 | return mesh
119 |
120 | def set_global_offset(self, global_offset):
121 | self.global_offset = global_offset
122 | if global_offset is not None:
123 | self.global_offset_torch = torch.tensor(global_offset, dtype=torch.float32, device=self.device).unsqueeze(0)
124 | else:
125 | self.global_offset_torch = None
126 |
127 | def get_joints(self, **model_params):
128 | self.update_params(**model_params)
129 | batch_params = {x: self._current_params[x].unsqueeze(0) for x in self.available_params}
130 | if self.global_offset is not None:
131 | batch_params["transl"] = batch_params["transl"] + self.global_offset_torch
132 | output = self.model_layer(**batch_params)
133 | output = self.process_output(output, batch_params)
134 | joints = output.joints.squeeze(0)
135 | return joints.cpu().numpy()
136 |
137 | def _set_sequence(self, params_seq):
138 | self.params_sequence = params_seq
139 | self.sequence_len = len(params_seq)
140 |
141 | def _load_current_frame(self):
142 | params = self.params_sequence[self.current_sequence_frame_ind]
143 | self.update_buffers(**params)
144 |
145 | @property
146 | def global_translation(self) -> np.ndarray:
147 | return self._current_params["transl"].cpu().numpy()
148 |
149 | @property
150 | def current_params(self) -> Dict[str, np.ndarray]:
151 | return {k: v.cpu().numpy() for k, v in self._current_params.items()}
152 |
153 |
154 | class SMPLXColoredModel(SimpleMesh, SMPLXModelBase):
155 | def __init__(self, *args, **kwargs):
156 | super().__init__(*args, **kwargs)
157 | self.set_uniform_color()
158 |
159 | def set_uniform_color(self, color=(100, 100, 100, 255)):
160 | self.color = color
161 | self.vertex_colors = np.tile(np.array(color, dtype=np.uint8).reshape(1, 4), (self.nglverts, 1))
162 |
163 | def _set_buffers(self, **model_params):
164 | mesh = self.get_mesh(**model_params)
165 | mesh.colors = self.vertex_colors
166 | super()._set_buffers(mesh)
167 |
168 | def _update_buffers(self, **model_params):
169 | mesh = self.get_mesh(**model_params)
170 | mesh.colors = self.vertex_colors
171 | super()._update_buffers(mesh)
172 |
173 |
174 | class SMPLXTexturedModel(TexturedMesh, SMPLXModelBase):
175 | def __init__(self, *args, **kwargs):
176 | super().__init__(*args, **kwargs)
177 | self.update_texture = False
178 |
179 | def _set_buffers(self, **model_params):
180 | mesh = self.get_mesh(**model_params)
181 | if self.update_texture:
182 | mesh.texture = self.texture
183 | mesh.face_uv_map = self.uv_map
184 | self.update_texture = False
185 | super()._set_buffers(mesh)
186 |
187 | def _update_buffers(self, **model_params):
188 | mesh = self.get_mesh(**model_params)
189 | if self.update_texture:
190 | mesh.texture = self.texture
191 | mesh.face_uv_map = self.uv_map
192 | self.update_texture = False
193 | super()._update_buffers(mesh)
194 |
195 | def set_texture(self, texture, uv_map):
196 | self.texture = texture
197 | self.uv_map = uv_map
198 | self.update_texture = True
199 |
200 | def set_uniform_color(self, color=(200, 200, 200, 255)):
201 | pass
202 |
--------------------------------------------------------------------------------
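A short sketch of driving the SMPL body parameters directly, without `set_sequence`; it assumes SMPL model files under `./smplx_models` and an existing `camera`, as in the test script:

```python
import numpy as np
from cloudrender.render.smpl import SMPLXColoredModel

body = SMPLXColoredModel(camera=camera, gender="neutral", model_type="smpl",
                         smpl_root="./smplx_models")  # `camera` assumed to exist (any cloudrender camera model)
body.set_uniform_color((120, 160, 220, 255))
# Any subset of MODEL_PARAM_NAMES["smpl"] can be passed as keyword arguments:
body.update_params(body_pose=np.zeros(69), transl=np.array([0., 0., 1.]))
mesh = body.get_mesh()      # CPU-side MeshContainer: vertices, faces, vertex normals
joints = body.get_joints()  # (J, 3) numpy array
```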
/cloudrender/render/smpl_legacy.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | import smplx
4 |
5 | from .mesh import SimpleMesh
6 | from .renderable import DynamicTimedRenderable
7 | from .utils import MeshNorms
8 |
9 |
10 | class SMPLModel(SimpleMesh):
11 | def __init__(self, device=None, smpl_root=None, template=None, gender="neutral", model_type="smpl", global_offset=None, *args, **kwargs):
12 | super().__init__(*args, **kwargs)
13 | self.color = None
14 | self.smpl_root = smpl_root
15 | self.device = torch.device(device if device is not None else "cpu")
16 | self.pose_params = torch.zeros(72, device=self.device)
17 | self.translation_params = torch.zeros(3, device=self.device)
18 | self.template = template
19 | self.global_offset = global_offset
20 | self.model_type = model_type
21 | self.smpl_compatible = False
22 | if self.smpl_root is None:
23 | self.smpl_root = "./models"
24 | if "compat" in self.model_type:
25 | self.model_type = self.model_type.split("_")[0]
26 | self.smpl_compatible = True
27 | self._set_smpl(gender)
28 | self.nglverts = len(self.get_smpl()[0])
29 | self.set_uniform_color()
30 |
31 | def _set_smpl(self, gender='neutral', shape_params=None):
32 | self.model_layer = smplx.create(self.smpl_root, model_type=self.model_type, gender=gender).to(self.device)
33 | self.model_layer.requires_grad_(False)
34 | if self.smpl_compatible:
35 | smpl_model = smplx.create(self.smpl_root, model_type="smpl", gender=gender)
36 | self.model_layer.shapedirs[:] = smpl_model.shapedirs.detach().to(self.device)
37 | if self.template is not None:
38 | self.model_layer.v_template[:] = torch.tensor(self.template, dtype=self.model_layer.v_template.dtype,
39 | device=self.device)
40 | if self.global_offset is not None:
41 | self.model_layer.v_template[:] += torch.tensor(self.global_offset[np.newaxis, :], dtype=self.model_layer.v_template.dtype,
42 | device=self.device)
43 | self.normals_layer = MeshNorms(self.model_layer.faces_tensor) #torch.tensor(self.model_layer.faces.astype(int), dtype=torch.long, device=self.device))
44 | self.gender = gender
45 | self.shape_params = torch.zeros(10, device=self.device) if shape_params is None else \
46 | torch.tensor(shape_params, dtype=torch.float32, device=self.device)
47 |
48 | def _preprocess_param(self, param):
49 | if not isinstance(param, torch.Tensor):
50 | param = torch.tensor(param, dtype=torch.float32)
51 | param = param.to(self.device)
52 | return param
53 |
54 | def _finalize_init(self):
55 | super()._finalize_init()
56 | self.faces_numpy = self.model_layer.faces.astype(int)
57 | self.faces = self.model_layer.faces_tensor #torch.tensor(self.model_layer.faces.astype(int), dtype=torch.long, device=self.device)
58 | self.flat_faces = self.faces.view(-1)
59 |
60 | def update_params(self, pose=None, shape=None, translation=None):
61 | if pose is not None:
62 | self.pose_params = self._preprocess_param(pose)
63 | if shape is not None:
64 | self.shape_params = self._preprocess_param(shape)
65 | if translation is not None:
66 | self.translation_params = self._preprocess_param(translation)
67 |
68 | def set_uniform_color(self, color=(200, 200, 200, 255)):
69 | self.color = color
70 | self.vertex_colors = np.tile(np.array(color, dtype=np.uint8).reshape(1, 4), (self.nglverts, 1))
71 |
72 | def get_smpl(self, pose_params=None, shape_params=None, translation_params=None):
73 | self.update_params(pose_params, shape_params, translation_params)
74 | batch_pose_params = self.pose_params.unsqueeze(0)
75 | batch_shape_params = self.shape_params.unsqueeze(0)
76 | if self.model_type == "smplh":
77 |             batch_pose_params = batch_pose_params[:, :-6]
78 |         output = self.model_layer(global_orient=batch_pose_params[:, :3],
79 |                                   body_pose=batch_pose_params[:, 3:], betas=batch_shape_params)
80 | verts = output.vertices
81 | normals = self.normals_layer.vertices_norms(verts.squeeze(0))
82 | return verts.squeeze(0) + self.translation_params.unsqueeze(0), normals
83 |
84 | def get_smpl_mesh(self, pose_params=None, shape_params=None, translation_params=None):
85 | verts, normals = self.get_smpl(pose_params, shape_params, translation_params)
86 | mesh = self.MeshContainer(verts.cpu().numpy(), self.faces_numpy, self.vertex_colors, normals.cpu().numpy())
87 | return mesh
88 |
89 | def _set_buffers(self, pose_params=None, shape_params=None, translation_params=None):
90 | mesh = self.get_smpl_mesh(pose_params, shape_params, translation_params)
91 | super()._set_buffers(mesh)
92 |
93 | def _update_buffers(self, pose_params=None, shape_params=None, translation_params=None):
94 | mesh = self.get_smpl_mesh(pose_params, shape_params, translation_params)
95 | super()._update_buffers(mesh)
96 |
97 |
98 | class AnimatableSMPLModel(SMPLModel, DynamicTimedRenderable):
99 | def __init__(self, *args, **kwargs):
100 | super().__init__(*args, **kwargs)
101 |
102 | def _set_sequence(self, params_seq):
103 | self.params_sequence = params_seq
104 | self.sequence_len = len(params_seq)
105 |
106 | def _load_current_frame(self):
107 | params = self.params_sequence[self.current_sequence_frame_ind]
108 | pose = params['pose'] if 'pose' in params else None
109 | shape = params['shape'] if 'shape' in params else None
110 | translation = params['translation'] if 'translation' in params else None
111 | self.update_buffers(pose, shape, translation)
112 |
--------------------------------------------------------------------------------
/cloudrender/render/utils.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import Tensor
3 | from torch.nn import Module
4 | from smplx import SMPL, SMPLH, SMPLX
5 | from typing import Union, Optional
6 |
7 | def centrify_smplx_root_joint(smpl_model: Union[SMPL, SMPLH, SMPLX]):
8 | def centrifying_forward(
9 | betas: Optional[Tensor] = None,
10 | body_pose: Optional[Tensor] = None,
11 | global_orient: Optional[Tensor] = None,
12 | transl: Optional[Tensor] = None,
13 | return_verts=True,
14 | return_full_pose: bool = False,
15 | pose2rot: bool = True,
16 | **kwargs
17 | ):
18 | smpl_output = old_forward(betas=betas, body_pose=body_pose, global_orient=global_orient, transl=transl,return_verts=return_verts, return_full_pose=return_full_pose, pose2rot=pose2rot, **kwargs)
19 | apply_trans = transl is not None or hasattr(smpl_model, 'transl')
20 | if transl is None and hasattr(smpl_model, 'transl'):
21 | transl = smpl_model.transl
22 | diff = -smpl_output.joints[0, 0, :]
23 | if apply_trans:
24 | diff = diff + transl
25 | smpl_output.joints = smpl_output.joints + diff.view(1, 1, 3)
26 | smpl_output.vertices = smpl_output.vertices + diff.view(1, 1, 3)
27 | return smpl_output
28 |
29 | old_forward = smpl_model.forward
30 | smpl_model.forward = centrifying_forward
31 | return smpl_model
32 |
33 | class MeshNorms(Module):
34 | def __init__(self, faces: torch.Tensor):
35 | super().__init__()
36 | self.faces_count = faces.size(0)
37 | normmap = self.compute_face2verts_normmap(faces)
38 | self.register_buffer("normmap", normmap)
39 | self.register_buffer("faces", faces)
40 |
41 | @staticmethod
42 |     def compute_face2verts_normmap(faces: torch.Tensor):  # per-vertex list of adjacent face indices, padded with a dummy index (= face count)
43 | _, faces_in_vertices_count = torch.unique(faces, sorted=True, return_counts=True)
44 | verts_count = len(faces_in_vertices_count)
45 | faces_in_vertex_max = faces_in_vertices_count.max().item()
46 | faces_appearance = torch.argsort(faces.view(-1)) // 3
47 | appearance_array_off = 0
48 | # print(faces.size())
49 | normmap = torch.ones(verts_count, faces_in_vertex_max, dtype=torch.long, device=faces.device) * faces.size(0)
50 | for i in range(verts_count):
51 | faces_in_vertex = faces_in_vertices_count[i]
52 | normmap[i, :faces_in_vertex] = faces_appearance[appearance_array_off:appearance_array_off + faces_in_vertex]
53 | appearance_array_off += faces_in_vertex
54 | return normmap
55 |
56 | def faces_norms(self, verts: torch.Tensor):
57 | verts_size = verts.size()
58 | verts_faces = verts[..., self.faces.view(-1), :].view(*verts_size[:-2], -1, 3, 3)
59 | vct1 = verts_faces[..., 0, :] - verts_faces[..., 1, :]
60 | vct2 = verts_faces[..., 0, :] - verts_faces[..., 2, :]
61 | cross = torch.cross(vct1, vct2, dim=-1)
62 | faces_norms = cross / torch.norm(cross, dim=-1, keepdim=True)
63 | return faces_norms
64 |
65 | def vertices_norms(self, verts: torch.Tensor):
66 | faces_norms = self.faces_norms(verts)
67 | faces_norms = torch.cat([faces_norms, torch.zeros(*faces_norms.size()[:-2], 1, 3, device=verts.device)], dim=-2)
68 | vertices_norms = faces_norms[..., self.normmap.view(-1), :].view(*faces_norms.size()[:-2], -1,
69 | self.normmap.size(1), 3).sum(dim=-2)
70 | vertices_norms = vertices_norms / torch.norm(vertices_norms, dim=-1, keepdim=True)
71 | return vertices_norms
--------------------------------------------------------------------------------
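`MeshNorms` precomputes, for every vertex, the (padded) list of faces it belongs to, then averages unit face normals over that list. A tiny check on a single triangle, assuming only `torch` and this module:

```python
import torch
from cloudrender.render.utils import MeshNorms

faces = torch.tensor([[0, 1, 2]], dtype=torch.long)
verts = torch.tensor([[0., 0., 0.],
                      [1., 0., 0.],
                      [0., 1., 0.]])
mn = MeshNorms(faces)
print(mn.faces_norms(verts))     # one unit face normal: (0, 0, 1)
print(mn.vertices_norms(verts))  # (0, 0, 1) repeated for all three vertices
```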
/cloudrender/scene.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from scipy.spatial.transform import Rotation
3 | from typing import List, Sequence
4 | from .render.renderable import Renderable
5 | from .camera import BaseCameraModel, OrthogonalCameraModel
6 | from .render.lights import Light, DirectionalLight
7 | from .render.shadowmap import ShadowMap
8 |
9 |
10 | class Scene:
11 | def __init__(self):
12 | self.objects: List[Renderable] = []
13 | self.lights: List[Light] = []
14 | self.shadowmaps: List[ShadowMap] = []
15 | self.camera: BaseCameraModel = None
16 |
17 | def set_camera(self, camera: BaseCameraModel):
18 | self.camera = camera
19 | for renderable_object in self.objects:
20 | renderable_object.set_camera(camera)
21 |
22 | def add_object(self, obj: Renderable):
23 | self.objects.append(obj)
24 |
25 | def add_light(self, light: Light):
26 | self.lights.append(light)
27 |
28 | def add_shadowmap(self, shadowmap: ShadowMap):
29 | self.shadowmaps.append(shadowmap)
30 |
31 | def add_dirlight_with_shadow(self, light: DirectionalLight, shadowmap_texsize: Sequence[int],
32 | shadowmap_worldsize: Sequence[float], shadowmap_center: Sequence[float]):
33 | self.add_light(light)
34 | light_camera = OrthogonalCameraModel()
35 | shadowmap_worldsize = np.array(shadowmap_worldsize)
36 | shadowmap_center = np.array(shadowmap_center)
37 | shadowmap_halfsize = shadowmap_worldsize/2
38 | near = 0
39 | mincorner = np.array([-shadowmap_halfsize[0], -shadowmap_halfsize[1], near-shadowmap_halfsize[2]])
40 | maxcorner = np.array([shadowmap_halfsize[0], shadowmap_halfsize[1], near+shadowmap_halfsize[2]])
41 | light_camera.init_intrinsics(shadowmap_texsize,mincorner[0], maxcorner[0],
42 | mincorner[1], maxcorner[1], mincorner[2], maxcorner[2])
43 |         current_dir = np.array([0, 0, -1.])
44 |         target_dir = light.direction / np.linalg.norm(light.direction)
45 |         min_rot_vector = np.cross(current_dir, target_dir)  # magnitude is sin(angle), not the angle itself
46 |         quat = np.roll(Rotation.from_rotvec(min_rot_vector * np.arctan2(np.linalg.norm(min_rot_vector), current_dir.dot(target_dir)) / max(np.linalg.norm(min_rot_vector), 1e-9)).as_quat(), 1)  # xyzw -> wxyz
47 | light_camera.init_extrinsics(quat, shadowmap_center)
48 | shadowmap = ShadowMap(light_camera, shadowmap_texsize)
49 | self.add_shadowmap(shadowmap)
50 | return shadowmap
51 |
52 | def draw(self, reset=True):
53 | shading_objects = [o for o in self.objects if o.generate_shadows]
54 | for shadowmap in self.shadowmaps:
55 | shadowmap.update_shadowmap(shading_objects)
56 |
57 | is_buffer_changed = False
58 | for renderable_object in self.objects:
59 | if renderable_object.draw_shadows:
60 | is_buffer_changed |= renderable_object.draw(reset, self.lights, self.shadowmaps)
61 | else:
62 | is_buffer_changed |= renderable_object.draw(reset, self.lights)
63 | return is_buffer_changed
64 |
--------------------------------------------------------------------------------
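A condensed frame-loop sketch for `Scene`; `camera` and `renderable_pc` are placeholders for an initialized camera and renderable (the full, runnable version of this setup is `test_scene_video.py` at the end of this listing):

```python
import numpy as np
from OpenGL import GL as gl
from cloudrender.scene import Scene
from cloudrender.render import DirectionalLight

scene = Scene()
scene.set_camera(camera)         # placeholder: any initialized camera model
scene.add_object(renderable_pc)  # placeholder: any initialized Renderable
light = DirectionalLight(np.array([0., -1., -1.]), np.array([0.8, 0.8, 0.8]))
scene.add_dirlight_with_shadow(light, shadowmap_texsize=(1024, 1024),
                               shadowmap_worldsize=(4., 4., 10.),
                               shadowmap_center=(0., 0., 3.))
# per frame:
gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
scene.draw()  # refreshes shadowmaps from the casters, then draws every object
```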
/cloudrender/utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | import pickle
4 | import trimesh
5 | import numpy as np
6 | import fnmatch
7 | from zipfile import ZipFile
8 | from io import BytesIO
9 | from typing import Union, Tuple, List, Sequence
10 |
11 | def list_zip(zippath: str, folder: str = '') -> List[str]:
12 | input_zip = ZipFile(zippath)
13 | if folder == '':
14 | return list(input_zip.namelist())
15 | else:
16 | if folder[-1]!='/':
17 | folder = folder + '/'
18 | return list(filter(lambda x: x.startswith(folder) and x != folder, input_zip.namelist()))
19 |
20 | def get_closest_ind_after(arr_times, current_time):  # index of the closest timestamp strictly after current_time (last index if none)
21 | diff = arr_times - current_time
22 | mask = diff > 0
23 | if mask.sum() == 0:
24 | return len(arr_times) - 1
25 | mask_inds = np.nonzero(mask)[0]
26 | mindiff_ind = mask_inds[np.argmin(diff[mask])]
27 | return mindiff_ind
28 |
29 |
30 | def get_closest_ind_before(arr_times, current_time):  # index of the closest timestamp strictly before current_time (0 if none)
31 | diff = arr_times - current_time
32 | mask = diff < 0
33 | if mask.sum() == 0:
34 | return 0
35 | mask_inds = np.nonzero(mask)[0]
36 | mindiff_ind = mask_inds[np.argmax(diff[mask])]
37 | return mindiff_ind
38 |
39 | class ObjectLocation:
40 | def __init__(self, translation, quaternion, time=None):
41 | self._translation = np.asarray(translation)
42 | self._quaternion = np.asarray(quaternion)
43 | if time is None:
44 | self._time = None
45 | else:
46 | self._time = float(time)
47 |
48 | def to_dict(self):
49 | return {"position": self._translation.tolist(),
50 | "quaternion": self._quaternion.tolist()}
51 |
52 | @property
53 | def translation(self) -> np.ndarray:
54 | return self._translation
55 |
56 | @property
57 | def position(self) -> np.ndarray:
58 | return self._translation
59 |
60 | @property
61 | def quaternion(self) -> np.ndarray:
62 | return self._quaternion
63 |
64 | @property
65 | def time(self) -> float:
66 | return self._time
67 |
68 | def __getitem__(self, item):
69 | if item in ["position", "translation"]:
70 | return self.translation
71 | elif item == "quaternion":
72 | return self.quaternion
73 | elif item == "time":
74 | return self.time
75 | else:
76 | raise IndexError(f"No such index '{item}'")
77 |
78 | class ObjectTrajectory:
79 | def __init__(self, traj_poses, traj_quats, traj_times):
80 | assert len(traj_poses) == len(traj_quats) == len(traj_times)
81 | self._translations = np.asarray(traj_poses)
82 | self._quaternions = np.asarray(traj_quats)
83 | self._times = np.asarray(traj_times)
84 |
85 | @property
86 | def translations(self) -> np.ndarray:
87 | return self._translations
88 |
89 | @property
90 | def positions(self) -> np.ndarray:
91 | return self._translations
92 |
93 | @property
94 | def quaternions(self) -> np.ndarray:
95 | return self._quaternions
96 |
97 | @property
98 | def times(self) -> np.ndarray:
99 | return self._times
100 |
101 | def __getitem__(self, item: int):
102 | return ObjectLocation(self.translations[item], self.quaternions[item], time=self.times[item])
103 |
104 | def __len__(self):
105 | return len(self._translations)
106 |
107 | @classmethod
108 | def cat_trajectories(cls, traj_list: List["ObjectTrajectory"]):
109 | transls = []
110 | quats = []
111 | times = []
112 | for traj in traj_list:
113 | transls.append(traj.translations)
114 | quats.append(traj.quaternions)
115 | times.append(traj.times)
116 | res = cls(np.concatenate(transls, axis=0), np.concatenate(quats, axis=0), np.concatenate(times, axis=0))
117 | return res
118 |
119 |
120 | def open_from_zip(zippath: str, datapath: str, return_zip_path: bool = False) -> Union[BytesIO, Tuple[BytesIO, str]]:
121 | """
122 | Finds and opens the file inside the zip archive
123 | Args:
124 | zippath: path to .zip archive
125 |         datapath: path to the file inside .zip; can be a wildcard (e.g. 'dirname/*a??b.png').
126 |             In case a wildcard is supplied, there should be only one file matching it in the archive.
127 | return_zip_path: whether to return the path of the loaded file inside .zip
128 | Returns:
129 | BytesIO: filehandler containing the loaded file
130 | str: path to the opened file (if return_zip_path == True)
131 | """
132 | input_zip = ZipFile(zippath)
133 | match_fn = lambda x: fnmatch.fnmatch(x, datapath)
134 | filenames = list(filter(match_fn, input_zip.namelist()))
135 | if len(filenames) == 0:
136 | raise FileNotFoundError("No file matching '{}' in archive".format(datapath))
137 | elif len(filenames) > 1:
138 | raise FileNotFoundError("More than one file matching '{}' exists in archive: {}".format(datapath, filenames))
139 | else:
140 | filename = filenames[0]
141 | filehandler = BytesIO(input_zip.read(filename))
142 | if return_zip_path:
143 | return filehandler, filename
144 | return filehandler
145 |
146 |
147 | def trimesh_load_from_zip(zippath: str, datapath: str) -> Union[trimesh.Trimesh, trimesh.points.PointCloud]:
148 | filehandler, filename = open_from_zip(zippath, datapath, return_zip_path = True)
149 | ext = os.path.splitext(filename)[1][1:]
150 | mesh = trimesh.load(filehandler, ext, process=False)
151 | return mesh
152 |
153 |
154 | def get_camera_position(xyz_ang: Sequence[float], pos: Sequence[float]):
155 | camera_pose = np.array([
156 | [1.0, 0, 0, pos[0]],
157 | [0.0, 1.0, 0.0, pos[1]],
158 | [0.0, 0, 1.0, pos[2]],
159 | [0.0, 0.0, 0.0, 1.0],
160 | ])
161 | sin, cos = [np.sin(a) for a in xyz_ang], [np.cos(a) for a in xyz_ang]
162 | x_rot = np.array(
163 | [
164 | [1.0, 0, 0, 0.0],
165 | [0.0, cos[0], -sin[0], 0.0],
166 | [0.0, sin[0], cos[0], 0.0],
167 | [0.0, 0.0, 0.0, 1.0],
168 | ])
169 | y_rot = np.array(
170 | [
171 | [cos[1], 0, sin[1], 0.0],
172 | [0.0, 1.0, 0.0, 0.0],
173 | [-sin[1], 0, cos[1], 0.0],
174 | [0.0, 0.0, 0.0, 1.0],
175 | ])
176 | z_rot = np.array(
177 | [
178 | [cos[2], -sin[2], 0, 0.0],
179 | [sin[2], cos[2], 0.0, 0.0],
180 | [0.0, 0.0, 1.0, 0.0],
181 | [0.0, 0.0, 0.0, 1.0],
182 | ])
183 | return camera_pose.dot(z_rot.dot(y_rot.dot(x_rot)))
184 |
185 | def load_hps_sequence(poses_pickle_path, shape_json_path, smplxlib_format = False):
186 | pkl_seq = pickle.load(open(poses_pickle_path, 'rb'))
187 | shape = np.array(json.load(open(shape_json_path))['betas'])
188 | if smplxlib_format:
189 | res = [{"global_orient": pose[:3], "body_pose": pose[3:], "transl": translation, "betas": shape}
190 | for pose, translation in zip(pkl_seq['poses'], pkl_seq['transes'])]
191 | else:
192 | res = [{"pose": pose, "translation": translation, "shape": shape}
193 | for pose, translation in zip(pkl_seq['poses'], pkl_seq['transes'])]
194 | return res
195 |
--------------------------------------------------------------------------------
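Example calls for the zip helpers above (archive and member names follow the test script; a wildcard must match exactly one member, otherwise `open_from_zip` raises):

```python
from cloudrender.utils import open_from_zip, trimesh_load_from_zip

# direct member path
pointcloud = trimesh_load_from_zip("test_assets/MPI_Etage6-pc.zip", "pointcloud.ply")
# wildcard, additionally returning the matched path inside the archive
fh, matched_name = open_from_zip("test_assets/MPI_Etage6-pc.zip", "*.ply", return_zip_path=True)
```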
/download_test_assets.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | mkdir -p test_assets
3 | cd test_assets || exit
4 | wget "https://github.com/vguzov/cloudrender/releases/download/v1.3.6/test_assets.zip" -O test_assets.zip
5 | unzip test_assets.zip
6 | rm test_assets.zip
--------------------------------------------------------------------------------
/images/test_scene_video_output_example.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vguzov/cloudrender/7fb4ce6847bb00fea5eb8207fcb983552334932e/images/test_scene_video_output_example.gif
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | scipy
3 | pyglm>=2.2.0
4 | trimesh
5 | tqdm
6 | smplpytorch
7 | chumpy
8 | torch >= 1.8.0
9 | PyOpenGL==3.1.5
10 | videoio>=0.2.3
11 | smplx
12 | loguru
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | import os
2 | from glob import glob
3 | from setuptools import setup, find_packages
4 | version = '1.3.6'
5 |
6 | with open("README.md", "r") as fi:
7 | long_description = fi.read()
8 |
9 | keywords = ["rendering", "pointcloud", "opengl", "mesh"]
10 |
11 | classifiers = [
12 | 'Intended Audience :: Developers',
13 | 'License :: Other/Proprietary License',
14 | 'Natural Language :: English',
15 | 'Operating System :: Unix',
16 | 'Programming Language :: Python',
17 | 'Programming Language :: Python :: 3'
18 | ]
19 |
20 | requirements = [
21 | "numpy",
22 | "scipy",
23 | "pyglm>=2.2.0",
24 | "trimesh",
25 | "torch",
26 | "tqdm",
27 | "smplpytorch",
28 | "chumpy",
29 | "PyOpenGL==3.1.5",
30 |     "videoio>=0.2.3", "smplx", "loguru"
31 | ]
32 |
33 | # Include shaders
34 | package_path = "cloudrender/render/shaders"
35 | package_files = {"cloudrender.render.shaders": [os.path.relpath(x, package_path)
36 | for x in glob('cloudrender/render/shaders/**/*.glsl', recursive=True)]}
37 |
38 | setup(
39 | name="cloudrender",
40 | packages=find_packages(),
41 | package_data=package_files,
42 | include_package_data=True,
43 | version=version,
44 | description="An OpenGL framework for pointcloud and mesh rendering",
45 | author="Vladimir Guzov",
46 | author_email="vguzov@mpi-inf.mpg.de",
47 | url="https://github.com/vguzov/cloudrender",
48 | keywords=keywords,
49 | long_description=long_description,
50 | long_description_content_type='text/markdown',
51 | install_requires=requirements,
52 | classifiers=classifiers
53 | )
--------------------------------------------------------------------------------
/test_scene_video.py:
--------------------------------------------------------------------------------
1 | # On some systems, EGL does not start properly if OpenGL was already initialized, so it is better
2 | # to keep the EGLContext import at the top
3 | from cloudrender.libegl import EGLContext
4 | import logging
5 | import numpy as np
6 | import sys
7 | import os
8 | import json
9 | import smplpytorch
10 | from cloudrender.render import SimplePointcloud, DirectionalLight
11 | from cloudrender.render.smpl import SMPLXColoredModel
12 | from cloudrender.camera import PerspectiveCameraModel
13 | from cloudrender.camera.trajectory import Trajectory
14 | from cloudrender.scene import Scene
15 | from cloudrender.capturing import AsyncPBOCapture
16 | from videoio import VideoWriter
17 | from OpenGL import GL as gl
18 | from tqdm import tqdm
19 | from cloudrender.utils import trimesh_load_from_zip, load_hps_sequence
20 |
21 | logger = logging.getLogger("main_script")
22 | logger.setLevel(logging.INFO)
23 |
24 | SMPLX_ROOT = "./smplx_models" # Change this to your SMPL-X-compatible models folder
25 |
26 |
27 | # This example shows how to:
28 | # - render a pointcloud
29 | # - render a sequence of frames with moving SMPL mesh
30 | # - smoothly move the camera
31 | # - dump rendered frames to a video
32 |
33 |
34 | # First, let's set the target resolution, framerate, video length and initialize OpenGL context.
35 | # We will use EGL offscreen rendering for that, but you can change it to whatever context you prefer (e.g. OsMesa, X-Server)
36 | resolution = (1280,720)
37 | fps = 30.
38 | video_start_time = 6.
39 | video_length_seconds = 12.
40 | logger.info("Initializing EGL and OpenGL")
41 | context = EGLContext()
42 | if not context.initialize(*resolution):
43 |     print("Error during context initialization")
44 |     sys.exit(1)  # non-zero exit code on failure
45 |
46 | # Now, let's create and set up OpenGL frame and renderbuffers
47 | _main_cb, _main_db = gl.glGenRenderbuffers(2)
48 | viewport_width, viewport_height = resolution
49 |
50 | gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, _main_cb)
51 | gl.glRenderbufferStorage(
52 | gl.GL_RENDERBUFFER, gl.GL_RGBA,
53 | viewport_width, viewport_height
54 | )
55 |
56 | gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, _main_db)
57 | gl.glRenderbufferStorage(
58 | gl.GL_RENDERBUFFER, gl.GL_DEPTH_COMPONENT24,
59 | viewport_width, viewport_height
60 | )
61 |
62 | _main_fb = gl.glGenFramebuffers(1)
63 | gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, _main_fb)
64 | gl.glFramebufferRenderbuffer(
65 | gl.GL_DRAW_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0,
66 | gl.GL_RENDERBUFFER, _main_cb
67 | )
68 | gl.glFramebufferRenderbuffer(
69 | gl.GL_DRAW_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT,
70 | gl.GL_RENDERBUFFER, _main_db
71 | )
72 |
73 | gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, _main_fb)
74 | gl.glDrawBuffers([gl.GL_COLOR_ATTACHMENT0])
75 |
76 | # Let's configure OpenGL
77 | gl.glEnable(gl.GL_BLEND)
78 | gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
79 | gl.glClearColor(1.0, 1.0, 1.0, 0)
80 | gl.glViewport(0, 0, *resolution)
81 | gl.glEnable(gl.GL_DEPTH_TEST)
82 | gl.glDepthMask(gl.GL_TRUE)
83 | gl.glDepthFunc(gl.GL_LESS)
84 | gl.glDepthRange(0.0, 1.0)
85 |
86 | # Create and set a position of the camera
87 | camera = PerspectiveCameraModel()
88 | camera.init_intrinsics(resolution, fov=75, far=50)
89 | camera.init_extrinsics(np.array([1,np.pi/5,0,0]), np.array([0,-1,2]))
90 |
91 | # Create a scene
92 | main_scene = Scene()
93 |
94 | # Load pointcloud
95 | logger.info("Loading pointcloud")
96 | renderable_pc = SimplePointcloud(camera=camera)
97 | # Turn off shadow generation from pointcloud
98 | renderable_pc.generate_shadows = False
99 | renderable_pc.init_context()
100 | pointcloud = trimesh_load_from_zip("test_assets/MPI_Etage6-pc.zip", "pointcloud.ply")
101 | renderable_pc.set_buffers(pointcloud)
102 | main_scene.add_object(renderable_pc)
103 |
104 |
105 | # Load human motion
106 | logger.info("Loading SMPL animation")
107 | # set different smpl_root to SMPL .pkl files folder if needed
108 | renderable_smpl = SMPLXColoredModel(camera=camera, gender="male", model_type="smpl",
109 | smpl_root=SMPLX_ROOT, center_root_joint=True)
110 | # Turn off shadow drawing for SMPL model, as self-shadowing usually produces artifacts
111 | renderable_smpl.draw_shadows = False
112 | renderable_smpl.init_context()
113 | motion_seq = load_hps_sequence("test_assets/SUB4_MPI_Etage6_working_standing.pkl", "test_assets/SUB4.json",
114 | smplxlib_format=True)
115 | renderable_smpl.set_sequence(motion_seq, default_frame_time=1/30.)
116 | # Let's set diffuse material for SMPL model
117 | renderable_smpl.set_material(0.3,1,0,0)
118 | renderable_smpl.set_uniform_color((200, 200, 200, 255))
119 | main_scene.add_object(renderable_smpl)
120 |
121 |
122 | # Let's add a directional light with shadows for this scene
123 | light = DirectionalLight(np.array([0., -1., -1.]), np.array([0.8, 0.8, 0.8]))
124 |
125 |
126 | # We'll create a 4x4x10 meter shadowmap with a 1024x1024 texture buffer and center it above the model along the
127 | # direction of the light. We will move the shadowmap with the model in the main loop
128 | smpl_model_shadowmap_offset = -light.direction*3
129 | smpl_model_shadowmap = main_scene.add_dirlight_with_shadow(light=light, shadowmap_texsize=(1024,1024),
130 | shadowmap_worldsize=(4.,4.,10.),
131 | shadowmap_center=motion_seq[0]['transl']+smpl_model_shadowmap_offset)
132 |
133 |
134 | # Set camera trajectory and fill in spaces between keypoints with interpolation
135 | logger.info("Creating camera trajectory")
136 | camera_trajectory = Trajectory()
137 | camera_trajectory.set_trajectory(json.load(open("test_assets/TRAJ_SUB4_MPI_Etage6_working_standing.json")))
138 | camera_trajectory.refine_trajectory(time_step=1/30.)
139 |
140 |
141 | ### Main drawing loop ###
142 | logger.info("Running the main drawing loop")
143 | # Create a video writer to dump frames to and an async capturing controller
144 | with VideoWriter("test_assets/output.mp4", resolution=resolution, fps=fps) as vw, \
145 | AsyncPBOCapture(resolution, queue_size=50) as capturing:
146 | for current_time in tqdm(np.arange(video_start_time, video_start_time+video_length_seconds, 1/fps)):
147 | # Update dynamic objects
148 | renderable_smpl.set_time(current_time)
149 | smpl_model_shadowmap.camera.init_extrinsics(
150 | pose=renderable_smpl.global_translation+smpl_model_shadowmap_offset)
151 | # Move the camera along the trajectory
152 | camera_trajectory.apply(camera, current_time)
153 | # Clear OpenGL buffers
154 | gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
155 | # Draw the scene
156 | main_scene.draw()
157 | # Request color readout; optionally receive previous request
158 | gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, _main_fb)
159 | gl.glReadBuffer(gl.GL_COLOR_ATTACHMENT0)
160 | color = capturing.request_color_async()
161 | # If received the previous frame, write it to the video
162 | if color is not None:
163 | vw.write(color)
164 | # Flush the remaining frames
165 | logger.info("Flushing PBO queue")
166 | color = capturing.get_first_requested_color()
167 | while color is not None:
168 | vw.write(color)
169 | color = capturing.get_first_requested_color()
170 | logger.info("Done")
171 |
--------------------------------------------------------------------------------