├── README.md
├── render.py
├── render_utils.py
└── shades
    ├── mesh.frag
    └── mesh.vert


--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# rendering
Quick headless rendering of triangle meshes with trimesh and pyrender. Supports depth maps, normal maps, ray-to-mesh intersection points, and per-pixel triangle ids.

Usage:

````python
import trimesh
from matplotlib import pyplot as plt
from render import render_mesh
from render_utils import normalize_mesh

mesh = trimesh.load("path-to-file.obj")
normalize_mesh(mesh)

images, camera_poses = render_mesh(mesh, resolution=128, if_correct_normals=False)
triangle_ids, rendered_images, normal_maps, depth_images, p_images = images

plt.imshow(rendered_images[20])
````
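
The other outputs can be inspected the same way. A minimal sketch, continuing the snippet above (it assumes `render_mesh` was called with the default `only_render_images=False`, so all maps are populated):

````python
# Depth map and world-space normal map for the same view.
plt.figure(); plt.imshow(depth_images[20])
plt.figure(); plt.imshow((normal_maps[20] + 1) / 2)  # normals are in [-1, 1]; rescale for display

# p_images holds the ray-mesh intersection point per pixel (-1 where a ray misses),
# and triangle_ids holds the index of the hit triangle per pixel (-1 where a ray misses).
print(triangle_ids[20].shape, p_images[20].shape)
````

For a simple turntable-style result there is also `render_mesh_circular`, which places `N` cameras on a circle around the y-axis and returns only the shaded images.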

--------------------------------------------------------------------------------
/render.py:
--------------------------------------------------------------------------------
"""Headless mesh rendering helpers built on top of render_utils."""
from render_utils import *
from render_utils import Render, create_uniform_camera_poses


def create_uniform_camera_poses_circular(N, r=2):
    # Camera poses on a circle of radius r around the y-axis, all looking at the origin.
    def rotation_matrix_y(degrees):
        rad = degrees / 180 * np.pi
        return np.array(
            [[np.cos(rad), 0, -np.sin(rad)], [0, 1, 0], [np.sin(rad), 0, np.cos(rad)]]
        )

    camera_poses = []
    for i in range(N):
        frontvectors = np.array([0, 0, 1]) * r
        frontvectors = rotation_matrix_y(360 / N * i) @ frontvectors
        camera_pose = np.array(
            pyrr.Matrix44.look_at(
                eye=frontvectors, target=np.zeros(3), up=np.array([0.0, 1.0, 0])
            ).T
        )
        camera_pose = np.linalg.inv(camera_pose)
        camera_poses.append(camera_pose)
    return np.stack(camera_poses, 0)


def render_mesh(mesh, camera_poses=None, resolution=1024, radius=2.0, if_correct_normals=True,
                only_render_images=False, clean=False):
    """Render a mesh from uniformly distributed viewpoints.

    Returns ([triangle_ids, rendered_images, normal_maps, depth_images, p_images], camera_poses);
    triangle_ids, normal_maps and p_images are None when only_render_images is True.
    """
    if not isinstance(camera_poses, np.ndarray):
        camera_poses = create_uniform_camera_poses(radius)
    render = Render(size=resolution, camera_poses=camera_poses)
    triangle_ids, rendered_images, normal_maps, depth_images, p_images = render.render(
        path=None, clean=clean, mesh=mesh, only_render_images=only_render_images,
        if_correct_normals=if_correct_normals
    )
    return [triangle_ids, rendered_images, normal_maps, depth_images, p_images], camera_poses


def render_mesh_circular(mesh, N=10, resolution=1024, only_render_images=False):
    """Render a mesh from N viewpoints on a circle around the y-axis; returns only the shaded images."""
    camera_poses = create_uniform_camera_poses_circular(N)
    render = Render(size=resolution, camera_poses=camera_poses)
    triangle_ids, rendered_images, normal_maps, depth_images, p_images = render.render(
        path=None, clean=False, mesh=mesh, only_render_images=only_render_images
    )
    return rendered_images
--------------------------------------------------------------------------------
/render_utils.py:
--------------------------------------------------------------------------------
import os

os.environ['PYOPENGL_PLATFORM'] = 'egl'
import pyglet

pyglet.options['shadow_window'] = False
import matplotlib

matplotlib.use("Agg")
import open3d as o3d
from open3d import geometry, utility
from matplotlib import pyplot as plt
import pyrr
from pyrender import (
    DirectionalLight,
    SpotLight,
    PointLight,
)
from sklearn.neighbors import KDTree
import trimesh
import pyrender
import numpy as np
from PIL import Image

import time

SIZE = None
Vector3dVector, Vector3iVector = utility.Vector3dVector, utility.Vector3iVector
draw_geometries = o3d.visualization.draw_geometries


class Render:
    def __init__(self, size, camera_poses):
        self.size = size
        global SIZE
        SIZE = size

        if not isinstance(camera_poses, np.ndarray):
            self.camera_poses = create_uniform_camera_poses(2.0)
        else:
            self.camera_poses = camera_poses

    def render(self, path, clean=True, intensity=6.0, mesh=None, only_render_images=False,
               if_correct_normals=True):
        if not isinstance(mesh, trimesh.Trimesh):
            mesh = prepare_mesh(path, color=False, clean=clean)
        try:
            if mesh.visual.defined:
                mesh.visual.material.kwargs["Ns"] = 1.0
        except Exception:
            print("Error loading material!")
        mesh1 = pyrender.Mesh.from_trimesh(mesh, smooth=False)

        t1 = time.time()
        triangle_ids, normal_maps, depth_images, p_images = None, None, None, None
        if not only_render_images:
            # NOTE Normals are not normalized.
            triangle_ids, normal_maps, _, p_images = correct_normals(
                mesh, self.camera_poses, correct=if_correct_normals
            )
        rendered_images, depth_images = pyrender_rendering(
            mesh1, viz=False, light=True, camera_poses=self.camera_poses, intensity=intensity
        )
        print(time.time() - t1)
        return triangle_ids, rendered_images, normal_maps, depth_images, p_images


def correct_normals(mesh, camera_poses, correct=True):
    # Ray-trace the mesh from every camera; optionally flip faces whose normals point
    # away from the camera, then render a world-space normal map for each view.
    rayintersector = trimesh.ray.ray_pyembree.RayMeshIntersector(mesh)

    triangle_images = []
    normalmaps = []
    depth_maps = []
    p_images = []
    for i in range(camera_poses.shape[0]):
        a, b, index_tri, sign, p_image = trimesh_ray_tracing(
            mesh, camera_poses[i], resolution=SIZE, rayintersector=rayintersector
        )
        if correct:
            mesh.faces[index_tri[sign > 0]] = np.fliplr(mesh.faces[index_tri[sign > 0]])

        normalmap = render_normal_map(
            pyrender.Mesh.from_trimesh(mesh, smooth=False),
            camera_poses[i],
            SIZE,
            viz=False,
        )

        triangle_images.append(b)
        normalmaps.append(normalmap)
        depth_maps.append(a)
        p_images.append(p_image)
    return triangle_images, normalmaps, depth_maps, p_images


def all_rendering(mesh, camera_poses, light=False, viz=False, correct=True):
    rayintersector = trimesh.ray.ray_pyembree.RayMeshIntersector(mesh)
    mesh1 = pyrender.Mesh.from_trimesh(mesh, smooth=False)
    scene = pyrender.Scene()
    scene.add(mesh1)
    # renderer
    r = pyrender.OffscreenRenderer(SIZE, SIZE)
    camera = pyrender.PerspectiveCamera(yfov=np.pi / 3.0, aspectRatio=1.0)
    # the camera node has to be part of the scene; this mirrors pyrender_rendering below
    camera = scene.add(camera, pose=camera_poses[0])

    # light
    if light:
        lights = init_light(scene, camera_poses[0])

    triangle_images = []
    normalmaps = []
    depth_maps = []
    color_images = []

    for i in range(camera_poses.shape[0]):
        a, b, index_tri, sign, _ = trimesh_ray_tracing(
            mesh, camera_poses[i], resolution=SIZE, rayintersector=rayintersector
        )
        if correct:
            mesh.faces[index_tri[sign > 0]] = np.fliplr(mesh.faces[index_tri[sign > 0]])

        normalmap = render_normal_map(
            pyrender.Mesh.from_trimesh(mesh, smooth=False),
            camera_poses[i],
            SIZE,
            viz=False,
        )

        scene.set_pose(camera, camera_poses[i])
        if light:
            update_light(scene, lights, camera_poses[i])

        if light:
            color, _ = r.render(scene)  # , flags=pyrender.constants.RenderFlags.SKIP_CULL_FACES
        else:
            color, _ = r.render(
                scene, flags=pyrender.constants.RenderFlags.FLAT
            )  # | pyrender.constants.RenderFlags.SKIP_CULL_FACES

        triangle_images.append(b)
        normalmaps.append(normalmap)
        depth_maps.append(a)
        color_images.append(color)
    return color_images, triangle_images, normalmaps, depth_maps


def normalize_mesh(mesh, mode="sphere"):
    if mode == "sphere":
        mesh.vertices = mesh.vertices - mesh.vertices.mean(0)
        scale = np.linalg.norm(mesh.vertices, axis=1, ord=2).max()
        mesh.vertices = mesh.vertices / scale
    elif mode == "com":
        box = mesh.bounding_box_oriented
        mesh.vertices = mesh.vertices - box.vertices.mean(0)
        scale = np.linalg.norm(mesh.vertices, axis=1, ord=2).max()
        mesh.vertices = mesh.vertices / scale


def prepare_mesh(model_name, color=False, clean=False):
    mesh = trimesh.load(model_name, force="mesh")
    # mesh = trimesh.Trimesh(vertices=np.array(mesh.vertices), faces=np.array(mesh.faces))
    # if remesh:
    #     v, f = trimesh.remesh.subdivide_to_size(mesh.vertices, mesh.faces, 0.1)
    #     mesh.vertices = v
    #     mesh.faces = f
    if clean:
        mesh.remove_duplicate_faces()
        mesh.remove_degenerate_faces()
        mesh.remove_unreferenced_vertices()

    trimesh.repair.fix_inversion(mesh)
    trimesh.repair.fix_normals(mesh)

    normalize_mesh(mesh, "com")
    if color:
        mesh.visual.face_colors = generate_unique_colors(mesh.faces.shape[0])
    return mesh


def clean_using_o3d(mesh):
    # NOTE: convert_trimesh_to_o3d and convert_o3d_to_trimesh are expected to be
    # provided elsewhere; they are not defined in this file.
    mesh = convert_trimesh_to_o3d(mesh)
    mesh.remove_degenerate_triangles()
    mesh.remove_duplicated_triangles()
    mesh.remove_duplicated_vertices()
    mesh.remove_unreferenced_vertices()
    mesh.compute_triangle_normals()
    mesh.compute_vertex_normals()
    p = mesh.sample_points_poisson_disk(10000, 1)
    o3d.visualization.draw_geometries([mesh, p])
    return convert_o3d_to_trimesh(mesh)


def generate_unique_colors(size):
    # Assign a distinct RGB color to every face (useful for face-id rendering).
    colors = np.arange(1, 254 * 254 * 254)
    z = np.random.choice(colors, (size), replace=False)
    colors = np.unravel_index(z, (255, 255, 255))
    colors = np.stack(colors, 1)
    return colors


def init_light(scene, camera_pose, intensity=6.0):
    direc_l = DirectionalLight(color=np.ones(3), intensity=intensity)
    spot_l = SpotLight(
        color=np.ones(3),
        intensity=1.0,
        innerConeAngle=np.pi / 16,
        outerConeAngle=np.pi / 6,
    )
    point_l = PointLight(color=np.ones(3), intensity=1)

    direc_l_node = scene.add(direc_l, pose=camera_pose)
    point_l_node = scene.add(point_l, pose=camera_pose)
    spot_l_node = scene.add(spot_l, pose=camera_pose)
    return spot_l_node, direc_l_node, point_l_node


def update_light(scene, lights, pose):
    for l in lights:
        scene.set_pose(l, pose)


class CustomShaderCache:
    # Makes pyrender use the normal-rendering shaders in shades/ instead of its defaults.
    def __init__(self):
        self.program = None

    def get_program(
        self, vertex_shader, fragment_shader, geometry_shader=None, defines=None
    ):
        if self.program is None:
            self.program = pyrender.shader_program.ShaderProgram(
                "shades/mesh.vert", "shades/mesh.frag", defines=defines
            )
        return self.program


def render_normal_map(mesh, camera_pose, size, viz=False):
    scene = pyrender.Scene(bg_color=(255, 255, 255))
    scene.add(mesh)
    camera = pyrender.PerspectiveCamera(yfov=np.pi / 3.0, aspectRatio=1.0)
    scene.add(camera, pose=camera_pose)

    renderer = pyrender.OffscreenRenderer(size, size)
    renderer._renderer._program_cache = CustomShaderCache()

    normals, depth = renderer.render(scene)  # flags=pyrender.constants.RenderFlags.SKIP_CULL_FACES
    world_space_normals = normals / 255 * 2 - 1

    if viz:
        image = Image.fromarray(normals, "RGB")
        image.show()

    return world_space_normals


def pyrender_rendering(mesh, camera_poses, viz=False, light=False, intensity=6.0):
    # renderer
    r = pyrender.OffscreenRenderer(SIZE, SIZE)

    scene = pyrender.Scene()
    scene.add(mesh)

    camera = pyrender.PerspectiveCamera(yfov=np.pi / 3.0, aspectRatio=1.0)
    # frontVector = np.array([1.2, 1.2, 1.2])
    # frontVector = (Rotation.from_euler('y', 0, degrees=True)).apply(frontVector)
    # camera_pose = np.array(pyrr.Matrix44.look_at(eye=frontVector, target=np.zeros(3), up=np.array([0.0, 1.0, 0])).T)
    # camera_pose = np.linalg.inv(np.array(camera_pose))
    camera = scene.add(camera, pose=camera_poses[0])
    # light
    if light:
        lights = init_light(scene, camera_poses[0], intensity=intensity)

    images = []
    depth_images = []
    for i in range(camera_poses.shape[0]):
        # camera
        scene.set_pose(camera, camera_poses[i])
        if light:
            update_light(scene, lights, camera_poses[i])

        if light:
            color, depth = r.render(scene)  # , flags=pyrender.constants.RenderFlags.SKIP_CULL_FACES
        else:
            color, depth = r.render(
                scene, flags=pyrender.constants.RenderFlags.FLAT
            )  # | pyrender.constants.RenderFlags.SKIP_CULL_FACES

        if viz:
            plt.figure()
            plt.imshow(color)
        images.append(color)
        depth_images.append(depth)
    return images, depth_images


def create_look_at(eye, target, up, dtype=None):
    """Creates a look-at (view) matrix according to OpenGL conventions.

    :param numpy.array eye: Position of the camera in world coordinates.
    :param numpy.array target: The position in world coordinates that the
        camera is looking at.
    :param numpy.array up: The up vector of the camera.
    :rtype: numpy.array
    :return: A look-at matrix that can be used as a viewMatrix.
    """

    def normalize(a):
        return a / (np.linalg.norm(a, ord=2) + 1e-7)

    eye = np.asarray(eye)
    target = np.asarray(target)
    up = np.asarray(up)

    cameraDirection = normalize(eye - target)
    right = normalize(np.cross(normalize(up), cameraDirection))
    up = normalize(np.cross(cameraDirection, right))
    M = np.zeros((4, 4))
    M[0, 0:3] = right
    M[1, 0:3] = up
    M[2, 0:3] = cameraDirection
    M[3, 3] = 1.0
    T = np.eye(4)
    T[0:3, -1] = -eye
    return M @ T


def camera_transform_matrix(eye, target, up, dtype=None):
    """Creates a camera pose (camera-to-world) matrix following OpenGL conventions.

    :param numpy.array eye: Position of the camera in world coordinates.
    :param numpy.array target: The position in world coordinates that the
        camera is looking at.
    :param numpy.array up: The up vector of the camera.
    :rtype: numpy.array
    :return: A matrix that can be used as a camera transform (e.g. trimesh's
        scene.camera_transform).
    """

    def normalize(a):
        return a / (np.linalg.norm(a, ord=2) + 1e-7)

    eye = np.asarray(eye)
    target = np.asarray(target)
    up = np.asarray(up)

    cameraDirection = normalize(eye - target)
    right = -normalize(np.cross(normalize(up), cameraDirection))
    up = -normalize(np.cross(cameraDirection, right))
    M = np.zeros((4, 4))
    M[0, 0:3] = right
    M[1, 0:3] = up
    M[2, 0:3] = cameraDirection
    M[3, 3] = 1.0

    M = M.T
    M[0:3, -1] = eye
    return M


def trimesh_ray_tracing(mesh, M, resolution=225, fov=60, rayintersector=None):
    # The extra matrix below swaps the camera's x and y axes to compensate for the
    # way trimesh lays out its camera rays; this is a workaround specific to this
    # setup and does not hold in general.
    extra = np.eye(4)
    extra[0, 0] = 0
    extra[0, 1] = 1
    extra[1, 0] = -1
    extra[1, 1] = 0
    scene = mesh.scene()

    # np.linalg.inv(create_look_at(frontVector, np.zeros(3), np.array([0, 1, 0])))
    scene.camera_transform = M @ extra  # @ np.diag([1, -1, -1, 1])
    # scene.camera_transform = camera_transform_matrix(frontVector, np.zeros(3), np.array([0, 1, 0])) @ e

    # any of the automatically generated values can be overridden
    # set resolution, in pixels
    scene.camera.resolution = [resolution, resolution]
    # set field of view, in degrees
    # make it relative to resolution so pixels per degree is same
    scene.camera.fov = fov, fov

    # convert the camera to rays with one ray per pixel
    origins, vectors, pixels = scene.camera_rays()

    # do the actual ray-mesh queries
    # points, index_ray, index_tri = mesh.ray.intersects_location(
    #     origins, vectors, multiple_hits=False)
    # points, index_ray, index_tri = rayintersector.intersects_location(
    #     origins, vectors, multiple_hits=False)
    index_tri, index_ray, points = rayintersector.intersects_id(
        origins, vectors, multiple_hits=False, return_locations=True
    )
    # for each hit, find the distance along its ray and whether the face normal
    # points along the ray direction (used to detect inverted faces)
    depth = trimesh.util.diagonal_dot(points - origins[0], vectors[index_ray])
    sign = trimesh.util.diagonal_dot(mesh.face_normals[index_tri], vectors[index_ray])

    # find pixel locations of actual hits
    pixel_ray = pixels[index_ray]
    # create a numpy array we can turn into an image
    # doing it with uint8 creates an `L` mode greyscale image
    a = np.zeros(scene.camera.resolution, dtype=np.uint8)
    b = np.ones(scene.camera.resolution, dtype=np.int32) * -1
    p_image = np.ones([scene.camera.resolution[0], scene.camera.resolution[1], 3], dtype=np.float32) * -1
    # scale depth against range (0.0 - 1.0)
    depth_float = (depth - depth.min()) / depth.ptp()

    # convert depth into 0 - 255 uint8
    depth_int = (depth_float * 255).round().astype(np.uint8)

    # assign depth to correct pixel locations
    a[pixel_ray[:, 0], pixel_ray[:, 1]] = depth_int
    b[pixel_ray[:, 0], pixel_ray[:, 1]] = index_tri
    p_image[pixel_ray[:, 0], pixel_ray[:, 1]] = points

    # return the depth image, triangle-id image, hit info and per-pixel hit points
    return a, b, index_tri, sign, p_image


def create_uniform_camera_poses(distance=2):
    # Camera centers are the vertices of a sphere of the given radius; every camera
    # looks at the origin.
    mesh = geometry.TriangleMesh()
    frontvectors = np.array(mesh.create_sphere(distance, 7).vertices)
    camera_poses = []
    for i in range(frontvectors.shape[0]):
        camera_pose = np.array(pyrr.Matrix44.look_at(eye=frontvectors[i],
                                                     target=np.zeros(3),
                                                     up=np.array([0.0, 1.0, 0])).T)
        camera_pose = np.linalg.inv(camera_pose)
        camera_poses.append(camera_pose)
    return np.stack(camera_poses, 0)


def generate_dodecahedron():
    # r = (1.0 + math.sqrt(5.0)) / 2.0
    vertices = np.array([
        -0.57735, -0.57735, 0.57735,
        0.934172, 0.356822, 0,
        0.934172, -0.356822, 0,
        -0.934172, 0.356822, 0,
        -0.934172, -0.356822, 0,
        0, 0.934172, 0.356822,
        0, 0.934172, -0.356822,
        0.356822, 0, -0.934172,
        -0.356822, 0, -0.934172,
        0, -0.934172, -0.356822,
        0, -0.934172, 0.356822,
        0.356822, 0, 0.934172,
        -0.356822, 0, 0.934172,
        0.57735, 0.57735, -0.57735,
        0.57735, 0.57735, 0.57735,
        -0.57735, 0.57735, -0.57735,
        -0.57735, 0.57735, 0.57735,
        0.57735, -0.57735, -0.57735,
        0.57735, -0.57735, 0.57735,
        -0.57735, -0.57735, -0.57735,
    ]).reshape((-1, 3), order="C")
    return vertices


def transfer_labels_shapenet_points_to_mesh(points, labels, mesh):
    # NOTE: visualize_point_cloud is expected to be provided elsewhere; it is not
    # defined in this file.
    pcd = visualize_point_cloud(points, viz=False)
    box = pcd.get_axis_aligned_bounding_box()
    points = points @ np.array([[0, 0, 1], [0, 1, 0], [-1, 0, 0]]).T
    points = points / np.linalg.norm(points, axis=1, ord=2).max()
    points = points - points.mean(0)
    points = points + np.array(mesh.sample(2500)).mean(0)
    pcd.points = Vector3dVector(points)

    _, indices = find_match(np.array(pcd.points), mesh.triangles_center)
    return labels[indices]


def find_match(source, target, k=1):
    tree = KDTree(source)
    d, indices = tree.query(target, k=k)
    return d[:, 0], indices[:, 0]


def normalize_colors(c):
    c = c - c.min((0, 1), keepdims=True)
    c = c / c.max((0, 1), keepdims=True)
    return c
--------------------------------------------------------------------------------
/shades/mesh.frag:
--------------------------------------------------------------------------------
#version 330 core

in vec3 frag_position;
in vec3 frag_normal;

out vec4 frag_color;

void main()
{
    vec3 normal = normalize(frag_normal);

    frag_color = vec4(normal * 0.5 + 0.5, 1.0);
}
--------------------------------------------------------------------------------
/shades/mesh.vert:
--------------------------------------------------------------------------------
#version 330 core

// Vertex Attributes
layout(location = 0) in vec3 position;
layout(location = NORMAL_LOC) in vec3 normal;
layout(location = INST_M_LOC) in mat4 inst_m;

// Uniforms
uniform mat4 M;
uniform mat4 V;
uniform mat4 P;

// Outputs
out vec3 frag_position;
out vec3 frag_normal;

void main()
{
    gl_Position = P * V * M * inst_m * vec4(position, 1);
    frag_position = vec3(M * inst_m * vec4(position, 1.0));

    mat4 N = transpose(inverse(M * inst_m));
    frag_normal = normalize(vec3(N * vec4(normal, 0.0)));
}
--------------------------------------------------------------------------------