├── .gitignore ├── Gltf_Parser ├── cameraUtils.py ├── gltfparser.py └── parserUtils.py ├── LICENSE ├── README.md ├── gallery ├── 3d_viewer │ ├── Armored Van.jpg │ ├── Attack helicopter.jpg │ ├── Banana Gun with Scope.jpg │ ├── Bee.jpg │ ├── Castle.jpg │ ├── Cathedral.jpg │ ├── Covered wagon.jpg │ ├── Create a dragon tale.jpg │ ├── Double-Decker.jpg │ ├── Emoji With Glasses.jpg │ ├── Fantastical diorama.jpg │ ├── Gazebo.jpg │ ├── Large troll.jpg │ ├── Lynx - low poly.jpg │ ├── Motorcycle.jpg │ ├── Mount Rainier.jpg │ ├── Plunger of DEATH.jpg │ ├── Quaint Village.jpg │ ├── Red dragon.jpg │ ├── Rock fountain.jpg │ ├── Sabrewulf.jpg │ ├── Sedan car.jpg │ ├── Small troll.jpg │ ├── Smiling Critter.jpg │ ├── Snowscape diorama.jpg │ ├── Snowy Village.jpg │ ├── Steampunk Cottage.jpg │ ├── Steampunk Dirigible with Ship.jpg │ ├── Stone.jpg │ ├── Tiger.jpg │ ├── Topiary 4.jpg │ ├── Tree with Falling Leaves.jpg │ ├── Tyrannosaurus Rex.jpg │ ├── Unicorn.jpg │ ├── Vulture.jpg │ ├── Wacky UFO.jpg │ └── Winter Cabin.jpg └── official_samples │ ├── 2CylinderEngine.jpg │ ├── AlphaBlendModeTest.jpg │ ├── AnimatedMorphCube.jpg │ ├── AnimatedMorphSphere.jpg │ ├── AntiqueCamera.jpg │ ├── Avocado.jpg │ ├── BarramundiFish.jpg │ ├── BoomBox.jpg │ ├── Box.jpg │ ├── BoxAnimated.jpg │ ├── BoxInterleaved.jpg │ ├── BoxTextured.jpg │ ├── BoxTexturedNonPowerOfTwo.jpg │ ├── BoxVertexColors.jpg │ ├── BrainStem.jpg │ ├── Buggy.jpg │ ├── CesiumMan.jpg │ ├── CesiumMilkTruck.jpg │ ├── ClearCoatTest.jpg │ ├── Corset.jpg │ ├── DamagedHelmet.jpg │ ├── Duck.jpg │ ├── GearboxAssy.jpg │ ├── InterpolationTest.jpg │ ├── Lantern.jpg │ ├── MaterialsVariantsShoe.jpg │ ├── MetalRoughSpheres.jpg │ ├── MetalRoughSpheresNoTextures.jpg │ ├── MorphPrimitivesTest.jpg │ ├── MorphStressTest.jpg │ ├── NormalTangentMirrorTest.jpg │ ├── NormalTangentTest.jpg │ ├── OrientationTest.jpg │ ├── ReciprocatingSaw.jpg │ ├── RiggedFigure.jpg │ ├── RiggedSimple.jpg │ ├── SheenChair.jpg │ ├── SheenCloth.jpg │ ├── SpecGlossVsMetalRough.jpg │ ├── TextureCoordinateTest.jpg │ ├── TextureEncodingTest.jpg │ ├── TextureSettingsTest.jpg │ ├── TextureTransformMultiTest.jpg │ ├── TextureTransformTest.jpg │ ├── ToyCar.jpg │ ├── VertexColorTest.jpg │ └── WaterBottle.jpg ├── main.py ├── samples └── basic │ ├── basic.glb │ └── basic.png └── testUtils.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Prerequisites 2 | *.d 3 | 4 | # Compiled Object files 5 | *.slo 6 | *.lo 7 | *.o 8 | *.obj 9 | 10 | # Precompiled Headers 11 | *.gch 12 | *.pch 13 | 14 | # Compiled Dynamic libraries 15 | *.so 16 | *.dylib 17 | *.dll 18 | 19 | # Fortran module files 20 | *.mod 21 | *.smod 22 | 23 | # Compiled Static libraries 24 | *.lai 25 | *.la 26 | *.a 27 | *.lib 28 | 29 | # Executables 30 | *.exe 31 | *.out 32 | *.app 33 | -------------------------------------------------------------------------------- /Gltf_Parser/cameraUtils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import math 3 | import vray 4 | 5 | #returns rotational axis* rotational angle vec3 to rotate the camera to a specific point 6 | #takes vray.Vector 7 | def camera_look_at(from_vec,to_vec,camera_facing_dir = vray.Vector(0.0,0.0,-1.0)): 8 | dist_vec = from_vec - to_vec 9 | 10 | directionA = camera_facing_dir.normalize() 11 | directionB = dist_vec.normalize() 12 | 13 | rotAngle = math.acos(directionA*directionB) 14 | 15 | rotAxis = (directionA^directionB).normalize() 16 | 17 | return rotAxis*rotAngle 18 | 19 | 
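# Usage sketch (not part of the original module): one way to turn the axis*angle vector
# returned by camera_look_at() into a camera transform. It assumes a RenderView plugin
# named renderView created elsewhere, placeholder eye/target positions, and the
# pyquaternion package that gltfparser.py already imports.
#
#   import pyquaternion as pyq
#
#   eye    = vray.Vector(0.0, 1.0, 5.0)
#   target = vray.Vector(0.0, 0.0, 0.0)
#   rot    = camera_look_at(eye, target)    # rotation axis scaled by the rotation angle
#   angle  = rot.length()
#   if angle > 1e-8:
#       q = pyq.Quaternion(axis=[rot.x/angle, rot.y/angle, rot.z/angle], radians=angle)
#       m = q.rotation_matrix                # 3x3 rotation, row-major
#       camMatrix = vray.Matrix(vray.Vector(m[0][0], m[1][0], m[2][0]),
#                               vray.Vector(m[0][1], m[1][1], m[2][1]),
#                               vray.Vector(m[0][2], m[1][2], m[2][2]))
#   else:
#       camMatrix = vray.Matrix.identity     # already looking along camera_facing_dir
#   renderView.transform = vray.Transform(camMatrix, eye)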
def make_transform(rotX=0, rotY=0, rotZ=0, scale=1, offset=vray.Vector(0.0,0.0,0.0)): 20 | """Creates a transform with the specified rotation and scale. 21 | """ 22 | mS = vray.Matrix(scale) 23 | mX = vray.Matrix.makeRotationMatrixX(rotX) 24 | mY = vray.Matrix.makeRotationMatrixY(rotY) 25 | mZ = vray.Matrix.makeRotationMatrixZ(rotZ) 26 | transform = vray.Transform(mS * mZ * mY * mX, offset) 27 | return transform 28 | #the biggest distance from 0.0 be it x,y or z 29 | #angle around the 0.0 as in rotating the camera around 0.0 30 | # view - one of 'front', 'top', 'right', 'back', 'bottom', 'left' 31 | def set_up_default_camera(renderer, minSceneBound, maxSceneBound, look_at = vray.Vector(0,0,0), rot_angles = (0.0,0.0,0.0), 32 | fov = math.pi/2, cam_moffset = (-0.5,0.1,0.0),default_cam_pos = None,zoom = 1.0, view='front'): 33 | renderView = renderer.classes.RenderView() 34 | renderView.fov = fov 35 | 36 | diag=maxSceneBound-minSceneBound 37 | diagLen=diag.length() 38 | max_pos_val=diagLen 39 | print("Default Camera looking at : " + str(look_at)) 40 | if default_cam_pos != None: 41 | camPos = vray.Vector(default_cam_pos[0], default_cam_pos[1], default_cam_pos[2]) 42 | camUp=vray.Vector(0,1,0) 43 | else: 44 | if view=='auto': 45 | if diag.ydiag.x*1.2: 48 | view='right' 49 | else: 50 | view='front' 51 | 52 | if view=='front': 53 | camPos=vray.Vector(minSceneBound.x+diag.x*0.5, minSceneBound.y+diag.y*0.5, maxSceneBound.z) 54 | upVec=vray.Vector(0,1,0) 55 | elif view=='top': 56 | camPos=vray.Vector(minSceneBound.x+diag.x*0.5, maxSceneBound.y, minSceneBound.z+diag.z*0.5) 57 | upVec=vray.Vector(0,0,-1) 58 | elif view=='right': 59 | camPos=vray.Vector(maxSceneBound.x, minSceneBound.y+diag.y*0.5, minSceneBound.z+diag.z*0.5) 60 | upVec=vray.Vector(0,1,0) 61 | elif view=='back': 62 | camPos=vray.Vector(minSceneBound.x+diag.x*0.5, minSceneBound.y+diag.y*0.5, minSceneBound.z) 63 | upVec=vray.Vector(0,1,0) 64 | elif view=='bottom': 65 | camPos=vray.Vector(minSceneBound.x+diag.x*0.5, minSceneBound.y, minSceneBound.z+diag.z*0.5) 66 | upVec=vray.Vector(0,0,1) 67 | elif view=='left': 68 | camPos=vray.Vector(minSceneBound.x, minSceneBound.y+diag.y*0.5, minSceneBound.z+diag.z*0.5) 69 | upVec=vray.Vector(0,1,0) 70 | else: 71 | upVec=vray.Vector(0,1,0) 72 | if (diag.x>diag.z): 73 | camPos=vray.Vector(minSceneBound.x+diag.x*0.5, minSceneBound.y+diag.y*0.5, maxSceneBound.z) 74 | else: 75 | camPos=vray.Vector(maxSceneBound.x, minSceneBound.y+diag.y*0.5, minSceneBound.z+diag.z*0.5) 76 | 77 | camDist=diagLen*0.5/math.sin(fov*0.5) 78 | camDiff=camPos-look_at 79 | camPos=look_at+camDiff*camDist*(1.0+cam_moffset[2])/camDiff.length() 80 | camRight=(upVec^camDiff).normalize() 81 | camUp=(camDiff^camRight).normalize() 82 | camPos+=camDist*camRight*cam_moffset[0] 83 | camPos+=camDist*camUp*cam_moffset[1] 84 | 85 | mX = vray.Matrix.makeRotationMatrixX(math.radians(rot_angles[0])) 86 | mY = vray.Matrix.makeRotationMatrixY(math.radians(rot_angles[1])) 87 | mZ = vray.Matrix.makeRotationMatrixZ(math.radians(rot_angles[2])) 88 | camPos=camPos*mX*mY*mZ 89 | 90 | camZ=(camPos-look_at).normalize() 91 | camX=(camUp^camZ).normalize() 92 | camY=(camZ^camX).normalize() 93 | camTransform=vray.Transform(vray.Matrix(camX, camY, camZ), camPos) 94 | 95 | renderView.transform = camTransform 96 | return camTransform -------------------------------------------------------------------------------- /Gltf_Parser/gltfparser.py: -------------------------------------------------------------------------------- 1 | 2 | from __future__ import 
print_function 3 | 4 | import os 5 | import sys 6 | import json 7 | 8 | import vray 9 | 10 | from . import parserUtils 11 | from . import cameraUtils 12 | 13 | import numpy as np 14 | from tempfile import mkstemp 15 | import math 16 | import pyquaternion as pyq 17 | 18 | #decoding data uris 19 | import base64 20 | 21 | #debug 22 | import testUtils 23 | 24 | #gltf textures need to be flipped around the Y axis for opengl to vray coords 25 | TEXTURE_FLIP_TRANSFORM = vray.Transform(vray.Matrix(vray.Vector(1.0, 0.0, 0.0), 26 | vray.Vector(0.0, -1.0, 0.0), 27 | vray.Vector(0.0, 0.0, 1.0)), 28 | vray.Vector(0.0, 1.0, 0.0)) 29 | 30 | def computeNormalMatrix(nrm): 31 | nxSq=nrm.x*nrm.x 32 | nySq=nrm.y*nrm.y 33 | nzSq=nrm.z*nrm.z 34 | lenSq1=nxSq+nzSq 35 | lenSq2=nxSq+nySq 36 | if lenSq1>lenSq2: 37 | lenInv=1.0/math.sqrt(lenSq1) 38 | uVec=vray.Vector(-nrm.z*lenInv, 0.0, nrm.x*lenInv) 39 | else: 40 | lenInv=1.0/math.sqrt(lenSq2) 41 | uVec=vray.Vector(nrm.y*lenInv, -nrm.x*lenInv, 0.0) 42 | vVec=nrm^uVec; 43 | return vray.Matrix(uVec, vVec, nrm) 44 | 45 | def minVec(a, b): 46 | return vray.Vector(min(a.x, b.x), min(a.y, b.y), min(a.z, b.z)) 47 | 48 | def maxVec(a, b): 49 | return vray.Vector(max(a.x, b.x), max(a.y, b.y), max(a.z, b.z)) 50 | 51 | ## Apply vertex color to the diffuse color of the given brdf. This is done by multiplying the texture by a vertex color texture. 52 | # @param renderer A VRayRenderer object. 53 | # @param brdf The BRDF for which to adjust the diffuse color; typicall BRDFVRayMtl. 54 | # @param channel_names A string list with the mapping channels for the mesh that the material is applied to. 55 | def applyVertexColor(renderer, brdf, channel_names): 56 | if 'COLOR_0' in channel_names: 57 | vertexColor_idx=channel_names.index('COLOR_0') 58 | vertexColor_uvwGen=renderer.classes.UVWGenChannel() 59 | vertexColor_uvwGen.uvw_channel=vertexColor_idx 60 | 61 | vertexColor_tex=renderer.classes.TexUVW() 62 | vertexColor_tex.uvwgen=vertexColor_uvwGen 63 | 64 | diffuseVertexColor_tex=renderer.classes.TexAColorOp() 65 | diffuseVertexColor_tex.color_a=brdf.diffuse 66 | diffuseVertexColor_tex.color_b=vertexColor_tex 67 | 68 | brdf.diffuse=diffuseVertexColor_tex.product 69 | 70 | class GltfParser: 71 | def __init__(self): 72 | 73 | self.nodes = [] 74 | self.buffers = [] 75 | self.bufferViews = [] 76 | self.accessors = [] 77 | self.meshes = [] 78 | self.materials = [] 79 | self.primitives = [] 80 | self.textures = [] 81 | self.images = [] 82 | self.samplers = [] 83 | self.skins = [] 84 | self.scenes = [] 85 | self.extensions = [] 86 | self.cameras = [] 87 | self.current_camera_id = 0 88 | 89 | self.animations = [] 90 | self.animation_time = 0.0 91 | 92 | self.animation_fps = 60.0 93 | self.current_time = 0.0 94 | 95 | #SPECIAL: GLTF does not have lights, they are added as extensions to nodes 96 | self.lights = [] 97 | 98 | #relative to the scene and the avarage node position set by command line arguments for now 99 | self.defaultCameraTransform = vray.Transform(vray.Matrix.identity, vray.Vector(0.0,0.0,0.0)) 100 | self.max_pos_object_val = 0.0 101 | self.minVertBound=vray.Vector(1e18, 1e18, 1e18) 102 | self.maxVertBound=vray.Vector(-1e18, -1e18, -1e18) 103 | self.average_scene_pos = vray.Vector(0,0,0) 104 | self.scene_verts = 0 105 | #overwritten look at from console 106 | self.average_scene_pos_or = None 107 | self.default_cam_rot = (0,0,0) 108 | self.default_cam_moffset = (0.0,0.0,0.0) 109 | self.default_cam_pos = None 110 | #fov is in degrees for user ease of use 111 | self.default_cam_fov = 
45 112 | 113 | self.file_loc = None 114 | 115 | #options 116 | self.use_only_default_mat = False 117 | self.use_default_cam = False 118 | self.use_default_lights = False 119 | self.use_ground_plane = False 120 | self.thick_glass = False 121 | self.thin_glass = False 122 | self.trace_depth = 8 123 | self.environment_scene=None 124 | 125 | def set_options(self, args = None): 126 | self.animation_fps = args.animation_fps 127 | 128 | self.use_only_default_mat = args.test_material 129 | self.use_default_cam = args.default_camera 130 | self.default_cam_rot = args.default_cam_rot 131 | self.default_cam_moffset = args.default_cam_moffset 132 | self.default_cam_pos = args.default_cam_pos 133 | self.default_cam_zoom = args.default_cam_zoom 134 | self.default_cam_view = args.default_cam_view 135 | self.default_cam_fov = args.default_cam_fov 136 | self.use_default_lights = args.default_lights 137 | self.use_ground_plane = args.ground_plane 138 | self.thick_glass = args.thick_glass 139 | self.thin_glass = args.thin_glass 140 | self.trace_depth = args.trace_depth 141 | self.environment_scene=args.environment_scene 142 | 143 | if args.default_cam_look_at != None: 144 | self.average_scene_pos_or = vray.Vector(args.default_cam_look_at[0],args.default_cam_look_at[1],args.default_cam_look_at[2]) 145 | # add in more args 146 | 147 | #used for default adaptive camera 148 | def _get_camera_pos_data(self,vec): 149 | self.max_pos_object_val = max(self.max_pos_object_val,abs(vec.x),abs(vec.y),abs(vec.z)) 150 | self.average_scene_pos = self.average_scene_pos + vec 151 | self.minVertBound=minVec(self.minVertBound, vec) 152 | self.maxVertBound=maxVec(self.maxVertBound, vec) 153 | 154 | def _get_accessor_offset(self,accessor): 155 | return parserUtils.none_to_zero(accessor.byteOffset) + parserUtils.none_to_zero(self.bufferViews[accessor.bufferViewID].byteOffset) 156 | 157 | def _get_bufferview_offset(self,bufferViewID): 158 | return parserUtils.none_to_zero(self.bufferViews[bufferViewID].byteOffset) 159 | 160 | def _get_accessor_stride(self,accessor): 161 | return parserUtils.none_to_zero(self.bufferViews[accessor.bufferViewID].byteStride) 162 | 163 | def _get_bufferview_stride(self,bufferViewID): 164 | return parserUtils.none_to_zero(self.bufferViews[bufferViewID].byteStride) 165 | 166 | def _get_accessor_byte_length(self,accessor): 167 | return parserUtils.none_to_zero(self.bufferViews[accessor.bufferViewID].byteLength) 168 | 169 | def _get_bufferview_byte_length(self,bufferViewID): 170 | return parserUtils.none_to_zero(self.bufferViews[bufferViewID].byteLength) 171 | 172 | def _get_accessor_buffer_uri(self,accessor): 173 | # if no URI aka .glb returns None, if there is URI it return URI or data URI 174 | return self.buffers[self.bufferViews[accessor.bufferViewID].bufferID].uri 175 | 176 | def _get_buffer_uri(self,bufferViewID): 177 | # if no URI aka .glb returns None, if there is URI it return URI or data URI 178 | return self.buffers[self.bufferViews[bufferViewID].bufferID].uri 179 | 180 | def _from_bin_w_stride(self,bufferData, bufferViewID, compType, count, countModifier, offset,base64str = False): 181 | 182 | b_stride = self._get_bufferview_stride(bufferViewID) 183 | b_length = self._get_bufferview_byte_length(bufferViewID) 184 | comp_size = compType.itemsize 185 | 186 | if base64str == True: 187 | raw_data = np.fromstring(bufferData[offset:count*b_stride],dtype = 'B') 188 | else: 189 | raw_data = np.memmap(bufferData,dtype = 'B',mode ='r',shape=(count*b_stride),offset = offset) 190 | 191 | data = 
np.empty(shape = (count*countModifier,), dtype = compType) 192 | for idx in range(0,count): 193 | 194 | data_it = idx*countModifier 195 | raw_data_it = idx*b_stride 196 | 197 | data[data_it : data_it+countModifier] = (raw_data[raw_data_it : raw_data_it + countModifier*comp_size]).view(compType) 198 | 199 | data = data.reshape(count,countModifier) 200 | 201 | 202 | return data 203 | 204 | #Parses data for accessors using component Types and shapes it while considering the encoding methods 205 | def _get_data(self, buffer_uri, bufferViewID, sceneFile, compType, count, countModifier, offset): 206 | data = None 207 | 208 | if buffer_uri == None: 209 | if self.bufferViews[bufferViewID].byteStride == None: 210 | data = np.fromfile(sceneFile, dtype = compType, count = count*countModifier, offset = offset) 211 | data = data.reshape(count,countModifier) 212 | else: 213 | 214 | data = self._from_bin_w_stride(sceneFile, bufferViewID, np.dtype(compType), count, countModifier, offset) 215 | 216 | elif 'data:application/' in buffer_uri: 217 | # Reading here from a base64str encoded string data from the actual .gltf file 218 | base64_data = buffer_uri[buffer_uri.find(','):] 219 | decoded_data = base64.b64decode(base64_data) 220 | if self.bufferViews[bufferViewID].byteStride == None: 221 | data = np.fromstring(decoded_data[offset:], dtype = compType, count = count*countModifier) 222 | data = data.reshape(count, countModifier) 223 | else: 224 | data = self._from_bin_w_stride(decoded_data, bufferViewID, np.dtype(compType), count,countModifier, offset, base64str = True) 225 | 226 | else: 227 | file_path = os.path.join(self.file_loc, buffer_uri) 228 | if self.bufferViews[bufferViewID].byteStride == None: 229 | data = np.fromfile(file_path,dtype = compType, count = count*countModifier, offset = offset) 230 | data = data.reshape(count,countModifier) 231 | else: 232 | data = self._from_bin_w_stride(file_path, bufferViewID, np.dtype(compType), count, countModifier, offset) 233 | 234 | return data 235 | 236 | #OPENGL type to numpy type str 237 | def _opengl_numpy_type(self,openGL_type): 238 | 239 | if openGL_type == 5123: 240 | return ' 0: 277 | 278 | idx_dict = ascr.sparse.get('indices') 279 | if idx_dict != None: 280 | 281 | idx_compType = idx_dict.get('componentType') 282 | if idx_compType != None: 283 | idx_data_type = self._opengl_numpy_type(idx_compType) 284 | else: 285 | idx_data_type = compType 286 | #get custom byteOffset if there is any 287 | idx_offset = parserUtils.none_to_zero(idx_dict.get('byteOffset')) 288 | idx_bufferViewID = idx_dict.get('bufferView') 289 | idx_buffer_uri = self._get_buffer_uri(idx_bufferViewID) 290 | #get the actual indices for the data substitution 291 | indices = self._get_data(buffer_uri=idx_buffer_uri, bufferViewID= idx_bufferViewID, sceneFile = dataFileName, 292 | compType= idx_data_type, count = sub_count, countModifier = 1,offset=self.currentOffset + self._get_bufferview_offset(idx_bufferViewID) + idx_offset) 293 | indices = indices.flatten() 294 | 295 | #Sparse values 296 | sparse_values_dict = ascr.sparse.get('values') 297 | if sparse_values_dict != None: 298 | sparse_values_bufferViewID = sparse_values_dict.get('bufferView') 299 | sparse_values_offset = parserUtils.none_to_zero(sparse_values_dict.get('byteOffset')) 300 | 301 | if sparse_values_bufferViewID != None: 302 | sparse_values_buffer_uri = self._get_buffer_uri(sparse_values_bufferViewID) 303 | sparse_values = self._get_data(buffer_uri=sparse_values_buffer_uri, bufferViewID= sparse_values_bufferViewID, sceneFile 
= dataFileName, 304 | compType= compType, count = sub_count, countModifier = countModifier, offset=self.currentOffset + self._get_bufferview_offset(sparse_values_bufferViewID) + sparse_values_offset) 305 | else: 306 | sparse_values = np.zeros(shape = (sub_count,countModifier), dtype = compType) 307 | 308 | #data substituion 309 | ascr.data[indices] = sparse_values 310 | 311 | #Textures/Images 312 | for image_idx in range(0, len(self.images)): 313 | 314 | image_uri = self.images[image_idx].uri 315 | if image_uri == None: 316 | self.images[image_idx].data = np.fromfile(dataFileName,dtype = 'B',count = self.bufferViews[self.images[image_idx].bufferView].byteLength, 317 | offset = self.currentOffset + parserUtils.none_to_zero(self.bufferViews[self.images[image_idx].bufferView].byteOffset)) 318 | #self.images[image_idx].file_loc = 'temp/img_' + str(image_idx) + '.png' 319 | #create temp file 320 | if self.images[image_idx].mimeType == 'image/jpeg' or self.images[image_idx].mimeType == 'image/jpg': 321 | fd , self.images[image_idx].file_loc = mkstemp(suffix='.jpg') 322 | elif self.images[image_idx].mimeType == 'image/png': 323 | fd , self.images[image_idx].file_loc = mkstemp(suffix='.png') 324 | os.close(fd) 325 | self.images[image_idx].data.tofile(self.images[image_idx].file_loc) 326 | 327 | elif 'data:image/' in image_uri: 328 | f_ext = 'png' 329 | file_type_str = image_uri[:image_uri.find(';')] 330 | if 'image/jpeg' in file_type_str: 331 | f_ext = 'jpeg' 332 | 333 | elif 'image/bmp' in image_uri: 334 | f_ext = 'bmp' 335 | base64_data = image_uri[image_uri.find(','):] 336 | decoded_data = base64.b64decode(base64_data) 337 | self.images[image_idx].data = np.fromstring(decoded_data, dtype = 'B') 338 | #self.images[image_idx].file_loc = 'temp/img_' + str(image_idx) + f_ext 339 | #create temp file 340 | fd , self.images[image_idx].file_loc = mkstemp(suffix='.png') 341 | os.close(fd) 342 | self.images[image_idx].data.tofile(self.images[image_idx].file_loc) 343 | 344 | else: 345 | 346 | self.images[image_idx].file_loc = os.path.join(self.file_loc, image_uri) 347 | self.images[image_idx].local = True 348 | 349 | def _calculate_node_raw_transforms(self,gltf_node): 350 | _rot_mat=vray.Matrix.identity 351 | _offset=vray.Vector(0,0,0) 352 | _scale_mat=vray.Matrix(vray.Vector(1, 0, 0), vray.Vector(0, 1, 0), vray.Vector(0, 0, 1)) 353 | 354 | updated_Transform = False 355 | if gltf_node.matrixTransform != None: 356 | matrix = vray.Matrix( 357 | vray.Vector(gltf_node.matrixTransform[0], gltf_node.matrixTransform[1], gltf_node.matrixTransform[2]), 358 | vray.Vector(gltf_node.matrixTransform[4], gltf_node.matrixTransform[5], gltf_node.matrixTransform[6]), 359 | vray.Vector(gltf_node.matrixTransform[8], gltf_node.matrixTransform[9], gltf_node.matrixTransform[10]) 360 | ) 361 | offset = vray.Vector(gltf_node.matrixTransform[12], gltf_node.matrixTransform[13], gltf_node.matrixTransform[14]) 362 | 363 | _rot_mat = matrix 364 | _offset = offset 365 | updated_Transform = True 366 | else : 367 | if gltf_node.translation != None: 368 | trans_x = np.clip(gltf_node.translation[0], - 1.0E+18, 1.0E+18) 369 | trans_y = np.clip(gltf_node.translation[1], - 1.0E+18, 1.0E+18) 370 | trans_z = np.clip(gltf_node.translation[2], - 1.0E+18, 1.0E+18) 371 | offset = vray.Vector(trans_x,trans_y,trans_z) 372 | 373 | _offset = offset 374 | 375 | updated_Transform = True 376 | 377 | if gltf_node.scale != None: 378 | scale_x = np.clip(gltf_node.scale[0], - 1.0E+18, 1.0E+18) 379 | scale_y = np.clip(gltf_node.scale[1], - 1.0E+18, 1.0E+18) 
380 | scale_z = np.clip(gltf_node.scale[2], - 1.0E+18, 1.0E+18) 381 | scale = vray.Vector(scale_x,scale_y,scale_z) 382 | 383 | _scale_mat = vray.Matrix(vray.Vector(scale[0],0.0,0.0), vray.Vector(0.0,scale[1],0.0), vray.Vector(0.0,0.0,scale[2])) 384 | 385 | updated_Transform = True 386 | 387 | if gltf_node.rotation != None: 388 | qrot_x = np.clip(gltf_node.rotation[0], - 1.0E+18, 1.0E+18) 389 | qrot_y = np.clip(gltf_node.rotation[1], - 1.0E+18, 1.0E+18) 390 | qrot_z = np.clip(gltf_node.rotation[2], - 1.0E+18, 1.0E+18) 391 | qrot_w = np.clip(gltf_node.rotation[3], - 1.0E+18, 1.0E+18) 392 | 393 | quatRotation=pyq.Quaternion(qrot_w, qrot_x, qrot_y, qrot_z) 394 | matrixRotation=quatRotation.rotation_matrix 395 | 396 | _rot_mat=vray.Matrix( 397 | vray.Vector(matrixRotation[0][0], matrixRotation[1][0], matrixRotation[2][0]), 398 | vray.Vector(matrixRotation[0][1], matrixRotation[1][1], matrixRotation[2][1]), 399 | vray.Vector(matrixRotation[0][2], matrixRotation[1][2], matrixRotation[2][2]) 400 | ) 401 | 402 | updated_Transform = True 403 | 404 | if updated_Transform == True: 405 | return vray.Transform(_rot_mat * _scale_mat, _offset) 406 | 407 | return None 408 | ##tex_coord_ow tuple (tex_cord_accessor_idx, (scalex,scaley)) 409 | #returns a set up TexBitMap() from BitmapBuffer from a gltf texture dictionary 410 | def _make_texture(self, renderer, prim, tex_dict, color_mult = vray.AColor(1,1,1,1),transfer_func = 1,gamma = 1): 411 | if tex_dict != None: 412 | 413 | tex_source_idx = tex_dict.get('index') 414 | if tex_source_idx != None: 415 | wrapU=1 416 | wrapV=1 417 | 418 | samplerIdx=self.textures[tex_source_idx].sampler 419 | if samplerIdx!=None: 420 | gltf_wrapS=self.samplers[samplerIdx].wrapS 421 | if gltf_wrapS==10497: # The texture repeats in U 422 | wrapU=1 423 | elif gltf_wrapS==33071: # Clamp to edge; V-Ray doesn't really have a matching mode, so just disable wrapping 424 | wrapU=0 425 | elif gltf_wrapS== 33648: # Mirrored repeat 426 | wrapU=2 427 | 428 | gltf_wrapT=self.samplers[samplerIdx].wrapT 429 | if gltf_wrapT==10497: # The texture repeats in V 430 | wrapV=1 431 | elif gltf_wrapT==33071: # Clamp to edge; V-Ray doesn't really have a matching mode, so just disable wrapping 432 | wrapV=0 433 | elif gltf_wrapT== 33648: # Mirrored repeat 434 | wrapV=2 435 | 436 | tex_idx = self.textures[tex_source_idx].source 437 | 438 | bmb = renderer.classes.BitmapBuffer() 439 | bmb.file = self.images[tex_idx].file_loc 440 | bmb.transfer_function = transfer_func 441 | #bmb.allow_negative_colors = True 442 | bmb.gamma = gamma 443 | bmb.filter_type=5 # Sharp mip-map filtering 444 | 445 | texture = renderer.classes.TexBitmap() 446 | texture.bitmap = bmb 447 | texture.color_mult = color_mult 448 | 449 | if wrapU!=0 or wrapV!=0: 450 | texture.tile=1 451 | else: 452 | texture.tile=0 453 | 454 | uvw_gen = renderer.classes.UVWGenChannel() 455 | uvw_gen.uvw_channel = -1 456 | uvw_gen.uvw_transform = TEXTURE_FLIP_TRANSFORM 457 | #uvw_gen.use_double_sided_mode = True 458 | uvw_gen.wrap_mode = 0 459 | uvw_gen.wrap_u=wrapU 460 | uvw_gen.wrap_v=wrapV 461 | 462 | tex_uv_channel_idx = tex_dict.get('texCoord') 463 | if tex_uv_channel_idx != None: 464 | uvw_gen.uvw_channel = tex_uv_channel_idx 465 | else: 466 | tex_uv_channel_idx = -1 467 | 468 | tex_source_ext = tex_dict.get('extensions') 469 | if tex_source_ext != None: 470 | for ext in tex_source_ext.keys(): 471 | if ext == "KHR_texture_transform": 472 | tex_trans_ext = tex_source_ext.get("KHR_texture_transform") 473 | 474 | # Figure out the final UVW 
transformation 475 | tex_transform=vray.Transform(vray.Matrix.identity, vray.Vector(0, 0, 0)) 476 | 477 | gltf_tex_offset=tex_trans_ext.get('offset') 478 | if gltf_tex_offset!=None: 479 | tex_transform=tex_transform.replaceOffset(vray.Vector(gltf_tex_offset[0], gltf_tex_offset[1], 0)) 480 | 481 | gltf_tex_rotate=tex_trans_ext.get('rotation') 482 | if gltf_tex_rotate==None: 483 | gltf_tex_rotate=0.0 484 | 485 | gltf_tex_scale=tex_trans_ext.get('scale') 486 | if gltf_tex_scale==None: 487 | gltf_tex_scale=[1.0, 1.0] 488 | 489 | cs=math.cos(-gltf_tex_rotate) 490 | sn=math.sin(-gltf_tex_rotate) 491 | tex_transform=tex_transform.replaceMatrix(vray.Matrix( 492 | vray.Vector(cs, sn, 0)*gltf_tex_scale[0], 493 | vray.Vector(-sn, cs, 0)*gltf_tex_scale[1], 494 | vray.Vector(0, 0, 1) 495 | )) 496 | 497 | # First apply the transfrom from the extension, and then flip the Y direction 498 | uvw_gen.uvw_transform=(uvw_gen.uvw_transform*tex_transform) 499 | 500 | new_tex_coord = tex_trans_ext.get('texCoord') 501 | if new_tex_coord != None: 502 | 503 | meshUvs = vray.VectorList() 504 | 505 | # for uvVal in self.accessors[new_tex_coord].data: 506 | # meshUvs.append(vray.Vector(uvVal[0],uvVal[1],0.0)) 507 | 508 | # if tex_uv_channel_idx != -1: 509 | # prim.vray_node_ref.geometry.map_channels[tex_uv_channel_idx] = meshUvs 510 | # else: 511 | # channels = [] 512 | # channels.append(meshUvs) 513 | # prim.vray_node_ref.geometry.map_channels = channels 514 | 515 | texture.uvwgen = uvw_gen 516 | 517 | return texture 518 | 519 | return None 520 | 521 | def _pbr_metallic_roughness(self,renderer,prim,gltf_pbrmr_mat,brdf): 522 | 523 | gltf_diff_tex = gltf_pbrmr_mat.baseColorTexture 524 | c_mult = gltf_pbrmr_mat.baseColorFactor 525 | 526 | #some default vals 527 | brdf.option_use_roughness = 5 # Use roughness for reflections and refractions 528 | 529 | brdf.reflect = vray.AColor(1,1,1,1) 530 | brdf.reflect_glossiness = 1.0 531 | brdf.refract_glossiness = 1.0 532 | 533 | if gltf_diff_tex != None: 534 | if c_mult != None: 535 | brdf.diffuse = self._make_texture(renderer,prim,gltf_diff_tex,vray.AColor(c_mult[0], c_mult[1], c_mult[2], c_mult[3]), transfer_func = 2) 536 | else: 537 | brdf.diffuse = self._make_texture(renderer,prim,gltf_diff_tex, transfer_func = 2) 538 | else: 539 | if c_mult != None: 540 | dif = renderer.classes.TexAColor() 541 | dif.texture = vray.AColor(c_mult[0], c_mult[1], c_mult[2], c_mult[3]) 542 | brdf.diffuse = dif 543 | 544 | metallic = gltf_pbrmr_mat.metallicFactor 545 | if metallic==None: 546 | metallic=1.0 # The default metallic factor is 1.0 according to the glTF specification 547 | 548 | brdf.metalness = metallic 549 | 550 | roughness = gltf_pbrmr_mat.roughnessFactor 551 | if roughness==None: 552 | roughness=1.0 # The default roughness factor is 1.0 according to the glTF specification 553 | 554 | brdf.reflect_glossiness = roughness 555 | brdf.refract_glossiness = roughness 556 | 557 | metallicRoughness_tex = gltf_pbrmr_mat.metallicRoughnessTexture 558 | if metallicRoughness_tex != None: 559 | metallicRoughness_texture = self._make_texture(renderer,prim,metallicRoughness_tex, transfer_func = 0) 560 | 561 | roughness = parserUtils.none_to_val(roughness,1.0) 562 | metallic = parserUtils.none_to_val(metallic,1.0) 563 | #mults 564 | mr_mult = vray.AColor(1,roughness,metallic,1) #mult metallic and roughness factors 565 | mult_maps = renderer.classes.TexAColorOp() 566 | mult_maps.color_a = metallicRoughness_texture 567 | mult_maps.color_b = mr_mult 568 | 569 | #split 570 | split_maps = 
renderer.classes.TexAColorOp() 571 | split_maps.color_a = mult_maps.product 572 | roughness_map = split_maps.green 573 | metalness_map = split_maps.blue 574 | 575 | if roughness > 1e-8: 576 | brdf.reflect_glossiness = roughness_map 577 | brdf.refract_glossiness = roughness_map 578 | else: 579 | brdf.refract_glossiness = 0.0 580 | 581 | if metallic > 1e-8: 582 | brdf.metalness = metalness_map 583 | else: 584 | brdf.metalness = 0.0 585 | 586 | 587 | # https://github.com/KhronosGroup/glTF/tree/master/extensions/2.0/Khronos/KHR_materials_transmission 588 | # gltf_mat - the extension passed as dictionary (json) 589 | def _create_KHR_materials_transmission(self,renderer,gltf_mat,brdf): 590 | brdf.refract_thin_walled = True 591 | gltf_transmission_texture = gltf_mat.get('transmissionTexture') 592 | if gltf_transmission_texture != None: 593 | gltf_transmission_texture = self._make_texture(renderer,None,gltf_transmission_texture, transfer_func = 0) 594 | 595 | split_tex = renderer.classes.TexAColorOp() 596 | split_tex.color_a = gltf_transmission_texture 597 | 598 | trans_tex = renderer.classes.TexAColorOp() 599 | trans_tex.color_a = brdf.diffuse 600 | trans_tex.mult_a = split_tex.red 601 | 602 | tex_mult = renderer.classes.TexAColorOp() 603 | tex_mult.color_a = trans_tex.result_a 604 | 605 | 606 | gltf_transmission_factor = gltf_mat.get('transmissionFactor') 607 | if gltf_transmission_factor != None: 608 | tex_mult.mult_a = gltf_transmission_factor 609 | else: 610 | tex_mult.mult_a = 1.0 611 | 612 | brdf.refract = tex_mult.result_a 613 | 614 | else: 615 | gltf_transmission_factor = gltf_mat.get('transmissionFactor') 616 | if gltf_transmission_factor != None: 617 | refract_map = renderer.classes.TexAColorOp() 618 | refract_map.color_a = brdf.diffuse 619 | refract_map.color_b = vray.AColor(gltf_transmission_factor,gltf_transmission_factor,gltf_transmission_factor,1) 620 | brdf.refract = refract_map.product 621 | 622 | # https://github.com/KhronosGroup/glTF/tree/master/extensions/2.0/Khronos/KHR_materials_pbrSpecularGlossiness 623 | # gltf_mat - the extension passed as dictionary (json) 624 | def _create_KHR_materials_pbrSpecularGlossiness(self, renderer, prim, gltf_mat, brdf, channel_names): 625 | brdf.option_use_roughness = 0 626 | brdf.fresnel = False # Disable Fresnel as we will use a falloff texture for the reflections 627 | gltf_diff_tex= gltf_mat.get('diffuseTexture') 628 | if gltf_diff_tex != None: 629 | if brdf.diffuse == vray.AColor(0.5,0.5,0.5,1): 630 | brdf.diffuse = self._make_texture(renderer, prim, gltf_diff_tex, transfer_func = 2) 631 | 632 | gltf_diff_factor = gltf_mat.get('diffuseFactor') 633 | if gltf_diff_factor != None: 634 | brdf.diffuse.color_mult = vray.AColor(gltf_diff_factor[0],gltf_diff_factor[1],gltf_diff_factor[2],gltf_diff_factor[3]) 635 | 636 | if gltf_diff_tex == None: 637 | brdf.diffuse = vray.AColor(gltf_diff_factor[0],gltf_diff_factor[1],gltf_diff_factor[2],gltf_diff_factor[3]) 638 | 639 | gltf_specular_factor = gltf_mat.get('specularFactor') 640 | if gltf_specular_factor != None: 641 | brdf.reflect = vray.AColor(gltf_specular_factor[0], gltf_specular_factor[1], gltf_specular_factor[2], 1) 642 | 643 | gltf_gloss_factor = gltf_mat.get('glossinessFactor') 644 | if gltf_gloss_factor != None: 645 | brdf.reflect_glossiness = gltf_gloss_factor 646 | 647 | 648 | gltf_gloss_tex = gltf_mat.get('specularGlossinessTexture') 649 | if gltf_gloss_tex != None: 650 | specgloss_tex = self._make_texture(renderer, prim, gltf_gloss_tex, transfer_func = 2)#, gamma = 2.2) #sRGB for the 
color 651 | 652 | # Create a falloff texture to compute the reflection color. In order to emulate glossy Fresnel, 653 | # the side color is a simple blend between the reflection color and white based on the glossiness. 654 | sidecolor_tex=renderer.classes.TexBlend() 655 | sidecolor_tex.color_a=specgloss_tex 656 | sidecolor_tex.color_b=vray.AColor(1,1,1,1) 657 | 658 | falloff_tex = renderer.classes.TexFresnel() 659 | falloff_tex.fresnel_ior=falloff_tex.refract_ior=brdf.refract_ior 660 | falloff_tex.white_color=specgloss_tex 661 | falloff_tex.black_color=sidecolor_tex 662 | brdf.reflect= falloff_tex 663 | 664 | reflgloss_tex = self._make_texture(renderer, prim, gltf_gloss_tex, transfer_func = 0) #sRGB for the color 665 | 666 | brdf.reflect_glossiness = reflgloss_tex.out_alpha 667 | sidecolor_tex.blend_amount=reflgloss_tex.out_alpha 668 | 669 | # https://github.com/KhronosGroup/glTF/blob/master/extensions/2.0/Khronos/KHR_materials_clearcoat 670 | # gltf_mat - the extension passed as dictionary (json) 671 | def _create_KHR_materials_clearcoat(self, renderer, prim, gltf_mat, brdf): 672 | brdf.coat_color=vray.AColor(1,1,1,1) 673 | brdf.coat_ior=1.5 674 | # Clear coat amount 675 | gltf_clearcoat_texture=gltf_mat.get('clearcoatTexture') 676 | if gltf_clearcoat_texture!=None: 677 | gltf_clearcoat_texture=self._make_texture(renderer, None, gltf_clearcoat_texture, transfer_func = 0) 678 | 679 | split_tex=renderer.classes.TexAColorOp() 680 | split_tex.color_a=gltf_clearcoat_texture 681 | 682 | gltf_clearcoat_factor=gltf_mat.get('clearcoatFactor') 683 | if gltf_clearcoat_factor!=None: 684 | split_tex.mult_a=gltf_clearcoat_factor 685 | else: 686 | split_tex.mult_a=1.0 687 | 688 | brdf.coat_amount=split_tex.red 689 | else: 690 | gltf_clearcoat_factor=gltf_mat.get('clearcoatFactor') 691 | if gltf_clearcoat_factor!=None: 692 | brdf.coat_amount=gltf_clearcoat_factor 693 | 694 | # Clear coat roughness 695 | gltf_clearcoat_roughness_texture=gltf_mat.get('clearcoatRoughnessTexture') 696 | if gltf_clearcoat_roughness_texture!=None: 697 | gltf_clearcoat_roughness_texture=self._make_texture(renderer, None, gltf_clearcoat_roughness_texture, transfer_func = 0) 698 | 699 | split_tex=renderer.classes.TexAColorOp() 700 | split_tex.color_a=gltf_clearcoat_roughness_texture 701 | 702 | gltf_clearcoat_roughness_factor=gltf_mat.get('clearcoatRoughnessFactor') 703 | if gltf_clearcoat_roughness_factor!=None: 704 | split_tex.mult_a=gltf_clearcoat_roughness_factor 705 | else: 706 | split_tex.mult_a=1.0 707 | 708 | brdf.coat_glossiness=split_tex.green 709 | else: 710 | gltf_clearcoat_roughness_factor=gltf_mat.get('clearcoatRoughnessFactor') 711 | if gltf_clearcoat_roughness_factor!=None: 712 | brdf.coat_glossiness=gltf_clearcoat_roughness_factor 713 | 714 | # Coat normals 715 | clearcoat_normal_tex=self._make_texture(renderer, prim, gltf_mat.get('clearcoatNormalTexture'), transfer_func=0) 716 | if clearcoat_normal_tex!=None: 717 | flipGreen_tex=renderer.classes.TexNormalMapFlip() 718 | flipGreen_tex.texmap=clearcoat_normal_tex 719 | flipGreen_tex.flip_green=True 720 | brdf.coat_bump_map=flipGreen_tex 721 | brdf.coat_bump_type=1 722 | 723 | # https://github.com/KhronosGroup/glTF/blob/master/extensions/2.0/Khronos/KHR_materials_sheen 724 | # gltf_mat - the extension passed as dictionary (json) 725 | def _create_KHR_materials_sheen(self, renderer, prim, gltf_mat, brdf): 726 | brdf.sheen_color=vray.AColor(0,0,0,1) 727 | brdf.sheen_glossiness=0.0 728 | 729 | # Sheen amount 730 | gltf_sheen_texture=gltf_mat.get('sheenColorTexture') 
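# sheenColorTexture below is sampled as sRGB (transfer_func = 2) and takes precedence;
# sheenColorFactor then acts as a color multiplier on it, or is used directly as the
# sheen color when no texture is present.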
731 | if gltf_sheen_texture!=None: 732 | gltf_sheen_texture=self._make_texture(renderer, None, gltf_sheen_texture, transfer_func = 2) 733 | brdf.sheen_color=gltf_sheen_texture 734 | 735 | gltf_sheen_factor=gltf_mat.get('sheenColorFactor') 736 | if gltf_sheen_factor!=None: 737 | brdf.sheen_color.color_mult=vray.Color(gltf_sheen_factor[0], gltf_sheen_factor[1], gltf_sheen_factor[2]) 738 | else: 739 | gltf_sheen_factor=gltf_mat.get('sheenColorFactor') 740 | if gltf_sheen_factor!=None: 741 | brdf.sheen_color=vray.Color(gltf_sheen_factor[0], gltf_sheen_factor[1], gltf_sheen_factor[2]) 742 | 743 | # Sheen roughness 744 | gltf_sheen_roughness_texture=gltf_mat.get('sheenRoughnessTexture') 745 | if gltf_sheen_roughness_texture!=None: 746 | gltf_sheen_roughness_texture=self._make_texture(renderer, None, gltf_sheen_roughness_texture, transfer_func = 0) 747 | 748 | split_tex=renderer.classes.TexAColorOp() 749 | split_tex.color_a=gltf_sheen_roughness_texture 750 | 751 | gltf_sheen_roughness_factor=gltf_mat.get('sheenRoughnessFactor') 752 | if gltf_sheen_roughness_factor!=None: 753 | split_tex.mult_a=gltf_sheen_roughness_factor 754 | else: 755 | split_tex.mult_a=1.0 756 | 757 | brdf.sheen_glossiness=split_tex.alpha 758 | else: 759 | gltf_sheen_roughness_factor=gltf_mat.get('sheenRoughnessFactor') 760 | if gltf_sheen_roughness_factor!=None: 761 | brdf.sheen_glossiness=gltf_sheen_roughness_factor 762 | 763 | def _create_material(self, renderer, prim, channel_names): 764 | gltf_mat = self.materials[prim.material] 765 | 766 | material = renderer.classes.MtlSingleBRDF() 767 | #material.double_sided=True 768 | brdf = renderer.classes.BRDFVRayMtl() 769 | 770 | # fresnel should be true at all times 771 | brdf.fresnel = True 772 | brdf.option_glossy_fresnel = True # Glossy Fresnel produces a better result for rough reflective surfaces 773 | brdf.refract_ior = 1.5 # glTF uses IOR 1.5 by default 774 | brdf.reflect_depth = self.trace_depth 775 | brdf.refract_depth = self.trace_depth 776 | 777 | def_uvw_gen = renderer.classes.UVWGenChannel() 778 | def_uvw_gen.uvw_channel = -1 779 | def_uvw_gen.uvw_transform = TEXTURE_FLIP_TRANSFORM 780 | 781 | #double sided 782 | if gltf_mat.doubleSided != None: 783 | material.double_sided = gltf_mat.doubleSided 784 | brdf.option_double_sided = gltf_mat.doubleSided 785 | 786 | #pbrMetallicRoughness 787 | if gltf_mat.pbrMetallicRoughness!=None: 788 | self._pbr_metallic_roughness(renderer,prim,gltf_mat.pbrMetallicRoughness,brdf) 789 | 790 | # Extensions should be handled early as they may contains the diffuse map which we need to opacity and occlusion 791 | if gltf_mat.extensions != None: 792 | for gltf_ext in gltf_mat.extensions.keys(): 793 | if gltf_ext == 'KHR_materials_pbrSpecularGlossiness': 794 | self._create_KHR_materials_pbrSpecularGlossiness(renderer, prim, gltf_mat.extensions.get('KHR_materials_pbrSpecularGlossiness'), brdf, channel_names) 795 | if gltf_ext == 'KHR_materials_transmission': 796 | self._create_KHR_materials_transmission(renderer, gltf_mat.extensions.get('KHR_materials_transmission'), brdf) 797 | if gltf_ext=='KHR_materials_clearcoat': 798 | self._create_KHR_materials_clearcoat(renderer, prim, gltf_mat.extensions.get('KHR_materials_clearcoat'), brdf) 799 | if gltf_ext=='KHR_materials_sheen': 800 | self._create_KHR_materials_sheen(renderer, prim, gltf_mat.extensions.get('KHR_materials_sheen'), brdf) 801 | 802 | # Apply vertex color to the diffuse texture 803 | applyVertexColor(renderer, brdf, channel_names) 804 | 805 | #normal texture 806 | norm_tex = 
self._make_texture(renderer, prim, gltf_mat.normalTexture, transfer_func=0) 807 | if norm_tex != None: 808 | norm_scale = gltf_mat.normalTexture.get('scale') 809 | if norm_scale != None: 810 | norm_tex.color_mult = vray.AColor(norm_scale,norm_scale,norm_scale,norm_scale) 811 | 812 | flipGreen_tex=renderer.classes.TexNormalMapFlip() 813 | flipGreen_tex.texmap=norm_tex 814 | flipGreen_tex.flip_green=True 815 | brdf.bump_map=flipGreen_tex 816 | brdf.bump_type=1 817 | 818 | #emissive tex 819 | emissive_tex = self._make_texture(renderer,prim,gltf_mat.emissiveTexture, transfer_func = 2) 820 | if emissive_tex != None: 821 | emissive_factor = gltf_mat.emissiveFactor 822 | if emissive_factor != None: 823 | emissive_tex.color_mult = vray.AColor(emissive_factor[0], emissive_factor[1], emissive_factor[2], 1) 824 | 825 | brdf.self_illumination = emissive_tex 826 | # brdf.self_illumination_gi=True 827 | 828 | #occlusion tex 829 | useOcclusion = False 830 | if useOcclusion: 831 | occl_tex = self._make_texture(renderer,prim,gltf_mat.occlusionTexture, transfer_func = 0) 832 | if occl_tex != None: 833 | 834 | occl_scale = gltf_mat.occlusionTexture.get('strength') 835 | if occl_scale != None: 836 | occl_tex.color_mult = vray.AColor(occl_scale, occl_scale, occl_scale, occl_scale) 837 | 838 | split_maps = renderer.classes.TexAColorOp() 839 | split_maps.color_a = occl_tex 840 | if brdf.diffuse.getType()=="TexBitmap": 841 | brdf.diffuse.color_mult = split_maps.red 842 | 843 | #alpha 844 | if gltf_mat.alphaMode == "BLEND": 845 | alpha_split = renderer.classes.TexAColorOp() 846 | alpha_split.color_a = brdf.diffuse 847 | if self.thick_glass: 848 | # Use the opacity as inverse refraction color 849 | refract_tex = renderer.classes.TexFloatToColor() 850 | refract_tex.input=alpha_split.alpha 851 | refract_tex.invert=True 852 | brdf.refract = refract_tex 853 | brdf.refract_affect_shadows = True 854 | elif self.thin_glass: 855 | # Use the opacity as inverse refraction color and thin-walled mode 856 | refract_tex = renderer.classes.TexFloatToColor() 857 | refract_tex.input=alpha_split.alpha 858 | refract_tex.invert=True 859 | brdf.refract = refract_tex 860 | brdf.refract_thin_walled = True 861 | brdf.refract_affect_shadows = True 862 | else: 863 | # Use opacity as is 864 | brdf.opacity = alpha_split.alpha 865 | 866 | if gltf_mat.alphaMode == "MASK": 867 | 868 | alpha_split = renderer.classes.TexAColorOp() 869 | alpha_split.color_a = brdf.diffuse 870 | 871 | cutoff_tex = renderer.classes.TexCondition() 872 | cutoff_tex.op_a = alpha_split.alpha 873 | 874 | if gltf_mat.alphaCutoff != None: 875 | cutoff_tex.op_b = gltf_mat.alphaCutoff 876 | else: 877 | cutoff_tex.op_b = 0.5 878 | 879 | cutoff_tex.result_true = vray.AColor(1,1,1,1) 880 | cutoff_tex.result_false = vray.AColor(0,0,0,0) 881 | 882 | cutoff_tex.operation = 2 #(greater than) 883 | 884 | alpha_cutoff_split = renderer.classes.TexAColorOp() 885 | alpha_cutoff_split.color_a = cutoff_tex.color 886 | brdf.opacity = alpha_cutoff_split.alpha 887 | 888 | material.brdf = brdf 889 | 890 | return material 891 | 892 | def _create_vray_node(self, renderer, gltf_node): 893 | #important so when traversting the nodes again in scene update for animations we do not duplicate nodes 894 | gltf_node.vray_node_created = True 895 | 896 | if gltf_node.mesh != None: 897 | mesh = self.meshes[gltf_node.mesh] 898 | 899 | for prim in mesh.primitives: 900 | 901 | #Do not create new node for geometry if its already created 902 | node = renderer.classes.Node() 903 | prim.vray_node_ref = node 904 | 
node.transform = gltf_node.transform 905 | 906 | #Attributes 907 | meshVerts = vray.VectorList() 908 | meshNormals = vray.VectorList() 909 | meshFaces = vray.IntList() 910 | mesh_uvw1 = vray.VectorList() 911 | mesh_uvw1_faces = vray.IntList() 912 | 913 | # Check for morph targets, so we can skip unnecessary iterations and checks 914 | 915 | #pos morph targets 916 | if prim.targets != None: 917 | morph_pos_vals = np.zeros((len(self.accessors[prim.attributes.get('POSITION')].data), 3),dtype = float) 918 | for t_idx in range(0,len(prim.targets)): 919 | 920 | pos_idx = prim.targets[t_idx].get('POSITION') 921 | if pos_idx != None: 922 | try: 923 | morph_pos_vals = self.accessors[pos_idx].data*mesh.weights[t_idx] + morph_pos_vals 924 | except: 925 | #no weights given for these morph targets assume 1.0 926 | morph_pos_vals = self.accessors[pos_idx].data + morph_pos_vals 927 | 928 | pos_vals = morph_pos_vals + self.accessors[prim.attributes.get('POSITION')].data 929 | for posVal in pos_vals: 930 | meshVerts.append(vray.Vector(posVal[0],posVal[1],posVal[2])) 931 | 932 | self._get_camera_pos_data(gltf_node.transform * vray.Vector(posVal[0],posVal[1],posVal[2])) 933 | 934 | #Need numbers of verts to avarage scene pos 935 | self.scene_verts = self.scene_verts + len(meshVerts) 936 | else: 937 | for posVal in self.accessors[prim.attributes.get('POSITION')].data: 938 | meshVerts.append(vray.Vector(posVal[0],posVal[1],posVal[2])) 939 | 940 | self._get_camera_pos_data(gltf_node.transform * vray.Vector(posVal[0],posVal[1],posVal[2])) 941 | 942 | self.scene_verts = self.scene_verts + len(meshVerts) 943 | #Doing normals morph targets away from pos targets so numpy ndarray for positions loses all references so we can free the memory through garbage collector 944 | if prim.attributes.get('NORMAL') != None: 945 | 946 | if prim.targets != None: 947 | morph_normal_vals = np.zeros((len(self.accessors[prim.attributes.get('NORMAL')].data), 3),dtype = float) 948 | for t_idx in range(0,len(prim.targets)): 949 | 950 | pos_idx = prim.targets[t_idx].get('NORMAL') 951 | if pos_idx != None: 952 | try: 953 | morph_normal_vals = self.accessors[pos_idx].data*mesh.weights[t_idx] + morph_normal_vals 954 | except IndexError: 955 | #no weights given for these morph targets assume 1.0 956 | morph_normal_vals = self.accessors[pos_idx].data + morph_normal_vals 957 | 958 | normal_vals = morph_normal_vals + self.accessors[prim.attributes.get('NORMAL')].data 959 | for normVal in normal_vals: 960 | meshNormals.append(vray.Vector(normVal[0],normVal[1],normVal[2])) 961 | else: 962 | for normVal in self.accessors[prim.attributes.get('NORMAL')].data: 963 | meshNormals.append(vray.Vector(normVal[0],normVal[1],normVal[2])) 964 | 965 | if prim.indices != None: 966 | for scalVal in self.accessors[prim.indices].data: 967 | meshFaces.append(int(scalVal)) 968 | 969 | channels = [] 970 | channel_names = [] 971 | texC_idx = 0 972 | 973 | # Add UVW channels 974 | while True: 975 | key = 'TEXCOORD_' + str(texC_idx) 976 | if prim.attributes.get(key) == None: 977 | break 978 | 979 | meshUvs = vray.VectorList() 980 | for uvVal in self.accessors[prim.attributes[key]].data: 981 | meshUvs.append(vray.Vector(uvVal[0],uvVal[1],0.0)) 982 | 983 | uvw_channel = [] 984 | uvw_channel.append(texC_idx) 985 | uvw_channel.append(meshUvs) 986 | uvw_channel.append(meshFaces) 987 | channels.append(uvw_channel) 988 | channel_names.append(key) 989 | 990 | texC_idx += 1 991 | 992 | # Add vertex color channels 993 | vertexColor_idx=0 994 | while True: 995 | 
key='COLOR_'+str(vertexColor_idx) 996 | if prim.attributes.get(key)==None: 997 | break 998 | meshColors=vray.VectorList() 999 | for colVal in self.accessors[prim.attributes[key]].data: 1000 | meshColors.append(vray.Vector(colVal[0], colVal[1], colVal[2])) 1001 | 1002 | color_channel = [] 1003 | color_channel.append(texC_idx+vertexColor_idx) 1004 | color_channel.append(meshColors) 1005 | color_channel.append(meshFaces) 1006 | channels.append(color_channel) 1007 | channel_names.append(key) 1008 | 1009 | vertexColor_idx += 1 1010 | 1011 | geometry = renderer.classes.GeomStaticMesh() 1012 | geometry.vertices = meshVerts 1013 | 1014 | # Only export normals if they are specified; if we export a zero-length array, the mesh 1015 | # will not render correctly. 1016 | if len(meshNormals)>0: 1017 | geometry.normals = meshNormals 1018 | 1019 | geometry.faces = meshFaces 1020 | geometry.map_channels = channels 1021 | geometry.map_channels_names = channel_names 1022 | 1023 | node.geometry = geometry 1024 | 1025 | if self.use_only_default_mat != True: 1026 | if prim.material != None: 1027 | node.material = self._create_material(renderer, prim, channel_names) 1028 | else: 1029 | mat = renderer.classes.MtlSingleBRDF() 1030 | mat.brdf = renderer.classes.BRDFVRayMtl() 1031 | mat.double_sided= True 1032 | node.material = mat 1033 | else: 1034 | testUtils._set_testing_material(renderer, node) 1035 | 1036 | if gltf_node.camera != None: 1037 | 1038 | self.has_camera = True 1039 | gltf_camera = self.cameras[gltf_node.camera] 1040 | renderView = renderer.classes.RenderView() 1041 | 1042 | renderView.transform = gltf_node.transform 1043 | 1044 | self.camTransform = gltf_node.transform 1045 | wind_size = renderer.size 1046 | if gltf_camera.camera_type == 'perspective': 1047 | #calculate horizontal FOV for vray 1048 | renderView.fov = 2.0*math.atan((0.5*wind_size[0]) / ( 0.5*wind_size[1] / math.tan(parserUtils.none_to_val(gltf_camera.yfov,1.0)))) 1049 | 1050 | elif gltf_camera.camera_type == 'orthographic': 1051 | renderView.orthographic = True 1052 | if gltf_camera.ymag != None: 1053 | renderView.orthographicWidth = gltf_camera.ymag 1054 | 1055 | if gltf_camera.zfar != None: 1056 | # renderView.clipping = True 1057 | renderView.clipping_far = gltf_camera.zfar 1058 | if gltf_camera.znear != None: 1059 | # renderView.clipping = True 1060 | renderView.clipping_near = gltf_camera.znear 1061 | 1062 | if gltf_node.extensions != None: 1063 | 1064 | # Lights 1065 | light_ext = gltf_node.extensions.get('KHR_lights_punctual') 1066 | if light_ext != None: 1067 | 1068 | light_idx = light_ext.get('light') 1069 | 1070 | if light_idx != None: 1071 | gltf_light = self.lights[light_idx] 1072 | 1073 | # Create the vray light node 1074 | if gltf_light.mtype == 'spot': 1075 | v_light = renderer.classes.LightSpot() 1076 | v_light.units = 2 # Candela (cd=lm/sr) as units 1077 | 1078 | inner_c_a = gltf_light.spot_attr.get('innerConeAngle') 1079 | outer_c_a = gltf_light.spot_attr.get('outerConeAngle') 1080 | 1081 | # Note that in V-Ray, the entire angle is measured (it's a diameter), 1082 | # not just the half angle from the center of the hotspot, so we need to multiply by 2.0 1083 | if outer_c_a != None: 1084 | v_light.coneAngle = outer_c_a*2.0 1085 | else: 1086 | v_light.coneAngle = math.pi/4.0 1087 | if inner_c_a != None: 1088 | #negative to start inside the spot cone 1089 | v_light.penumbraAngle = (inner_c_a - outer_c_a)*2.0 1090 | else: 1091 | v_light.penumbraAngle = 0 1092 | 1093 | elif gltf_light.mtype == 'point': 1094 | v_light = 
renderer.classes.LightOmni() 1095 | v_light.units = 2 # Candela (cd=lm/sr) as units 1096 | 1097 | elif gltf_light.mtype == 'directional': 1098 | v_light = renderer.classes.MayaLightDirect() 1099 | v_light.units = 2 # Lux (lx=lm/m/m as units) 1100 | 1101 | #Color 1102 | if gltf_light.color != None: 1103 | v_light.color = vray.AColor(gltf_light.color[0], gltf_light.color[1], gltf_light.color[2],1.0) 1104 | else: 1105 | v_light.color = vray.AColor(1.0, 1.0, 1.0,1.0) 1106 | 1107 | #Intensity 1108 | if gltf_light.intensity != None: 1109 | v_light.intensity = gltf_light.intensity 1110 | else: 1111 | v_light.intensity = 1.0 1112 | 1113 | #TODO: VRay light distance cutoff? 1114 | 1115 | #transforms 1116 | v_light.transform = gltf_node.transform 1117 | 1118 | #Geom update for morph target update 1119 | def _update_node_geom(self,renderer,gltf_node): 1120 | if gltf_node.mesh != None: 1121 | mesh = self.meshes[gltf_node.mesh] 1122 | 1123 | for prim in mesh.primitives: 1124 | 1125 | #POSITION MORPH TARGETS 1126 | if prim.targets != None: 1127 | if prim.attributes.get('POSITION') != None: 1128 | 1129 | meshVerts = vray.VectorList() 1130 | morph_pos_vals = np.zeros((len(self.accessors[prim.attributes.get('POSITION')].data), 3),dtype = float) 1131 | 1132 | for t_idx in range(0,len(prim.targets)): 1133 | pos_idx = prim.targets[t_idx].get('POSITION') 1134 | if pos_idx != None: 1135 | try: 1136 | morph_pos_vals = self.accessors[pos_idx].data*mesh.weights[t_idx] + morph_pos_vals 1137 | except IndexError: 1138 | #no weights given for these morph targets assume 1.0 1139 | morph_pos_vals = self.accessors[pos_idx].data + morph_pos_vals 1140 | 1141 | pos_vals = morph_pos_vals + self.accessors[prim.attributes.get('POSITION')].data 1142 | for posVal in pos_vals: 1143 | meshVerts.append(vray.Vector(posVal[0],posVal[1],posVal[2])) 1144 | #update the actual vray node geometry 1145 | prim.vray_node_ref.geometry.vertices = meshVerts 1146 | 1147 | #NORMAL MORPH TARGETS 1148 | if prim.attributes.get('NORMAL') != None: 1149 | 1150 | meshNormals = vray.VectorList() 1151 | morph_normal_vals = np.zeros((len(self.accessors[prim.attributes.get('NORMAL')].data), 3),dtype = float) 1152 | 1153 | for t_idx in range(0,len(prim.targets)): 1154 | 1155 | pos_idx = prim.targets[t_idx].get('NORMAL') 1156 | if pos_idx != None: 1157 | try: 1158 | morph_normal_vals = self.accessors[pos_idx].data*mesh.weights[t_idx] + morph_normal_vals 1159 | 1160 | except IndexError: 1161 | #no weights given for these morph targets assume 1.0 1162 | morph_normal_vals = self.accessors[pos_idx].data + morph_normal_vals 1163 | 1164 | normal_vals = morph_normal_vals + self.accessors[prim.attributes.get('NORMAL')].data 1165 | for normVal in normal_vals: 1166 | meshNormals.append(vray.Vector(normVal[0],normVal[1],normVal[2])) 1167 | #update the actual vray node geometry 1168 | prim.vray_node_ref.geometry.normals = meshNormals 1169 | 1170 | #called for nodes with meshes, as we create vray node for every primitive Geometry 1171 | def _update_nodes(self,renderer,gltf_node): 1172 | if gltf_node.mesh != None: 1173 | mesh = self.meshes[gltf_node.mesh] 1174 | 1175 | for prim in mesh.primitives: 1176 | prim.vray_node_ref.transform = gltf_node.transform 1177 | 1178 | def _traverse_nodes(self, renderer, gltf_node): 1179 | #calculate transform for current node 1180 | gltf_node.local_transform = self._calculate_node_raw_transforms(gltf_node) 1181 | 1182 | #if there is no transform just inherit the previous one so it can be passed to children 1183 | if 
gltf_node.local_transform != None: 1184 | gltf_node.transform = vray.Transform(gltf_node.transform.matrix * gltf_node.local_transform.matrix,gltf_node.transform.matrix*gltf_node.local_transform.offset + gltf_node.transform.offset) 1185 | 1186 | #create and init the vray node, if it is already created updater all primitive geometry nodes under it 1187 | if gltf_node.vray_node_created == True: 1188 | self._update_nodes(renderer,gltf_node) 1189 | else: 1190 | self._create_vray_node(renderer,gltf_node) 1191 | 1192 | #traverse children recursively, children inherit root node transform as per gltf documentation 1193 | if gltf_node.children != None: 1194 | for child_idx in gltf_node.children: 1195 | self.nodes[child_idx].transform = gltf_node.transform 1196 | self._traverse_nodes(renderer, self.nodes[child_idx]) 1197 | 1198 | #called once in the first init of the scene 1199 | def _init_scene(self,renderer): 1200 | for scene in self.scenes: 1201 | for root_node in scene.nodes: 1202 | self._traverse_nodes(renderer, self.nodes[root_node]) 1203 | for anim in self.animations: 1204 | anim.init(self.accessors) 1205 | 1206 | 1207 | def _update_scene(self,renderer): 1208 | for node in self.nodes: 1209 | #reset node transforms for full recalculations as in APPSDK node() does not have children 1210 | node.transform = vray.Transform(vray.Matrix.identity, vray.Vector(0, 0, 0)) 1211 | self._update_node_geom(renderer,node) 1212 | for scene in self.scenes: 1213 | for root_node in scene.nodes: 1214 | self._traverse_nodes(renderer, self.nodes[root_node]) 1215 | for node in self.nodes: 1216 | for val in node.transform_change: 1217 | val = None 1218 | 1219 | def _parse_json_data(self,fileData): 1220 | parserUtils._parse_json_part(fileData,self.buffers,'buffers',parserUtils.Buffer.fromDict) 1221 | parserUtils._parse_json_part(fileData,self.bufferViews,'bufferViews',parserUtils.BufferView.fromDict) 1222 | parserUtils._parse_json_part(fileData,self.accessors,'accessors',parserUtils.Accessor.fromDict) 1223 | parserUtils._parse_json_part(fileData,self.nodes,'nodes',parserUtils.Node.fromDict, errorwarn = False) 1224 | parserUtils._parse_json_part(fileData,self.meshes,'meshes',parserUtils.Mesh.fromDict, errorwarn = False) 1225 | parserUtils._parse_json_part(fileData,self.materials,'materials',parserUtils.Material.fromDict, errorwarn = False) 1226 | parserUtils._parse_json_part(fileData,self.scenes,'scenes',parserUtils.Scene.fromDict) 1227 | parserUtils._parse_json_part(fileData,self.images,'images',parserUtils.Image.fromDict, errorwarn = False) 1228 | parserUtils._parse_json_part(fileData,self.textures,'textures',parserUtils.Texture.fromDict, errorwarn = False) 1229 | parserUtils._parse_json_part(fileData,self.samplers,'samplers',parserUtils.Sampler.fromDict, errorwarn = False) 1230 | parserUtils._parse_json_part(fileData,self.cameras,'cameras',parserUtils.Camera.fromDict, errorwarn = False) 1231 | 1232 | parserUtils._parse_json_part(fileData,self.animations,'animations',parserUtils.Animation.fromDict, errorwarn = True) 1233 | # get longest animation by its input global time incase user does not define animation time or number of frames to render 1234 | for anim in self.animations: 1235 | anim.calc_max_time(self.accessors) 1236 | self.animation_time = max(anim.anim_time,self.animation_time) 1237 | #important call to setup weight animation 1238 | 1239 | if len(self.cameras) < 1: 1240 | self.has_camera = False 1241 | dicts = fileData.get('extensionsUsed') 1242 | if dicts != None: 1243 | for ext in dicts: 1244 | 
self.extensions.append(ext) 1245 | 1246 | exts = fileData.get('extensions') 1247 | if exts != None: 1248 | light_ext = exts.get('KHR_lights_punctual') 1249 | if light_ext != None: 1250 | lights = light_ext.get('lights') 1251 | if lights != None: 1252 | for light in lights: 1253 | self.lights.append(parserUtils.Light.fromDict(light)) 1254 | 1255 | def parseScene(self,file_name = '', vrenderer = None, dumpToJson = False, jsonFileName = 'jsonDump.txt'): 1256 | #get files location for external .bin files 1257 | self.file_loc = os.path.dirname(os.path.abspath(file_name)) 1258 | 1259 | self.currentOffset = 0 1260 | 1261 | if file_name.endswith('.gltf'): 1262 | 1263 | self.fileType = 'gltf' 1264 | 1265 | with open(file_name) as gltf_scene: 1266 | 1267 | fileData = json.load(gltf_scene) 1268 | self._parse_json_data(fileData) 1269 | 1270 | elif file_name.endswith('glb'): 1271 | self.fileType = 'glb' 1272 | #GLB Header [0] ASCII string 'gltf',[1] version,[2] - length in bytes 1273 | GLB_header = np.fromfile(file_name, dtype=' 0.9995: 88 | res = pyq.Quaternion() 89 | res = res.unit 90 | return res 91 | 92 | theta_0 = math.acos(quat_dot) 93 | theta = theta_0 * t 94 | sin_theta = math.sin(theta) 95 | sin_theta_0 = math.sin(theta_0) 96 | 97 | s1 = math.cos(theta) - quat_dot*sin_theta/sin_theta_0 98 | s2 = sin_theta / sin_theta_0 99 | 100 | return (s1 * quat1) + (s2 * quat2) 101 | 102 | def get_lerp_time(c_time,t1,t2): 103 | return np.clip((c_time - t1) / (t2 - t1),0.0,1.0) 104 | 105 | # Cubic Hermite spline 106 | # t is [0;1] 107 | # delta_time is the time between 2 keyframes, used to scale the tangents 108 | def _spline(previousPoint,previousTangent, nextPoint, nextTangent, t = 0, delta_time = 0): 109 | previousTangent = previousTangent * delta_time 110 | nextTangent = nextTangent * delta_time 111 | 112 | t2 = t*t 113 | t3 = t2*t 114 | 115 | return (2 * t3 - 3 * t2 + 1) * previousPoint + (t3 - 2 * t2 + t) * previousTangent + (-2 * t3 + 3 * t2) * nextPoint + (t3 - t2) * nextTangent; 116 | 117 | class Accessor: 118 | 119 | def __init__(self, bufferViewID, byteOffset = None, dataType = None, compType = None, count = None, dataMin = None, dataMax = None, sparse_dict = None): 120 | self.bufferViewID = bufferViewID 121 | self.byteOffset = byteOffset 122 | self.type = dataType 123 | self.compType = compType 124 | self.count = count 125 | self.min = dataMin 126 | self.max = dataMax 127 | self.sparse = sparse_dict 128 | 129 | #data set throgh parsing to not repeat the same checks twice 130 | #data filled from the apropriate bin file 131 | self.data = [] 132 | 133 | 134 | @classmethod 135 | def fromDict(cls,dict): 136 | return cls(dict.get('bufferView'), 137 | dict.get('byteOffset'), 138 | dict.get('type'), 139 | dict.get('componentType'), 140 | dict.get('count'), 141 | dict.get('min'), 142 | dict.get('max'), 143 | dict.get('sparse')) 144 | 145 | #ONLY FOR DEBUGGING PURPOSES, STR() IS SLOW 146 | def __str__(self): 147 | return ("bufferView : " + str(self.bufferViewID) + "\n" + 148 | "byteOffset : " + str(self.byteOffset) + "\n" + 149 | "type : " + str(self.type) + "\n" + 150 | "componentType : " + str(self.compType) + "\n" + 151 | "count : " + str(self.count) + "\n" + 152 | "min : " + str(self.min) + "\n" + 153 | "max : " + str(self.max) + "\n") 154 | 155 | 156 | class SparseAccessor: 157 | 158 | def __init__(self,type,componentType,count,sparseDataCount,valuesBufferView,indicesBufferView,indicesComponentType): 159 | pass 160 | 161 | class BufferView: 162 | 163 | def 
__init__(self,bufferID,byteOffset,byteLength,byteStride,target): 164 | self.bufferID = bufferID 165 | self.byteOffset = byteOffset 166 | self.byteLength = byteLength 167 | self.byteStride = byteStride 168 | self.target = target 169 | 170 | 171 | @classmethod 172 | def fromDict(cls,dict): 173 | return cls(dict.get('buffer'), 174 | dict.get('byteOffset'), 175 | dict.get('byteLength'), 176 | dict.get('byteStride'), 177 | dict.get('target')) 178 | 179 | def __str__(self): 180 | return ("buffer : " + str(self.bufferID) + "\n" + 181 | "byteOffset : " + str(self.byteOffset) + "\n" + 182 | "byteLength : " + str(self.byteLength) + "\n" + 183 | "byteStride : " + str(self.byteStride) + "\n" + 184 | "target : " + str(self.target) + "\n") 185 | 186 | class Buffer: 187 | 188 | def __init__(self,byteLength,uri): 189 | self.byteLength = byteLength 190 | self.uri = uri 191 | 192 | @classmethod 193 | def fromDict(cls,dict): 194 | return cls(dict.get('byteLength'),dict.get('uri')) 195 | 196 | def __str__(self): 197 | return ("byteLength : " + str(self.byteLength) + "\n" + 198 | "uri : " + str(self.uri) + "\n") 199 | 200 | class Scene: 201 | 202 | def __init__(self,sceneName,nodes = None): 203 | self.sceneName = sceneName 204 | self.nodes = nodes 205 | 206 | @classmethod 207 | def fromDict(cls,dict): 208 | return cls(dict.get('name'),dict.get('nodes')) 209 | 210 | def __str__(self): 211 | return ('name : ' + str(self.sceneName) + '\n'+ 'nodes : ' + str(self.nodes) + '\n') 212 | 213 | class Node: 214 | 215 | def __init__(self,children = None, matrixTransform = None,translation = vray.Vector(0.0,0.0,0.0), rotation = vray.Vector(0.0,0.0,0.0), scale = vray.Vector(0.0,0.0,0.0), 216 | mesh = None, camera = None , name = None, skin = None,extensions = None): 217 | self.children = children 218 | self.matrixTransform = matrixTransform 219 | self.translation = translation 220 | self.rotation = rotation 221 | self.scale = scale 222 | self.mesh = mesh 223 | self.camera = camera 224 | self.name = name 225 | self.skin = skin 226 | self.extensions = extensions 227 | 228 | self.vray_node_created = False 229 | 230 | # Needed for accumulating Transforms through node tree as APPSDK has no nodeTree 231 | # data set throgh parsing to not repeat the same checks twice 232 | self.parentNode_idx = None 233 | self.transform = vray.Transform(vray.Matrix.identity,vray.Vector(0,0,0)) 234 | self.local_transform = vray.Transform(vray.Matrix.identity,vray.Vector(0,0,0)) 235 | 236 | #Should be a tuple of (rot,scale,offset) /// Still testing if multiple animations of the same type can target the same Node 237 | self.transform_change = [None,None,None] 238 | 239 | @classmethod 240 | def fromDict(cls,dict): 241 | return cls(dict.get('children'), 242 | dict.get('matrix'), 243 | dict.get('translation'), 244 | dict.get('rotation'), 245 | dict.get('scale'), 246 | dict.get('mesh'), 247 | dict.get('camera'), 248 | dict.get('name'), 249 | dict.get('skin'), 250 | dict.get('extensions')) 251 | 252 | def __str__(self): 253 | return ("children : " + str(self.children) + "\n" + 254 | "matrix : " + str(self.matrixTransform) + "\n" + 255 | "translation : " + str(self.translation) + "\n" + 256 | "rotation : " + str(self.rotation) + "\n" + 257 | "scale : " + str(self.scale) + "\n" + 258 | "mesh : " + str(self.mesh) + "\n" + 259 | "camera : " + str(self.camera) + "\n" + 260 | "name : " + str(self.name) + "\n" + 261 | "extensions : " + str(self.extensions) + "\n" + 262 | "skin : " + str(self.skin) + "\n") 263 | 264 | class Primitive: 265 | ##Attributes is a dict 
with POSITION and NORMAL 266 | def __init__(self, node = None, indices = None, attributes = None, material = None, targets = None): 267 | self.node = node 268 | self.indices = indices 269 | self.attributes = attributes 270 | self.material = material 271 | self.targets = targets 272 | 273 | self.vray_node_ref = None 274 | 275 | @classmethod 276 | def fromDict(cls,dict): 277 | return cls(dict.get('node'), 278 | dict.get('indices'), 279 | dict.get('attributes'), 280 | dict.get('material'), 281 | dict.get('targets')) 282 | 283 | def __str__(self): 284 | return ("node : " + str(self.node) + "\n" + 285 | "indices : " + str(self.indices) + "\n" + 286 | "attributes : " + str(self.attributes) + "\n" + 287 | "material : " + str(self.material) + "\n" + 288 | "targets : " + str(self.targets) + "\n") 289 | 290 | class Mesh: 291 | 292 | ID = 0 293 | 294 | def __init__(self,primitives = None, weights = None): 295 | self.primitives = [] 296 | self.ID = Mesh.ID 297 | self.weights = weights 298 | 299 | 300 | Mesh.ID += 1 301 | for pr in primitives: 302 | self.primitives.append(Primitive.fromDict(pr)) 303 | 304 | @classmethod 305 | def fromDict(cls,dict): 306 | return cls(dict.get('primitives'),dict.get('weights')) 307 | 308 | def __str__(self): 309 | print("**** MESH "+str(self.ID) + " ****") 310 | for pr in self.primitives: 311 | print(pr) 312 | return("**************") 313 | 314 | 315 | class PbrMetallicRoughness: 316 | 317 | def __init__(self,baseColorTexture,baseColorFactor,metallicFactor,roughnessFactor,metallicRoughnessTexture): 318 | self.baseColorTexture = baseColorTexture 319 | self.baseColorFactor = baseColorFactor 320 | self.metallicFactor = metallicFactor 321 | self.roughnessFactor = roughnessFactor 322 | self.metallicRoughnessTexture = metallicRoughnessTexture 323 | 324 | @classmethod 325 | def fromDict(cls,dict): 326 | return cls(dict.get('baseColorTexture'), 327 | dict.get('baseColorFactor'), 328 | dict.get('metallicFactor'), 329 | dict.get('roughnessFactor'), 330 | dict.get('metallicRoughnessTexture')) 331 | 332 | def __str__(self): 333 | return ("baseColorTexture : " + str(self.baseColorTexture) + "\n" + 334 | "baseColorFactor : " + str(self.baseColorFactor) + "\n" + 335 | "metallicFactor : " + str(self.metallicFactor) + "\n" + 336 | "roughnessFactor : " + str(self.roughnessFactor) + "\n" + 337 | "metallicRoughnessTexture : " + str(self.metallicRoughnessTexture) + "\n") 338 | 339 | 340 | class Material: 341 | 342 | def __init__(self,name,alphaMode = None,alphaCutoff = None,doubleSided = False,emissiveFactor = None, 343 | normalTexture = None,occlusionTexture = None,emissiveTexture = None,pbrMetallicRoughness = None,extensions=None): 344 | 345 | self.name = name 346 | self.alphaMode = alphaMode 347 | self.alphaCutoff = alphaCutoff 348 | self.doubleSided = doubleSided 349 | self.emissiveFactor = emissiveFactor 350 | self.normalTexture = normalTexture 351 | self.occlusionTexture = occlusionTexture 352 | self.emissiveTexture = emissiveTexture 353 | if pbrMetallicRoughness!=None: 354 | self.pbrMetallicRoughness = PbrMetallicRoughness.fromDict(pbrMetallicRoughness) 355 | else: 356 | self.pbrMetallicRoughness=None 357 | self.extensions = extensions 358 | 359 | @classmethod 360 | def fromDict(cls,dict): 361 | return cls(dict.get('name'), 362 | dict.get('alphaMode'), 363 | dict.get('alphaCutoff'), 364 | dict.get('doubleSided'), 365 | dict.get('emissiveFactor'), 366 | dict.get('normalTexture'), 367 | dict.get('occlusionTexture'), 368 | dict.get('emissiveTexture'), 369 | 
dict.get('pbrMetallicRoughness'), 370 | dict.get('extensions')) 371 | 372 | def __str__(self): 373 | return ("name : " + str(self.name) + "\n" + 374 | "alphaMode : " + str(self.alphaMode) + "\n" + 375 | "alphaCutoff : " + str(self.alphaCutoff) + "\n" + 376 | "doubleSided : " + str(self.doubleSided) + "\n" + 377 | "emissiveFactor : " + str(self.emissiveFactor) + "\n" + 378 | "normalTexture : " + str(self.normalTexture) + "\n" + 379 | "occlusionTexture : " + str(self.occlusionTexture) + "\n" + 380 | "emissiveTexture : " + str(self.emissiveTexture) + "\n" + 381 | "extensions : " + str(self.extensions) + "\n" + 382 | "************\npbrMetallicRoughness : \n" + str(self.pbrMetallicRoughness) + "\n"+ 383 | "************" + '\n') 384 | 385 | 386 | 387 | class Camera: 388 | # xxxx_dicts used differently for the 2 different camera types 389 | def __init__(self,cam_type=None,persp_dict=None,ortho_dict=None): 390 | 391 | 392 | self.camera_type = cam_type 393 | if cam_type == "perspective": 394 | self.aspectRatio = persp_dict.get('aspectRatio') 395 | self.yfov = persp_dict.get('yfov') 396 | self.zfar = persp_dict.get('zfar') 397 | self.znear = persp_dict.get('znear') 398 | elif cam_type == 'orthographic': 399 | self.xmag = ortho_dict.get('xmag') 400 | self.ymag = ortho_dict.get('ymag') 401 | self.zfar = ortho_dict.get('zfar') 402 | self.znear = ortho_dict.get('znear') 403 | 404 | @classmethod 405 | def fromDict(cls,dict): 406 | return cls(dict.get('type'), 407 | dict.get('perspective'), 408 | dict.get('orthographic')) 409 | 410 | class Texture: 411 | 412 | def __init__(self,source,sampler): 413 | self.source = source 414 | self.sampler = sampler 415 | 416 | @classmethod 417 | def fromDict(cls,dict): 418 | return cls(dict.get('source'),dict.get('sampler')) 419 | 420 | class Image: 421 | 422 | def __init__(self, uri = None , bufferView = None, mimeType = None): 423 | self.uri = uri 424 | self.bufferView = bufferView 425 | self.mimeType = mimeType 426 | self.local = False 427 | 428 | #data set throgh parsing to not repeat the same checks twice 429 | self.data = [] 430 | self.file_loc = '' 431 | 432 | @classmethod 433 | def fromDict(cls,dict): 434 | return cls(dict.get('uri'),dict.get('bufferView'),dict.get('mimeType')) 435 | 436 | def __str__(self): 437 | return ("uri : " + str(self.uri) + "\n" + 438 | "bufferView : " + str(self.bufferView) + "\n" + 439 | "mimeType : " + str(self.mimeType) + "\n") 440 | 441 | class Sampler: 442 | 443 | def __init__(self,magFilter = None,minFilter = None,wrapS = None ,wrapT = None): 444 | 445 | self.magFilter = magFilter 446 | self.minFilter = minFilter 447 | self.wrapS = wrapS 448 | self.wrapT = wrapT 449 | 450 | @classmethod 451 | def fromDict(cls,dict): 452 | return cls(dict.get('magFilter'),dict.get('minFilter'),dict.get('wrapS'),dict.get('wrapT')) 453 | 454 | class Skin: 455 | 456 | def __init__(self, inverseBindMatrices, joints): 457 | self.inverseBindMatrices = inverseBindMatrices 458 | self.joints = joints 459 | 460 | #GLTF 2.0 Experimental KHR_lights_punctual : https://github.com/KhronosGroup/glTF/tree/master/extensions/2.0/Khronos/KHR_lights_punctual 461 | class Light: 462 | 463 | def __init__(self,name = None, color = None, intenstity = None, mtype = None,mrange = None, spot_attr = None): 464 | self.name = name 465 | self.color = color 466 | self.intensity = intenstity 467 | self.mtype = mtype 468 | self.mrange = mrange 469 | 470 | self.spot_attr = spot_attr 471 | 472 | @classmethod 473 | def fromDict(cls,dict): 474 | return cls(dict.get('name'), 
dict.get('color'), dict.get('intensity'), dict.get('type'), dict.get('range'),dict.get('spot')) 475 | 476 | def __str__(self): 477 | return ("name : " + str(self.name) + "\n" + 478 | "Color : " + str(self.color) + "\n" + 479 | "Intensity : " + str(self.intensity) + "\n" + 480 | "type : " + str(self.mtype) + "\n" + 481 | "range : " + str(self.mrange) + "\n") 482 | 483 | class AnimSampler: 484 | 485 | def __init__(self,time_input = None, interpolation = None, output = None, idx_offset = 1): 486 | self.time_input = time_input #input accessor id 487 | self.interpolation = interpolation 488 | self.output = output #output accessor id 489 | self.current_sampl_idx = 0 490 | 491 | #caching animation data so we dont calculate every frame if its not needed 492 | self.nextKeyTime_idx = 0 493 | self.prevKeyTime = 0.0 494 | self.nextKeyTime = 0.0 495 | self.prevKeyFrameData = None 496 | self.nextKeyFrameData = None 497 | 498 | self.data_idx_offset = idx_offset 499 | 500 | def __str__(self): 501 | return ("input : " + str(self.time_input) + "\n" + 502 | "interpolation : " + str(self.interpolation) + "\n" + 503 | "output : " + str(self.output) + "\n") 504 | 505 | @classmethod 506 | def fromDict(cls,dict): 507 | return cls(dict.get('input'), dict.get('interpolation'), dict.get('output')) 508 | 509 | # Used in STEP and Linear interpolation animations 510 | class AnimSamplerLinear(AnimSampler): 511 | 512 | def __init__(self, time_input = None, interpolation = None, output = None): 513 | super(AnimSamplerLinear,self).__init__(time_input,interpolation,output) 514 | 515 | def update(self,parser,anim_type,target_node): 516 | time_data = parser.accessors[self.time_input].data 517 | time_s = parser.current_time 518 | 519 | if time_s >= self.nextKeyTime: 520 | 521 | for current_keyframe_time_idx in range(self.nextKeyTime_idx,len(time_data)): 522 | if time_s >= time_data[current_keyframe_time_idx]: 523 | self.nextKeyTime_idx+=1 524 | else: 525 | break 526 | 527 | if self.nextKeyTime_idx < len(time_data): 528 | 529 | self.prevKeyTime = time_data[self.nextKeyTime_idx-1] 530 | self.nextKeyTime = time_data[self.nextKeyTime_idx] 531 | 532 | prev_data_idx = (self.nextKeyTime_idx-1)*self.data_idx_offset 533 | next_data_idx = self.nextKeyTime_idx*self.data_idx_offset 534 | self.prevKeyFrameData = parser.accessors[self.output].data[prev_data_idx : prev_data_idx + self.data_idx_offset] 535 | self.prevKeyFrameData = self.prevKeyFrameData.flatten() 536 | self.nextKeyFrameData = parser.accessors[self.output].data[next_data_idx : next_data_idx + self.data_idx_offset] 537 | self.nextKeyFrameData = self.nextKeyFrameData.flatten() 538 | 539 | if time_s >= self.prevKeyTime: 540 | if self.interpolation == 'STEP': 541 | 542 | if anim_type == 'translation': 543 | parser.nodes[target_node].translation = [self.prevKeyFrameData[0],self.prevKeyFrameData[1],self.prevKeyFrameData[2]] 544 | if anim_type == 'rotation': 545 | parser.nodes[target_node].rotation = [self.prevKeyFrameData[0],self.prevKeyFrameData[1],self.prevKeyFrameData[2],self.prevKeyFrameData[3]] 546 | if anim_type == 'scale': 547 | parser.nodes[target_node].scale = [self.prevKeyFrameData[0],self.prevKeyFrameData[1],self.prevKeyFrameData[2]] 548 | if anim_type == 'weights': 549 | parser.meshes[parser.nodes[target_node].mesh].weights = self.prevKeyFrameData 550 | 551 | if self.interpolation == 'LINEAR': 552 | lerp_time = get_lerp_time(time_s,self.prevKeyTime,self.nextKeyTime) 553 | 554 | if anim_type == 'translation': 555 | 556 | trans_x = 
lerp(self.prevKeyFrameData[0],self.nextKeyFrameData[0],lerp_time) 557 | trans_y = lerp(self.prevKeyFrameData[1],self.nextKeyFrameData[1],lerp_time) 558 | trans_z = lerp(self.prevKeyFrameData[2],self.nextKeyFrameData[2],lerp_time) 559 | 560 | parser.nodes[target_node].translation = [trans_x,trans_y,trans_z] 561 | if anim_type == 'rotation': 562 | 563 | quat1 = pyq.Quaternion(self.prevKeyFrameData[3], self.prevKeyFrameData[0], self.prevKeyFrameData[1], self.prevKeyFrameData[2]) 564 | quat2 = pyq.Quaternion(self.nextKeyFrameData[3], self.nextKeyFrameData[0], self.nextKeyFrameData[1], self.nextKeyFrameData[2]) 565 | slerp_quat = quat_slerp(quat1,quat2,lerp_time) 566 | 567 | parser.nodes[target_node].rotation = [slerp_quat.x,slerp_quat.y,slerp_quat.z,slerp_quat.w] 568 | if anim_type == 'scale': 569 | 570 | scale_x = lerp(self.prevKeyFrameData[0],self.nextKeyFrameData[0],lerp_time) 571 | scale_y = lerp(self.prevKeyFrameData[1],self.nextKeyFrameData[1],lerp_time) 572 | scale_z = lerp(self.prevKeyFrameData[2],self.nextKeyFrameData[2],lerp_time) 573 | 574 | parser.nodes[target_node].scale = [scale_x,scale_y,scale_z] 575 | 576 | if anim_type == 'weights': 577 | for i in range(0,len(parser.meshes[parser.nodes[target_node].mesh].weights)): 578 | parser.meshes[parser.nodes[target_node].mesh].weights[i] = float(lerp(self.prevKeyFrameData[i],self.nextKeyFrameData[i],lerp_time)) 579 | 580 | # Used in cubic spline Samplers 581 | class AnimSamplerSpline(AnimSampler): 582 | 583 | def __init__(self, time_input = None, interpolation = None, output = None): 584 | super(AnimSamplerSpline,self).__init__(time_input,interpolation,output) 585 | #caching animation [input_tan , data , output tan] so we dont calculate every frame if its not needed 586 | self.prevOutputTangent = None 587 | self.nextInputTangent = None 588 | 589 | def update(self,parser,anim_type,target_node): 590 | time_data = parser.accessors[self.time_input].data 591 | time_s = parser.current_time 592 | 593 | if time_s < time_data[0]: 594 | return None 595 | 596 | # see if the cached data needs updating and we are between the 2 next keyframes 597 | if time_s >= self.nextKeyTime: 598 | for current_keyframe_time_idx in range(self.nextKeyTime_idx,len(time_data)): 599 | if time_s >= time_data[current_keyframe_time_idx]: 600 | self.nextKeyTime_idx+=1 601 | else: 602 | break 603 | 604 | if self.nextKeyTime_idx < len(time_data): 605 | self.prevKeyTime = time_data[self.nextKeyTime_idx-1] 606 | self.nextKeyTime = time_data[self.nextKeyTime_idx] 607 | 608 | #the data for Cubic Spline is contained in 3 sequential elements [inputTangent,data,outputTangent] 609 | prevKeyTime_data_idx = (self.nextKeyTime_idx-1)*3 610 | nextKeyTime_data_idx = self.nextKeyTime_idx*3 611 | self.prevKeyFrameData = parser.accessors[self.output].data[prevKeyTime_data_idx+1] 612 | self.nextKeyFrameData = parser.accessors[self.output].data[nextKeyTime_data_idx+1] 613 | 614 | self.prevOutputTangent = parser.accessors[self.output].data[prevKeyTime_data_idx+2] 615 | self.nextInputTangent = parser.accessors[self.output].data[nextKeyTime_data_idx] 616 | 617 | if time_s >= self.prevKeyTime: 618 | lerp_time = get_lerp_time(time_s,self.prevKeyTime,self.nextKeyTime) 619 | delta_time = self.nextKeyTime - self.prevKeyTime 620 | if delta_time < 0.0: 621 | delta_time = 0.0 622 | 623 | if anim_type == 'translation': 624 | 625 | trans = _spline(self.prevKeyFrameData,self.prevOutputTangent,self.nextKeyFrameData,self.nextInputTangent,lerp_time,delta_time) 626 | 627 | parser.nodes[target_node].translation = 
[trans[0],trans[1],trans[2]] 628 | 629 | if anim_type == 'rotation': 630 | spline_quat = _spline(self.prevKeyFrameData,self.prevOutputTangent,self.nextKeyFrameData,self.nextInputTangent,lerp_time,delta_time) 631 | 632 | parser.nodes[target_node].rotation = [spline_quat[0],spline_quat[1],spline_quat[2],spline_quat[3]] 633 | if anim_type == 'scale': 634 | 635 | scale = _spline(self.prevKeyFrameData,self.prevOutputTangent,self.nextKeyFrameData,self.nextInputTangent,lerp_time,delta_time) 636 | parser.nodes[target_node].scale = [scale[0],scale[1],scale[2]] 637 | 638 | class AnimChannel: 639 | 640 | def __init__(self, target = None,sampler_id = None,sampler_dict = None): 641 | self.target = target 642 | self.sampler_id = sampler_id 643 | self.sampler = None 644 | self.anim_type = target.get('path') 645 | self.target_node = target.get('node') 646 | 647 | interpolation_type = sampler_dict.get('interpolation') 648 | # we default to LINEAR 649 | if interpolation_type == None: 650 | interpolation_type = 'LINEAR' 651 | sampler_dict['interpolation'] = 'LINEAR' 652 | #creating sampler for the channel 653 | if interpolation_type == 'LINEAR' or interpolation_type == 'STEP': 654 | 655 | self.sampler = AnimSamplerLinear.fromDict(sampler_dict) 656 | elif interpolation_type == 'CUBICSPLINE': 657 | self.sampler = AnimSamplerSpline.fromDict(sampler_dict) 658 | 659 | 660 | @classmethod 661 | def fromDict(cls,dict,sampler_dict): 662 | return cls(dict.get('target'), dict.get('sampler'),sampler_dict) 663 | 664 | def __str__(self): 665 | return ("target : " + str(self.target) + " \n" + 666 | "sampler : " + str(self.sampler) + "\n") 667 | 668 | def update(self,parser): 669 | self.sampler.update(parser,self.anim_type,self.target_node) 670 | 671 | class Animation: 672 | 673 | def __init__(self,channels): 674 | self.channels = channels 675 | self.anim_time = None 676 | 677 | @classmethod 678 | def fromDict(cls,dict): 679 | vchannels = [] 680 | 681 | channels = dict.get('channels') 682 | samplers = dict.get('samplers') 683 | 684 | if channels != None: 685 | for channel,sampler in zip(channels,samplers): 686 | vchannels.append(AnimChannel.fromDict(channel,sampler)) 687 | 688 | return cls(vchannels) 689 | 690 | def __str__(self): 691 | return str(self.channels) + str(self.samplers) 692 | 693 | # needs gltfParser ascr list to work 694 | # used to go through the full animation if frames or animation time is not set by user 695 | def calc_max_time(self, ascrs_list): 696 | self.anim_time = 0.0 697 | for channel in self.channels: 698 | 699 | acsr_idx = channel.sampler.time_input 700 | current_time = max(ascrs_list[acsr_idx].max) 701 | self.anim_time = max(current_time,self.anim_time) 702 | 703 | def init(self,ascrs_list): 704 | for channel in self.channels: 705 | acsr_idx_input = channel.sampler.time_input 706 | acsr_idx_output = channel.sampler.output 707 | channel.sampler.data_idx_offset = int(len(ascrs_list[acsr_idx_output].data) / len(ascrs_list[acsr_idx_input].data)) 708 | 709 | def update(self,parser): 710 | for channel in self.channels: 711 | channel.update(parser) 712 | 713 | # used to fill parser data, including checks 714 | def _parse_json_part(fileData = None, parserList = None,dictName = None, constructor = None,errorwarn = True): 715 | dicts = fileData.get(dictName) 716 | if dicts != None: 717 | for val in dicts: 718 | parserList.append(constructor(val)) 719 | else: 720 | if errorwarn == True: 721 | print("[ParserInfo] ERROR : No " + dictName + " found in file") 722 | else: 723 | print("[ParserInfo] Warning : No " 
+ dictName + " found in file") 724 | 725 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Chaos Software OOD 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # V-Ray glTF viewer 2 | 3 | ![Sample render](/samples/basic/basic.png) 4 | 5 | ## Overview 6 | 7 | The V-Ray glTF viewer is a set of Python scripts for the V-Ray App SDK that allow the parsing and rendering of glTF (.gltf and .glb) files. 8 | 9 | ## Installing and running the V-Ray glTF viewer 10 | 11 | ### With V-Ray App SDK 12 | 13 | * Install Python 3.8 for all users in "c:\Program Files\Python38" 14 | 15 | * Install nightly V-Ray 5 App SDK (Qt version) in "C:\Program Files\Chaos Group\V-Ray\AppSDK". Be sure to choose the _Advanced_ installation type and make sure the installation does _not_ modify any license settings or environment variables. 16 | 17 | Note: In case you accidentally let the V-Ray App SDK installation modify the V-Ray license settings, run the tool setvrlservice.exe or from the start menu search for "Change V-Ray client license settings" to change them. 18 | 19 | * Open a command prompt (press Windows+R, type `cmd` and press Enter). 20 | 21 | * Execute `set path="c:\program files\python38";"c:\program files\python38\scripts";%path%` 22 | * _First time only:_ Execute `pip install numpy` 23 | * _First time only:_ Execute `pip install pyquaternion` 24 | * _First time only:_ Execute `pip install numba` 25 | * _First time only:_ Execute `pip install scipy` 26 | 27 | * Execute `"C:\Program Files\Chaos Group\V-Ray\AppSDK\setenv38.bat"` 28 | * Execute `cd /d VRAY_GLTF_FOLDER` where VRAY_GLTF_FOLDER is the folder where the file main.py is located. 29 | * Execute `python main.py` to see a list of options; use `python main.py --help` for detailed usage description. 30 | 31 | ### With V-Ray 5 for 3ds Max 32 | 33 | The Python binding of the V-Ray AppSDK is also included with V-Ray 5 for 3ds Max and Maya and in this case it is not needed to install the V-Ray AppSDK separately. 
34 | 35 | * Make sure you have a recent version of V-Ray 5 for 3ds Max with the Python 3 binding of the V-Ray App SDK included (check if you have the folder "C:\Program Files\Chaos Group\V-Ray\3ds Max 2021\samples\appsdk\python38"). 36 | 37 | * Install Python 3.8 for all users in "c:\Program Files\Python38" 38 | 39 | * Open a command prompt (press Windows+R, type `cmd` and press Enter). 40 | 41 | * Execute `set path="c:\program files\python38";"c:\program files\python38\scripts";%path%` 42 | * _First time only:_ Execute `pip install numpy` 43 | * _First time only:_ Execute `pip install pyquaternion` 44 | * _First time only:_ Execute `pip install numba` 45 | * _First time only:_ Execute `pip install scipy` 46 | 47 | * Execute `"C:\Program Files\Chaos Group\V-Ray\3ds Max 2021\samples\appsdk\setenv38.bat"` 48 | * Execute `cd /d VRAY_GLTF_FOLDER` where VRAY_GLTF_FOLDER is the folder where the file main.py is located. 49 | * Execute `python main.py` to see a list of options; use `python main.py --help` for detailed usage description. 50 | 51 | ## Usage 52 | 53 | The V-Ray glTF scripts are command-line only; there is no GUI and all options must be passed on the command line. 54 | 55 | Use the --help option to list all possible options and their values. 56 | 57 | An example command to render the sample .glb file could look like this (assuming that the vray_gltf project is extracted to **d:\temp\vray_gltf**): 58 | 59 | ``` 60 | python main.py d:\temp\vray_gltf\samples\basic\basic.glb --thick_glass --default_lights --default_cam_moffset "(0, 0, -0.5)" --output_file d:\temp\vray_gltf.jpg 61 | ``` 62 | 63 | ## Supported features 64 | 65 | The glTF parser supports glTF 2.0 with the following extensions: 66 | 67 | * KHR_texture_transform 68 | * KHR_materials_pbrSpecularGlossiness 69 | * KHR_materials_transmission 70 | * KHR_materials_clearcoat 71 | * KHR_materials_sheen 72 | * KHR_lights_punctual is mostly working, but light range is not supported 73 | 74 | Simple transform animations are supported to some extent. Vertex deformations, either through morphing or skinning are currently not supported. 75 | 76 | Most of the sample models provided by Khronos generally render fine, as well as many models from the Windows 3D viewer library. 
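Since simple transform animations are supported, an animated .gltf/.glb file can also be rendered as an image sequence by combining the `--num_frames` and `--animation_fps` options together with `--output_file` (all of them are listed by `--help`). If `--num_frames` is not given, the number of frames is derived from the longest animation in the file and the animation fps (60 by default). A sketch of such a command, assuming a hypothetical animated file at d:\temp\vray_gltf\samples\animated.glb, could look like this:

```
python main.py d:\temp\vray_gltf\samples\animated.glb --default_lights --num_frames 60 --animation_fps 30 --output_file d:\temp\anim.png
```

When more than one frame is rendered, the frame index is appended to the output file name for each frame.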
77 | -------------------------------------------------------------------------------- /gallery/3d_viewer/Armored Van.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Armored Van.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Attack helicopter.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Attack helicopter.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Banana Gun with Scope.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Banana Gun with Scope.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Bee.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Bee.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Castle.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Castle.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Cathedral.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Cathedral.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Covered wagon.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Covered wagon.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Create a dragon tale.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Create a dragon tale.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Double-Decker.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Double-Decker.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Emoji With Glasses.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Emoji With Glasses.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Fantastical diorama.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Fantastical diorama.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Gazebo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Gazebo.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Large troll.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Large troll.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Lynx - low poly.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Lynx - low poly.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Motorcycle.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Motorcycle.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Mount Rainier.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Mount Rainier.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Plunger of DEATH.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Plunger of DEATH.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Quaint Village.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Quaint Village.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Red dragon.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Red dragon.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Rock fountain.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Rock fountain.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Sabrewulf.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Sabrewulf.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Sedan car.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Sedan car.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Small troll.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Small troll.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Smiling Critter.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Smiling Critter.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Snowscape diorama.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Snowscape diorama.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Snowy Village.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Snowy Village.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Steampunk Cottage.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Steampunk Cottage.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Steampunk Dirigible with Ship.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Steampunk Dirigible with Ship.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Stone.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Stone.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Tiger.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Tiger.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Topiary 4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Topiary 4.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Tree with Falling Leaves.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Tree with Falling 
Leaves.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Tyrannosaurus Rex.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Tyrannosaurus Rex.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Unicorn.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Unicorn.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Vulture.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Vulture.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Wacky UFO.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Wacky UFO.jpg -------------------------------------------------------------------------------- /gallery/3d_viewer/Winter Cabin.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/3d_viewer/Winter Cabin.jpg -------------------------------------------------------------------------------- /gallery/official_samples/2CylinderEngine.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/2CylinderEngine.jpg -------------------------------------------------------------------------------- /gallery/official_samples/AlphaBlendModeTest.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/AlphaBlendModeTest.jpg -------------------------------------------------------------------------------- /gallery/official_samples/AnimatedMorphCube.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/AnimatedMorphCube.jpg -------------------------------------------------------------------------------- /gallery/official_samples/AnimatedMorphSphere.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/AnimatedMorphSphere.jpg -------------------------------------------------------------------------------- /gallery/official_samples/AntiqueCamera.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/AntiqueCamera.jpg -------------------------------------------------------------------------------- /gallery/official_samples/Avocado.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/Avocado.jpg -------------------------------------------------------------------------------- /gallery/official_samples/BarramundiFish.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/BarramundiFish.jpg -------------------------------------------------------------------------------- /gallery/official_samples/BoomBox.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/BoomBox.jpg -------------------------------------------------------------------------------- /gallery/official_samples/Box.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/Box.jpg -------------------------------------------------------------------------------- /gallery/official_samples/BoxAnimated.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/BoxAnimated.jpg -------------------------------------------------------------------------------- /gallery/official_samples/BoxInterleaved.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/BoxInterleaved.jpg -------------------------------------------------------------------------------- /gallery/official_samples/BoxTextured.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/BoxTextured.jpg -------------------------------------------------------------------------------- /gallery/official_samples/BoxTexturedNonPowerOfTwo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/BoxTexturedNonPowerOfTwo.jpg -------------------------------------------------------------------------------- /gallery/official_samples/BoxVertexColors.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/BoxVertexColors.jpg -------------------------------------------------------------------------------- /gallery/official_samples/BrainStem.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/BrainStem.jpg -------------------------------------------------------------------------------- /gallery/official_samples/Buggy.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/Buggy.jpg -------------------------------------------------------------------------------- /gallery/official_samples/CesiumMan.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/CesiumMan.jpg -------------------------------------------------------------------------------- /gallery/official_samples/CesiumMilkTruck.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/CesiumMilkTruck.jpg -------------------------------------------------------------------------------- /gallery/official_samples/ClearCoatTest.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/ClearCoatTest.jpg -------------------------------------------------------------------------------- /gallery/official_samples/Corset.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/Corset.jpg -------------------------------------------------------------------------------- /gallery/official_samples/DamagedHelmet.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/DamagedHelmet.jpg -------------------------------------------------------------------------------- /gallery/official_samples/Duck.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/Duck.jpg -------------------------------------------------------------------------------- /gallery/official_samples/GearboxAssy.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/GearboxAssy.jpg -------------------------------------------------------------------------------- /gallery/official_samples/InterpolationTest.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/InterpolationTest.jpg -------------------------------------------------------------------------------- /gallery/official_samples/Lantern.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/Lantern.jpg -------------------------------------------------------------------------------- /gallery/official_samples/MaterialsVariantsShoe.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/MaterialsVariantsShoe.jpg -------------------------------------------------------------------------------- /gallery/official_samples/MetalRoughSpheres.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/MetalRoughSpheres.jpg -------------------------------------------------------------------------------- /gallery/official_samples/MetalRoughSpheresNoTextures.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/MetalRoughSpheresNoTextures.jpg -------------------------------------------------------------------------------- /gallery/official_samples/MorphPrimitivesTest.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/MorphPrimitivesTest.jpg -------------------------------------------------------------------------------- /gallery/official_samples/MorphStressTest.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/MorphStressTest.jpg -------------------------------------------------------------------------------- /gallery/official_samples/NormalTangentMirrorTest.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/NormalTangentMirrorTest.jpg -------------------------------------------------------------------------------- /gallery/official_samples/NormalTangentTest.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/NormalTangentTest.jpg -------------------------------------------------------------------------------- /gallery/official_samples/OrientationTest.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/OrientationTest.jpg -------------------------------------------------------------------------------- /gallery/official_samples/ReciprocatingSaw.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/ReciprocatingSaw.jpg -------------------------------------------------------------------------------- /gallery/official_samples/RiggedFigure.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/RiggedFigure.jpg -------------------------------------------------------------------------------- /gallery/official_samples/RiggedSimple.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/RiggedSimple.jpg -------------------------------------------------------------------------------- /gallery/official_samples/SheenChair.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/SheenChair.jpg -------------------------------------------------------------------------------- /gallery/official_samples/SheenCloth.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/SheenCloth.jpg -------------------------------------------------------------------------------- /gallery/official_samples/SpecGlossVsMetalRough.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/SpecGlossVsMetalRough.jpg -------------------------------------------------------------------------------- /gallery/official_samples/TextureCoordinateTest.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/TextureCoordinateTest.jpg -------------------------------------------------------------------------------- /gallery/official_samples/TextureEncodingTest.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/TextureEncodingTest.jpg -------------------------------------------------------------------------------- /gallery/official_samples/TextureSettingsTest.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/TextureSettingsTest.jpg -------------------------------------------------------------------------------- /gallery/official_samples/TextureTransformMultiTest.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/TextureTransformMultiTest.jpg -------------------------------------------------------------------------------- /gallery/official_samples/TextureTransformTest.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/TextureTransformTest.jpg -------------------------------------------------------------------------------- /gallery/official_samples/ToyCar.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/ToyCar.jpg -------------------------------------------------------------------------------- /gallery/official_samples/VertexColorTest.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/VertexColorTest.jpg -------------------------------------------------------------------------------- /gallery/official_samples/WaterBottle.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/gallery/official_samples/WaterBottle.jpg -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import os 4 | import sys 5 | 6 | import argparse 7 | 8 | import Gltf_Parser.gltfparser as gltfp 9 | import testUtils 10 | 11 | import math 12 | import vray 13 | import numpy as np 14 | import tempfile 15 | 16 | argParser = argparse.ArgumentParser("Vray GLTF", allow_abbrev=False) 17 | 18 | argParser.add_argument('scene_file',type=str, help="Scene file") 19 | 20 | argParser.add_argument('--render_mode',dest='render_mode', type=str, help="Rendering Mode, by default interactive; can be production or interactive") 21 | argParser.set_defaults(render_mode='interactive') 22 | 23 | argParser.add_argument('--noise_treshold',dest='noise_treshold', type=float, help="Noise Treshold") 24 | argParser.set_defaults(noise_treshold=0.01) 25 | 26 | argParser.add_argument('--size',dest='size', type=str, help="Size of render (x,y) or '(x,y)'") 27 | argParser.set_defaults(size='(1080,1080)') 28 | 29 | argParser.add_argument('--default_camera',dest='default_camera', action='store_true', help="Ignore file cameras and use a default one") 30 | argParser.set_defaults(default_camera=False) 31 | 32 | argParser.add_argument('--default_cam_look_at',dest='default_cam_look_at', type=str, help="Camera look at (x,y,z)") 33 | argParser.set_defaults(default_cam_look_at=None) 34 | 35 | argParser.add_argument('--default_cam_rot',dest='default_cam_rot',type = str,help = 'Default camera rotation(degrees) (x,y,z) or "(x,y,z)" around the avarage object position of the scene, other brackets work too') 36 | argParser.set_defaults(default_cam_rot='(0,0,0)') 37 | 38 | argParser.add_argument('--default_cam_moffset',dest='default_cam_moffset',type = str,help = 'Default camera multiplier offset (x,y,z) or "(x,y,z)", all brackets will work') 39 | argParser.set_defaults(default_cam_moffset='(-0.3,0.1,0)') 40 | 41 | argParser.add_argument('--default_cam_pos',dest='default_cam_pos',type = str,help = 'Default camera Pos (x,y,z) or "(x,y,z)", other default cam still work but relative on this position') 42 | argParser.set_defaults(default_cam_pos=None) 43 | 44 | argParser.add_argument('--default_cam_fov',dest='default_cam_fov',type = float,help = 'Default camera FOV is degrees') 45 | argParser.set_defaults(default_cam_fov=45.0) 46 | 47 | argParser.add_argument('--default_cam_zoom',dest='default_cam_zoom',type = float,help = 'Default camera Zoom -inf to 1.0 as 1.0 max zoom') 48 | argParser.set_defaults(default_cam_zoom=0.0) 49 | 50 | argParser.add_argument('--default_cam_view', dest='default_cam_view', type = str, help = 'Default camera view, one of front, back, left, right, top, bottom or auto') 51 | argParser.set_defaults(default_cam_view='auto') 52 | 53 | argParser.add_argument('--test_material', dest='test_material', action='store_true',help = 'Use testing materials, made to see mesh vertecies and triangles') 54 | 
argParser.set_defaults(test_material=False) 55 | 56 | argParser.add_argument('--json_dump',dest='json_dump',action='store_true', help="Dump the scene's JSON data to a .json file. Used to debug .glb files") 57 | argParser.set_defaults(json_dump=False) 58 | 59 | argParser.add_argument('--num_frames',dest='num_frames',type = int, help="Set number of frames to render") 60 | argParser.set_defaults(num_frames=0) 61 | 62 | argParser.add_argument('--start_frame',dest='start_frame',type = int, help="Offset from the 0 frame") 63 | argParser.set_defaults(start_frame=0) 64 | 65 | argParser.add_argument('--animation_fps',dest='animation_fps',type = int, help="Set animation FPS for the whole scene. Default is 60") 66 | argParser.set_defaults(animation_fps=60) 67 | 68 | argParser.add_argument('--output_file',dest='output_file',type = str, help="Location to save the output file; may include a file extension, otherwise it defaults to .exr. " + 69 | "If not provided, the file is saved in the same folder as the scene") 70 | argParser.set_defaults(output_file=None) 71 | 72 | argParser.add_argument('--output_vrscene',dest='output_vrscene',type = str, help="Location to save a .vrscene file which can be rendered.") 73 | argParser.set_defaults(output_vrscene=None) 74 | 75 | argParser.add_argument('--default_lights',dest='default_lights', action='store_true', help="Add default lights to the scene") 76 | argParser.set_defaults(default_lights=False) 77 | 78 | argParser.add_argument('--ground_plane',dest='ground_plane', action='store_true', help="Add a ground plane") 79 | argParser.set_defaults(ground_plane=False) 80 | 81 | argParser.add_argument('--thick_glass',dest='thick_glass', action='store_true', help="For materials with alpha mode set to BLEND, use thick glass instead of opacity") 82 | argParser.set_defaults(thick_glass=False) 83 | 84 | argParser.add_argument('--thin_glass',dest='thin_glass', action='store_true', help="For materials with alpha mode set to BLEND, use thin glass instead of opacity") 85 | argParser.set_defaults(thin_glass=False) 86 | 87 | argParser.add_argument('--trace_depth',dest='trace_depth', type=int, help="Set the maximum reflection/refraction trace depth") 88 | argParser.set_defaults(trace_depth=8) 89 | 90 | argParser.add_argument('--environment_scene',dest='environment_scene', type=str, help=".vrscene file to load with additional geometry and lights") 91 | argParser.set_defaults(environment_scene=None) 92 | 93 | args = argParser.parse_args() 94 | 95 | #eval lets the vector arguments be written as a tuple or list with any bracket style 96 | args.default_cam_rot = tuple(eval(args.default_cam_rot)) 97 | if len(args.default_cam_rot) != 3: 98 | raise argparse.ArgumentTypeError("Default camera angles must be (x,y,z)") 99 | 100 | args.default_cam_moffset = tuple(eval(args.default_cam_moffset)) 101 | if len(args.default_cam_moffset) != 3: 102 | raise argparse.ArgumentTypeError("Default camera moffset must be (x,y,z)") 103 | 104 | args.size = tuple(eval(args.size)) 105 | if len(args.size) != 2: 106 | raise argparse.ArgumentTypeError("Size must be (x,y)") 107 | 108 | if args.default_cam_pos != None: 109 | args.default_cam_pos = tuple(eval(args.default_cam_pos)) 110 | if len(args.default_cam_pos) != 3: 111 | raise argparse.ArgumentTypeError("Default camera pos must be (x,y,z)") 112 | 113 | if args.default_cam_look_at != None: 114 | args.default_cam_look_at = tuple(eval(args.default_cam_look_at)) 115 | if 
len(args.default_cam_look_at) != 3: 116 | raise argparse.ArgumentTypeError("Default camera look at must be (x,y,z)") 117 | 118 | if __name__ == "__main__": 119 | 120 | Parser = gltfp.GltfParser() 121 | 122 | renderer = vray.VRayRenderer() 123 | 124 | try: 125 | 126 | def dumpMsg(renderer, message, level, instant): 127 | if level == vray.LOGLEVEL_ERROR: 128 | print("[ERROR]", message) 129 | elif level == vray.LOGLEVEL_WARNING: 130 | print("[Warning]", message) 131 | elif level == vray.LOGLEVEL_INFO: 132 | print("[info]", message) 133 | 134 | #always set arg options before parsing scene 135 | Parser.set_options(args) 136 | 137 | #renderer OPTS 138 | renderer.setImprovedDefaultSettings() 139 | renderer.setOnLogMessage(dumpMsg) 140 | renderer.renderMode = args.render_mode 141 | renderer.size = args.size 142 | renderer.setInteractiveNoiseThreshold(args.noise_treshold) 143 | 144 | # For interactive or GPU rendering, set limit for the trace depth 145 | settingsRTEngine=renderer.classes.SettingsRTEngine.getInstances() 146 | if len(settingsRTEngine)>0: 147 | settingsRTEngine[0].trace_depth=args.trace_depth 148 | 149 | # Set the units settings 150 | photometricSettings=renderer.classes.SettingsUnitsInfo.getInstanceOrCreate() 151 | photometricSettings.photometric_scale=1.0 152 | photometricSettings.scene_upDir=vray.Vector(0, 1, 0) # glTF is Y-up 153 | photometricSettings.meters_scale=1.0 # Assume 1 unit is 1 meter 154 | 155 | #parsing scene contents and creating vray plugins 156 | Parser.parseScene(args.scene_file,renderer, dumpToJson = args.json_dump) 157 | 158 | if args.output_vrscene!=None: 159 | renderer.export(args.output_vrscene) 160 | 161 | if args.num_frames > 0: 162 | frames = args.num_frames 163 | else: 164 | frames = math.floor(Parser.animation_time*Parser.animation_fps) 165 | frames = np.clip(frames,1,sys.maxsize) 166 | #set up frame, update animations 167 | Parser._setup_frame(0,renderer) 168 | 169 | for i in range(args.start_frame, args.start_frame+frames, 1): 170 | 171 | renderer.startSync() 172 | renderer.waitForRenderEnd() 173 | 174 | image = renderer.getImage() 175 | 176 | if frames > 1: 177 | 178 | if args.output_file != None: 179 | root, ext= os.path.splitext(args.output_file) 180 | _dir , filename = os.path.split(root) 181 | 182 | if ext == '': # os.path.splitext returns an empty string when no extension is given 183 | ext = '.exr' 184 | if filename == '' or filename.isspace(): 185 | #get scene name 186 | filename = os.path.splitext(args.scene_file)[0] 187 | if ext == '.png': 188 | image.changeGamma(2.2) 189 | 190 | save_loc = os.path.join(_dir, filename + str(i) + ext) 191 | print("Saving " + str(save_loc)) 192 | image.save(str(save_loc)) 193 | else: 194 | filename = os.path.splitext(args.scene_file) 195 | print("Saving " + str(filename[0])) 196 | image.save(str(filename[0]) + str(i) + ".exr") 197 | 198 | print('Image Ready, frame ' + str(renderer.frame) + 199 | ' (sequenceEnd = ' + str(renderer.sequenceEnded) + ')') 200 | Parser._setup_frame(i+1,renderer) 201 | else: 202 | 203 | if args.output_file != None: 204 | root, ext= os.path.splitext(args.output_file) 205 | _dir , filename = os.path.split(root) 206 | 207 | if ext == '': 208 | ext = '.exr' 209 | if filename == '' or filename.isspace(): 210 | #get scene name 211 | filename = os.path.splitext(args.scene_file)[0] 212 | 213 | save_loc = os.path.join(_dir, filename + ext) 214 | print("Saving " + str(save_loc)) 215 | image.save(str(save_loc)) 216 | else: 217 | filename = os.path.splitext(args.scene_file) 218 | print("Saving " + str(filename[0])) 219 | image.save(str(filename[0]) 
+ ".exr") 220 | #clean up after all frames 221 | renderer.clearScene() 222 | Parser.clean_up() 223 | 224 | except KeyboardInterrupt: 225 | print("Terminating Script by KeyboardInterrupt") 226 | renderer.clearScene() 227 | Parser.clean_up() 228 | quit() 229 | 230 | 231 | -------------------------------------------------------------------------------- /samples/basic/basic.glb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/samples/basic/basic.glb -------------------------------------------------------------------------------- /samples/basic/basic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChaosGroup/vray_gltf/63eb33c4b65a61823eff4e07f581c16a54e43b7a/samples/basic/basic.png -------------------------------------------------------------------------------- /testUtils.py: -------------------------------------------------------------------------------- 1 | import vray 2 | import math 3 | import Gltf_Parser.cameraUtils as camUtils 4 | 5 | 6 | def _set_testing_material(renderer,node): 7 | #make default material for testing 8 | testTexture = renderer.classes.TexEdges() 9 | testTexture.edges_tex = vray.AColor(1,0.0,0.0,1) 10 | testTexture.bg_tex = vray.AColor(0.3481481, 0.8, 0.3481481, 1) 11 | testTexture.width_type = 1 12 | testTexture.pixel_width = 1 13 | 14 | testBRDF = renderer.classes.BRDFVRayMtl() 15 | testBRDF.diffuse = testTexture 16 | #testBRDF.reflect = vray.AColor(0.5,0.8,0.4, 0.01) 17 | #testBRDF.refract = vray.AColor(0.5,0.8,0.4, 0.01) 18 | #print(testBRDF.option_use_roughness) 19 | testBRDF.fresnel = True 20 | material = renderer.classes.MtlSingleBRDF() 21 | material.brdf = testBRDF 22 | material.double_sided = True 23 | node.material = material 24 | 25 | def rotateCamera(renderView,x,y,z,s=1): 26 | 27 | mS = vray.Matrix(s) 28 | mX = vray.Matrix.makeRotationMatrixX(x) 29 | mY = vray.Matrix.makeRotationMatrixY(y) 30 | mZ = vray.Matrix.makeRotationMatrixZ(z) 31 | transform = vray.Transform(mS * mZ * mY * mX, renderView.transform.offset) 32 | renderView.transform = transform 33 | 34 | 35 | def moveCamera(renderView,x = 0,y = 0,z = 0): 36 | # By changing RenderView's transform we move the camera. 37 | 38 | # Obtain a copy of the renderView transform. 39 | updatedTransform = renderView.transform 40 | 41 | # Modify the copy of the renderView transform. 42 | # The changes do not affect the scene directly since 43 | # updatedTransform is a copy of the actual transform. 44 | updatedTransform = updatedTransform.replaceOffset(vray.Vector( 45 | updatedTransform.offset.x + x, 46 | updatedTransform.offset.y + y, 47 | updatedTransform.offset.z - z 48 | )) 49 | 50 | # Update the transform value in renderView (applying the changes above). 51 | renderView.transform = updatedTransform 52 | def make_transform(rotX=0, rotY=0, rotZ=0, scale=1, offset=vray.Vector(0.0,0.0,0.0)): 53 | """Creates a transform with the specified rotation and scale. 
54 | """ 55 | mS = vray.Matrix(scale) 56 | mX = vray.Matrix.makeRotationMatrixX(rotX) 57 | mY = vray.Matrix.makeRotationMatrixY(rotY) 58 | mZ = vray.Matrix.makeRotationMatrixZ(rotZ) 59 | transform = vray.Transform(mS * mZ * mY * mX, offset) 60 | return transform 61 | 62 | #testing for default camera 63 | def camera_look_at(pos,offset_camera = vray.Vector(0.03, 0.015, 0.01)): 64 | dist_vec = pos 65 | dir_default = vray.Vector(0.0, 0.0, -1.0) 66 | dir_point = dist_vec.normalize() 67 | 68 | rot_angl = math.acos(dir_default * dir_point) 69 | 70 | rot_axis = (dir_default ^ dir_point) 71 | #offset_camera = (dir_point*-1) * ( 2*dist_vec.length()) 72 | camTrans = make_transform(math.radians(rot_angl*rot_axis.x)+math.pi/2,math.radians(rot_angl*rot_axis.z), math.radians(rot_angl*rot_axis.y)+math.pi/1.5 ,1,offset_camera) 73 | print(offset_camera) 74 | print(rot_axis) 75 | print(rot_angl) 76 | #exit() 77 | return camTrans 78 | 79 | def setup_scene(renderer, obj_diameter, lookat = None,camTransform = None,has_camera = False): 80 | """Sets up a scene with camera and light. 81 | """ 82 | #if has_camera == False: 83 | # renderView = renderer.classes.RenderView() 84 | # renderView.fov = math.pi/2 85 | # #x right 86 | # #y up 87 | # # -z forward 88 | 89 | 90 | # cam_z = 0.8/2/math.tan(renderView.fov/2) 91 | # #cam_z = abs(0.8*math.tan(renderView.fov/2)) 92 | # camPos = vray.Vector(0.0,0.5, cam_z) 93 | # camRot = camUtils.camera_look_at(camPos,vray.Vector(0,0,0),vray.Vector(0,0,1)) 94 | # #print(camRot) 95 | # #exit() 96 | # camTransform = make_transform(camRot.x,camRot.y,0.0, 1, camPos) 97 | # #camTransform = make_transform(-math.pi/8,math.pi/8,0.0, 1, camPos) 98 | # renderView.transform = camTransform#camera_look_at(lookat/1000000) 99 | 100 | dome = renderer.classes.LightDome() 101 | dome.intensity = 0.5 102 | 103 | #place a light over and slightly behind the camera 104 | 105 | 106 | return None --------------------------------------------------------------------------------
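The repository is normally driven through main.py's command line, and the bundled samples/basic/basic.glb scene is a convenient smoke test. The snippet below is a minimal sketch of launching such a render from another Python script; it only uses flags defined in main.py above, the image size and output file name are arbitrary example values, and it assumes the vray Python bindings and the project's other dependencies are installed and that it is run from the repository root.

import subprocess
import sys

# Render samples/basic/basic.glb with a default camera and default lights.
# This simply shells out to main.py, so it is equivalent to typing the same
# command in a terminal.
cmd = [
    sys.executable, "main.py",
    "samples/basic/basic.glb",            # positional scene_file argument
    "--default_camera",                   # ignore any cameras stored in the file
    "--default_lights",                   # add default lighting
    "--size", "(800,800)",                # parsed with eval() into an (x, y) tuple
    "--output_file", "basic_render.exr",  # example output path; .exr is the default format
]
subprocess.run(cmd, check=True)

The equivalent shell command is: python main.py samples/basic/basic.glb --default_camera --default_lights --size "(800,800)" --output_file basic_render.exr (the size value is quoted so the parentheses reach the script intact).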