├── README.md
├── metadata
├── abo_classes_3d.txt
└── abo_to_shapenet_classes.json
└── render
├── GLBtoX.py
├── all_amazon_3dmodel_asins.txt
├── blender_misc.py
├── render.py
└── utils
├── __init__.py
├── armature.py
├── camera.py
├── composition.py
├── lighting.py
├── material.py
├── mesh.py
├── modifier.py
├── node.py
├── texture.py
└── utils.py
/README.md:
--------------------------------------------------------------------------------
1 | # Amazon Berkeley Objects (ABO)
2 |
3 | Repository for additional ABO data and annotations used in:
4 |
5 | [ABO: Dataset and Benchmarks for Real-World 3D Object Understanding](https://arxiv.org/pdf/2110.06199.pdf)
6 | Jasmine Collins, Shubham Goel, Kenan Deng, Achleshwar Luthra, Leon Xu, Erhan Gundogdu, Xi Zhang, Tomas F Yago Vicente, Thomas Dideriksen, Himanshu Arora, Matthieu Guillaumin, Jitendra Malik
7 | Conference on Computer Vision and Pattern Recognition (CVPR), 2022
8 |
9 | Data and metadata used for material prediction and multi-view retrieval experiments can be found on the primary dataset [website](https://amazon-berkeley-objects.s3.amazonaws.com/index.html). Data, metadata and code used for single-view 3D reconstruction experiments can be found here.
10 |
11 | ## Download 3D renders dataset
12 | The rendered ABO images + camera poses for 3D reconstruction benchmarking can be downloaded [here](https://amazon-berkeley-objects.s3.us-east-1.amazonaws.com/archives/abo-release-renders.zip) (223 GB). The dataset contains Blender renderings of 30 viewpoints for each of the 7,953 3D objects in ABO, as well as camera intrinsics and extrinsics for each rendering.
13 |
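14 | ## Loading the camera metadata
15 | 
16 | For each model, `render/render.py` writes a `transforms.json` alongside the renders; its `frames` list stores, per view, the image `file_path`, the 4x4 camera-to-world `transform_matrix`, and the intrinsics that were used (`angle_x`, `angle_y`, `sensor_width`, `sensor_fit`, ...). The sketch below shows one way to read it, assuming the released renders follow the same layout (`load_frames` is an illustrative helper, not part of the codebase):
17 | 
18 | ```python
19 | import json
20 | 
21 | import numpy as np
22 | 
23 | def load_frames(transforms_path):
24 |     """Read the per-view camera poses written by render/render.py (illustrative helper)."""
25 |     with open(transforms_path, 'r') as f:
26 |         data = json.load(f)
27 |     frames = []
28 |     for frame in data['frames']:
29 |         cam2world = np.array(frame['transform_matrix'])  # 4x4 camera-to-world matrix
30 |         fov_x = frame['camera']['angle_x']  # horizontal field of view, in radians
31 |         frames.append((frame['file_path'], cam2world, fov_x))
32 |     return frames
33 | ```
34 | 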
--------------------------------------------------------------------------------
/metadata/abo_to_shapenet_classes.json:
--------------------------------------------------------------------------------
1 | {
2 | "airplane": [],
3 | "bench": [
4 |     "bench"
5 | ],
6 | "boat": [],
7 | "car": [],
8 | "chair": [
9 |     "chair"
10 | ],
11 | "couch": [
12 | "sofa"
13 | ],
14 | "cabinet": [
15 | "cabinet",
16 | "dresser"
17 | ],
18 | "lamp": [
19 |     "lamp"
20 | ],
21 | "monitor": [],
22 | "other": [
23 | "bed",
24 | "rug",
25 | "picture frame or painting",
26 | "ottoman",
27 | "pillow",
28 | "plant or flower pot",
29 | "shelf",
30 | "mirror",
31 | "vase",
32 | "cart",
33 | "fan",
34 | "exercise weight",
35 | "container or basket",
36 | "ladder",
37 | "tent",
38 | "battery charger",
39 | "air conditioner",
40 | "mattress",
41 | "mount",
42 | "jar",
43 | "electrical cable",
44 | "laptop stand",
45 | "floor mat",
46 | "clothes rack",
47 | "clock",
48 | "heater",
49 | "figurine or sculpture",
50 | "exercise mat",
51 | "bag",
52 | "step stool",
53 | "speaker stand",
54 | "mouse pad",
55 | "clothes hook",
56 | "bottle rack",
57 | "wagon",
58 | "tray",
59 | "holder",
60 | "cooking pan",
61 | "candle holder",
62 | "book or journal",
63 | "trash can",
64 | "shredder",
65 | "instrument stand",
66 | "vanity",
67 | "drink coaster",
68 | "bowl",
69 | "birdhouse",
70 | "sports equipment",
71 | "soap dispenser",
72 | "office appliance",
73 | "fire pit",
74 | "file folder",
75 | "exercise equipment",
76 | "easel",
77 |     "cup"
78 | ],
79 | "phone": [],
80 | "rifle": [],
81 | "speaker": [],
82 | "table": [
83 |     "table"
84 | ]
85 | }
86 |
--------------------------------------------------------------------------------
/render/GLBtoX.py:
--------------------------------------------------------------------------------
1 | """ Run as blender -b -P GLBtoX.py -- regexp
2 | Converts all glb files with names matching regexp in `SOURCE_DIR` to OBJ files with the same name in `TARGET_DIR`
3 | """
4 | import os
5 | import os.path as osp
6 | import re
7 | import sys
8 | import traceback
9 |
10 | import bpy
11 |
12 | sys.path.append(os.path.dirname(__file__))
13 | from blender_misc import eprint, initialize_blender_cuda, import_glb
14 |
15 | # basedir = 'C:\\Users\\Achleshwar\\Desktop\\UCB\\3d_sample_data\\3d_sample_data\\'
16 | SOURCE_DIR = '/home/jazzie/AMAZON3D146K/3dmodels/'
17 | TARGET_DIR = '/home/jazzie/AMAZON3D146K/3dmodels_obj/'
18 |
19 | def glb_to_X(glb_path: str, target_dir: str, out_format: str = 'obj') -> None:
20 | # Delete all objects in scene
21 | bpy.ops.object.select_all(action='SELECT')
22 | bpy.ops.object.delete()
23 |
24 | # Import glb
25 |     obj = import_glb(glb_path)
26 |
27 | # Select only the mesh object
28 | bpy.ops.object.select_all(action='DESELECT')
29 |     obj.select_set(True)
30 |
31 | # Target filepath
32 | portion = osp.splitext(osp.basename(glb_path))
33 | out_file = portion[0]+ "." + out_format
34 | out_path = osp.join(target_dir, out_file)
35 |
36 | # Export
37 | if out_format == 'obj':
38 | bpy.ops.export_scene.obj(filepath = out_path)
39 | elif out_format == 'ply':
40 | bpy.ops.export_mesh.ply(filepath = out_path)
41 | else:
42 |         raise ValueError(f'Unsupported output format: {out_format}')
43 |
44 | if __name__ == "__main__":
45 |
46 |     # Initialize blender
47 | initialize_blender_cuda()
48 |
49 | # Check if only part of data has to be rerun
50 | try:
51 | regexp = str(sys.argv[sys.argv.index('--') + 1])
52 | except (IndexError, ValueError):
53 | regexp = ".*"
54 | pattern = re.compile(regexp)
55 |
56 | for _mi, model_name in enumerate(os.listdir(SOURCE_DIR)):
57 | if not pattern.match(model_name):
58 |             print(f'Skipping {_mi:04d} (no regexp match): {model_name}')
59 |             continue
60 |         if osp.splitext(model_name)[1] != '.glb':
61 |             print(f'Skipping {_mi:04d} (not a .glb file): {model_name}')
62 |             continue
63 | print(f'{_mi:04d}: {model_name}')
64 | eprint(f'{_mi:04d}: {model_name}')
65 | glb_path = f'{SOURCE_DIR}/{model_name}'
66 |
67 | try:
68 | glb_to_X(glb_path, TARGET_DIR, out_format='obj')
69 |         except Exception:
70 | eprint("*** failed", model_name)
71 | exc_type, exc_value, exc_traceback = sys.exc_info()
72 | eprint("*** print_tb:")
73 | traceback.print_tb(exc_traceback, limit=1, file=sys.stderr)
74 | eprint("*** print_exception:")
75 | # exc_type below is ignored on 3.5 and later
76 | traceback.print_exception(exc_type, exc_value, exc_traceback,
77 | limit=2, file=sys.stderr)
78 | print('Done')
79 | eprint('Done')
80 |
--------------------------------------------------------------------------------
/render/blender_misc.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os.path as osp
3 | import requests
4 |
5 | import bpy
6 |
7 |
8 | def eprint(*args, **kwargs):
9 | print(*args, file=sys.stderr, **kwargs)
10 |
11 | def initialize_blender_cuda():
12 | # FROM https://gist.github.com/S1U/13b8efe2c616a25d99de3d2ac4b34e86
13 | # Mark all scene devices as GPU for cycles
14 | bpy.context.scene.render.engine = 'CYCLES'
15 | bpy.context.scene.cycles.device = 'GPU'
16 |
17 | eprint("--------------- SCENE LIST ---------------")
18 | for scene in bpy.data.scenes:
19 | eprint(scene.name)
20 | scene.cycles.device = 'GPU'
21 |
22 | # Enable CUDA
23 | bpy.context.preferences.addons['cycles'].preferences.compute_device_type = 'CUDA'
24 |
25 | # Enable and list all devices, or optionally disable CPU
26 | eprint("----------------------------------------------")
27 | eprint(bpy.context.preferences.addons['cycles'].preferences.get_devices())
28 | eprint("----------------------------------------------")
29 | print("----------------------------------------------")
30 | for devices in bpy.context.preferences.addons['cycles'].preferences.get_devices():
31 | eprint(devices)
32 | for d in devices:
33 | d.use = True
34 | if d.type == 'CPU':
35 | d.use = False
36 | eprint("Device '{}' type {} : {}" . format(d.name, d.type, d.use))
37 | print("Device '{}' type {} : {}" . format(d.name, d.type, d.use))
38 | eprint("----------------------------------------------")
39 | print("----------------------------------------------")
40 |
41 | def import_glb(glb_path) -> bpy.types.Object:
42 | """
43 | Import GLB at glb_path, return corresponding mesh object
44 | Assumes the scene is empty
45 | """
46 | status = bpy.ops.import_scene.gltf(filepath=glb_path)
47 | assert('FINISHED' in status)
48 | bpy.ops.object.select_all(action='SELECT')
49 | objects = bpy.context.selected_objects[:]
50 | obj = [o for o in objects if o.type=='MESH'][0]
51 | obj.rotation_euler = 0,0,0 # clear default rotation
52 | obj.location = 0,0,0 # clear default translation
53 | bpy.context.view_layer.update()
54 | return obj
55 |
56 | def hdrihaven_fetch(hdri_name: str, res='4k', out_dir='hdris/'):
57 | # download hdri if it doesn't exist
58 | hdri_path = f'{out_dir}/{hdri_name}_{res}.hdr'
59 | if not osp.isfile(hdri_path):
60 | url = f'https://hdrihaven.com/files/hdris/{hdri_name}_{res}.hdr'
61 | print(f'Downloading HDRI from {url}')
62 | r = requests.get(url)
63 | with open(hdri_path, 'wb') as f:
64 | f.write(r.content)
65 | # Retrieve HTTP meta-data
66 | print(r.status_code)
67 | print(r.headers['content-type'])
68 | print(r.encoding)
69 | return hdri_path
70 |
--------------------------------------------------------------------------------
/render/render.py:
--------------------------------------------------------------------------------
1 | """
2 | Run as: blender -b -P render.py -- regexp hdriname
3 |
4 | Renders all models whose model_name matches regexp.
5 | Downloads the HDRI named hdriname from hdrihaven.com.
6 | """
7 | import json
8 | import os
9 | import os.path as osp
10 | import re
11 | import socket
12 | import sys
13 | import traceback
14 | from shutil import copyfile
15 | import time
16 |
17 | import bpy
18 | import numpy as np
19 |
20 | # Add this folder to path
21 | sys.path.append(osp.dirname(osp.abspath(__file__)))
22 | import utils
23 | from blender_misc import (eprint, hdrihaven_fetch, import_glb,
24 | initialize_blender_cuda)
25 |
26 | MODELS_DIR = '/home/jazzie/AMAZON3D146K/3dmodels/' # em
27 | if not osp.isdir(MODELS_DIR):
28 |     # Point MODELS_DIR at the directory containing the ABO .glb models
29 |     raise ValueError(f'MODELS_DIR does not exist: {MODELS_DIR}')
30 |
31 | try:
32 | # Enter any HDRI name from hdrihaven.com
33 | HDRI_NAME = str(sys.argv[sys.argv.index('--') + 2])
34 | except (IndexError, ValueError):
35 | HDRI_NAME = 'photo_studio_01'
36 | print('HDRI', HDRI_NAME)
37 | eprint('HDRI', HDRI_NAME)
38 | RESULTS_DIR = f'/home/jazzie/AMAZON3D146K/renders/{HDRI_NAME}'
39 | RGB_SAVE_PATH = '{model_name:s}_{i:02d}'
40 | USE_ENV_LIGHTING = True
41 | ENV_LIGHTING_PATH = hdrihaven_fetch(HDRI_NAME, res='4k')
42 |
43 | RESUME = True
44 | VIEWS = 30
45 | RESOLUTION = 1024
46 | RENDER_DEPTH = False
47 | RENDER_NORMALS = False
48 | COLOR_DEPTH = 16
49 | DEPTH_FORMAT = 'OPEN_EXR'
50 | COLOR_FORMAT = 'PNG'
51 | NORMAL_FORMAT = 'PNG'
52 | CAMERA_FOV_RANGE = [40, 40]
53 | UPPER_VIEWS = True
54 | MIN_ELEVATION = 0 #degrees (top:0 -> bottom:180)
55 | MAX_ELEVATION = 100 #degrees (top:0 -> bottom:180)
56 | LIGHT_NUM = 1
57 | LIGHT_ENERGY = 10
58 | RANDOM_SEED = 0xaaaa_aaaa_aaaa_aaaa
59 |
60 | default_rng = np.random.default_rng(RANDOM_SEED)
61 |
62 | def remove_prefix(text, prefix):
63 | if text.startswith(prefix):
64 | return text[len(prefix):]
65 | return text
66 |
67 | def parent_obj_to_camera(b_camera):
68 | origin = (0, 0, 0)
69 | b_empty = bpy.data.objects.new("Empty", None)
70 | b_empty.location = origin
71 | b_camera.parent = b_empty # setup parenting
72 | bpy.context.scene.collection.objects.link(b_empty)
73 | bpy.context.view_layer.objects.active = b_empty
74 | return b_empty
75 |
76 | def listify_matrix(matrix):
77 | matrix_list = []
78 | for row in matrix:
79 | matrix_list.append(list(row))
80 | return matrix_list
81 |
82 | def add_environment_lighting(scene):
83 | world = scene.world
84 | world.use_nodes = True
85 |
86 | enode = world.node_tree.nodes.new('ShaderNodeTexEnvironment')
87 | enode.image = bpy.data.images.load(ENV_LIGHTING_PATH)
88 |
89 | node_tree = world.node_tree
90 | node_tree.links.new(enode.outputs['Color'], node_tree.nodes['Background'].inputs['Color'])
91 |
92 | def setup_nodegraph(scene):
93 | # Render Optimizations
94 | scene.render.use_persistent_data = True
95 |
96 | # Set up rendering of depth map.
97 | scene.use_nodes = True
98 | tree = scene.node_tree
99 | links = tree.links
100 |
101 | # Add passes for additionally dumping albedo and normals.
102 | scene.view_layers["View Layer"].use_pass_normal = True
103 | # scene.view_layers["View Layer"].use_pass_color = True
104 |
105 | # Create input render layer node.
106 | render_layers = tree.nodes.new('CompositorNodeRLayers')
107 |
108 | if RENDER_DEPTH:
109 | depth_file_output = tree.nodes.new(type="CompositorNodeOutputFile")
110 | depth_file_output.label = 'Depth Output'
111 | links.new(render_layers.outputs['Depth'], depth_file_output.inputs[0])
112 | depth_file_output.format.file_format = str(DEPTH_FORMAT)
113 | depth_file_output.base_path = ''
114 | else:
115 | depth_file_output = None
116 |
117 | if RENDER_NORMALS:
118 | normal_file_output = tree.nodes.new(type="CompositorNodeOutputFile")
119 | normal_file_output.label = 'Normal Output'
120 | links.new(render_layers.outputs['Normal'], normal_file_output.inputs[0])
121 | normal_file_output.format.file_format = str(NORMAL_FORMAT)
122 | normal_file_output.base_path = ''
123 | else:
124 | normal_file_output = None
125 |
126 | # albedo_file_output = tree.nodes.new(type="CompositorNodeOutputFile")
127 | # albedo_file_output.label = 'Albedo Output'
128 | # links.new(render_layers.outputs['Color'], albedo_file_output.inputs[0])
129 | return depth_file_output, normal_file_output
130 |
131 | def create_random_point_lights(number, radius, energy=10, rng=default_rng):
132 | lights = []
133 |
134 | for i in range(number):
135 | # create light datablock, set attributes
136 | light_data = bpy.data.lights.new(name=f'ptlight{i}', type='SUN')
137 | light_data.energy = energy
138 | light_data.angle = 3.14159
139 | # light_data.falloff_type = 'INVERSE_LINEAR'
140 |
141 | # create new object with our light datablock
142 | light_object = bpy.data.objects.new(name=f'ptlight{i}', object_data=light_data)
143 |
144 | #change location
145 | light_object.location = rng.uniform(-1., 1., size=3)
146 | light_object.location *= radius / np.linalg.norm(light_object.location)
147 |
148 | lights.append(light_object)
149 |
150 | for light in lights:
151 | # link light object
152 | bpy.context.collection.objects.link(light)
153 |
154 | return lights
155 |
156 | def render_multiple(obj_path, output_dir, rng = default_rng, model_name=''):
157 |
158 | if not os.path.exists(output_dir):
159 | os.makedirs(output_dir)
160 |
161 | if RESUME:
162 | try:
163 | with open(output_dir + '/' + 'transforms.json', 'r') as out_file:
164 | data = json.load(out_file)
165 | if len(data['frames'])>=VIEWS:
166 | print('#' * 30)
167 | print('#' * 30)
168 | print('#' * 30)
169 | print('#' * 30)
170 | print(f'Returning because enough frames ({len(data["frames"])}) are already present in transforms.json')
171 | print('#' * 30)
172 | print('#' * 30)
173 | print('#' * 30)
174 | print('#' * 30)
175 | return
176 | except FileNotFoundError:
177 | pass
178 |
179 | # Clear scene
180 | utils.clean_objects()
181 |
182 | # Import obj
183 | obj_object = import_glb(obj_path)
184 | print('Imported name: ', obj_object.name, flush=True)
185 | verts = np.array([tuple(obj_object.matrix_world @ v.co) for v in obj_object.data.vertices])
186 | vmin = verts.min(axis=0)
187 | vmax = verts.max(axis=0)
188 | vcen = (vmin+vmax)/2
189 | obj_size = np.abs(verts - vcen).max()
190 |
191 | scene = bpy.context.scene
192 |
193 | # Setup Node graph for rendering rgbs,depth,normals
194 | (depth_file_output, normal_file_output) = setup_nodegraph(scene)
195 |
196 | # Add random lighting
197 | if USE_ENV_LIGHTING:
198 | add_environment_lighting(scene)
199 | light_objects = []
200 | else:
201 | light_objects = create_random_point_lights(LIGHT_NUM, 3*obj_size, energy=LIGHT_ENERGY)
202 |
203 | # Create collection for objects not to render with background
204 |     objs = [ob for ob in scene.objects if ob.type == 'EMPTY' and 'Empty' in ob.name]
205 | bpy.ops.object.delete({"selected_objects": objs})
206 |
207 | # Setup camera, constraint to empty object
208 | cam = utils.create_camera(location=(0, 0, 1))
209 | cam.data.sensor_fit = 'HORIZONTAL'
210 | cam.data.sensor_width = 36.0
211 | cam.data.sensor_height = 36.0
212 | b_empty = parent_obj_to_camera(cam)
213 | utils.add_track_to_constraint(cam, b_empty)
214 |
215 | # Move everything to be centered at vcen
216 | b_empty.location = vcen
217 |
218 | for light in light_objects:
219 | light.location += b_empty.location
220 |
221 | # Image settings
222 | scene.camera = cam
223 | scene.render.engine = 'CYCLES'
224 | scene.render.image_settings.file_format = str(COLOR_FORMAT)
225 | scene.render.image_settings.color_depth = str(COLOR_DEPTH)
226 | scene.render.resolution_x = RESOLUTION
227 | scene.render.resolution_y = RESOLUTION
228 | scene.render.resolution_percentage = 100
229 | scene.render.dither_intensity = 0.0
230 | scene.render.film_transparent = True
231 | scene.view_layers[0].cycles.use_denoising = True
232 | scene.cycles.samples = 128
233 |
234 | out_data = {
235 | 'obj_path':remove_prefix(obj_path, MODELS_DIR),
236 | }
237 | out_data['frames'] = []
238 |
239 | for i in range(0, VIEWS):
240 | scene.render.filepath = output_dir + '/' + RGB_SAVE_PATH.format(i=i, model_name=model_name)
241 | if UPPER_VIEWS:
242 | min_rot0 = np.cos((MIN_ELEVATION)*np.pi/180)
243 | max_rot0 = np.cos((MAX_ELEVATION)*np.pi/180)
244 | rot = rng.uniform(0, 1, size=3) * (max_rot0-min_rot0,0,2*np.pi)
245 | rot[0] = np.arccos(rot[0] + min_rot0)
246 | b_empty.rotation_euler = rot
247 | else:
248 | b_empty.rotation_euler = rng.uniform(0, 2*np.pi, size=3)
249 |
250 | # Update camera location and angle
251 | bpy.context.view_layer.update()
252 | # cam = scene.camera
253 | cam.data.angle = rng.uniform(CAMERA_FOV_RANGE[0],CAMERA_FOV_RANGE[1]) * np.pi/180
254 | cam.location = (0, 0, 1.8 * obj_size/np.tan(cam.data.angle/2))
255 | # cam.data.angle = 0.691111147403717
256 | # cam.location = (0,0,4/9)
257 | bpy.context.view_layer.update()
258 |
259 | if RENDER_DEPTH:
260 | depth_file_output.file_slots[0].path = scene.render.filepath + "_depth_"
261 | if RENDER_NORMALS:
262 | normal_file_output.file_slots[0].path = scene.render.filepath + "_normal_"
263 |
264 | bpy.ops.render.render(write_still=True) # render still
265 |
266 | bpy.context.view_layer.update()
267 | frame_data = {
268 | 'file_path': remove_prefix(scene.render.filepath, MODELS_DIR),
269 | 'transform_matrix': listify_matrix(cam.matrix_world),
270 |
271 | # Independent components that make up transformation matrix
272 | 'camera':{
273 | 'angle_x': cam.data.angle_x,
274 | 'angle_y': cam.data.angle_y,
275 | 'shift_x': cam.data.shift_x,
276 | 'shift_y': cam.data.shift_y,
277 | 'sensor_height': cam.data.sensor_height,
278 | 'sensor_width': cam.data.sensor_width,
279 | 'sensor_fit': cam.data.sensor_fit,
280 | # 'location': list(cam.location),
281 | # 'scale': list(cam.scale),
282 | # 'rotation_quaternion': list(cam.rotation_quaternion),
283 | # 'be_location': list(b_empty.location),
284 | # 'be_scale': list(b_empty.scale),
285 | # 'be_rotation_euler': list(b_empty.rotation_euler),
286 | # 'be_rotation_matrix': listify_matrix(b_empty.matrix_world),
287 | }
288 | }
289 | out_data['frames'].append(frame_data)
290 |
291 | with open(output_dir + '/' + 'transforms.json', 'w') as out_file:
292 | json.dump(out_data, out_file, indent=4)
293 |
294 |
295 | if __name__ == "__main__":
296 |
297 | print('Host', socket.gethostname())
298 | eprint('Host', socket.gethostname())
299 |
300 |     # Initialize blender
301 | initialize_blender_cuda()
302 |
303 | # Check if only part of data has to be rerun
304 | try:
305 | regexp = str(sys.argv[sys.argv.index('--') + 1])
306 | except (IndexError, ValueError):
307 | regexp = ".*"
308 | pattern = re.compile(regexp)
309 |
310 | for _mi, model_fname in enumerate(os.listdir(MODELS_DIR)):
311 | if not pattern.match(model_fname):
312 | continue
313 | model_name, model_ext = osp.splitext(model_fname)
314 |         if model_ext != '.glb':
315 | continue
316 | print(f'{_mi:04d}: {model_name}')
317 | eprint(f'{_mi:04d}: {model_name}')
318 | OBJ_PATH = f'{MODELS_DIR}/{model_name}{model_ext}'
319 |
320 | try:
321 |
322 | # Reset RANDOM_SEED for each instance
323 | rng = np.random.default_rng(RANDOM_SEED)
324 |
325 | OUTPUT_DIR = f'{RESULTS_DIR}/{model_name}/'
326 | render_multiple(
327 | OBJ_PATH,
328 | OUTPUT_DIR,
329 | rng = rng,
330 | model_name = model_name,
331 | )
332 | except RuntimeError:
333 | # Sleep indefinitely because we have a buggy GPU
334 | print(f'Sleeping indefinitely because buggy GPU on {socket.gethostname()}')
335 | eprint(f'Sleeping indefinitely because buggy GPU on {socket.gethostname()}')
336 | sys.stdout.flush()
337 | sys.stderr.flush()
338 | while True:
339 | time.sleep(1)
340 |         except Exception:
341 | eprint("*** failed", model_name)
342 | exc_type, exc_value, exc_traceback = sys.exc_info()
343 | eprint("*** print_tb:")
344 | traceback.print_tb(exc_traceback, limit=1, file=sys.stderr)
345 | eprint("*** print_exception:")
346 | # exc_type below is ignored on 3.5 and later
347 | traceback.print_exception(exc_type, exc_value, exc_traceback,
348 | limit=2, file=sys.stderr)
349 |
--------------------------------------------------------------------------------
/render/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from utils.utils import *
2 | from utils.armature import *
3 | from utils.camera import *
4 | from utils.composition import *
5 | from utils.lighting import *
6 | from utils.material import *
7 | from utils.mesh import *
8 | from utils.modifier import *
9 | from utils.node import *
10 |
--------------------------------------------------------------------------------
/render/utils/armature.py:
--------------------------------------------------------------------------------
1 | import bpy
2 | import mathutils
3 | from utils.mesh import create_mesh_from_pydata
4 | from utils.modifier import add_subdivision_surface_modifier
5 | from typing import Any, Dict, Iterable, List, Tuple
6 |
7 |
8 | def create_armature_mesh(scene: bpy.types.Scene, armature_object: bpy.types.Object, mesh_name: str) -> bpy.types.Object:
9 |     assert armature_object.type == 'ARMATURE', 'armature_object must be an ARMATURE object'
10 |     assert len(armature_object.data.bones) != 0, 'armature_object must have at least one bone'
11 |
12 | def add_rigid_vertex_group(target_object: bpy.types.Object, name: str, vertex_indices: Iterable[int]) -> None:
13 | new_vertex_group = target_object.vertex_groups.new(name=name)
14 | for vertex_index in vertex_indices:
15 | new_vertex_group.add([vertex_index], 1.0, 'REPLACE')
16 |
17 | def generate_bone_mesh_pydata(radius: float, length: float) -> Tuple[List[mathutils.Vector], List[List[int]]]:
18 | base_radius = radius
19 | top_radius = 0.5 * radius
20 |
21 | vertices = [
22 | # Cross section of the base part
23 | mathutils.Vector((-base_radius, 0.0, +base_radius)),
24 | mathutils.Vector((+base_radius, 0.0, +base_radius)),
25 | mathutils.Vector((+base_radius, 0.0, -base_radius)),
26 | mathutils.Vector((-base_radius, 0.0, -base_radius)),
27 |
28 | # Cross section of the top part
29 | mathutils.Vector((-top_radius, length, +top_radius)),
30 | mathutils.Vector((+top_radius, length, +top_radius)),
31 | mathutils.Vector((+top_radius, length, -top_radius)),
32 | mathutils.Vector((-top_radius, length, -top_radius)),
33 |
34 | # End points
35 | mathutils.Vector((0.0, -base_radius, 0.0)),
36 | mathutils.Vector((0.0, length + top_radius, 0.0))
37 | ]
38 |
39 | faces = [
40 | # End point for the base part
41 | [8, 1, 0],
42 | [8, 2, 1],
43 | [8, 3, 2],
44 | [8, 0, 3],
45 |
46 | # End point for the top part
47 | [9, 4, 5],
48 | [9, 5, 6],
49 | [9, 6, 7],
50 | [9, 7, 4],
51 |
52 | # Side faces
53 | [0, 1, 5, 4],
54 | [1, 2, 6, 5],
55 | [2, 3, 7, 6],
56 | [3, 0, 4, 7],
57 | ]
58 |
59 | return vertices, faces
60 |
61 | armature_data: bpy.types.Armature = armature_object.data
62 |
63 | vertices: List[mathutils.Vector] = []
64 | faces: List[List[int]] = []
65 | vertex_groups: List[Dict[str, Any]] = []
66 |
67 | for bone in armature_data.bones:
68 | radius = 0.10 * (0.10 + bone.length)
69 | temp_vertices, temp_faces = generate_bone_mesh_pydata(radius, bone.length)
70 |
71 | vertex_index_offset = len(vertices)
72 |
73 | temp_vertex_group = {'name': bone.name, 'vertex_indices': []}
74 | for local_index, vertex in enumerate(temp_vertices):
75 | vertices.append(bone.matrix_local @ vertex)
76 | temp_vertex_group['vertex_indices'].append(local_index + vertex_index_offset)
77 | vertex_groups.append(temp_vertex_group)
78 |
79 | for face in temp_faces:
80 | if len(face) == 3:
81 | faces.append([
82 | face[0] + vertex_index_offset,
83 | face[1] + vertex_index_offset,
84 | face[2] + vertex_index_offset,
85 | ])
86 | else:
87 | faces.append([
88 | face[0] + vertex_index_offset,
89 | face[1] + vertex_index_offset,
90 | face[2] + vertex_index_offset,
91 | face[3] + vertex_index_offset,
92 | ])
93 |
94 | new_object = create_mesh_from_pydata(scene, vertices, faces, mesh_name, mesh_name)
95 | new_object.matrix_world = armature_object.matrix_world
96 |
97 | for vertex_group in vertex_groups:
98 | add_rigid_vertex_group(new_object, vertex_group['name'], vertex_group['vertex_indices'])
99 |
100 | armature_modifier = new_object.modifiers.new('Armature', 'ARMATURE')
101 | armature_modifier.object = armature_object
102 | armature_modifier.use_vertex_groups = True
103 |
104 | add_subdivision_surface_modifier(new_object, 1, is_simple=True)
105 | add_subdivision_surface_modifier(new_object, 2, is_simple=False)
106 |
107 | # Set the armature as the parent of the new object
108 | bpy.ops.object.select_all(action='DESELECT')
109 | new_object.select_set(True)
110 | armature_object.select_set(True)
111 | bpy.context.view_layer.objects.active = armature_object
112 | bpy.ops.object.parent_set(type='OBJECT')
113 |
114 | return new_object
115 |
--------------------------------------------------------------------------------
/render/utils/camera.py:
--------------------------------------------------------------------------------
1 | import bpy
2 | from typing import Tuple
3 |
4 |
5 | def create_camera(location: Tuple[float, float, float]) -> bpy.types.Object:
6 | bpy.ops.object.camera_add(location=location)
7 |
8 | return bpy.context.object
9 |
10 |
11 | def set_camera_params(camera: bpy.types.Camera,
12 | focus_target_object: bpy.types.Object,
13 | lens: float = 85.0,
14 | fstop: float = 1.4) -> None:
15 | # Simulate Sony's FE 85mm F1.4 GM
16 | camera.sensor_fit = 'HORIZONTAL'
17 | camera.sensor_width = 36.0
18 | camera.sensor_height = 24.0
19 | camera.lens = lens
20 | camera.dof.use_dof = True
21 | camera.dof.focus_object = focus_target_object
22 | camera.dof.aperture_fstop = fstop
23 | camera.dof.aperture_blades = 11
24 |
--------------------------------------------------------------------------------
/render/utils/composition.py:
--------------------------------------------------------------------------------
1 | import bpy
2 | from utils.node import set_socket_value_range, clean_nodes, arrange_nodes
3 |
4 |
5 | def add_split_tone_node_group() -> bpy.types.NodeGroup:
6 | group = bpy.data.node_groups.new(type="CompositorNodeTree", name="SplitToneSub")
7 |
8 | input_node = group.nodes.new("NodeGroupInput")
9 | group.inputs.new("NodeSocketColor", "Image")
10 | group.inputs.new("NodeSocketFloat", "Hue")
11 | group.inputs.new("NodeSocketFloat", "Saturation")
12 |
13 | solid_node = group.nodes.new(type="CompositorNodeCombHSVA")
14 | solid_node.inputs["S"].default_value = 1.0
15 | solid_node.inputs["V"].default_value = 1.0
16 | solid_node.inputs["A"].default_value = 1.0
17 |
18 | input_sep_node = group.nodes.new(type="CompositorNodeSepHSVA")
19 |
20 | overlay_node = group.nodes.new(type="CompositorNodeMixRGB")
21 | overlay_node.blend_type = 'OVERLAY'
22 |
23 | overlay_sep_node = group.nodes.new(type="CompositorNodeSepHSVA")
24 |
25 | comb_node = group.nodes.new(type="CompositorNodeCombHSVA")
26 |
27 | output_node = group.nodes.new("NodeGroupOutput")
28 | group.outputs.new("NodeSocketColor", "Image")
29 |
30 | group.links.new(input_node.outputs["Hue"], solid_node.inputs["H"])
31 | group.links.new(input_node.outputs["Saturation"], overlay_node.inputs["Fac"])
32 | group.links.new(input_node.outputs["Image"], overlay_node.inputs[1])
33 | group.links.new(solid_node.outputs["Image"], overlay_node.inputs[2])
34 | group.links.new(overlay_node.outputs["Image"], overlay_sep_node.inputs["Image"])
35 | group.links.new(input_node.outputs["Image"], input_sep_node.inputs["Image"])
36 | group.links.new(overlay_sep_node.outputs["H"], comb_node.inputs["H"])
37 | group.links.new(overlay_sep_node.outputs["S"], comb_node.inputs["S"])
38 | group.links.new(input_sep_node.outputs["V"], comb_node.inputs["V"])
39 | group.links.new(input_sep_node.outputs["A"], comb_node.inputs["A"])
40 | group.links.new(comb_node.outputs["Image"], output_node.inputs["Image"])
41 |
42 | arrange_nodes(group)
43 |
44 | # --------------------------------------------------------------------------
45 |
46 | group = bpy.data.node_groups.new(type="CompositorNodeTree", name="SplitTone")
47 |
48 | input_node = group.nodes.new("NodeGroupInput")
49 |
50 | group.inputs.new("NodeSocketColor", "Image")
51 | group.inputs.new("NodeSocketFloat", "HighlightsHue")
52 | group.inputs.new("NodeSocketFloat", "HighlightsSaturation")
53 | group.inputs.new("NodeSocketFloat", "ShadowsHue")
54 | group.inputs.new("NodeSocketFloat", "ShadowsSaturation")
55 | group.inputs.new("NodeSocketFloatFactor", "Balance")
56 |
57 | set_socket_value_range(group.inputs["HighlightsHue"])
58 | set_socket_value_range(group.inputs["HighlightsSaturation"])
59 | set_socket_value_range(group.inputs["ShadowsHue"])
60 | set_socket_value_range(group.inputs["ShadowsSaturation"])
61 | set_socket_value_range(group.inputs["Balance"], default_value=0.5)
62 |
63 | input_sep_node = group.nodes.new(type="CompositorNodeSepHSVA")
64 |
65 | subtract_node = group.nodes.new(type="CompositorNodeMath")
66 | subtract_node.inputs[0].default_value = 1.0
67 | subtract_node.operation = 'SUBTRACT'
68 | subtract_node.use_clamp = True
69 |
70 | multiply_node = group.nodes.new(type="CompositorNodeMath")
71 | multiply_node.inputs[1].default_value = 2.0
72 | multiply_node.operation = 'MULTIPLY'
73 | multiply_node.use_clamp = False
74 |
75 | power_node = group.nodes.new(type="CompositorNodeMath")
76 | power_node.operation = 'POWER'
77 | power_node.use_clamp = True
78 |
79 | shadows_node = group.nodes.new(type='CompositorNodeGroup')
80 | shadows_node.name = "Shadows"
81 | shadows_node.node_tree = bpy.data.node_groups["SplitToneSub"]
82 |
83 | highlights_node = group.nodes.new(type='CompositorNodeGroup')
84 | highlights_node.name = "Highlights"
85 | highlights_node.node_tree = bpy.data.node_groups["SplitToneSub"]
86 |
87 | comb_node = group.nodes.new(type="CompositorNodeMixRGB")
88 | comb_node.use_clamp = False
89 |
90 | output_node = group.nodes.new("NodeGroupOutput")
91 | group.outputs.new("NodeSocketColor", "Image")
92 |
93 | group.links.new(input_node.outputs["Image"], input_sep_node.inputs["Image"])
94 | group.links.new(input_node.outputs["Image"], shadows_node.inputs["Image"])
95 | group.links.new(input_node.outputs["ShadowsHue"], shadows_node.inputs["Hue"])
96 | group.links.new(input_node.outputs["ShadowsSaturation"], shadows_node.inputs["Saturation"])
97 | group.links.new(input_node.outputs["Image"], highlights_node.inputs["Image"])
98 | group.links.new(input_node.outputs["HighlightsHue"], highlights_node.inputs["Hue"])
99 | group.links.new(input_node.outputs["HighlightsSaturation"], highlights_node.inputs["Saturation"])
100 | group.links.new(input_node.outputs["Balance"], subtract_node.inputs[1])
101 | group.links.new(subtract_node.outputs["Value"], multiply_node.inputs[0])
102 | group.links.new(input_sep_node.outputs["V"], power_node.inputs[0])
103 | group.links.new(multiply_node.outputs["Value"], power_node.inputs[1])
104 | group.links.new(power_node.outputs["Value"], comb_node.inputs["Fac"])
105 | group.links.new(shadows_node.outputs["Image"], comb_node.inputs[1])
106 | group.links.new(highlights_node.outputs["Image"], comb_node.inputs[2])
107 | group.links.new(comb_node.outputs["Image"], output_node.inputs["Image"])
108 |
109 | arrange_nodes(group)
110 |
111 | return group
112 |
113 |
114 | def add_vignette_node_group() -> bpy.types.NodeGroup:
115 | group = bpy.data.node_groups.new(type="CompositorNodeTree", name="Vignette")
116 |
117 | input_node = group.nodes.new("NodeGroupInput")
118 | group.inputs.new("NodeSocketColor", "Image")
119 | group.inputs.new("NodeSocketFloat", "Amount")
120 | group.inputs["Amount"].default_value = 0.2
121 | group.inputs["Amount"].min_value = 0.0
122 | group.inputs["Amount"].max_value = 1.0
123 |
124 | lens_distortion_node = group.nodes.new(type="CompositorNodeLensdist")
125 | lens_distortion_node.inputs["Distort"].default_value = 1.000
126 |
127 | separate_rgba_node = group.nodes.new(type="CompositorNodeSepRGBA")
128 |
129 | blur_node = group.nodes.new(type="CompositorNodeBlur")
130 | blur_node.filter_type = 'GAUSS'
131 | blur_node.size_x = 300
132 | blur_node.size_y = 300
133 | blur_node.use_extended_bounds = True
134 |
135 | mix_node = group.nodes.new(type="CompositorNodeMixRGB")
136 | mix_node.blend_type = 'MULTIPLY'
137 |
138 | output_node = group.nodes.new("NodeGroupOutput")
139 | group.outputs.new("NodeSocketColor", "Image")
140 |
141 | group.links.new(input_node.outputs["Amount"], mix_node.inputs["Fac"])
142 | group.links.new(input_node.outputs["Image"], mix_node.inputs[1])
143 | group.links.new(input_node.outputs["Image"], lens_distortion_node.inputs["Image"])
144 | group.links.new(lens_distortion_node.outputs["Image"], separate_rgba_node.inputs["Image"])
145 | group.links.new(separate_rgba_node.outputs["A"], blur_node.inputs["Image"])
146 | group.links.new(blur_node.outputs["Image"], mix_node.inputs[2])
147 | group.links.new(mix_node.outputs["Image"], output_node.inputs["Image"])
148 |
149 | arrange_nodes(group)
150 |
151 | return group
152 |
153 |
154 | def create_split_tone_node(node_tree: bpy.types.NodeTree) -> bpy.types.Node:
155 | split_tone_node_group = add_split_tone_node_group()
156 |
157 | node = node_tree.nodes.new(type='CompositorNodeGroup')
158 | node.name = "SplitTone"
159 | node.node_tree = split_tone_node_group
160 |
161 | return node
162 |
163 |
164 | def create_vignette_node(node_tree: bpy.types.NodeTree) -> bpy.types.Node:
165 | vignette_node_group = add_vignette_node_group()
166 |
167 | node = node_tree.nodes.new(type='CompositorNodeGroup')
168 | node.name = "Vignette"
169 | node.node_tree = vignette_node_group
170 |
171 | return node
172 |
173 |
174 | def build_scene_composition(scene: bpy.types.Scene,
175 | vignette: float = 0.20,
176 | dispersion: float = 0.050,
177 | gain: float = 1.10,
178 | saturation: float = 1.10) -> None:
179 | scene.use_nodes = True
180 | clean_nodes(scene.node_tree.nodes)
181 |
182 | render_layer_node = scene.node_tree.nodes.new(type="CompositorNodeRLayers")
183 |
184 | vignette_node = create_vignette_node(scene.node_tree)
185 | vignette_node.inputs["Amount"].default_value = vignette
186 |
187 | lens_distortion_node = scene.node_tree.nodes.new(type="CompositorNodeLensdist")
188 | lens_distortion_node.inputs["Distort"].default_value = -dispersion * 0.40
189 | lens_distortion_node.inputs["Dispersion"].default_value = dispersion
190 |
191 | color_correction_node = scene.node_tree.nodes.new(type="CompositorNodeColorCorrection")
192 | color_correction_node.master_saturation = saturation
193 | color_correction_node.master_gain = gain
194 |
195 | split_tone_node = create_split_tone_node(scene.node_tree)
196 |
197 | glare_node = scene.node_tree.nodes.new(type="CompositorNodeGlare")
198 | glare_node.glare_type = 'FOG_GLOW'
199 | glare_node.quality = 'HIGH'
200 |
201 | composite_node = scene.node_tree.nodes.new(type="CompositorNodeComposite")
202 |
203 | scene.node_tree.links.new(render_layer_node.outputs['Image'], vignette_node.inputs['Image'])
204 | scene.node_tree.links.new(vignette_node.outputs['Image'], lens_distortion_node.inputs['Image'])
205 | scene.node_tree.links.new(lens_distortion_node.outputs['Image'], color_correction_node.inputs['Image'])
206 | scene.node_tree.links.new(color_correction_node.outputs['Image'], split_tone_node.inputs['Image'])
207 | scene.node_tree.links.new(split_tone_node.outputs['Image'], glare_node.inputs['Image'])
208 | scene.node_tree.links.new(glare_node.outputs['Image'], composite_node.inputs['Image'])
209 |
210 | arrange_nodes(scene.node_tree)
211 |
--------------------------------------------------------------------------------
/render/utils/lighting.py:
--------------------------------------------------------------------------------
1 | import bpy
2 | from typing import Optional, Tuple
3 |
4 |
5 | def create_area_light(location: Tuple[float, float, float] = (0.0, 0.0, 5.0),
6 | rotation: Tuple[float, float, float] = (0.0, 0.0, 0.0),
7 | size: float = 5.0,
8 | color: Tuple[float, float, float, float] = (1.00, 0.90, 0.80, 1.00),
9 | strength: float = 1000.0,
10 | name: Optional[str] = None) -> bpy.types.Object:
11 | if bpy.app.version >= (2, 80, 0):
12 | bpy.ops.object.light_add(type='AREA', location=location, rotation=rotation)
13 | else:
14 | bpy.ops.object.lamp_add(type='AREA', location=location, rotation=rotation)
15 |
16 | if name is not None:
17 | bpy.context.object.name = name
18 |
19 | light = bpy.context.object.data
20 | light.size = size
21 | light.use_nodes = True
22 | light.node_tree.nodes["Emission"].inputs["Color"].default_value = color
23 | light.energy = strength
24 |
25 | return bpy.context.object
26 |
27 |
28 | def create_sun_light(location: Tuple[float, float, float] = (0.0, 0.0, 5.0),
29 | rotation: Tuple[float, float, float] = (0.0, 0.0, 0.0),
30 | name: Optional[str] = None) -> bpy.types.Object:
31 | bpy.ops.object.light_add(type='SUN', location=location, rotation=rotation)
32 |
33 | if name is not None:
34 | bpy.context.object.name = name
35 |
36 | return bpy.context.object
37 |
--------------------------------------------------------------------------------
/render/utils/material.py:
--------------------------------------------------------------------------------
1 | import bpy
2 | from typing import Tuple
3 | from utils.node import set_socket_value_range, arrange_nodes, create_frame_node, clean_nodes
4 |
5 |
6 | def create_texture_node(node_tree: bpy.types.NodeTree, path: str, is_color_data: bool) -> bpy.types.Node:
7 | # Instantiate a new texture image node
8 | texture_node = node_tree.nodes.new(type='ShaderNodeTexImage')
9 |
10 | # Open an image and set it to the node
11 | texture_node.image = bpy.data.images.load(path)
12 |
13 | # Set other parameters
14 |     texture_node.image.colorspace_settings.is_data = not is_color_data
15 |
16 | # Return the node
17 | return texture_node
18 |
19 |
20 | def set_principled_node(principled_node: bpy.types.Node,
21 | base_color: Tuple[float, float, float, float] = (0.6, 0.6, 0.6, 1.0),
22 | subsurface: float = 0.0,
23 | subsurface_color: Tuple[float, float, float, float] = (0.8, 0.8, 0.8, 1.0),
24 | subsurface_radius: Tuple[float, float, float] = (1.0, 0.2, 0.1),
25 | metallic: float = 0.0,
26 | specular: float = 0.5,
27 | specular_tint: float = 0.0,
28 | roughness: float = 0.5,
29 | anisotropic: float = 0.0,
30 | anisotropic_rotation: float = 0.0,
31 | sheen: float = 0.0,
32 | sheen_tint: float = 0.5,
33 | clearcoat: float = 0.0,
34 | clearcoat_roughness: float = 0.03,
35 | ior: float = 1.45,
36 | transmission: float = 0.0,
37 | transmission_roughness: float = 0.0) -> None:
38 | principled_node.inputs['Base Color'].default_value = base_color
39 | principled_node.inputs['Subsurface'].default_value = subsurface
40 | principled_node.inputs['Subsurface Color'].default_value = subsurface_color
41 | principled_node.inputs['Subsurface Radius'].default_value = subsurface_radius
42 | principled_node.inputs['Metallic'].default_value = metallic
43 | principled_node.inputs['Specular'].default_value = specular
44 | principled_node.inputs['Specular Tint'].default_value = specular_tint
45 | principled_node.inputs['Roughness'].default_value = roughness
46 | principled_node.inputs['Anisotropic'].default_value = anisotropic
47 | principled_node.inputs['Anisotropic Rotation'].default_value = anisotropic_rotation
48 | principled_node.inputs['Sheen'].default_value = sheen
49 | principled_node.inputs['Sheen Tint'].default_value = sheen_tint
50 | principled_node.inputs['Clearcoat'].default_value = clearcoat
51 | principled_node.inputs['Clearcoat Roughness'].default_value = clearcoat_roughness
52 | principled_node.inputs['IOR'].default_value = ior
53 | principled_node.inputs['Transmission'].default_value = transmission
54 | principled_node.inputs['Transmission Roughness'].default_value = transmission_roughness
55 |
56 |
57 | def build_pbr_nodes(node_tree: bpy.types.NodeTree,
58 | base_color: Tuple[float, float, float, float] = (0.6, 0.6, 0.6, 1.0),
59 | metallic: float = 0.0,
60 | specular: float = 0.5,
61 | roughness: float = 0.5,
62 | sheen: float = 0.0) -> None:
63 | output_node = node_tree.nodes.new(type='ShaderNodeOutputMaterial')
64 | principled_node = node_tree.nodes.new(type='ShaderNodeBsdfPrincipled')
65 | node_tree.links.new(principled_node.outputs['BSDF'], output_node.inputs['Surface'])
66 |
67 | set_principled_node(principled_node=principled_node,
68 | base_color=base_color,
69 | metallic=metallic,
70 | specular=specular,
71 | roughness=roughness,
72 | sheen=sheen)
73 |
74 | arrange_nodes(node_tree)
75 |
76 |
77 | def build_checker_board_nodes(node_tree: bpy.types.NodeTree, size: float) -> None:
78 | output_node = node_tree.nodes.new(type='ShaderNodeOutputMaterial')
79 | principled_node = node_tree.nodes.new(type='ShaderNodeBsdfPrincipled')
80 | checker_texture_node = node_tree.nodes.new(type='ShaderNodeTexChecker')
81 |
82 | set_principled_node(principled_node=principled_node)
83 | checker_texture_node.inputs['Scale'].default_value = size
84 |
85 | node_tree.links.new(checker_texture_node.outputs['Color'], principled_node.inputs['Base Color'])
86 | node_tree.links.new(principled_node.outputs['BSDF'], output_node.inputs['Surface'])
87 |
88 | arrange_nodes(node_tree)
89 |
90 |
91 | def build_matcap_nodes(node_tree: bpy.types.NodeTree, image_path: str) -> None:
92 | tex_coord_node = node_tree.nodes.new(type='ShaderNodeTexCoord')
93 | vector_transform_node = node_tree.nodes.new(type='ShaderNodeVectorTransform')
94 | mapping_node = node_tree.nodes.new(type='ShaderNodeMapping')
95 | texture_image_node = create_texture_node(node_tree, image_path, True)
96 |     emission_node = node_tree.nodes.new(type='ShaderNodeEmission')
97 | output_node = node_tree.nodes.new(type='ShaderNodeOutputMaterial')
98 |
99 | create_frame_node(node_tree, (tex_coord_node, vector_transform_node, mapping_node),
100 | name="MatCap UV",
101 | label="MatCap UV")
102 |
103 | vector_transform_node.vector_type = "VECTOR"
104 | vector_transform_node.convert_from = "OBJECT"
105 | vector_transform_node.convert_to = "CAMERA"
106 |
107 | mapping_node.vector_type = "TEXTURE"
108 | if bpy.app.version >= (2, 81, 0):
109 | mapping_node.inputs["Location"].default_value = (1.0, 1.0, 0.0)
110 | mapping_node.inputs["Scale"].default_value = (2.0, 2.0, 1.0)
111 | else:
112 | mapping_node.translation = (1.0, 1.0, 0.0)
113 | mapping_node.scale = (2.0, 2.0, 1.0)
114 |
115 | node_tree.links.new(tex_coord_node.outputs['Normal'], vector_transform_node.inputs['Vector'])
116 | node_tree.links.new(vector_transform_node.outputs['Vector'], mapping_node.inputs['Vector'])
117 | node_tree.links.new(mapping_node.outputs['Vector'], texture_image_node.inputs['Vector'])
118 |     node_tree.links.new(texture_image_node.outputs['Color'], emission_node.inputs['Color'])
119 |     node_tree.links.new(emission_node.outputs['Emission'], output_node.inputs['Surface'])
120 |
121 | arrange_nodes(node_tree)
122 |
123 |
124 | def build_pbr_textured_nodes(
125 | node_tree: bpy.types.NodeTree,
126 | color_texture_path: str = "",
127 | metallic_texture_path: str = "",
128 | roughness_texture_path: str = "",
129 | normal_texture_path: str = "",
130 | displacement_texture_path: str = "",
131 | ambient_occlusion_texture_path: str = "",
132 | scale: Tuple[float, float, float] = (1.0, 1.0, 1.0)) -> None:
133 | output_node = node_tree.nodes.new(type='ShaderNodeOutputMaterial')
134 | principled_node = node_tree.nodes.new(type='ShaderNodeBsdfPrincipled')
135 | node_tree.links.new(principled_node.outputs['BSDF'], output_node.inputs['Surface'])
136 |
137 | coord_node = node_tree.nodes.new(type='ShaderNodeTexCoord')
138 | mapping_node = node_tree.nodes.new(type='ShaderNodeMapping')
139 | mapping_node.vector_type = 'TEXTURE'
140 | mapping_node.scale = scale
141 | node_tree.links.new(coord_node.outputs['UV'], mapping_node.inputs['Vector'])
142 |
143 | if color_texture_path != "":
144 | texture_node = create_texture_node(node_tree, color_texture_path, True)
145 | node_tree.links.new(mapping_node.outputs['Vector'], texture_node.inputs['Vector'])
146 | if ambient_occlusion_texture_path != "":
147 | ao_texture_node = create_texture_node(node_tree, ambient_occlusion_texture_path, False)
148 | node_tree.links.new(mapping_node.outputs['Vector'], ao_texture_node.inputs['Vector'])
149 | mix_node = node_tree.nodes.new(type='ShaderNodeMixRGB')
150 | mix_node.blend_type = 'MULTIPLY'
151 | node_tree.links.new(texture_node.outputs['Color'], mix_node.inputs['Color1'])
152 | node_tree.links.new(ao_texture_node.outputs['Color'], mix_node.inputs['Color2'])
153 | node_tree.links.new(mix_node.outputs['Color'], principled_node.inputs['Base Color'])
154 | else:
155 | node_tree.links.new(texture_node.outputs['Color'], principled_node.inputs['Base Color'])
156 |
157 | if metallic_texture_path != "":
158 | texture_node = create_texture_node(node_tree, metallic_texture_path, False)
159 | node_tree.links.new(mapping_node.outputs['Vector'], texture_node.inputs['Vector'])
160 | node_tree.links.new(texture_node.outputs['Color'], principled_node.inputs['Metallic'])
161 |
162 | if roughness_texture_path != "":
163 | texture_node = create_texture_node(node_tree, roughness_texture_path, False)
164 | node_tree.links.new(mapping_node.outputs['Vector'], texture_node.inputs['Vector'])
165 | node_tree.links.new(texture_node.outputs['Color'], principled_node.inputs['Roughness'])
166 |
167 | if normal_texture_path != "":
168 | texture_node = create_texture_node(node_tree, normal_texture_path, False)
169 | node_tree.links.new(mapping_node.outputs['Vector'], texture_node.inputs['Vector'])
170 | normal_map_node = node_tree.nodes.new(type='ShaderNodeNormalMap')
171 | node_tree.links.new(texture_node.outputs['Color'], normal_map_node.inputs['Color'])
172 | node_tree.links.new(normal_map_node.outputs['Normal'], principled_node.inputs['Normal'])
173 |
174 | if displacement_texture_path != "":
175 | texture_node = create_texture_node(node_tree, displacement_texture_path, False)
176 | node_tree.links.new(mapping_node.outputs['Vector'], texture_node.inputs['Vector'])
177 | node_tree.links.new(texture_node.outputs['Color'], output_node.inputs['Displacement'])
178 |
179 | arrange_nodes(node_tree)
180 |
181 |
182 | def add_parametric_color_ramp() -> bpy.types.NodeGroup:
183 | group = bpy.data.node_groups.new(type="ShaderNodeTree", name="Parametric Color Ramp")
184 |
185 | # Input
186 |
187 | input_node = group.nodes.new(type="NodeGroupInput")
188 | group.inputs.new("NodeSocketFloatFactor", "Fac")
189 | group.inputs.new("NodeSocketColor", "Color1")
190 | group.inputs.new("NodeSocketColor", "Color2")
191 | group.inputs.new("NodeSocketFloatFactor", "Pos1")
192 | group.inputs.new("NodeSocketFloatFactor", "Pos2")
193 |
194 | set_socket_value_range(group.inputs["Fac"], default_value=0.5)
195 | set_socket_value_range(group.inputs["Pos1"], default_value=0.0)
196 | set_socket_value_range(group.inputs["Pos2"], default_value=1.0)
197 |
198 | # Math
199 |
200 | denominator_subtract_node = group.nodes.new(type="ShaderNodeMath")
201 | denominator_subtract_node.operation = "SUBTRACT"
202 | denominator_subtract_node.use_clamp = True
203 |
204 | numerator_subtract_node = group.nodes.new(type="ShaderNodeMath")
205 | numerator_subtract_node.operation = "SUBTRACT"
206 | numerator_subtract_node.use_clamp = True
207 |
208 | divide_node = group.nodes.new(type="ShaderNodeMath")
209 | divide_node.operation = "DIVIDE"
210 | divide_node.use_clamp = True
211 |
212 | group.links.new(input_node.outputs["Pos2"], denominator_subtract_node.inputs[0])
213 | group.links.new(input_node.outputs["Fac"], denominator_subtract_node.inputs[1])
214 |
215 | group.links.new(input_node.outputs["Pos2"], numerator_subtract_node.inputs[0])
216 | group.links.new(input_node.outputs["Pos1"], numerator_subtract_node.inputs[1])
217 |
218 | group.links.new(denominator_subtract_node.outputs["Value"], divide_node.inputs[0])
219 | group.links.new(numerator_subtract_node.outputs["Value"], divide_node.inputs[1])
220 |
221 | # Mixing
222 |
223 | mix_node = group.nodes.new(type="ShaderNodeMixRGB")
224 |
225 | group.links.new(divide_node.outputs["Value"], mix_node.inputs["Fac"])
226 | group.links.new(input_node.outputs["Color2"], mix_node.inputs[1])
227 | group.links.new(input_node.outputs["Color1"], mix_node.inputs[2])
228 |
229 | # Output
230 |
231 | output_node = group.nodes.new(type="NodeGroupOutput")
232 | group.outputs.new("NodeSocketColor", "Color")
233 |
234 | group.links.new(mix_node.outputs["Color"], output_node.inputs["Color"])
235 |
236 | # Return
237 |
238 | arrange_nodes(group)
239 |
240 | return group
241 |
242 |
243 | def create_parametric_color_ramp_node(node_tree: bpy.types.NodeTree) -> bpy.types.Node:
244 | color_ramp_node_group: bpy.types.NodeGroup
245 |
246 | if "Parametric Color Ramp" in bpy.data.node_groups:
247 | color_ramp_node_group = bpy.data.node_groups["Parametric Color Ramp"]
248 | else:
249 | color_ramp_node_group = add_parametric_color_ramp()
250 |
251 | node = node_tree.nodes.new(type='ShaderNodeGroup')
252 | node.name = "Parametric Color Ramp"
253 | node.node_tree = color_ramp_node_group
254 |
255 | return node
256 |
257 |
258 | def add_tri_parametric_color_ramp() -> bpy.types.NodeGroup:
259 | group = bpy.data.node_groups.new(type="ShaderNodeTree", name="Tri Parametric Color Ramp")
260 |
261 | # Input
262 |
263 | input_node = group.nodes.new(type="NodeGroupInput")
264 | group.inputs.new("NodeSocketFloatFactor", "Fac")
265 | group.inputs.new("NodeSocketColor", "Color1")
266 | group.inputs.new("NodeSocketColor", "Color2")
267 | group.inputs.new("NodeSocketColor", "Color3")
268 | group.inputs.new("NodeSocketFloatFactor", "Pos1")
269 | group.inputs.new("NodeSocketFloatFactor", "Pos2")
270 | group.inputs.new("NodeSocketFloatFactor", "Pos3")
271 |
272 | set_socket_value_range(group.inputs["Fac"], default_value=0.5)
273 | set_socket_value_range(group.inputs["Pos1"], default_value=0.25)
274 | set_socket_value_range(group.inputs["Pos2"], default_value=0.50)
275 | set_socket_value_range(group.inputs["Pos3"], default_value=0.75)
276 |
277 | # Nested color ramp
278 |
279 | nested_color_ramp_node = create_parametric_color_ramp_node(group)
280 |
281 | group.links.new(input_node.outputs["Color1"], nested_color_ramp_node.inputs["Color1"])
282 | group.links.new(input_node.outputs["Color2"], nested_color_ramp_node.inputs["Color2"])
283 | group.links.new(input_node.outputs["Pos1"], nested_color_ramp_node.inputs["Pos1"])
284 | group.links.new(input_node.outputs["Pos2"], nested_color_ramp_node.inputs["Pos2"])
285 | group.links.new(input_node.outputs["Fac"], nested_color_ramp_node.inputs["Fac"])
286 |
287 | # Math
288 |
289 | denominator_subtract_node = group.nodes.new(type="ShaderNodeMath")
290 | denominator_subtract_node.operation = "SUBTRACT"
291 | denominator_subtract_node.use_clamp = True
292 |
293 | numerator_subtract_node = group.nodes.new(type="ShaderNodeMath")
294 | numerator_subtract_node.operation = "SUBTRACT"
295 | numerator_subtract_node.use_clamp = True
296 |
297 | divide_node = group.nodes.new(type="ShaderNodeMath")
298 | divide_node.operation = "DIVIDE"
299 | divide_node.use_clamp = True
300 |
301 | group.links.new(input_node.outputs["Pos3"], denominator_subtract_node.inputs[0])
302 | group.links.new(input_node.outputs["Fac"], denominator_subtract_node.inputs[1])
303 |
304 | group.links.new(input_node.outputs["Pos3"], numerator_subtract_node.inputs[0])
305 | group.links.new(input_node.outputs["Pos2"], numerator_subtract_node.inputs[1])
306 |
307 | group.links.new(denominator_subtract_node.outputs["Value"], divide_node.inputs[0])
308 | group.links.new(numerator_subtract_node.outputs["Value"], divide_node.inputs[1])
309 |
310 | # Mixing
311 |
312 | mix_node = group.nodes.new(type="ShaderNodeMixRGB")
313 |
314 | group.links.new(divide_node.outputs["Value"], mix_node.inputs["Fac"])
315 | group.links.new(input_node.outputs["Color3"], mix_node.inputs[1])
316 | group.links.new(nested_color_ramp_node.outputs["Color"], mix_node.inputs[2])
317 |
318 | # Output
319 |
320 | output_node = group.nodes.new(type="NodeGroupOutput")
321 | group.outputs.new("NodeSocketColor", "Color")
322 |
323 | group.links.new(mix_node.outputs["Color"], output_node.inputs["Color"])
324 |
325 | # Return
326 |
327 | arrange_nodes(group)
328 |
329 | return group
330 |
331 |
332 | def create_tri_parametric_color_ramp_node(node_tree: bpy.types.NodeTree) -> bpy.types.Node:
333 | tri_color_ramp_node_group: bpy.types.NodeGroup
334 |
335 | if "Tri Parametric Color Ramp" in bpy.data.node_groups:
336 | tri_color_ramp_node_group = bpy.data.node_groups["Tri Parametric Color Ramp"]
337 | else:
338 | tri_color_ramp_node_group = add_tri_parametric_color_ramp()
339 |
340 | node = node_tree.nodes.new(type='ShaderNodeGroup')
341 | node.name = "Tri Parametric Color Ramp"
342 | node.node_tree = tri_color_ramp_node_group
343 |
344 | return node
345 |
346 |
347 | def add_peeling_paint_metal_node_group() -> bpy.types.NodeGroup:
348 | group = bpy.data.node_groups.new(type="ShaderNodeTree", name="Peeling Paint Metal")
349 |
350 | input_node = group.nodes.new(type="NodeGroupInput")
351 | group.inputs.new("NodeSocketColor", "Paint Color")
352 | group.inputs.new("NodeSocketFloatFactor", "Paint Roughness")
353 | group.inputs.new("NodeSocketColor", "Metal Color")
354 | group.inputs.new("NodeSocketFloatFactor", "Metal Roughness")
355 | group.inputs.new("NodeSocketFloat", "Scale")
356 | group.inputs.new("NodeSocketFloat", "Detail")
357 | group.inputs.new("NodeSocketFloat", "Distortion")
358 | group.inputs.new("NodeSocketFloatFactor", "Threshold")
359 | group.inputs.new("NodeSocketFloat", "Peel Intense")
360 |
361 | group.inputs["Paint Color"].default_value = (0.152, 0.524, 0.067, 1.000)
362 | group.inputs["Metal Color"].default_value = (0.062, 0.015, 0.011, 1.000)
363 |
364 | set_socket_value_range(group.inputs["Paint Roughness"], default_value=0.05)
365 | set_socket_value_range(group.inputs["Metal Roughness"], default_value=0.50)
366 |
367 | set_socket_value_range(group.inputs["Scale"], default_value=4.5, min_value=0.0, max_value=1000.0)
368 | set_socket_value_range(group.inputs["Detail"], default_value=8.0, min_value=0.0, max_value=16.0)
369 | set_socket_value_range(group.inputs["Distortion"], default_value=0.5, min_value=0.0, max_value=1000.0)
370 | set_socket_value_range(group.inputs["Threshold"], default_value=0.42)
371 | set_socket_value_range(group.inputs["Peel Intense"], default_value=0.2, min_value=0.0, max_value=1.0)
372 |
373 | tex_coord_node = group.nodes.new(type="ShaderNodeTexCoord")
374 | mapping_node = group.nodes.new(type="ShaderNodeMapping")
375 |
376 | group.links.new(tex_coord_node.outputs["Object"], mapping_node.inputs["Vector"])
377 |
378 | # Peeling region segmentation
379 |
380 | peeling_noise_node = group.nodes.new(type="ShaderNodeTexNoise")
381 |
382 | group.links.new(mapping_node.outputs["Vector"], peeling_noise_node.inputs["Vector"])
383 | group.links.new(input_node.outputs["Scale"], peeling_noise_node.inputs["Scale"])
384 | group.links.new(input_node.outputs["Detail"], peeling_noise_node.inputs["Detail"])
385 | group.links.new(input_node.outputs["Distortion"], peeling_noise_node.inputs["Distortion"])
386 |
387 | peeling_threshold_node = create_parametric_color_ramp_node(group)
388 | peeling_threshold_node.inputs["Color1"].default_value = (0.0, 0.0, 0.0, 1.0)
389 | peeling_threshold_node.inputs["Color2"].default_value = (1.0, 1.0, 1.0, 1.0)
390 |
391 | # Base color
392 |
393 | epsilon_subtract_node = group.nodes.new(type="ShaderNodeMath")
394 | epsilon_subtract_node.operation = "SUBTRACT"
395 | epsilon_subtract_node.inputs[1].default_value = 0.001
396 |
397 | group.links.new(input_node.outputs["Threshold"], epsilon_subtract_node.inputs[0])
398 |
399 | group.links.new(peeling_noise_node.outputs["Fac"], peeling_threshold_node.inputs["Fac"])
400 | group.links.new(epsilon_subtract_node.outputs["Value"], peeling_threshold_node.inputs["Pos1"])
401 | group.links.new(input_node.outputs["Threshold"], peeling_threshold_node.inputs["Pos2"])
402 |
403 | color_mix_node = group.nodes.new(type="ShaderNodeMixRGB")
404 | group.links.new(peeling_threshold_node.outputs["Color"], color_mix_node.inputs["Fac"])
405 | group.links.new(input_node.outputs["Metal Color"], color_mix_node.inputs[1])
406 | group.links.new(input_node.outputs["Paint Color"], color_mix_node.inputs[2])
407 |
408 |     # Ambient occlusion
409 |
410 | epsilon_add_node = group.nodes.new(type="ShaderNodeMath")
411 | epsilon_add_node.operation = "ADD"
412 | epsilon_add_node.inputs[1].default_value = 0.010
413 |
414 | group.links.new(input_node.outputs["Threshold"], epsilon_add_node.inputs[0])
415 |
416 | fallout_subtract_node = group.nodes.new(type="ShaderNodeMath")
417 | fallout_subtract_node.operation = "SUBTRACT"
418 | fallout_subtract_node.inputs[1].default_value = 0.060
419 |
420 | group.links.new(input_node.outputs["Threshold"], fallout_subtract_node.inputs[0])
421 |
422 | ao_node = create_tri_parametric_color_ramp_node(group)
423 | ao_node.inputs["Color1"].default_value = (1.0, 1.0, 1.0, 1.0)
424 | ao_node.inputs["Color2"].default_value = (0.0, 0.0, 0.0, 1.0)
425 | ao_node.inputs["Color3"].default_value = (1.0, 1.0, 1.0, 1.0)
426 |
427 | group.links.new(peeling_noise_node.outputs["Fac"], ao_node.inputs["Fac"])
428 | group.links.new(fallout_subtract_node.outputs["Value"], ao_node.inputs["Pos1"])
429 | group.links.new(input_node.outputs["Threshold"], ao_node.inputs["Pos2"])
430 | group.links.new(epsilon_add_node.outputs["Value"], ao_node.inputs["Pos3"])
431 |
432 | ao_mix_node = group.nodes.new(type="ShaderNodeMixRGB")
433 | ao_mix_node.blend_type = "MULTIPLY"
434 | ao_mix_node.inputs["Fac"].default_value = 1.0
435 |
436 | group.links.new(color_mix_node.outputs["Color"], ao_mix_node.inputs[1])
437 | group.links.new(ao_node.outputs["Color"], ao_mix_node.inputs[2])
438 |
439 | create_frame_node(group, nodes=(epsilon_add_node, fallout_subtract_node, ao_node), name="AO", label="AO")
440 |
441 | # Metallic
442 |
443 | metallic_node = group.nodes.new(type="ShaderNodeMixRGB")
444 | metallic_node.inputs["Color1"].default_value = (1.0, 1.0, 1.0, 1.0)
445 | metallic_node.inputs["Color2"].default_value = (0.0, 0.0, 0.0, 1.0)
446 |
447 | group.links.new(peeling_threshold_node.outputs["Color"], metallic_node.inputs["Fac"])
448 |
449 | # Roughness
450 |
451 | roughness_node = group.nodes.new(type="ShaderNodeMixRGB")
452 |
453 | group.links.new(input_node.outputs["Metal Roughness"], roughness_node.inputs["Color1"])
454 | group.links.new(input_node.outputs["Paint Roughness"], roughness_node.inputs["Color2"])
455 | group.links.new(peeling_threshold_node.outputs["Color"], roughness_node.inputs["Fac"])
456 |
457 | # Bump
458 |
459 | height_node = create_tri_parametric_color_ramp_node(group)
460 | height_node.inputs["Color1"].default_value = (0.0, 0.0, 0.0, 1.0)
461 | height_node.inputs["Color2"].default_value = (1.0, 1.0, 1.0, 1.0)
462 | height_node.inputs["Color3"].default_value = (0.5, 0.5, 0.5, 1.0)
463 |
464 | height_peak_add_node = group.nodes.new(type="ShaderNodeMath")
465 | height_peak_add_node.operation = "MULTIPLY_ADD"
466 | height_peak_add_node.inputs[1].default_value = 0.025
467 | height_peak_add_node.label = "Height Peak Add"
468 |
469 | height_tail_add_node = group.nodes.new(type="ShaderNodeMath")
470 | height_tail_add_node.operation = "MULTIPLY_ADD"
471 | height_tail_add_node.inputs[1].default_value = 0.100
472 | height_tail_add_node.label = "Height Tail Add"
473 |
474 | group.links.new(input_node.outputs["Threshold"], height_peak_add_node.inputs[2])
475 | group.links.new(input_node.outputs["Peel Intense"], height_peak_add_node.inputs[0])
476 | group.links.new(height_peak_add_node.outputs["Value"], height_tail_add_node.inputs[2])
477 | group.links.new(input_node.outputs["Peel Intense"], height_tail_add_node.inputs[0])
478 | group.links.new(peeling_noise_node.outputs["Fac"], height_node.inputs["Fac"])
479 | group.links.new(input_node.outputs["Threshold"], height_node.inputs["Pos1"])
480 | group.links.new(height_peak_add_node.outputs["Value"], height_node.inputs["Pos2"])
481 | group.links.new(height_tail_add_node.outputs["Value"], height_node.inputs["Pos3"])
482 |
483 | bump_node = group.nodes.new(type="ShaderNodeBump")
484 | group.links.new(height_node.outputs["Color"], bump_node.inputs["Height"])
485 |
486 | create_frame_node(group,
487 | nodes=(height_node, height_peak_add_node, height_tail_add_node, bump_node),
488 | name="Bump",
489 | label="Bump")
490 |
491 | # Output
492 |
493 | output_node = group.nodes.new("NodeGroupOutput")
494 | group.outputs.new("NodeSocketColor", "Color")
495 | group.outputs.new("NodeSocketFloatFactor", "Metallic")
496 | group.outputs.new("NodeSocketFloatFactor", "Roughness")
497 | group.outputs.new("NodeSocketVectorDirection", "Bump")
498 |
499 | group.links.new(ao_mix_node.outputs["Color"], output_node.inputs["Color"])
500 | group.links.new(metallic_node.outputs["Color"], output_node.inputs["Metallic"])
501 | group.links.new(roughness_node.outputs["Color"], output_node.inputs["Roughness"])
502 | group.links.new(bump_node.outputs["Normal"], output_node.inputs["Bump"])
503 |
504 | arrange_nodes(group)
505 |
506 | return group
507 |
508 |
509 | def create_peeling_paint_metal_node_group(node_tree: bpy.types.NodeTree) -> bpy.types.Node:
510 |     peeling_paint_metal_node_group: bpy.types.NodeTree
511 |
512 | if "Peeling Paint Metal" in bpy.data.node_groups:
513 | peeling_paint_metal_node_group = bpy.data.node_groups["Peeling Paint Metal"]
514 | else:
515 | peeling_paint_metal_node_group = add_peeling_paint_metal_node_group()
516 |
517 | node = node_tree.nodes.new(type='ShaderNodeGroup')
518 | node.name = "Peeling Paint Metal"
519 | node.node_tree = peeling_paint_metal_node_group
520 |
521 | return node
522 |
523 |
524 | def build_peeling_paint_metal_nodes(node_tree: bpy.types.NodeTree) -> None:
525 | output_node = node_tree.nodes.new(type='ShaderNodeOutputMaterial')
526 | principled_node = node_tree.nodes.new(type='ShaderNodeBsdfPrincipled')
527 | peeling_paint_metal_node = create_peeling_paint_metal_node_group(node_tree)
528 |
529 | node_tree.links.new(peeling_paint_metal_node.outputs['Color'], principled_node.inputs['Base Color'])
530 | node_tree.links.new(peeling_paint_metal_node.outputs['Metallic'], principled_node.inputs['Metallic'])
531 | node_tree.links.new(peeling_paint_metal_node.outputs['Roughness'], principled_node.inputs['Roughness'])
532 | node_tree.links.new(peeling_paint_metal_node.outputs['Bump'], principled_node.inputs['Normal'])
533 | node_tree.links.new(principled_node.outputs['BSDF'], output_node.inputs['Surface'])
534 |
535 | arrange_nodes(node_tree)
536 |
537 |
538 | def build_emission_nodes(node_tree: bpy.types.NodeTree,
539 | color: Tuple[float, float, float] = (0.0, 0.0, 0.0),
540 | strength: float = 1.0) -> None:
541 | '''
542 | https://docs.blender.org/api/current/bpy.types.ShaderNodeEmission.html
543 | '''
544 | output_node = node_tree.nodes.new(type='ShaderNodeOutputMaterial')
545 | emission_node = node_tree.nodes.new(type='ShaderNodeEmission')
546 |
547 | emission_node.inputs["Color"].default_value = color + (1.0, )
548 | emission_node.inputs["Strength"].default_value = strength
549 |
550 | node_tree.links.new(emission_node.outputs['Emission'], output_node.inputs['Surface'])
551 |
552 | arrange_nodes(node_tree)
553 |
554 |
555 | def add_material(name: str = "Material",
556 | use_nodes: bool = False,
557 | make_node_tree_empty: bool = False) -> bpy.types.Material:
558 | '''
559 | https://docs.blender.org/api/current/bpy.types.BlendDataMaterials.html
560 | https://docs.blender.org/api/current/bpy.types.Material.html
561 | '''
562 |
563 | # TODO: Check whether the name is already used or not
564 |
565 | material = bpy.data.materials.new(name)
566 | material.use_nodes = use_nodes
567 |
568 | if use_nodes and make_node_tree_empty:
569 | clean_nodes(material.node_tree.nodes)
570 |
571 | return material
572 |
--------------------------------------------------------------------------------
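A minimal usage sketch for the material helpers above, assuming the script runs inside Blender with this repository's utils package importable and that the active object is a mesh; the material name is illustrative only.

# Hypothetical usage (not part of the repository): build a peeling-paint metal
# material with the helpers above and assign it to the active object.
import bpy
from utils.material import add_material, build_peeling_paint_metal_nodes

mat = add_material("Peeling Paint Metal", use_nodes=True, make_node_tree_empty=True)
build_peeling_paint_metal_nodes(mat.node_tree)
bpy.context.active_object.data.materials.append(mat)  # assumes a mesh object is active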
/render/utils/mesh.py:
--------------------------------------------------------------------------------
1 | import bpy
2 | import math
3 | from typing import Tuple, Iterable, Optional, Sequence
4 | from utils.modifier import add_subdivision_surface_modifier
5 |
6 |
7 | def set_smooth_shading(mesh: bpy.types.Mesh) -> None:
8 | for polygon in mesh.polygons:
9 | polygon.use_smooth = True
10 |
11 |
12 | def create_mesh_from_pydata(scene: bpy.types.Scene,
13 | vertices: Iterable[Iterable[float]],
14 | faces: Iterable[Iterable[int]],
15 | mesh_name: str,
16 | object_name: str,
17 | use_smooth: bool = True) -> bpy.types.Object:
18 | # Add a new mesh and set vertices and faces
19 |     # In this case, edges do not need to be set explicitly
20 | # After manipulating mesh data, update() needs to be called
21 | new_mesh: bpy.types.Mesh = bpy.data.meshes.new(mesh_name)
22 | new_mesh.from_pydata(vertices, [], faces)
23 | new_mesh.update()
24 | if use_smooth:
25 | set_smooth_shading(new_mesh)
26 |
27 | new_object: bpy.types.Object = bpy.data.objects.new(object_name, new_mesh)
28 | scene.collection.objects.link(new_object)
29 |
30 | return new_object
31 |
32 |
33 | def create_cached_mesh_from_alembic(file_path: str, name: str) -> bpy.types.Object:
34 | bpy.ops.wm.alembic_import(filepath=file_path, as_background_job=False)
35 | bpy.context.active_object.name = name
36 |
37 | return bpy.context.active_object
38 |
39 |
40 | def create_plane(location: Tuple[float, float, float] = (0.0, 0.0, 0.0),
41 | rotation: Tuple[float, float, float] = (0.0, 0.0, 0.0),
42 | size: float = 2.0,
43 | name: Optional[str] = None) -> bpy.types.Object:
44 | bpy.ops.mesh.primitive_plane_add(size=size, location=location, rotation=rotation)
45 |
46 | current_object = bpy.context.object
47 |
48 | if name is not None:
49 | current_object.name = name
50 |
51 | return current_object
52 |
53 |
54 | def create_smooth_sphere(location: Tuple[float, float, float] = (0.0, 0.0, 0.0),
55 | radius: float = 1.0,
56 | subdivision_level: int = 1,
57 | name: Optional[str] = None) -> bpy.types.Object:
58 | bpy.ops.mesh.primitive_uv_sphere_add(radius=radius, location=location, calc_uvs=True)
59 |
60 | current_object = bpy.context.object
61 |
62 | if name is not None:
63 | current_object.name = name
64 |
65 | set_smooth_shading(current_object.data)
66 | add_subdivision_surface_modifier(current_object, subdivision_level)
67 |
68 | return current_object
69 |
70 |
71 | def create_smooth_monkey(location: Tuple[float, float, float] = (0.0, 0.0, 0.0),
72 | rotation: Tuple[float, float, float] = (0.0, 0.0, 0.0),
73 | subdivision_level: int = 2,
74 | name: Optional[str] = None) -> bpy.types.Object:
75 | bpy.ops.mesh.primitive_monkey_add(location=location, rotation=rotation, calc_uvs=True)
76 |
77 | current_object = bpy.context.object
78 |
79 | if name is not None:
80 | current_object.name = name
81 |
82 | set_smooth_shading(current_object.data)
83 | add_subdivision_surface_modifier(current_object, subdivision_level)
84 |
85 | return current_object
86 |
87 |
88 | def create_three_smooth_monkeys(
89 | names: Optional[Tuple[str, str, str]] = None) -> Tuple[bpy.types.Object, bpy.types.Object, bpy.types.Object]:
90 | if names is None:
91 | names = ("Suzanne Left", "Suzanne Center", "Suzanne Right")
92 |
93 | left = create_smooth_monkey(location=(-1.8, 0.0, 1.0), rotation=(0.0, 0.0, -math.pi * 60.0 / 180.0), name=names[0])
94 | center = create_smooth_monkey(location=(0.0, 0.0, 1.0), rotation=(0.0, 0.0, -math.pi * 60.0 / 180.0), name=names[1])
95 | right = create_smooth_monkey(location=(+1.8, 0.0, 1.0), rotation=(0.0, 0.0, -math.pi * 60.0 / 180.0), name=names[2])
96 |
97 | return left, center, right
98 |
99 |
100 | # https://docs.blender.org/api/current/bpy.types.VertexGroups.html
101 | # https://docs.blender.org/api/current/bpy.types.VertexGroup.html
102 | def add_vertex_group(mesh_object: bpy.types.Object, name: str = "Group") -> bpy.types.VertexGroup:
103 |
104 | # TODO: Check whether the object has a mesh data
105 | # TODO: Check whether the object already has a vertex group with the specified name
106 |
107 | vertex_group = mesh_object.vertex_groups.new(name=name)
108 |
109 | return vertex_group
110 |
--------------------------------------------------------------------------------
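A short, hedged example of the mesh helpers above; the object names and parameter values are illustrative only.

# Hypothetical usage (not part of the repository): add a floor plane and a
# smooth-shaded, subdivided sphere to the current scene.
from utils.mesh import create_plane, create_smooth_sphere

plane = create_plane(size=10.0, name="Floor")
sphere = create_smooth_sphere(location=(0.0, 0.0, 1.0), radius=1.0, subdivision_level=2, name="Sphere")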
/render/utils/modifier.py:
--------------------------------------------------------------------------------
1 | import bpy
2 |
3 |
4 | def add_boolean_modifier(mesh_object: bpy.types.Object,
5 | another_mesh_object: bpy.types.Object,
6 | operation: str = "DIFFERENCE") -> None:
7 | '''
8 | https://docs.blender.org/api/current/bpy.types.BooleanModifier.html
9 | '''
10 |
11 | modifier: bpy.types.SubsurfModifier = mesh_object.modifiers.new(name="Boolean", type='BOOLEAN')
12 |
13 | modifier.object = another_mesh_object
14 | modifier.operation = operation
15 |
16 |
17 | def add_subdivision_surface_modifier(mesh_object: bpy.types.Object, level: int, is_simple: bool = False) -> None:
18 | '''
19 | https://docs.blender.org/api/current/bpy.types.SubsurfModifier.html
20 | '''
21 |
22 | modifier: bpy.types.SubsurfModifier = mesh_object.modifiers.new(name="Subsurf", type='SUBSURF')
23 |
24 | modifier.levels = level
25 | modifier.render_levels = level
26 | modifier.subdivision_type = 'SIMPLE' if is_simple else 'CATMULL_CLARK'
27 |
28 |
29 | def add_solidify_modifier(mesh_object: bpy.types.Object,
30 | thickness: float = 0.01,
31 | flip_normal: bool = False,
32 | fill_rim: bool = True,
33 | material_index_offset: int = 0,
34 | shell_vertex_group: str = "",
35 | rim_vertex_group: str = "") -> None:
36 | '''
37 | https://docs.blender.org/api/current/bpy.types.SolidifyModifier.html
38 | '''
39 |
40 | modifier: bpy.types.SolidifyModifier = mesh_object.modifiers.new(name="Solidify", type='SOLIDIFY')
41 |
42 | modifier.material_offset = material_index_offset
43 | modifier.thickness = thickness
44 | modifier.use_flip_normals = flip_normal
45 | modifier.use_rim = fill_rim
46 |
47 | # TODO: Check whether shell_vertex_group is either empty or defined
48 | # TODO: Check whether rim_vertex_group is either empty or defined
49 |
50 | modifier.shell_vertex_group = shell_vertex_group
51 | modifier.rim_vertex_group = rim_vertex_group
52 |
53 |
54 | def add_displace_modifier(mesh_object: bpy.types.Object,
55 | texture_name: str,
56 | vertex_group: str = "",
57 | mid_level: float = 0.5,
58 | strength: float = 1.0) -> None:
59 | '''
60 | https://docs.blender.org/api/current/bpy.types.DisplaceModifier.html
61 | '''
62 |
63 | modifier = mesh_object.modifiers.new(name="Displace", type='DISPLACE')
64 |
65 | modifier.mid_level = mid_level
66 | modifier.strength = strength
67 |
68 | # TODO: Check whether texture_name is properly defined
69 | modifier.texture = bpy.data.textures[texture_name]
70 |
71 | # TODO: Check whether vertex_group is either empty or defined
72 | modifier.vertex_group = vertex_group
73 |
--------------------------------------------------------------------------------
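A hedged sketch combining two of the modifier helpers above; the primitives come from standard bpy operators and the names are illustrative.

# Hypothetical usage (not part of the repository): carve a sphere out of a cube
# with a boolean modifier, then smooth the result with a subdivision surface.
import bpy
from utils.modifier import add_boolean_modifier, add_subdivision_surface_modifier

bpy.ops.mesh.primitive_cube_add(size=2.0)
cube = bpy.context.object
bpy.ops.mesh.primitive_uv_sphere_add(radius=1.2, location=(1.0, 0.0, 0.0))
cutter = bpy.context.object
add_boolean_modifier(cube, cutter, operation="DIFFERENCE")
add_subdivision_surface_modifier(cube, level=2)
cutter.hide_render = True  # keep the cutter object out of renders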
/render/utils/node.py:
--------------------------------------------------------------------------------
1 | import bpy
2 | import sys
3 | import math
4 | from typing import Iterable
5 |
6 |
7 | def create_frame_node(node_tree: bpy.types.NodeTree,
8 |                       nodes: Iterable[bpy.types.Node] = (),
9 | name: str = "Frame",
10 | label: str = "Frame") -> bpy.types.Node:
11 | frame_node = node_tree.nodes.new(type='NodeFrame')
12 | frame_node.name = name
13 | frame_node.label = label
14 |
15 | for node in nodes:
16 | node.parent = frame_node
17 |
18 | return frame_node
19 |
20 |
21 | def set_socket_value_range(socket: bpy.types.NodeSocket,
22 | default_value: float = 0.0,
23 | min_value: float = 0.0,
24 | max_value: float = 1.0) -> None:
25 | assert socket.type == "VALUE"
26 |
27 | socket.default_value = default_value
28 | socket.min_value = min_value
29 | socket.max_value = max_value
30 |
31 |
32 | def clean_nodes(nodes: bpy.types.Nodes) -> None:
33 | for node in nodes:
34 | nodes.remove(node)
35 |
36 |
37 | def arrange_nodes(node_tree: bpy.types.NodeTree, verbose: bool = False) -> None:
38 | max_num_iters = 2000
39 | epsilon = 1e-05
40 | target_space = 50.0
41 |
42 | second_stage = False
43 |
44 | fix_horizontal_location = True
45 | fix_vertical_location = True
46 | fix_overlaps = True
47 |
48 | if verbose:
49 | print("-----------------")
50 | print("Target nodes:")
51 | for node in node_tree.nodes:
52 | print("- " + node.name)
53 |
54 |     # In the first stage, spread the nodes out more than necessary
55 | target_space *= 2.0
56 |
57 | # Gauss-Seidel-style iterations
58 | previous_squared_deltas_sum = sys.float_info.max
59 | for i in range(max_num_iters):
60 | squared_deltas_sum = 0.0
61 |
62 | if fix_horizontal_location:
63 | for link in node_tree.links:
64 | k = 0.9 if not second_stage else 0.5
65 | threshold_factor = 2.0
66 |
67 | x_from = link.from_node.location[0]
68 | x_to = link.to_node.location[0]
69 | w_from = link.from_node.width
70 | signed_space = x_to - x_from - w_from
71 | C = signed_space - target_space
72 | grad_C_x_from = -1.0
73 | grad_C_x_to = 1.0
74 |
75 | # Skip if the distance is sufficiently large
76 | if C >= target_space * threshold_factor:
77 | continue
78 |
79 | lagrange = C / (grad_C_x_from * grad_C_x_from + grad_C_x_to * grad_C_x_to)
80 | delta_x_from = -lagrange * grad_C_x_from
81 | delta_x_to = -lagrange * grad_C_x_to
82 |
83 | link.from_node.location[0] += k * delta_x_from
84 | link.to_node.location[0] += k * delta_x_to
85 |
86 | squared_deltas_sum += k * k * (delta_x_from * delta_x_from + delta_x_to * delta_x_to)
87 |
88 | if fix_vertical_location:
89 | k = 0.5 if not second_stage else 0.05
90 | socket_offset = 20.0
91 |
92 | def get_from_socket_index(node: bpy.types.Node, node_socket: bpy.types.NodeSocket) -> int:
93 | for i in range(len(node.outputs)):
94 | if node.outputs[i] == node_socket:
95 | return i
96 | assert False
97 |
98 | def get_to_socket_index(node: bpy.types.Node, node_socket: bpy.types.NodeSocket) -> int:
99 | for i in range(len(node.inputs)):
100 | if node.inputs[i] == node_socket:
101 | return i
102 | assert False
103 |
104 | for link in node_tree.links:
105 | from_socket_index = get_from_socket_index(link.from_node, link.from_socket)
106 | to_socket_index = get_to_socket_index(link.to_node, link.to_socket)
107 | y_from = link.from_node.location[1] - socket_offset * from_socket_index
108 | y_to = link.to_node.location[1] - socket_offset * to_socket_index
109 | C = y_from - y_to
110 | grad_C_y_from = 1.0
111 | grad_C_y_to = -1.0
112 | lagrange = C / (grad_C_y_from * grad_C_y_from + grad_C_y_to * grad_C_y_to)
113 | delta_y_from = -lagrange * grad_C_y_from
114 | delta_y_to = -lagrange * grad_C_y_to
115 |
116 | link.from_node.location[1] += k * delta_y_from
117 | link.to_node.location[1] += k * delta_y_to
118 |
119 | squared_deltas_sum += k * k * (delta_y_from * delta_y_from + delta_y_to * delta_y_to)
120 |
121 | if fix_overlaps and second_stage:
122 | k = 0.9
123 | margin = 0.5 * target_space
124 |
125 | # Examine all node pairs
126 | for node_1 in node_tree.nodes:
127 | for node_2 in node_tree.nodes:
128 | if node_1 == node_2:
129 | continue
130 |
131 | x_1 = node_1.location[0]
132 | x_2 = node_2.location[0]
133 | w_1 = node_1.width
134 | w_2 = node_2.width
135 | cx_1 = x_1 + 0.5 * w_1
136 | cx_2 = x_2 + 0.5 * w_2
137 | rx_1 = 0.5 * w_1 + margin
138 | rx_2 = 0.5 * w_2 + margin
139 |
140 | # Note: "dimensions" and "height" may not be correct depending on the situation
141 | def get_height(node: bpy.types.Node) -> float:
142 | if node.dimensions.y > epsilon:
143 | return node.dimensions.y
144 | elif math.fabs(node.height - 100.0) > epsilon:
145 | return node.height
146 | else:
147 | return 200.0
148 |
149 | y_1 = node_1.location[1]
150 | y_2 = node_2.location[1]
151 | h_1 = get_height(node_1)
152 | h_2 = get_height(node_2)
153 | cy_1 = y_1 - 0.5 * h_1
154 | cy_2 = y_2 - 0.5 * h_2
155 | ry_1 = 0.5 * h_1 + margin
156 | ry_2 = 0.5 * h_2 + margin
157 |
158 | C_x = math.fabs(cx_1 - cx_2) - (rx_1 + rx_2)
159 | C_y = math.fabs(cy_1 - cy_2) - (ry_1 + ry_2)
160 |
161 | # If no collision, just skip
162 | if C_x >= 0.0 or C_y >= 0.0:
163 | continue
164 |
165 | # Solve collision for the "easier" direction
166 | if C_x > C_y:
167 | grad_C_x_1 = 1.0 if cx_1 - cx_2 >= 0.0 else -1.0
168 | grad_C_x_2 = -1.0 if cx_1 - cx_2 >= 0.0 else 1.0
169 | lagrange = C_x / (grad_C_x_1 * grad_C_x_1 + grad_C_x_2 * grad_C_x_2)
170 | delta_x_1 = -lagrange * grad_C_x_1
171 | delta_x_2 = -lagrange * grad_C_x_2
172 |
173 | node_1.location[0] += k * delta_x_1
174 | node_2.location[0] += k * delta_x_2
175 |
176 | squared_deltas_sum += k * k * (delta_x_1 * delta_x_1 + delta_x_2 * delta_x_2)
177 | else:
178 | grad_C_y_1 = 1.0 if cy_1 - cy_2 >= 0.0 else -1.0
179 | grad_C_y_2 = -1.0 if cy_1 - cy_2 >= 0.0 else 1.0
180 | lagrange = C_y / (grad_C_y_1 * grad_C_y_1 + grad_C_y_2 * grad_C_y_2)
181 | delta_y_1 = -lagrange * grad_C_y_1
182 | delta_y_2 = -lagrange * grad_C_y_2
183 |
184 | node_1.location[1] += k * delta_y_1
185 | node_2.location[1] += k * delta_y_2
186 |
187 | squared_deltas_sum += k * k * (delta_y_1 * delta_y_1 + delta_y_2 * delta_y_2)
188 |
189 | if verbose:
190 | print("Iteration #" + str(i) + ": " + str(previous_squared_deltas_sum - squared_deltas_sum))
191 |
192 |         # Check the termination condition
193 | if math.fabs(previous_squared_deltas_sum - squared_deltas_sum) < epsilon:
194 | if second_stage:
195 | break
196 | else:
197 | target_space = 0.5 * target_space
198 | second_stage = True
199 |
200 | previous_squared_deltas_sum = squared_deltas_sum
201 |
--------------------------------------------------------------------------------
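arrange_nodes above lays out a node tree with a two-stage Gauss-Seidel-style relaxation: it first spaces linked nodes horizontally and vertically with loose spacing, then tightens the spacing and resolves node overlaps until the updates converge. A minimal, hypothetical call on an existing material's node tree might look like this (the material name is assumed to exist):

# Hypothetical usage (not part of the repository): tidy the node layout of an
# existing material so it is readable in the shader editor.
import bpy
from utils.node import arrange_nodes

material = bpy.data.materials["Peeling Paint Metal"]  # assumed to exist
arrange_nodes(material.node_tree, verbose=True)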
/render/utils/texture.py:
--------------------------------------------------------------------------------
1 | import bpy
2 |
3 |
4 | def add_clouds_texture(name: str = "Clouds Texture",
5 | size: float = 0.25,
6 | depth: int = 2,
7 | nabla: float = 0.025,
8 | brightness: float = 1.0,
9 | contrast: float = 1.0) -> bpy.types.CloudsTexture:
10 | '''
11 | https://docs.blender.org/api/current/bpy.types.BlendDataTextures.html
12 | https://docs.blender.org/api/current/bpy.types.Texture.html
13 | https://docs.blender.org/api/current/bpy.types.CloudsTexture.html
14 | '''
15 |
16 | # TODO: Check whether the name is already used or not
17 |
18 | tex = bpy.data.textures.new(name, type='CLOUDS')
19 |
20 | tex.noise_scale = size
21 | tex.noise_depth = depth
22 | tex.nabla = nabla
23 |
24 | tex.intensity = brightness
25 | tex.contrast = contrast
26 |
27 | return tex
28 |
--------------------------------------------------------------------------------
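A hedged example tying the clouds texture above to the displace modifier from utils/modifier.py, assuming no texture with that name already exists; parameter values are illustrative. The displace helper looks the texture up by name in bpy.data.textures, which is why the same name string is passed to both helpers.

# Hypothetical usage (not part of the repository): turn a flat plane into rough
# terrain by displacing it with a procedural clouds texture.
from utils.mesh import create_plane
from utils.modifier import add_subdivision_surface_modifier, add_displace_modifier
from utils.texture import add_clouds_texture

plane = create_plane(size=4.0, name="Terrain")
add_subdivision_surface_modifier(plane, level=5, is_simple=True)  # add geometry to displace
add_clouds_texture(name="Terrain Noise", size=0.5, depth=4)
add_displace_modifier(plane, texture_name="Terrain Noise", strength=0.3)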
/render/utils/utils.py:
--------------------------------------------------------------------------------
1 | import bpy
2 | import math
3 | from typing import Optional, Tuple
4 | from utils.node import arrange_nodes
5 |
6 | ################################################################################
7 | # Text
8 | ################################################################################
9 |
10 |
11 | def create_text(
12 | scene: bpy.types.Scene,
13 | body: str,
14 | name: str,
15 | align_x: str = 'CENTER',
16 | align_y: str = 'CENTER',
17 | size: float = 1.0,
18 | font_name: str = "Bfont",
19 | extrude: float = 0.0,
20 | space_line: float = 1.0,
21 | location: Tuple[float, float, float] = (0.0, 0.0, 0.0),
22 | rotation: Tuple[float, float, float] = (0.0, 0.0, 0.0)
23 | ) -> bpy.types.Object:
24 |
25 | new_text_data: bpy.types.Curve = bpy.data.curves.new(name=name, type='FONT')
26 |
27 | new_text_data.body = body
28 | new_text_data.align_x = align_x
29 | new_text_data.align_y = align_y
30 | new_text_data.size = size
31 | new_text_data.font = bpy.data.fonts[font_name]
32 | new_text_data.space_line = space_line
33 | new_text_data.extrude = extrude
34 |
35 | new_object: bpy.types.Object = bpy.data.objects.new(name, new_text_data)
36 | scene.collection.objects.link(new_object)
37 |
38 | new_object.location = location
39 |     new_object.rotation_euler = (math.pi * rotation[0] / 180.0, math.pi * rotation[1] / 180.0, math.pi * rotation[2] / 180.0)
40 |
41 | return new_object
42 |
43 |
44 | ################################################################################
45 | # Scene
46 | ################################################################################
47 |
48 |
49 | def set_animation(scene: bpy.types.Scene,
50 | fps: int = 24,
51 | frame_start: int = 1,
52 | frame_end: int = 48,
53 | frame_current: int = 1) -> None:
54 | scene.render.fps = fps
55 | scene.frame_start = frame_start
56 | scene.frame_end = frame_end
57 | scene.frame_current = frame_current
58 |
59 |
60 | def build_rgb_background(world: bpy.types.World,
61 | rgb: Tuple[float, float, float, float] = (0.9, 0.9, 0.9, 1.0),
62 | strength: float = 1.0) -> None:
63 | world.use_nodes = True
64 | node_tree = world.node_tree
65 |
66 | rgb_node = node_tree.nodes.new(type="ShaderNodeRGB")
67 | rgb_node.outputs["Color"].default_value = rgb
68 |
69 | node_tree.nodes["Background"].inputs["Strength"].default_value = strength
70 |
71 | node_tree.links.new(rgb_node.outputs["Color"], node_tree.nodes["Background"].inputs["Color"])
72 |
73 | arrange_nodes(node_tree)
74 |
75 |
76 | def build_environment_texture_background(world: bpy.types.World, hdri_path: str, rotation: float = 0.0) -> None:
77 | world.use_nodes = True
78 | node_tree = world.node_tree
79 |
80 | environment_texture_node = node_tree.nodes.new(type="ShaderNodeTexEnvironment")
81 | environment_texture_node.image = bpy.data.images.load(hdri_path)
82 |
83 | mapping_node = node_tree.nodes.new(type="ShaderNodeMapping")
84 | if bpy.app.version >= (2, 81, 0):
85 | mapping_node.inputs["Rotation"].default_value = (0.0, 0.0, rotation)
86 | else:
87 | mapping_node.rotation[2] = rotation
88 |
89 | tex_coord_node = node_tree.nodes.new(type="ShaderNodeTexCoord")
90 |
91 | node_tree.links.new(tex_coord_node.outputs["Generated"], mapping_node.inputs["Vector"])
92 | node_tree.links.new(mapping_node.outputs["Vector"], environment_texture_node.inputs["Vector"])
93 | node_tree.links.new(environment_texture_node.outputs["Color"], node_tree.nodes["Background"].inputs["Color"])
94 |
95 | arrange_nodes(node_tree)
96 |
97 |
98 | def set_output_properties(scene: bpy.types.Scene, resolution_percentage: int = 100, output_file_path: str = "") -> None:
99 | scene.render.resolution_percentage = resolution_percentage
100 |
101 | if output_file_path:
102 | scene.render.filepath = output_file_path
103 |
104 |
105 | def set_cycles_renderer(scene: bpy.types.Scene,
106 | camera_object: bpy.types.Object,
107 | num_samples: int,
108 | use_denoising: bool = True,
109 | use_motion_blur: bool = False,
110 | use_transparent_bg: bool = False) -> None:
111 | scene.camera = camera_object
112 |
113 | scene.render.image_settings.file_format = 'PNG'
114 | scene.render.engine = 'CYCLES'
115 | scene.render.use_motion_blur = use_motion_blur
116 |
117 | scene.render.film_transparent = use_transparent_bg
118 | scene.view_layers[0].cycles.use_denoising = use_denoising
119 |
120 | scene.cycles.samples = num_samples
121 |
122 |
123 | ################################################################################
124 | # Constraints
125 | ################################################################################
126 |
127 |
128 | def add_track_to_constraint(camera_object: bpy.types.Object, track_to_target_object: bpy.types.Object) -> None:
129 | constraint = camera_object.constraints.new(type='TRACK_TO')
130 | constraint.target = track_to_target_object
131 | constraint.track_axis = 'TRACK_NEGATIVE_Z'
132 | constraint.up_axis = 'UP_Y'
133 |
134 |
135 | def add_copy_location_constraint(copy_to_object: bpy.types.Object,
136 | copy_from_object: bpy.types.Object,
137 | use_x: bool,
138 | use_y: bool,
139 | use_z: bool,
140 | bone_name: str = '') -> None:
141 | constraint = copy_to_object.constraints.new(type='COPY_LOCATION')
142 | constraint.target = copy_from_object
143 | constraint.use_x = use_x
144 | constraint.use_y = use_y
145 | constraint.use_z = use_z
146 | if bone_name:
147 | constraint.subtarget = bone_name
148 |
149 |
150 | ################################################################################
151 | # Library
152 | ################################################################################
153 |
154 |
155 | def append_material(blend_file_path: str, material_name: str) -> bool:
156 | '''
157 | https://docs.blender.org/api/current/bpy.types.BlendDataLibraries.html
158 | '''
159 |
160 | # Load the library file
161 | with bpy.data.libraries.load(blend_file_path, link=False) as (data_from, data_to):
162 | # Check whether the specified material exists in the blend file
163 | if material_name in data_from.materials:
164 | # Append the material and return True
165 | data_to.materials = [material_name]
166 | return True
167 | else:
168 | # If the material is not found, return False without doing anything
169 | return False
170 |
171 | # TODO: Handle the exception of not being able to load the library file
172 |     # TODO: Remove the linked library from bpy.data.libraries
173 |
174 |
175 | ################################################################################
176 | # Misc.
177 | ################################################################################
178 |
179 |
180 | def clean_objects() -> None:
181 | for item in bpy.data.objects:
182 | bpy.data.objects.remove(item)
183 |
--------------------------------------------------------------------------------
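To close, a hedged end-to-end sketch that strings several of the helpers in this file together, assuming it is executed inside Blender (e.g. with blender -b -P) with this repository's utils package importable; the camera placement, sample count, and output path are illustrative, and a default World is assumed to exist in the scene.

# Hypothetical usage (not part of the repository): clear the scene, add a sphere
# and a tracking camera, set a flat grey background, and render one still frame.
import bpy
from utils.mesh import create_smooth_sphere
from utils.utils import (clean_objects, build_rgb_background, add_track_to_constraint,
                         set_output_properties, set_cycles_renderer)

clean_objects()
sphere = create_smooth_sphere(location=(0.0, 0.0, 0.0), radius=1.0, name="Subject")
bpy.ops.object.camera_add(location=(0.0, -5.0, 2.0))
camera = bpy.context.object
add_track_to_constraint(camera, sphere)

scene = bpy.context.scene
build_rgb_background(scene.world, rgb=(0.9, 0.9, 0.9, 1.0))  # assumes scene.world exists
set_output_properties(scene, resolution_percentage=100, output_file_path="//render.png")
set_cycles_renderer(scene, camera, num_samples=64)
bpy.ops.render.render(write_still=True)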