├── .gitattributes ├── .gitignore ├── README.md ├── __init__.py ├── addon_preferences_ui.py ├── addon_updater.py ├── addon_updater_ops.py ├── asset_parser.py ├── atool.code-workspace ├── bitmap_type_name_conventions.json ├── bl_utils.py ├── data.blend ├── data.py ├── dev_tools.py ├── edit_mode_operator.py ├── image_utils.py ├── node_utils.py ├── pose_mode_operator.py ├── property_panel_operator.py ├── requirements.txt ├── scripts ├── get_polyhaven_dimensions.py ├── initialize_asset.py ├── preview.py ├── render_icon.blend ├── render_icon.py ├── render_partial.py ├── render_worker.py └── unreal_export.py ├── shader_editor_operator.py ├── shader_editor_ui.py ├── ship.py ├── type_definer.py ├── utils.py ├── view_3d_fur_operator.py ├── view_3d_operator.py └── view_3d_ui.py /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.py[cod] 2 | __pycache__/ 3 | *.blend1 4 | site-packages/ 5 | .ropeproject/ 6 | LICENSE 7 | material_settings.db 8 | local_resources/ 9 | atool_updater/ 10 | .vscode/ 11 | config.json 12 | __cache__.db 13 | es.exe 14 | __project__.json 15 | icon.png 16 | __icon__.png 17 | atool-main_updater/ 18 | __project__* 19 | uf_*.json -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ATool 2 | 3 | An asset manager add-on. 
4 | 5 | [Blender Artists](https://blenderartists.org/t/atool/1267349) 6 | 7 | [Node Groups Info](https://spark.adobe.com/page/HxjzDLFGeP7j5/) 8 | 9 | Support me: 10 | * Give feedback 11 | * Share the add-on 12 | * Suggest features 13 | * Report bugs 14 | * Donate: [Ko-Fi](https://ko-fi.com/unwave) (Fee: 0%), [Gumroad](https://gumroad.com/l/atool) (Fee: 5% + charge fee (3.5% + 30¢)) 15 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | bl_info = { 2 | "name" : "ATool", 3 | "author" : "unwave", 4 | "description" : "", 5 | "blender" : (2, 83, 0), 6 | "version" : (0, 0, 1), 7 | "location" : "", 8 | "warning" : "", 9 | "category" : "Generic" 10 | } 11 | 12 | from timeit import default_timer as timer 13 | start = timer() 14 | 15 | 16 | # https://docs.python.org/3/howto/logging.html 17 | # https://docs.python.org/3/library/logging.html 18 | import logging 19 | log = logging.getLogger("atool") 20 | log.setLevel(logging.DEBUG) 21 | 22 | log_handler = logging.StreamHandler() 23 | log_handler.setLevel(logging.DEBUG) 24 | log_formatter = logging.Formatter("Atool %(levelname)s: %(message)s") 25 | log_handler.setFormatter(log_formatter) 26 | log.addHandler(log_handler) 27 | 28 | import typing 29 | import sys 30 | import os 31 | import threading 32 | 33 | import bpy 34 | 35 | from . 
def ensure_site_packages(packages: typing.List[typing.Tuple[str, str]]):
    """Ensure the given third-party packages are importable, pip-installing any missing ones.

    `packages`: list of tuples `(import_name, pip_name)`, e.g. `("PIL", "Pillow")`.

    Raises `subprocess.CalledProcessError` if `ensurepip` or `pip install` fails.
    """
    if not packages:
        return

    import site
    import importlib
    import importlib.util

    # `pip install --user` puts modules into the user site-packages, which is
    # not on `sys.path` by default in Blender's bundled Python.
    user_site_packages = site.getusersitepackages()
    if user_site_packages not in sys.path: # avoid piling up duplicate entries on repeated calls
        sys.path.append(user_site_packages)

    modules_to_install = [pip_name for import_name, pip_name in packages if not importlib.util.find_spec(import_name)]
    if not modules_to_install:
        return

    # Blender 2.91 replaced `bpy.app.binary_path_python` with `sys.executable`.
    if bpy.app.version < (2, 91, 0):
        python_binary = bpy.app.binary_path_python
    else:
        python_binary = sys.executable

    import subprocess
    subprocess.run([python_binary, '-m', 'ensurepip'], check=True)
    subprocess.run([python_binary, '-m', 'pip', 'install', *modules_to_install, "--user"], check=True)

    importlib.invalidate_caches()
class ATOOL_OT_reload_addon(bpy.types.Operator):
    """Hot-reload every ATool module without restarting Blender."""
    bl_idname = "atool.reload_addon"
    bl_label = "Reload Atool Addon"
    bl_description = "Reload the Atool addon."

    def execute(self, context):

        # Collect the util modules that the add-on modules actually reference,
        # so they can be reloaded before the modules that depend on them.
        utils_to_reload = set()
        for module in modules:
            for key, value in module.__dict__.items():
                if key in utils_names:
                    utils_to_reload.add(value)

        for util in utils_to_reload:
            importlib.reload(util)

        # Order matters: unregister the old classes, reload the module,
        # then register the freshly loaded ones.
        for module in modules:
            module.register.unregister()
            importlib.reload(module)
            module.register.register()

        # Reset the asset-browser state kept on the window manager.
        wm = context.window_manager
        wm["at_asset_previews"] = 0
        wm["at_current_page"] = 1

        # Re-scan the asset library in the background, same as on startup.
        threading.Thread(target=wm.at_asset_data.update, args=(bpy.context,), daemon=True).start()
        # threading.Thread(target=utils.init_find, daemon=True).start()

        print('Atool has been reloaded.')

        return {'FINISHED'}
def update_library_path(self, context):
    """Property-update callback: validate the library path, then re-scan it in the background."""
    data = context.window_manager.at_asset_data
    data.check_path(self.library_path, 'library')
    worker = threading.Thread(target=data.update_library, args=(context,), daemon=True)
    worker.start()

def update_auto_path(self, context):
    """Property-update callback: validate the auto-processed path, then re-scan it in the background."""
    data = context.window_manager.at_asset_data
    data.check_path(self.auto_path, 'auto')
    worker = threading.Thread(target=data.update_auto, args=(context,), daemon=True)
    worker.start()
class ATOOL_OT_update_data_paths(bpy.types.Operator):
    """Re-scan the library and auto-processed folders in a background thread."""
    bl_idname = 'atool.data_paths'
    bl_label = 'Update'
    bl_options = {"REGISTER", "UNDO"}

    def execute(self, context):
        data = context.window_manager.at_asset_data
        worker = threading.Thread(target=data.update, args=(context,), daemon=True)
        worker.start()
        return {"FINISHED"}
def get_web_file(url, content_folder = None, content_path = None, headers = None):
    """Download `url` to disk with a progress readout.

    Either `content_folder` (target directory, file name derived from the url)
    or `content_path` (full target path) must be supplied.

    Returns `(True, downloaded_file_path)` on success, `(False, error_text)` on an HTTP error.
    """
    assert not(content_folder == None and content_path == None)

    import requests
    response = requests.get(url, headers=headers, stream=True)
    if response.status_code != 200:
        return False, response.text

    if content_path:
        os_path = content_path
        file_name = os.path.basename(os_path)
        os.makedirs(os.path.dirname(os_path), exist_ok=True)
    else:
        # Derive the file name from the final (post-redirect) url with query/fragment stripped.
        file_name = response.url.split("?")[0].split("#")[0].split("/")[-1] # todo: check if does not have extension
        os_path = os.path.join(content_folder, file_name)
        os.makedirs(content_folder, exist_ok=True)

    assert not os.path.exists(os_path)

    # Servers are not required to send Content-Length (e.g. chunked transfer):
    # fall back to None instead of crashing on int(None).
    content_length = response.headers.get('content-length')
    total = int(content_length) if content_length is not None else None
    bl_utils.download_with_progress(response, os_path, total= total, indent=1, prefix = file_name)

    return True, os_path
def get_web_ambientcg_info(url, content_folder):
    """Fetch asset info from the AmbientCG public API.

    Supported urls:
    https://cc0textures.com/view?id=Plaster003
    https://ambientcg.com/view?id=Bricks056
    https://cc0.link/a/Plaster003

    Returns `(True, info_dict)` or `(False, error_message)`.
    """

    if "cc0textures.com" in url or "ambientcg.com" in url:
        match = re.search(r"(?<=id=)[a-zA-Z0-9]+", url)
        if not match:
            return False, "Not valid Ambient CG url."
        id = match.group(0)
    elif "cc0.link" in url: # https://cc0.link/a/Plaster003
        url = url.split("?")[0].split("#")[0].rstrip("/")
        id = url.split("/")[-1]
    else:
        # Previously `id` stayed unbound here and the f-string below raised NameError.
        return False, "Not valid Ambient CG url."

    api_url = f"https://ambientcg.com/api/v2/full_json?id={id}&sort=Latest&limit=1&include=tagData%2CdisplayData%2CdimensionsData%2CdownloadData%2CpreviewData%2CimageData"

    headers = {'User-Agent': 'Blender'}

    import requests
    response = requests.get(api_url, headers=headers)
    if response.status_code != 200:
        return False, response.text

    data = response.json() # renamed from `json` to avoid shadowing the module

    asset = data["foundAssets"][0]

    if asset["dataType"] == "3DModel":
        return False, "3DModel is not supported yet."

    # The API reports dimensions in centimeters; convert to meters.
    dimensions = {}
    for letter, name in zip('xyz', ("dimensionX", "dimensionY", "dimensionZ")):
        dimension = asset.get(name)
        if dimension:
            dimensions[letter] = int(dimension)/100

    info = {
        "id": id,
        "name": asset["displayName"],
        "url": f"https://ambientcg.com/view?id={id}",
        "author": "ambientcg",
        "author_url": "https://ambientcg.com",
        "licence": "CC0",
        "licence_url": "https://help.ambientcg.com/01-General/Licensing.html",
        "tags": asset["tags"],
        "preview_url": asset["previewImage"]["1024-PNG"],
        "description": asset.get("description"),
        "dimensions": dimensions
    }

    # AmbientCG normal maps use the Y- (DirectX) convention.
    info['material_settings'] = {'Y- Normal Map': 1}

    if content_folder:
        download = utils.locate_item(asset["downloadFolders"], ("attribute", "4K-JPG"), return_as = "parent")[0]
        url = download["downloadLink"] # "https://cc0textures.com/get?file=Plaster003_4K-PNG.zip"
        info["downloadLink"] = url
        info["fileName"] = download["fileName"] # "Plaster003_4K-PNG.zip"

    utils.remove_empty(info)
    return True, info
def get_web_polyhaven_info(url, content_folder):
    """Fetch asset info from the Poly Haven API.

    Returns `(True, info_dict)` or `(False, error_message)`.
    When `content_folder` is truthy, a `downloads` list of url/rel_path dicts
    is added to the info for the caller to fetch.

    NOTE(review): raises `NotImplementedError` for HDRIs instead of returning
    `(False, msg)` like the other unsupported cases — confirm callers expect this.
    """
    # https://polyhaven.com/a/aerial_rocks_02
    url = get_base_url(url)

    if not "polyhaven.com/a/" in url:
        return False, "Not valid Poly Haven url."

    # aerial_rocks_02
    id = url.split('/')[-1]

    api_url = f"https://api.polyhaven.com/info/{id}"

    import requests
    response = requests.get(api_url)
    if response.status_code != 200:
        return False, response.text

    data = response.json() # type: dict

    # The API encodes the asset kind as an integer.
    type_to_text = {
        0: 'hdri',
        1: 'material',
        2: 'model',
    }
    type = type_to_text[data['type']]

    if type == 'hdri':
        raise NotImplementedError('HDRIs are not supported yet.')

    # `authors` maps author name -> contribution; join names for display.
    authors = data.get('authors', {})
    authors_list = list(authors)
    authors = ', '.join(authors_list)
    author_url = f"https://polyhaven.com/textures?a={authors_list[0]}"

    if type == 'material':
        preview_url = f"https://cdn.polyhaven.com/asset_img/thumbs/{id}.png?height=780"
    elif type == 'model':
        preview_url = f"https://cdn.polyhaven.com/asset_img/primary/{id}.png?height=780"

    tags = data.get('tags', [])
    tags.extend(data.get('categories', []))

    # Parse dimensions from the free-form "scale" text (e.g. "2 x 2 m").
    dimensions = {}
    if type == 'material':
        dimensions_text = data.get('scale')
        if dimensions_text:
            number_pattern = re.compile("\d+\.?\d*")
            for letter, number in zip('xyz', number_pattern.findall(dimensions_text)):
                dimensions[letter] = float(number)

    info = {
        "id": id,
        "name": data.get('name', id),
        "url": url,
        "author": authors,
        "author_url": author_url,
        "licence": "CC0",
        "licence_url": "https://polyhaven.com/license",
        "tags": tags,
        "preview_url": preview_url,
        # "description": "",
        "dimensions": dimensions,
    }

    utils.remove_empty(info)

    # Second request: the per-asset file listing (4k blend + its textures).
    api_url = f"https://api.polyhaven.com/files/{id}"
    response = requests.get(api_url)
    if response.status_code == 200:

        data = response.json()
        blend = data['blend']['4k']['blend']
        include = blend['include'] # type: dict

        if type == 'material':
            # Open the blend in a headless Blender to read exact dimensions,
            # more reliable than the free-form "scale" text parsed above.
            with tempfile.TemporaryDirectory() as temp_dir:
                is_ok, result = get_web_file(blend['url'], temp_dir)
                if is_ok:
                    process = bl_utils.run_blender(result, script=utils.get_script('get_polyhaven_dimensions.py'), stdout=subprocess.PIPE)
                    info['dimensions'] = json.loads(process.stdout.split("\n")[0])
                else:
                    print(f"Cannot get the blend file: {blend['url']}")
                    print(response.text)
    else:
        # The file listing is mandatory only when the caller wants downloads.
        if content_folder:
            return False, response.text
        else:
            print(f"Cannot get the files info: {api_url}")
            print(response.text)

    if content_folder:
        # `include`/`blend` are only bound on the 200 path above; the failing
        # path with a truthy `content_folder` has already returned.
        downloads = []

        if type == 'material':
            for rel_path, texture in include.items():
                downloads.append({'url': texture['url']})

        elif type == 'model':
            downloads.append({'url': blend['url']})
            for rel_path, texture in include.items():
                downloads.append({
                    'rel_path': rel_path,
                    'url': texture['url']
                })

        info["downloads"] = downloads

    return True, info
def get_info_from_substance_json(data):
    """Build an ATool info dict from a Substance Source asset JSON payload."""
    preview_url = utils.locate_item(data, ("label", "main"), return_as = "parent")[0]["url"]

    extra_data = {item["key"]: item["value"] for item in data["extraData"]}

    # "physicalSize" is "x/y/z" in centimeters; convert to meters.
    dimensions = {}
    physical_size = extra_data.get("physicalSize")
    if physical_size:
        dimensions = {axis: float(size)/100.0 for axis, size in zip('xyz', physical_size.split("/"))}

    tags = data["tags"]
    tags.append(extra_data["type"])

    info = {
        # "id": extra_data["originalName"], # not always
        "name": data["title"],
        "url": "https://source.substance3d.com/allassets/" + data["id"],
        "author": extra_data["author"],
        "author_url": "https://source.substance3d.com/",
        "licence": "EULA",
        "licence_url": "https://www.substance3d.com/legal/general-terms-conditions",
        "tags": tags,
        "preview_url": preview_url,
        # "description": "",
        "dimensions": dimensions
    }

    # info["preview_path"] = ""

    utils.remove_empty(info)
    return info
tags = utils.deduplicate(tags) 363 | tags = list(filter(None, tags)) 364 | 365 | id = None 366 | pkgurl = attrs.get("pkgurl") 367 | if pkgurl: 368 | match = re.search(r"(?<=pkg:\/\/).+", pkgurl) 369 | if match: 370 | id = match.group(0) 371 | 372 | if id: 373 | name = id 374 | else: 375 | name = os.path.splitext(os.path.basename(xml_file))[0] 376 | label = attrs.get("label") 377 | if label: 378 | name = label.strip(" ") 379 | 380 | dimensions = {} 381 | physicalsize = attrs.get("physicalsize") 382 | if physicalsize: 383 | for letter, dimension in zip('xyz' , physicalsize.split(",")): 384 | dimensions[letter] = float(dimension)/100.0 385 | 386 | info = { 387 | "id": id, 388 | "name": name, 389 | # "url": "", 390 | "author": attrs.get("author", ""), 391 | "author_url": attrs.get("authorurl", ""), 392 | # "licence": "", 393 | # "licence_url": "", 394 | "tags": tags, 395 | # "preview_url": "", 396 | "description": attrs.get("description", ""), 397 | "dimensions": dimensions, 398 | "xml_attrs": attrs 399 | } 400 | 401 | 402 | utils.remove_empty(info) 403 | return info 404 | 405 | def get_info_from_sbsar(sbsar): 406 | 407 | global seven_z 408 | if not seven_z: 409 | return False, "7z is not found." 
410 | 411 | with tempfile.TemporaryDirectory() as temp_dir: 412 | subprocess.run([seven_z, "e", sbsar, "-o" + temp_dir, "*.xml" ,"-r"], stdout=subprocess.PIPE, check=True) 413 | xml_file = list(os.scandir(temp_dir))[0].path 414 | return True, get_info_from_sbsar_xml(xml_file) 415 | 416 | def get_web_substance_source_info_by_label(label): 417 | 418 | substance_api_url = "https://source-api.substance3d.com/beta/graphql" 419 | 420 | query_assets =\ 421 | 'query Assets($page: Int, $limit: Int = 1, $search: String, $filters: AssetFilters, $sortDir: SortDir = desc, $sort: AssetSort = byPublicationDate) {\n'\ 422 | ' assets(search: $search, filters: $filters, sort: $sort, sortDir: $sortDir, page: $page, limit: $limit) {\n'\ 423 | ' total\n'\ 424 | ' hasMore\n'\ 425 | ' items {\n'\ 426 | ' id\n'\ 427 | ' title\n'\ 428 | ' tags\n'\ 429 | ' cost\n'\ 430 | ' new\n'\ 431 | ' free\n'\ 432 | ' downloadsRecentlyUpdated\n'\ 433 | ' attachments {\n'\ 434 | ' id\n'\ 435 | ' tags\n'\ 436 | ' label\n'\ 437 | ' ... on PreviewAttachment {\n'\ 438 | ' kind\n'\ 439 | ' url\n'\ 440 | ' __typename\n'\ 441 | ' }\n'\ 442 | ' ... 
on DownloadAttachment {\n'\ 443 | ' url\n'\ 444 | ' __typename\n'\ 445 | ' }\n'\ 446 | ' __typename\n'\ 447 | ' }\n'\ 448 | ' extraData {\n'\ 449 | ' key\n'\ 450 | ' value\n'\ 451 | ' __typename\n'\ 452 | ' }\n'\ 453 | ' type\n'\ 454 | ' status\n'\ 455 | ' __typename\n'\ 456 | ' }\n'\ 457 | ' __typename\n'\ 458 | ' }\n'\ 459 | '}\n' 460 | 461 | substance_search_payload = { 462 | "operationName": "Assets", 463 | "variables": { 464 | "limit": 1, 465 | "sortDir": "desc", 466 | "sort": "bySearchScore", 467 | "search": "\"" + label + "\"", 468 | #"filters": {"status": ["published"]}, 469 | }, 470 | "query": query_assets 471 | } 472 | 473 | import requests 474 | response = requests.post(substance_api_url, json = substance_search_payload) 475 | if response.status_code != 200: 476 | return False, response.text 477 | 478 | search_json = response.json() 479 | items = search_json["data"]["assets"]["items"] 480 | if not items: 481 | return None 482 | 483 | info = get_info_from_substance_json(items[0]) 484 | 485 | if label in info["name"]: # name == title form substance source json 486 | return info 487 | else: 488 | print(items[0]["title"], "!=" ,label) 489 | return None 490 | 491 | def get_web_substance_source_info(url, content_folder): 492 | 493 | # https://source.substance3d.com/allassets/3a92437f756236ad41ca5603286e0068768f1635?free=true 494 | 495 | id = url.split("?")[0].split("#")[0].rstrip("/").split("/")[-1] 496 | 497 | substance_api_url = "https://source-api.substance3d.com/beta/graphql" 498 | 499 | query_asset = 'query Asset($id: String!) {\n'\ 500 | ' asset(id: $id) {\n'\ 501 | ' id\n'\ 502 | ' title\n'\ 503 | ' tags\n'\ 504 | ' cost\n'\ 505 | ' new\n'\ 506 | ' free\n'\ 507 | ' downloadsRecentlyUpdated\n'\ 508 | ' attachments {\n'\ 509 | ' id\n'\ 510 | ' tags\n'\ 511 | ' label\n'\ 512 | ' ... on PreviewAttachment {\n'\ 513 | ' url\n'\ 514 | ' kind\n'\ 515 | ' __typename\n'\ 516 | ' }\n'\ 517 | ' ... 
def get_web_blendswap_info(url, content_folder):
    """Scrape asset info from a BlendSwap blend page.

    NOTE(review): this parses the live site's HTML/CSS classes, so it breaks
    whenever BlendSwap changes its page layout.

    Returns `(True, info_dict)` or `(False, error_message)`.
    """

    # Strip query, fragment and a trailing slash.
    url = url.split("?")[0].split("#")[0].rstrip("/")

    if not re.search(r"blendswap.com\/blend\/\d+$", url) and not re.search(r"blendswap.com\/blends\/view\/\d+$", url):
        return False, "Not valid BlendSwap url."

    import requests
    response = requests.get(url)
    if response.status_code != 200:
        return False, response.text

    url = response.url # can change
    id = url.split("/")[-1]
    preview_url = f"https://www.blendswap.com/blend_previews/{id}/0/0"

    from bs4 import BeautifulSoup, NavigableString
    soup = BeautifulSoup(response.text, "html.parser")

    # Page title, with the trailing <small> badge removed.
    name = soup.find("h1", {"class": "page-title"})
    if name.small:
        name.small.decompose()
    name = name.text.strip("\n ")

    # Sidebar card holding the author and licence rows, located via their icons.
    sticky_list = soup.find("div", {"class": "card sticky-top card-sticky"})

    author = sticky_list.find("i", {"class": "far fa-user"}).parent

    # Numeric author id is the last path segment of the profile link.
    author_id = re.search(r"(?<=\/)\d+$", author.a["href"])[0]
    author_url = f"https://www.blendswap.com/profile/{author_id}/blends"
    author = author.a.string

    # Second word of the licence row text is the licence short name (e.g. "CC-BY").
    licence = sticky_list.find("i", {"class": "fab fa-creative-commons"}).parent
    licence = re.findall(r"[\w\d-]+", licence.text)[1]

    licence_urls = {
        "CC-BY": "https://creativecommons.org/licenses/by/4.0/",
        "CC-BY-SA": "https://creativecommons.org/licenses/by-sa/4.0/",
        "CC-BY-ND": "https://creativecommons.org/licenses/by-nd/4.0/",
        "CC-BY-NC": "https://creativecommons.org/licenses/by-nc/4.0/",
        "CC-BY-NC-SA": "https://creativecommons.org/licenses/by-nc-sa/4.0/",
        "CC-BY-NC-ND": "https://creativecommons.org/licenses/by-nc-nd/4.0/",
        "CC-0": "https://creativecommons.org/publicdomain/zero/1.0/",
        "GAL": "https://generalassetlicense.org/",
    }

    tags = soup.findAll("span", {"class": "badge badge-primary tag-badge"})
    tags = [tag.string for tag in tags]

    description = soup.find("div", {"class": "card-body blend-description"})

    # Flatten the description paragraphs to plain text, inlining link hrefs
    # as "text: url"; headings and bare whitespace nodes are dropped.
    description_list = []
    for tag in description.children:
        if isinstance(tag, NavigableString):
            continue
        if tag.name == "h3":
            continue
        if tag.name == "p":
            for sub_tag in tag.children:
                if sub_tag.name == "a":
                    description_list.append(sub_tag.string + ": " + sub_tag["href"])
                else:
                    description_list.append(sub_tag.string)
            description_list.append("\n")

    description = ''.join(description_list)


    info = {
        "name": name,
        "url": url,
        "author": author,
        "author_url": author_url,
        "licence": licence,
        "licence_url": licence_urls.get(licence, ""),
        "tags": tags,
        "preview_url": preview_url,
        "description": description,
    }

    if content_folder:
        is_ok, result = get_web_file(preview_url, content_folder)
        if is_ok:
            info["preview_path"] = result
        else:
            print("Cannot get preview from:", preview_url)
            print(result)

    return True, info
654 | 655 | id = url.split("/")[-1].split("-")[-1] 656 | 657 | #https://sketchfab.com/i/models/c2933b42e63f4f53bb061e323047615a 658 | 659 | import requests 660 | response = requests.get("https://sketchfab.com/i/models/"+ id) 661 | if response.status_code != 200: 662 | return False, response.text 663 | 664 | json = response.json() 665 | 666 | preview_url = max(json["thumbnails"]["images"], key=operator.itemgetter("size"))["url"] 667 | 668 | info = { 669 | "id": json["slug"], 670 | "name": json["name"], 671 | "url": json["viewerUrl"], 672 | "author": json["user"]["displayName"], 673 | "author_url": json["user"]["profileUrl"], 674 | "licence": json["license"]["label"], 675 | "licence_url": json["license"]["url"], 676 | "tags": json["tags"], 677 | "preview_url": preview_url, 678 | "description": json["description"], 679 | # "dimensions": [] 680 | } 681 | 682 | if content_folder: 683 | is_ok, result = get_web_file(preview_url, content_folder) 684 | if is_ok: 685 | info["preview_path"] = result 686 | else: 687 | print("Cannot get preview from:", preview_url) 688 | print(result) 689 | 690 | return True, info 691 | 692 | 693 | def get_megascan_info_from_json(mega_info): 694 | 695 | name = None 696 | 697 | tags = mega_info.get("tags", []) 698 | tags.extend(mega_info.get("categories", [])) 699 | 700 | semantic_tags = mega_info.get("semanticTags") 701 | if semantic_tags: 702 | semantic_tags.pop("industry", None) 703 | for key, value in semantic_tags.items(): 704 | if isinstance(value, list): 705 | tags.extend(value) 706 | elif key in ("subject_matter", "asset_type"): 707 | tags.append(value) 708 | 709 | name = semantic_tags.get("name") 710 | 711 | if not name: 712 | name = mega_info.get("name", "") 713 | 714 | tags = list(map(lambda x: x.lower().strip(" "), dict.fromkeys(tags))) 715 | 716 | meta = {item["key"]: item["value"] for item in mega_info.get("meta", [])} 717 | 718 | number_pattern = re.compile("\d+(?:\.\d+)?") 719 | 720 | dimensions = {} 721 | 722 | x = 
    if x:
        x = float(number_pattern.search(x).group(0))
    y = meta.get("width")
    if y:
        y = float(number_pattern.search(y).group(0))

    # when both length and width are missing fall back to "scanArea",
    # then to the most common "physicalSize" found anywhere in the listing
    if not x and not y:
        scan_area = meta.get("scanArea")
        if not scan_area:
            sizes = utils.locate_item(mega_info, "physicalSize", is_dict_key=True, return_as='data')
            if sizes:
                scan_area = Counter(sizes).most_common(1)[0][0]
        if scan_area:
            sizes = number_pattern.findall(scan_area)
            if len(sizes) == 2:
                x = float(sizes[0])
                y = float(sizes[1])
            elif len(sizes) == 1:
                # a single number means a square scan area
                x = y = float(sizes[0])

    if x:
        dimensions['x'] = x
    if y:
        dimensions['y'] = y

    z = meta.get("height")
    if z:
        dimensions['z'] = float(number_pattern.search(z).group(0))

    info = {
        # "id": "", # can get a slug from the json listing files
        "name": name,
        "url": f"https://quixel.com/megascans/home?assetId={mega_info['id']}",
        "author": "Quixel Megascans",
        "author_url": "https://quixel.com/megascans",
        "licence": "EULA",
        "licence_url": "https://quixel.com/terms",
        "tags": tags,
        # "preview_url": "", # probably the url is generated by some javascript
        # "description": "", # does not have it
        "dimensions": dimensions,
    }

    utils.remove_empty(info)
    return info

def get_web_megascan_info(url, content_folder):
    """Fetch asset info for a Quixel Megascans url.

    `content_folder` is accepted for interface parity with the other
    getters but is not used here (no preview can be downloaded).
    Returns (is_ok, result): an info dict on success, an error message otherwise.
    """

    # https://quixel.com/megascans/home?assetId={megascan_id}
    match = re.search(r"(?<=assetId=)[a-zA-Z0-9]+", url)
    if not match:
        return False, "Not valid Megascan url."

    megascan_id = match[0]

    api_url = f"https://quixel.com/v1/assets/{megascan_id}"

    import requests
    response = requests.get(api_url)
    if response.status_code != 200:
        return False, response.text

    mega_info = response.json()

    info = get_megascan_info_from_json(mega_info)

    return True, info


# template of the full info dict the getters may return
'''
info = {
    "id": "",
    "name": "",
    "url": "",
    "author": "",
    "author_url": "",
    "licence": "",
    "licence_url": "",
    "tags": [],
    "preview_url": "",
    "description": "",
    "dimensions": {},
}
info["preview_path"] = ""
'''

# url domain -> info getter
INFO_SUPPORTED_SITES = {
    "sketchfab.com": get_web_sketchfab_info,
    "blendswap.com": get_web_blendswap_info,
    "source.substance3d.com": get_web_substance_source_info,
    "substance3d.adobe.com": get_web_substance_source_info,
    "quixel.com": get_web_megascan_info,

    "polyhaven.com": get_web_polyhaven_info,

    "cc0textures.com": get_web_ambientcg_info,
    "cc0.link": get_web_ambientcg_info,
    "ambientcg.com": get_web_ambientcg_info,
}

# url domain -> asset auto-downloader
ASSET_SUPPORTED_SITES = {
    "polyhaven.com": get_web_polyhaven_asset,

    "cc0textures.com": get_web_ambientcg_asset,
    "ambientcg.com": get_web_ambientcg_asset,
    "cc0.link": get_web_ambientcg_asset
}

def get_web(url: str, content_folder = None, as_asset = False) -> typing.Tuple[bool, dict]:
    """
    `Parameters`: \n
    `url`: url to the asset on the internet \n
    `content_folder`: a folder to store downloads \n
    `as_asset`: if the url is a supported autogetter asset url
    `Return`: tuple (is_ok, result) \n
    - If `is_ok` is `True` - `result` is a dictionary with the info. \n
    - If `is_ok` is `False` - `result` is an error message. \n
    """

    if as_asset and not content_folder:
        raise Exception("If `as_asset` is True `content_folder` must be supplied.")

    # if not re.search(r"^((https?|ftp|smtp):\/\/)?(www.)?[a-z0-9]+\.[a-z]+(\/[a-zA-Z0-9#]+\/?)*$", url):
    #     return False, "Not valid URL: " + url

    import validators

    # retry with an explicit scheme before giving up on the url
    if not validators.url(url):
        url = "https://" + url
        if not validators.url(url):
            return False, "Not valid URL: " + url

    from urllib.parse import urlparse
    domain = urlparse(url).netloc

    # dispatch tables are keyed without the "www." prefix
    if domain.startswith("www."):
        domain = domain[4:]

    if as_asset:
        getter = ASSET_SUPPORTED_SITES.get(domain)
    else:
        getter = INFO_SUPPORTED_SITES.get(domain)
    if not getter:
        print("Parser:", url, "The site is not supported.")
        return False, "The url is not supported."
    else:
        is_ok, result = getter(url, content_folder)
        print("Parser:", url, result)
        return is_ok, result
--------------------------------------------------------------------------------
/atool.code-workspace:
--------------------------------------------------------------------------------
{
	"folders": [
		{
			"path": "."
5 | } 6 | ], 7 | "settings": { 8 | "search.exclude":{ 9 | "site-packages": true, 10 | "addon_updater.py": true, 11 | "addon_updater_ops.py": true 12 | }, 13 | "cSpell.words": [ 14 | "Allegorithmic", 15 | "ASHIKHMIN", 16 | "atool", 17 | "ATOOL", 18 | "authorurl", 19 | "autoraise", 20 | "blendswap", 21 | "bmesh", 22 | "BSDF", 23 | "bspline", 24 | "colorspace", 25 | "COMBXYZ", 26 | "COPYDOWN", 27 | "depsgraph", 28 | "DOWNARROW", 29 | "editmode", 30 | "EEVEE", 31 | "hashfile", 32 | "hexlify", 33 | "HKEY", 34 | "idname", 35 | "imohashxx", 36 | "inflectable", 37 | "LAPLACIANSMOOTH", 38 | "LEFTRIGHT", 39 | "lerp", 40 | "licence", 41 | "lowpoly", 42 | "mainfile", 43 | "matapp", 44 | "matapptemp", 45 | "mathutils", 46 | "megascan", 47 | "megascans", 48 | "midlevel", 49 | "MULTIRES", 50 | "NEWFOLDER", 51 | "NORMALEDIT", 52 | "objectmode", 53 | "OPENIMAGEDENOISE", 54 | "OUTLINER", 55 | "paramflags", 56 | "pkgurl", 57 | "quixel", 58 | "recalc", 59 | "REMESH", 60 | "Repath", 61 | "sbsar", 62 | "scandir", 63 | "SEPRGB", 64 | "sketchfab", 65 | "startfile", 66 | "subcolumn", 67 | "subdiv", 68 | "SUBSURF", 69 | "subvalue", 70 | "TEXTEDIT", 71 | "TOON", 72 | "TRIA", 73 | "triplanar", 74 | "UNCOUNTABLES", 75 | "undisplaced", 76 | "ungroup", 77 | "Uninitialize", 78 | "unrotate", 79 | "untiling", 80 | "UVMAP", 81 | "verts", 82 | "vgroup", 83 | "webloc", 84 | "WINFUNCTYPE", 85 | "wintypes", 86 | "WIREFRAME", 87 | "worldmachine" 88 | ] 89 | } 90 | } -------------------------------------------------------------------------------- /bitmap_type_name_conventions.json: -------------------------------------------------------------------------------- 1 | { 2 | "bitmap":{ 3 | "extension":[ 4 | ".bmp", 5 | ".jpeg", 6 | ".jpg", 7 | ".jp2", 8 | ".j2c", 9 | ".tga", 10 | ".cin", 11 | ".dpx", 12 | ".exr", 13 | ".hdr", 14 | ".sgi", 15 | ".rgb", 16 | ".bw", 17 | ".png", 18 | ".tiff", 19 | ".tif", 20 | ".psd", 21 | ".dds" 22 | ], 23 | "type":{ 24 | "normal":[ 25 | "normals", 26 | "normal", 27 | "norm", 
28 | "nor", 29 | "nrml", 30 | "nrm", 31 | "n", 32 | "ddn", 33 | "nm", 34 | "directx", 35 | "dx_normal", 36 | "gl_normal", 37 | "nor_gl", 38 | "nor_dx", 39 | "normalsmap", 40 | "normalmap" 41 | ], 42 | "albedo":[ 43 | "albedo", 44 | "albd", 45 | "alb", 46 | "col_var2" 47 | ], 48 | "diffuse":[ 49 | "diffuse", 50 | "diff", 51 | "dif", 52 | "dffs", 53 | "d", 54 | "diffusecolor", 55 | "basecolour", 56 | "basecolor", 57 | "base_colour", 58 | "base_color", 59 | "bc", 60 | "colour", 61 | "color", 62 | "clr", 63 | "col_var1", 64 | "col", 65 | "c", 66 | "colors", 67 | "colormap" 68 | ], 69 | "metallic":[ 70 | "metallic", 71 | "met", 72 | "mt", 73 | "m", 74 | "mtllc", 75 | "metal", 76 | "metall", 77 | "metalness", 78 | "meta" 79 | ], 80 | "roughness":[ 81 | "roughness", 82 | "rough", 83 | "r", 84 | "rghnss", 85 | "rgh", 86 | "specularroughness" 87 | ], 88 | "displacement":[ 89 | "displacement", 90 | "dsplcmnt", 91 | "displ", 92 | "disp", 93 | "dis", 94 | "dp", 95 | "h", 96 | "height", 97 | "hght", 98 | "displaceheightfield" 99 | ], 100 | "ambient_occlusion":[ 101 | "ambient_occlusion", 102 | "ao", 103 | "ambientocclusion", 104 | "occlusion", 105 | "ambient", 106 | "o", 107 | "a", 108 | "aomap" 109 | ], 110 | "bump":[ 111 | "bump", 112 | "bmp", 113 | "b" 114 | ], 115 | "opacity":[ 116 | "opacity", 117 | "transparency", 118 | "trnsp", 119 | "trans", 120 | "transp", 121 | "alpha", 122 | "alph", 123 | "presence", 124 | "mask" 125 | ], 126 | "gloss":[ 127 | "gloss", 128 | "glossiness", 129 | "g", 130 | "glss", 131 | "smoothness", 132 | "smooth" 133 | ], 134 | "specular":[ 135 | "specular", 136 | "spec", 137 | "s", 138 | "specular_f0", 139 | "specularf0", 140 | "spec_f0", 141 | "specf0", 142 | "f0", 143 | "reflection", 144 | "refl", 145 | "ref", 146 | "reflect", 147 | "rflctn" 148 | ], 149 | "emissive":[ 150 | "emissive", 151 | "emis", 152 | "emit", 153 | "emission", 154 | "glow", 155 | "e", 156 | "illumination", 157 | "illum", 158 | "glowcolor", 159 | "emissioncolor" 160 | ], 161 
         "ignore":[
            "cavity",
            "translucency"
         ],
         "cavity":[
            "cavity"
         ],
         "translucency":[
            "translucency",
            "translucent"
         ],
         "exceptions":[
            "ddna"
         ]
      }
   }
}
--------------------------------------------------------------------------------
/bl_utils.py:
--------------------------------------------------------------------------------
from __future__ import annotations
import operator
import threading
import typing
import os
import subprocess
import time
import math

import bpy
import mathutils
import blf

from cached_property import cached_property

class Register():
    """Collects an add-on module's classes, custom properties and menu items
    and registers/unregisters them with Blender in one call."""

    def __init__(self, globals: dict):
        self.globals: dict = globals # the defining module's globals()
        self.properties = {}
        self.menu_items = []

    @property
    def classes(self):
        # every ATOOL* name in the module's globals is treated as a registrable bpy class
        return [module for name, module in self.globals.items() if name.startswith("ATOOL")]

    def property(self, name, value, bpy_type = bpy.types.WindowManager):
        """Queue a custom property to be set on `bpy_type` at register time."""
        self.properties[(bpy_type, name)] = value

    def menu_item(self, type, object):
        """Queue a draw function to be appended to the menu `type` at register time."""
        self.menu_items.append((type, object))

    def register(self):

        for c in self.classes:
            bpy.utils.register_class(c)

        for (bpy_type, name), value in self.properties.items():
            setattr(bpy_type, name, value)

        for menu, object in self.menu_items:
            menu.append(object)


    def unregister(self):

        for c in self.classes:
            bpy.utils.unregister_class(c)

        for bpy_type, name in self.properties:
            delattr(bpy_type, name)

        for menu, object in self.menu_items:
            menu.remove(object)

# support both package import (add-on) and direct script execution
if __package__:
    from . import utils
import node_utils 58 | else: 59 | import utils 60 | import node_utils 61 | 62 | DIR_PATH = os.path.dirname(os.path.realpath(__file__)) 63 | 64 | 65 | class Reference: 66 | """ Reference for using with undo/redo/reload """ 67 | 68 | def __init__(self, block: bpy.types.ID, origin: bpy.types.ID = None): 69 | """ 70 | `block`: data block to get reference for 71 | `origin`: origin of the ID, required for embedded `ShaderNodeTree` 72 | """ 73 | 74 | id_data: bpy.types.ID = block.id_data 75 | self.is_embedded_data = id_data.is_embedded_data 76 | 77 | id_type = id_data.__class__.__name__ 78 | 79 | if id_type not in ("Object", "Material", "ShaderNodeTree", "Image", "Library"): 80 | raise NotImplementedError("Reference for the type '{id_type}' is not yet implemented.") 81 | 82 | if id_type == 'ShaderNodeTree' and self.is_embedded_data: # if is material 83 | if origin is None: 84 | raise TypeError("Origin of the ShaderNodeTree is required.") 85 | self.origin = Reference(origin) 86 | 87 | self.id_type = id_type 88 | self.id_name = id_data.name 89 | 90 | library = id_data.library 91 | if library: 92 | self.library_path = library.filepath 93 | else: 94 | self.library_path = None 95 | 96 | try: 97 | self.path_from_id = block.path_from_id() 98 | except: 99 | self.path_from_id = None 100 | 101 | @staticmethod 102 | def get_collection_item(collection: bpy.types.bpy_prop_collection, id_name: str, library_path: str) -> bpy.types.ID: 103 | try: 104 | return collection[id_name, library_path] 105 | except: 106 | return None 107 | 108 | def get(self) -> bpy.types.bpy_struct: 109 | id_type = self.id_type 110 | 111 | if id_type == "Object": 112 | id_data = self.get_collection_item(bpy.data.objects, self.id_name, self.library_path) 113 | elif id_type == "Material": 114 | id_data = self.get_collection_item(bpy.data.materials, self.id_name, self.library_path) 115 | elif id_type == "ShaderNodeTree": 116 | if self.is_embedded_data: # if is material 117 | id_data = self.origin.get().node_tree 118 
            else:
                id_data = self.get_collection_item(bpy.data.node_groups, self.id_name, self.library_path)
        elif id_type == "Image":
            id_data = self.get_collection_item(bpy.data.images, self.id_name, self.library_path)
        elif id_type == "Library":
            id_data = self.get_collection_item(bpy.data.libraries, self.id_name, self.library_path)

        if not id_data:
            return None

        # resolve the sub-path (e.g. a node inside a tree) when one was recorded
        if self.path_from_id:
            data = id_data.path_resolve(self.path_from_id)
        else:
            data = id_data

        return data


class Missing_File:
    """A missing external file and the data block that references it,
    used by the file finding/relinking tools."""
    # SUPPORTED_TYPES = ('Image', 'Library')

    def __init__(self, path, block):

        self.block = Reference(block)
        self.type: str
        self.type = block.__class__.__name__

        # if self.type not in self.SUPPORTED_TYPES:
        #     raise TypeError(f"The block '{block.name}' with type '{self.type}' is not supported.")

        # NOTE(review): lower-casing makes later matching case-insensitive but
        # loses the original case - presumably assumes case-insensitive
        # (Windows-style) file systems; verify against the relink code
        self.path = path.lower()
        self.name = os.path.basename(self.path)

        self.found_paths = []
        self.closest_path: str = None

    def reload(self):
        # repoint the referencing block at the best found path and reload it
        block = self.block.get()
        block.filepath = self.closest_path
        block.reload()


class Dependency_Getter(dict):
    """Getting Image and Library dependencies. 
    This class will be extended if needed."""

    def __init__(self):
        # parent object -> child objects, built lazily by the `children` property
        self._children = None # type: typing.Dict[bpy.types.object, typing.List[bpy.types.object]]

    def cached(func):
        """Memoize a get_*_dependencies method per ID type and per data block;
        the getter instance itself (a dict) is the cache."""

        def wrapper(self: Dependency_Getter, *args, **kwargs):
            id_data = args[0]
            id_type = id_data.__class__

            id_type_cache = self.get(id_type)
            if not id_type_cache:
                self[id_type] = id_type_cache = {}

            dependencies = id_type_cache.get(id_data)
            if not dependencies:
                id_type_cache[id_data] = dependencies = func(self, *args, **kwargs)

            return dependencies

        return wrapper

    @property
    def children(self):
        # build the parent -> children map once and reuse it
        if self._children:
            return self._children

        self._children = utils.list_by_key(bpy.data.objects, operator.attrgetter('parent'))
        return self._children

    @cached
    def get_node_tree_dependencies(self, node_tree: bpy.types.NodeTree):
        """Map ID -> [file dependencies] for a node tree, recursing into node groups."""

        dependencies = {} # type: typing.Dict[bpy.types.ID, typing.List[bpy.types.ID]]
        def add(dependency, ID = node_tree):
            utils.map_to_list(dependencies, ID, dependency)

        if node_tree.library and node_tree.library.filepath:
            add(node_tree.library)

        for node in node_tree.nodes:

            # only file-backed images count as external dependencies
            if node.type == 'TEX_IMAGE' and node.image and node.image.source == 'FILE' and node.image.filepath:
                add(node.image)

            elif node.type == 'GROUP' and node.node_tree:
                # add(node.node_tree)
                add(self.get_node_tree_dependencies(node.node_tree), node.node_tree)

        return dependencies

    @cached
    def get_material_dependencies(self, material: bpy.types.Material):
        return self.get_node_tree_dependencies(material.node_tree)

    @cached
    def get_collection_dependencies(self, collection: bpy.types.Collection):
        """Map ID -> [dependencies] for a collection and all of its objects."""

        dependencies = {} # type: typing.Dict[bpy.types.ID, typing.List[bpy.types.ID]]
        def add(dependency):
            utils.map_to_list(dependencies, collection, dependency)

        if collection.library and collection.library.filepath:
            add(collection.library)

        for object in collection.all_objects:
            add(self.get_object_dependencies(object))

        return dependencies

    @cached
    def get_object_dependencies(self, object: bpy.types.Object):
        """Map ID -> [dependencies] for an object: its library, its data's
        library, its materials' files, instanced collections, dupli children
        and particle system instances."""

        dependencies = {} # type: typing.Dict[bpy.types.ID, typing.List[bpy.types.ID]]
        def add(source, ID: bpy.types.ID = object):
            utils.map_to_list(dependencies, ID, source)

        if object.library and object.library.filepath:
            add(object.library)

        if object.data:
            if object.data.library and object.data.library.filepath:
                add(object.data.library)
            # add(object.data)
            # add(object.data.library, object.data)

            if hasattr(object.data, 'materials'):
                for material in object.data.materials:
                    if material: # material slots can be empty
                        add(self.get_material_dependencies(material))
                        # add(material)
                        # add(self.get_material_dependencies(material), material)

        if object.instance_type == 'COLLECTION' and object.instance_collection:
            # add(object.instance_collection)
            add(self.get_collection_dependencies(object.instance_collection))

        # dupli-vert/face instancing pulls in the children's dependencies
        if object.instance_type in ('VERTS', 'FACES'):
            for child in self.children[object]:
                # add(child)
                add(self.get_object_dependencies(child))

        particle_systems = [modifier.particle_system.settings for modifier in object.modifiers if modifier.type == 'PARTICLE_SYSTEM'] # type: typing.List[bpy.types.ParticleSettings]
        for particle_system in particle_systems:

            if particle_system.render_type == 'COLLECTION' and particle_system.instance_collection:
                add(self.get_collection_dependencies(particle_system.instance_collection))
                # add(particle_system.instance_collection)
                # add(particle_system)
                # add(self.get_collection_dependencies(particle_system.instance_collection), particle_system)
            elif particle_system.render_type == 'OBJECT' and particle_system.instance_object:
                add(self.get_object_dependencies(particle_system.instance_object))
                # add(particle_system.instance_object)
                # add(particle_system)
                # add(self.get_object_dependencies(particle_system.instance_object), particle_system)

        return dependencies

    def get_dependencies(self, ID: bpy.types.ID):
        """Dispatch to the get_*_dependencies method matching the ID type."""
        if ID.__class__ == bpy.types.Object:
            return self.get_object_dependencies(ID)
        elif ID.__class__ == bpy.types.Collection:
            return self.get_collection_dependencies(ID)
        elif ID.__class__ == bpy.types.Material:
            return self.get_material_dependencies(ID)
        elif ID.__class__ == bpy.types.NodeTree:
            return self.get_node_tree_dependencies(ID)

    def get_by_type(self, dependencies: dict, type = ('Library', 'Image')):
        """ Recursively getting dependencies by `type` """

        result = []

        for IDs in dependencies.values():
            for ID in IDs:
                if ID.__class__.__name__ in type:
                    result.append(ID)

                sub_dependencies = self.get_dependencies(ID)
                if sub_dependencies:
                    result.extend(self.get_by_type(sub_dependencies, type = type))

        return utils.deduplicate(result)

    def get_object_dependencies_by_type(self, object: bpy.types.Object, type = ('Library', 'Image')):
        """Flat, deduplicated list of an object's direct dependencies of the
        given types; only file-backed images are kept."""

        result = []

        for IDs in self.get_object_dependencies(object).values():
            for ID in IDs:
                id_type = ID.__class__.__name__
                if not id_type in type:
                    continue
                if id_type == 'Image' and not ID.source == 'FILE':
                    continue
                result.append(ID)

        return utils.deduplicate(result)


def arrange_by_materials(objects: typing.Iterable[bpy.types.Object], by_materials = True, by_images = True):
    """Group objects that share materials and/or images and lay the groups
    out in rows along the Y axis."""

    # frozenset of shared materials/images -> objects using them
    sets = {} # type: typing.Dict[frozenset, typing.List[bpy.types.Object]]
    sets['empty'] = []

    def append(object):

        if object.data and hasattr(object.data, 'materials'):
            materials = [material for material in object.data.materials if material]
        else:
            materials = None

        if not materials:
            sets['empty'].append(object)
            return

        object_set = []

        if by_images:
            all_images = []
            for material in materials:
                all_images.extend(node_utils.get_all_images(material.node_tree))
            object_set.extend(all_images)

        if by_materials:
            object_set.extend(materials)

        if not object_set:
            sets['empty'].append(object)
            return

        object_set = frozenset(object_set)

        # merge the object into the first group sharing any material/image
        for set in sets:
            if object_set.isdisjoint(set):
                continue

            if object_set.issubset(set):
                sets[set].append(object)
            else:
                # widen the group key to the union; mutating `sets` here is
                # safe only because we return immediately afterwards
                sets[set].append(object)
                sets[set|object_set] = sets[set]
                del sets[set]

            return

        # no overlap with any existing group - start a new one
        sets[object_set] = [object]

    for object in objects:
        append(object)

    if not sets['empty']:
        del sets['empty']

    last_y = 0
    last_offset = 0
    y_offset = 0

    for i, objects in enumerate(sets.values()):

        xs = []
        ys = []
        for object in objects:
            x, y, z = object.dimensions # not available for new meshes ?
            xs.append(x)
            ys.append(y)
        x = max(xs)
        y = max(ys)

        # stack rows along +Y, leaving a gap the size of the taller row
        if i != 0:
            y_offset = max(y, last_y) + last_offset

        last_y = y
        last_offset = y_offset

        for j, object in enumerate(objects):
            object.location = (j*x, y_offset, 0)


def run_blender(filepath: str = None, script: str = None, argv: list = None, use_atool = True, library_path: str = None, stdout = None):
    """Run a background Blender instance with factory startup settings.

    `filepath`: .blend file to open
    `script`: python script to execute
    `argv`: extra arguments appended after the `--` separator
    `use_atool`: pass this add-on's path to the child process
    `library_path`: pass an atool library path to the child process
    Returns the `subprocess.CompletedProcess`; raises on a non-zero
    exit code (check=True).
    """

    args = [bpy.app.binary_path, '-b', '--factory-startup']

    if filepath:
        args.append(filepath)

    if script:
        args.extend(('--python', script))

    # everything after `--` is ignored by Blender itself and is read
    # by the child script instead
    args.append('--')

    if use_atool:
        atool_path = f'"{DIR_PATH}"' if " " in DIR_PATH else DIR_PATH
        args.extend(('-atool_path', atool_path))
        if library_path:
            library_path = f'"{library_path}"' if " " in library_path else library_path
            args.extend(('-atool_library_path', library_path))

    if argv:
        args.extend(argv)

    return subprocess.run(args, stdout=stdout, check = True, text = True)


def get_world_dimensions(objects: typing.Iterable[bpy.types.Object]):
    """Return ((x, y, z), (loc_x, loc_y, loc_z)): the size and the center
    of the combined world-space bounding box of `objects`."""

    vertices = []
    for o in objects:
        bound_box = o.bound_box
        matrix_world = o.matrix_world
        vertices.extend([matrix_world @ mathutils.Vector(v) for v in bound_box])

    xs = []
    ys = []
    zs = []
    for v in vertices:
        xs.append(v[0])
        ys.append(v[1])
        zs.append(v[2])

    max_x = max(xs)
    min_x = min(xs)

    max_y = max(ys)
    min_y = min(ys)

    max_z = max(zs)
    min_z = min(zs)

    x = abs(max_x - min_x)
    y = abs(max_y - min_y)
    z = abs(max_z - min_z)

    loc_x = (max_x + min_x)/2
    loc_y = (max_y + min_y)/2
    loc_z = (max_z + min_z)/2

    return (x, y, z), (loc_x, loc_y, loc_z)


# seconds between progress string rebuilds / viewport redraw requests
DRAWER_SLEEP_TIME = 1/16

class Progress_Drawer:
    """Context manager that draws an iteration progress line in the 3D viewport."""

    def draw_callback(self):
        # blf: font id 0, 20pt at 72 dpi, rows stacked by `indent`
        blf.position(0, 15, 30 + self.indent * 30, 0)
        blf.size(0, 20, 72)
        blf.draw(0, self.string)

    def string_update(self):
        """Background thread: rebuild the progress string while iterating."""
        start_time = time.time()
        last_index = 0
        total = self.total
        prefix = self.prefix
        show_multiplier = self.show_multiplier

        while self.is_running:
            # NOTE(review): last_index is never reassigned, so this guard only
            # skips until the first item is consumed - presumably it was meant
            # to skip recomputation when no progress was made; verify
            if last_index == self._index:
                time.sleep(DRAWER_SLEEP_TIME)
                continue

            current_time = time.time()

            past_time = int(current_time - start_time)
            past_min, past_sec = divmod(past_time, 60)

            # linear extrapolation from the average time per item so far
            remain_time = int(past_time/self._index * (total - self._index))
            remain_min, remain_sec = divmod(remain_time, 60)

            total_time = int(past_time/self._index * (total - self._index) + past_time)
            total_min, total_sec = divmod(int(total_time), 60)

            self.string = ' | '.join((
                f"{prefix}: {int(self._index / total * 100)}%",
                f"{self._index * show_multiplier} / {total * show_multiplier}",
                f"Total: {total_min}:{total_sec:02d} Past: {past_min}:{past_sec:02d} Remain: {remain_min}:{remain_sec:02d}"
            ))

            time.sleep(DRAWER_SLEEP_TIME)

    def __iter__(self):

        if not self.total:
            self.total = len(self.iterator)
            if not self.total:
                return

        # for file downloads one step stands for CHUNK_SIZE bytes
        self.show_multiplier = 1
        if self.is_file:
            self.show_multiplier = CHUNK_SIZE

        self.is_running = 1
        self._index = 1
        threading.Thread(target = self.string_update, args = (), daemon = True).start()

        for index, item in enumerate(self.iterator, start = 1):
            self._index = index
            yield item

        self.is_running = 0

    def __init__(self, iterator: typing.Iterable, total: int = None, prefix = '', indent = 0, is_file = False):
        """
        `iterator`: the iterable to wrap
        `total`: item count, len(iterator) is used when omitted
        `prefix`: label shown before the percentage
        `indent`: vertical slot so several drawers can stack
        `is_file`: each item represents a CHUNK_SIZE byte chunk
        """
        self.iterator = iterator
        self.total = total
        self.prefix = prefix
        self.indent = indent
        self.is_file = is_file

'' 533 | 534 | def __enter__(self): 535 | self.handler = bpy.types.SpaceView3D.draw_handler_add(self.draw_callback, tuple(), 'WINDOW', 'POST_PIXEL') 536 | self.next = DRAWER_SLEEP_TIME 537 | bpy.app.timers.register(self.update_view_3d_regions, persistent = True) 538 | return self 539 | 540 | def __exit__(self, exc_type, exc_val, exc_tb): 541 | self.next = None 542 | bpy.types.SpaceView3D.draw_handler_remove(self.handler, 'WINDOW') 543 | 544 | def update_view_3d_regions(self): 545 | for window in bpy.context.window_manager.windows: 546 | for area in window.screen.areas: 547 | if area.type == 'VIEW_3D': 548 | for region in area.regions: 549 | if region.type == 'WINDOW': 550 | region.tag_redraw() 551 | return self.next 552 | 553 | def iter_with_progress(iterator: typing.Iterable, indent = 0, prefix = '', total: int = None): 554 | 555 | if bpy.app.background: 556 | for i in iterator: 557 | yield i 558 | return 559 | 560 | with Progress_Drawer(iterator, prefix = prefix, total = total, indent = indent) as drawer: 561 | for i in drawer: 562 | yield i 563 | 564 | CHUNK_SIZE = 4096 565 | 566 | def download_with_progress(response, path: str, total: int, region: bpy.types.Region = None, indent = 0, prefix = ''): 567 | 568 | if bpy.app.background or not total: 569 | with open(path, "wb") as f: 570 | for chunk in response.iter_content(chunk_size=CHUNK_SIZE): 571 | f.write(chunk) 572 | return 573 | 574 | total_chunks = math.ceil(total/CHUNK_SIZE) 575 | 576 | with Progress_Drawer(range(total_chunks), is_file = True, prefix = prefix, total = total_chunks, indent = indent) as drawer: 577 | with open(path, "wb") as f: 578 | for i, chunk in zip(drawer, response.iter_content(chunk_size=4096)): 579 | f.write(chunk) 580 | 581 | 582 | def abspath(path, library:bpy.types.Library = None): 583 | return os.path.realpath(bpy.path.abspath(path, library = library)) 584 | 585 | def get_block_abspath(block: bpy.types.ID): 586 | return os.path.realpath(bpy.path.abspath(block.filepath, library = 
def backward_compatibility_get(object: bpy.types.ID, attr_name: typing.Iterable[str], sentinel = object()):
    """Get a custom property that may be stored under a legacy name.

    `attr_name`: property names to try, the current name first; a value found
    under a legacy name is migrated to the current name.
    Returns the value, or None when no name matches.
    """
    for i, attr in enumerate(attr_name):
        value = object.get(attr, sentinel)
        # FIX: identity comparison - `==` can raise or compare element-wise
        # for array-like custom property values; the sentinel check must be `is`
        if value is sentinel:
            continue

        if i != 0: # found under a legacy name - migrate it
            del object[attr]
            object[attr_name[0]] = value

        return value
    return None

def get_library_by_path(path: str) -> bpy.types.Library:
    """Find an already loaded library data block by absolute path, or None."""
    path = abspath(path)
    for library in bpy.data.libraries:
        if abspath(library.filepath) == path:
            return library

def get_context_copy_with_object(context: bpy.types.Context, object: bpy.types.Object) -> dict:
    """Context override dict where `object` is the only visible/selected/active object."""
    override = context.copy()
    override['selectable_objects'] = [object]
    override['selected_objects'] = [object]
    override['selected_editable_objects'] = [object]
    override['editable_objects'] = [object]
    override['visible_objects'] = [object]
    override['active_object'] = object
    override['object'] = object
    return override

def get_context_copy_with_objects(context: bpy.types.Context, active_object: bpy.types.Object , objects: typing.Iterable[bpy.types.Object]) -> dict:
    """Context override dict with `objects` selected and `active_object` active."""
    override = context.copy()
    override['selectable_objects'] = list(objects)
    override['selected_objects'] = list(objects)
    override['selected_editable_objects'] = list(objects)
    override['editable_objects'] = list(objects)
    override['visible_objects'] = list(objects)
    override['active_object'] = active_object
    override['object'] = active_object
    return override

class Operator_Later_Caller:
    """Mixin for operators that need to call another operator after the
    current one finishes, via bpy.app.timers."""

    def execute(self, context):
        raise NotImplementedError('This function needs to be overridden.')

        # example
        func = self.get_later_caller(bpy.ops, context.copy(), 'EXEC_DEFAULT', True, key_argument = 'key_argument')
        bpy.app.timers.register(func)
        return {'FINISHED'}

@staticmethod 640 | def get_later_caller(func, context: dict = None, execution_context: str = None, undo: bool = None, **key_arguments) -> typing.Callable: 641 | 642 | arguments = [] 643 | for argument in (context, execution_context, undo): 644 | if argument == None: 645 | continue 646 | arguments.append(argument) 647 | 648 | def call_later() -> None: 649 | func(*arguments, **key_arguments) 650 | 651 | return call_later 652 | 653 | 654 | VERTEX_CHANGING_MODIFIER_TYPES = {'ARRAY', 'BEVEL', 'BOOLEAN', 'BUILD', 'DECIMATE', 'EDGE_SPLIT', 'NODES', 'MASK', 'MIRROR', 'MULTIRES', 'REMESH', 'SCREW', 'SKIN', 'SOLIDIFY', 'SUBSURF', 'TRIANGULATE', 'VOLUME_TO_MESH', 'WELD', 'WIREFRAME', 'EXPLODE', 'FLUID', 'OCEAN', 'PARTICLE_INSTANCE'} 655 | 656 | class Object_Mode_Poll(): 657 | @classmethod 658 | def poll(cls, context): 659 | return context.space_data and context.space_data.type == 'VIEW_3D' and context.mode == 'OBJECT' 660 | 661 | 662 | def get_local_view_objects(context): 663 | # Regression: object.local_view_get and object.visible_in_viewport_get() always returns False 664 | # https://developer.blender.org/T95197 665 | 666 | space_view_3d = context.space_data 667 | 668 | if type(space_view_3d) != bpy.types.SpaceView3D: # will crash if space_view_3d is None 669 | raise TypeError(f'The context is incorrect. 
        raise TypeError(f'The context is incorrect. For context.space_data expected a SpaceView3D type, not {type(space_view_3d)}')

    depsgraph = context.evaluated_depsgraph_get()

    # use whichever visibility API this Blender version provides
    if bpy.data.objects and hasattr(bpy.data.objects[0], 'visible_in_viewport_get'):
        return [object for object in bpy.data.objects if object.evaluated_get(depsgraph).visible_in_viewport_get(space_view_3d)]
    else:
        return [object for object in bpy.data.objects if object.evaluated_get(depsgraph).local_view_get(space_view_3d)]
--------------------------------------------------------------------------------
/data.blend:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unwave/atool/701c03768bdebca694a3be85cd4ea4fbb4c784f8/data.blend
--------------------------------------------------------------------------------
/dev_tools.py:
--------------------------------------------------------------------------------
import operator

import bpy

import pyperclip

class ATOOL_OT_copy_all_to_clipboard(bpy.types.Operator, Shader_Editor_Poll):
    bl_idname = "nodeinsp.copy_all_to_clipboard"
    bl_label = ""
    bl_description = "Click to copy the content for all the selected nodes"
    bl_options = {'REGISTER'}

    # Name of the node attribute to collect from every selected node.
    attribute: bpy.props.StringProperty()

    def execute(self, context):
        """Copy a repr of `attribute`'s value for all selected nodes to the clipboard."""

        attributes = [getattr(node, self.attribute) for node in context.selected_nodes]

        # repr(x) is the idiomatic spelling of x.__repr__()
        pyperclip.copy(repr(attributes))

        return {'FINISHED'}
class ATOOL_PT_inspector_tools(bpy.types.Panel):
    """Side panel with utility operators for inspecting shader node trees."""
    bl_idname = "ATOOL_PT_inspector_tools"
    bl_label = "Inspector Tools"
    bl_space_type = 'NODE_EDITOR'
    bl_region_type = "UI"
    bl_category = "AT"
    bl_options = {'DEFAULT_CLOSED'}

    @classmethod
    def poll(cls, context):
        # Only show the panel for shader node trees.
        return context.space_data.tree_type == 'ShaderNodeTree'

    def draw(self, context):
        # One button per inspector tool operator.
        tools_column = self.layout.column()
        for operator_idname in ("nodeinsp.toggle_group_input_sockets", "nodeinsp.iter_by_type"):
            tools_column.operator(operator_idname)
node.type == "GROUP_INPUT"] 136 | 137 | for node in group_input_nodes: 138 | node.select = True 139 | 140 | bpy.ops.node.hide_socket_toggle() 141 | bpy.ops.node.select_all(action='DESELECT') 142 | 143 | return {'FINISHED'} 144 | 145 | class ATOOL_OT_iter_by_type(bpy.types.Operator, Shader_Editor_Poll): 146 | bl_idname = "nodeinsp.iter_by_type" 147 | bl_label = "Iter By Type" 148 | bl_description = "Press F9 to choose the type" 149 | bl_options = {'REGISTER', 'UNDO'} 150 | 151 | items = [] 152 | for node in bpy.types.ShaderNode.__subclasses__(): 153 | identifier = node.bl_rna.identifier 154 | items.append((identifier, identifier[10:], '')) 155 | items = sorted(items, key=operator.itemgetter(0)) 156 | 157 | sample_current: bpy.props.BoolProperty(name='Sample Current', default=True) 158 | 159 | type: bpy.props.EnumProperty( 160 | name='Type', 161 | items=items, 162 | default='ShaderNodeBsdfPrincipled') 163 | 164 | def draw(self, context): 165 | layout = self.layout 166 | layout.use_property_split = True 167 | layout.use_property_decorate = False 168 | 169 | column = layout.column() 170 | column.prop(self, 'sample_current') 171 | column = layout.column() 172 | column.prop(self, 'type') 173 | column.enabled = not self.sample_current 174 | 175 | def execute(self, context): 176 | 177 | if self.sample_current: 178 | 179 | selected_nodes = context.selected_nodes 180 | if not selected_nodes: 181 | return {'FINISHED'} 182 | 183 | type = context.selected_nodes[0].bl_idname 184 | else: 185 | type = self.type 186 | 187 | 188 | nodes = bpy.context.space_data.edit_tree.nodes 189 | nodes = [node for node in nodes if node.bl_idname == type] 190 | 191 | if not nodes: 192 | return {'FINISHED'} 193 | 194 | for node in nodes: 195 | if node.select == True: 196 | break 197 | 198 | next = nodes.index(node) + 1 199 | 200 | if next > len(nodes) - 1: 201 | next = 0 202 | 203 | bpy.ops.node.select_all(action='DESELECT') 204 | 205 | nodes[next].select = True 206 | 207 | 
bpy.ops.node.view_selected() 208 | 209 | return {'FINISHED'} 210 | 211 | -------------------------------------------------------------------------------- /edit_mode_operator.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | import bpy 4 | 5 | from . import bl_utils 6 | 7 | register = bl_utils.Register(globals()) 8 | 9 | 10 | class Edit_Mod_Poll: 11 | @classmethod 12 | def poll(cls, context): 13 | return context.space_data.type == 'VIEW_3D' and context.mode == 'EDIT_MESH' 14 | 15 | class ATOOL_OT_triangulate_ngons(bpy.types.Operator, Edit_Mod_Poll): 16 | bl_idname = "atool.triangulate_ngons" 17 | bl_label = "Triangulate Ngons" 18 | bl_description = "Triangulate n-gons with 5+ edges" 19 | 20 | def execute(self, context): # redo with bmesh 21 | bpy.ops.object.mode_set(mode='OBJECT') 22 | object = bpy.context.object 23 | 24 | modifier = object.modifiers.new(name = "__temp__", type='TRIANGULATE') 25 | modifier.min_vertices = 5 26 | modifier.quad_method = 'BEAUTY' 27 | bpy.ops.object.modifier_apply(modifier=modifier.name) 28 | 29 | bpy.ops.object.mode_set(mode='EDIT') 30 | bpy.ops.mesh.select_all(action='SELECT') 31 | bpy.ops.mesh.tris_convert_to_quads(face_threshold=math.pi, shape_threshold=math.pi) 32 | bpy.ops.mesh.select_all(action='DESELECT') 33 | 34 | return {'FINISHED'} 35 | 36 | 37 | class ATOOL_PT_edit_mode(bpy.types.Panel): 38 | bl_idname = "ATOOL_PT_edit_mode" 39 | bl_label = "Tools" 40 | bl_category = "AT" 41 | bl_space_type = 'VIEW_3D' 42 | bl_region_type = "UI" 43 | bl_context = "mesh_edit" 44 | 45 | def draw(self, context): 46 | 47 | column = self.layout.column() 48 | subcolumn = column.column(align=True) 49 | subcolumn.operator("atool.triangulate_ngons") -------------------------------------------------------------------------------- /image_utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import json 4 | 
class Image_Cache_Database:
    """sqlite-backed cache mapping an image file hash to JSON-serialized metadata.

    Intended to be used as a context manager; the connection is committed
    and closed on exit.
    """

    def __enter__(self):
        self.connection = sqlite3.connect(CASHE_PATH)
        self.cursor = self.connection.cursor()
        # Create the single key/value table on first use.
        self.cursor.execute("CREATE TABLE IF NOT EXISTS cache (hash TEXT PRIMARY KEY, data TEXT)")
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.connection.commit()
        self.cursor.close()
        self.connection.close()

    def get(self, hashs):
        """Return the deserialized data for every cached hash in `hashs`."""
        placeholders = ', '.join(['?'] * len(hashs))
        self.cursor.execute(f"SELECT * FROM cache WHERE hash in ({placeholders})", hashs)
        rows = self.cursor.fetchall()
        return [json.loads(serialized) for _hash, serialized in rows]

    def set(self, hash, data):
        """Serialize `data` to JSON and store (or overwrite) it under `hash`."""
        serialized = json.dumps(data, ensure_ascii=False)
        self.cursor.execute("INSERT OR REPLACE INTO cache (hash, data) VALUES(?,?)", (hash, serialized))
"aspect_ratio", "dominant_color") 66 | ONLY_DUMPABLE = ('basename', 'type') 67 | 68 | class Image: 69 | def __init__(self, path: str): 70 | self.path = path 71 | self.basename = os.path.basename(path) 72 | self.name, self.extension = os.path.splitext(self.basename) 73 | self.extension = self.extension.lower() 74 | 75 | self.db: Image_Cache_Database = None 76 | self.asset_info: dict = None 77 | self.data_block: object = None # type: bpy.types.Image 78 | self.type_definer_config: type_definer.Filter_Config = None 79 | 80 | self.type: typing.List[str] # what if type definer retruns all possible types in order of more probable? 81 | 82 | self.hash: str # property, saved, key, called every time 83 | 84 | self.x: int # property, saved 85 | self.y: int # property, saved 86 | self.channels: int # property, saved 87 | self.dtype: str # property, saved 88 | 89 | self.min_max: typing.Dict[str, typing.Tuple[float, float]] = {} # dict, saved 90 | self.dominant_color = {} # dict, saved 91 | 92 | self.image: np.ndarray # property, not saved 93 | 94 | @classmethod 95 | def from_db(cls, path: str, db: Image_Cache_Database = None, type_definer_config: type_definer.Filter_Config = None) -> Image: 96 | image = cls(path) 97 | 98 | if type_definer_config: 99 | image.type_definer_config = type_definer_config 100 | 101 | if db: 102 | image.db = db 103 | image.load_from_db(db) 104 | else: 105 | with Image_Cache_Database() as db: 106 | image.load_from_db(db) 107 | 108 | return image 109 | 110 | def load_from_db(self, db: Image_Cache_Database): 111 | info = db.get((self.hash,)) 112 | if info: 113 | self.load(info[0]) 114 | 115 | @classmethod 116 | def from_asset_info(cls, path: str, info: dict, type_definer_config: type_definer.Filter_Config = None) -> Image: 117 | image = cls(path) 118 | 119 | if type_definer_config: 120 | image.type_definer_config = type_definer_config 121 | 122 | image.load_from_asset_info(info) 123 | return image 124 | 125 | def load_from_asset_info(self, info: dict): 
126 | self.asset_info = info 127 | file_info = info.get('file_info') # type: dict 128 | if file_info: 129 | image_info = file_info.get(self.hash) 130 | if image_info: 131 | self.load(image_info) 132 | 133 | def update_source(self): 134 | try: 135 | if self.db: 136 | self.db.set(self.hash, self.dump()) 137 | except: 138 | print(f'The Image_Cache_Database for image {self.path} was not updated.') 139 | 140 | if self.asset_info: 141 | file_info = self.asset_info.get('file_info') # type: dict 142 | if not file_info: 143 | self.asset_info['file_info'] = {} 144 | self.asset_info['file_info'][self.hash] = self.dump() 145 | 146 | if self.data_block: 147 | self.data_block['at_type'] = self.type 148 | 149 | @classmethod 150 | def from_block(cls, block, define_type = True, type_definer_config: type_definer.Filter_Config = None) -> Image: 151 | image = cls(os.path.realpath(bpy.path.abspath(block.filepath, library=block.library))) 152 | image.data_block = block 153 | 154 | if type_definer_config: 155 | image.type_definer_config = type_definer_config 156 | 157 | type = block.get("at_type") 158 | if type: 159 | image.type = type 160 | 161 | return image 162 | 163 | def __repr__(self): 164 | return f"" 165 | 166 | @cached_property 167 | def type(self): 168 | return type_definer.get_type(self.name, self.type_definer_config) 169 | 170 | @cached_property 171 | def hash(self): 172 | return utils.get_file_hash(self.path) 173 | 174 | @cached_property 175 | def image(self): 176 | if self.extension in (".tga",): 177 | with pillow_image.open(self.path) as pil_image: 178 | bands = len(pil_image.getbands()) 179 | image = np.array(pil_image) 180 | if bands == 3: 181 | image = cv.cvtColor(image, cv.COLOR_RGB2BGR) 182 | elif bands == 4: 183 | image = cv.cvtColor(image, cv.COLOR_RGBA2BGRA) 184 | else: 185 | image = cv.imread(self.path, cv.IMREAD_UNCHANGED | cv.IMREAD_ANYCOLOR | cv.IMREAD_ANYDEPTH) 186 | 187 | if self.extension == '.exr': # what if zeros are what i need? 
188 | image = cv.merge([channel for channel in cv.split(image) if channel.any()]) 189 | 190 | assert image is not None, f"The image {self.path} wasn't loaded!" 191 | log.debug(f"Image loaded: {self.path}") 192 | 193 | return image 194 | 195 | def trim_type(self): 196 | if self.channels > 3: 197 | return 198 | 199 | if len(self.type) == 4 or (len(self.type) == 2 and self.type[0] in type_definer.TRIPLE_CHANNEL_MAPS): 200 | init_type = self.type.copy() 201 | self.type.pop(0) 202 | print(f'The image {self.path} had a wrong type and was trimmed from {init_type} to {self.type}.') 203 | 204 | @cached_property 205 | def shape(self): 206 | shape = self.image.shape 207 | if len(shape) == 2: 208 | y, x = shape 209 | channels = 1 210 | else: 211 | y, x, channels = shape 212 | return x, y, channels 213 | 214 | def get_shape(self, image = None): 215 | if image is None: 216 | image = self.image 217 | shape = image.shape 218 | if len(shape) == 2: 219 | y, x = shape 220 | channels = 1 221 | else: 222 | y, x, channels = shape 223 | return x, y, channels 224 | 225 | @cached_property 226 | def dtype(self): 227 | return str(self.image.dtype) 228 | 229 | @cached_property 230 | def x(self): 231 | return self.shape[0] 232 | 233 | @cached_property 234 | def y(self): 235 | return self.shape[1] 236 | 237 | @cached_property 238 | def channels(self): 239 | return self.shape[2] 240 | 241 | @cached_property 242 | def aspect_ratio(self): 243 | return self.shape[0]/self.shape[1] 244 | 245 | def pre_process(self, no_height = False): 246 | 247 | self.aspect_ratio 248 | self.trim_type() 249 | 250 | for channel, subtype in self.iter_type(): 251 | 252 | if subtype in {"diffuse", "albedo", "roughness", "gloss", "metallic"}: 253 | self.get_dominant_color(channel) 254 | 255 | # normalize if: height, roughness, gloss, specular 256 | if subtype in {"displacement", "roughness", "gloss", "specular"}: 257 | self.get_min_max(channel) 258 | 259 | # delight? 
260 | # valid color range for PBR 261 | # normalize if out of range 262 | # if no height use color and normalize it 263 | if subtype in {"diffuse", "albedo"}: 264 | if no_height: 265 | self.get_min_max(channel) 266 | 267 | # check if normal map is correct 268 | # auto-detect normals Y channel style, DirectX/OpenGL 269 | # invert x, y 270 | # sRGB/Linear 271 | # auto detect normal map? 272 | if subtype == "normal": 273 | pass 274 | 275 | def iter_type(self): 276 | # assert self.type, "Image type is not defined." 277 | type_len = len(self.type) 278 | for index, subtype in enumerate(self.type): 279 | if type_len == 1: # RGB 280 | channel = 'RGB' 281 | elif type_len == 2: # RGB + A 282 | if index == 0: 283 | channel = 'RGB' 284 | else: 285 | channel = 'A' 286 | else: # R + G + B, R + G + B + A 287 | channel = INDEX_TO_CHANNEL[index] 288 | yield channel, subtype 289 | 290 | 291 | def get_channel(self, channel: str, image = None): 292 | log.debug(f"Getting channel: {channel}") 293 | 294 | if image is None: 295 | image = self.image 296 | if channel in {'R', 'G', 'B'}: 297 | if self.channels > 1: 298 | image = list(reversed(cv.split(image))) 299 | if self.channels == 4: 300 | image = image[1:] 301 | image = image[CHANNEL_TO_INDEX[channel]] 302 | else: 303 | pass # does one channel image has R, G or B? 304 | elif channel == 'A': 305 | assert self.channels == 4, f"Image {self.path} does not have an alpha channel." 306 | image = cv.split(image)[-1] 307 | elif channel == 'RGB': # first three channels in BGR order 308 | if self.channels > 1: 309 | if self.channels == 4: 310 | image = cv.cvtColor(image, cv.COLOR_BGRA2BGR) 311 | else: 312 | pass # is one channel image is RGB? 
    def get_dominant_color(self, channel: str):
        """ Return the dominant color of `channel` as an [R, G, B] float list
        in 0-1, caching the result in self.dominant_color.
        """
        dominant_color = self.dominant_color.get(channel)
        if dominant_color:
            return dominant_color

        log.debug(f"Computing dominant color for channel: {channel}")

        # Downscale to a max side of 256 px to keep k-means cheap, then
        # flatten to a (pixel, channels) float32 matrix as cv.kmeans expects.
        image = self.get_channel(channel, self.resized(256))
        channels = self.get_shape(image)[2]
        image = image.reshape((-1,channels))
        image = np.float32(image)
        criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 10, 1.0)
        # k = 1: the single cluster center is the dominant color.
        # cv.kmeans returns (compactness, labels, centers); take center 0.
        result = cv.kmeans(image, 1, None, criteria, 10, cv.KMEANS_RANDOM_CENTERS)
        center = result[2][0].astype(float)

        if channels > 1:
            # OpenCV channel order is BGR; reverse to RGB before converting.
            dominant_color = list(self.to_float(center[::-1]))
        else:
            # Grayscale: repeat the single value into an RGB triple.
            dominant_color = list(self.to_float(center.repeat(3)))

        self.dominant_color[channel] = dominant_color
        return dominant_color
| 375 | 376 | # https://numpy.org/doc/stable/user/basics.types.html 377 | # https://numpy.org/doc/stable/reference/generated/numpy.finfo.html 378 | # https://numpy.org/doc/stable/reference/generated/numpy.iinfo.html 379 | def to_float(self, array: typing.Union[np.ndarray, float]) -> typing.Union[np.ndarray, float]: 380 | if self.dtype.startswith('float'): 381 | result = array 382 | elif self.dtype == 'uint8': 383 | result = array/255 384 | elif self.dtype == 'uint16': 385 | result = array/65535 386 | elif self.dtype == 'unit32': 387 | result = array/4294967295 388 | else: 389 | raise TypeError(f"Type {self.dtype} is not defined for the convertion to float.") 390 | 391 | try: not_bad = all(0 <= x <= 1 for x in result) 392 | except: not_bad = 0 <= result <= 1 393 | assert not_bad, f"Bad convertion to float 0-1 for {result}." # Move to report! 394 | 395 | return result 396 | 397 | def load(self, data): 398 | for key, value in data.items(): 399 | if key in DUMPABLE: 400 | setattr(self, key, value) 401 | 402 | def dump(self): 403 | data = {key: getattr(self, key) for key in DUMPABLE + ONLY_DUMPABLE} 404 | return data 405 | 406 | 407 | def resized(self, target) -> np.ndarray: 408 | x = self.x 409 | y = self.y 410 | 411 | if x == y: 412 | x = y = target 413 | elif x > y: 414 | y = int(y/x * target) 415 | x = target 416 | else: 417 | x = int(x/y * target) 418 | y = target 419 | 420 | return cv.resize(self.image, (x, y)) 421 | 422 | def set_bl_props(self, image_block): 423 | image_block["at_hash"] = self.hash 424 | image_block["at_type"] = self.type 425 | image_block["at_size"] = self.shape[:2] 426 | 427 | def to_uint8(self): 428 | image = self.image 429 | dtype = self.dtype 430 | if dtype == 'uint8': 431 | result = image 432 | elif dtype == 'uint16': 433 | result = image / 65535 * 255 434 | elif dtype == 'unit32': 435 | result = image / 4294967295 * 255 436 | elif dtype.startswith('float'): 437 | print("BAD!") 438 | result = cv.normalize(image, None, alpha=0, beta=255, 
norm_type=cv.NORM_MINMAX, dtype=cv.cv_8u) 439 | else: 440 | raise TypeError(f"Type {dtype} is not defined for the convertion to uint8.") 441 | return result 442 | 443 | def save(self, path): 444 | new_image_path = os.path.join(path, self.name, ".jpg") 445 | if not os.path.exists(new_image_path): 446 | image = self.to_uint8() 447 | cv.imwrite(new_image_path, image) 448 | 449 | def save_as_icon(image: pillow_image.Image, path): 450 | x, y = image.size 451 | if x > y: 452 | box = ((x-y)/2, 0, (x+y)/2, y) 453 | elif x < y: 454 | box = (0, (y-x)/2, x, (y+x)/2) 455 | else: 456 | box = None 457 | image = image.resize((128, 128), resample = pillow_image.LANCZOS, box = box) 458 | icon_path = os.path.join(path, "__icon__.png") 459 | image.save(icon_path , "PNG", optimize=True) 460 | return icon_path 461 | 462 | def save_as_icon_from_clipboard(path): 463 | grab = ImageGrab.grabclipboard() 464 | if not grab: 465 | print("No image in the clipboard.") 466 | return None 467 | return save_as_icon(grab, path) 468 | 469 | def convert_unreal_image(path: str, format = 'png', bgr_to_rgb = False): 470 | new_name = os.path.splitext(os.path.basename(path))[0] + "." + format 471 | new_path = os.path.join(os.path.dirname(path), new_name) 472 | if not os.path.exists(new_path): 473 | with pillow_image.open(path) as tga: 474 | 475 | if bgr_to_rgb: 476 | getbands_len = len(tga.getbands()) 477 | if getbands_len == 3: 478 | r, g, b = tga.split() 479 | tga = pillow_image.merge('RGB', (b, g, r)) 480 | elif getbands_len == 4: 481 | r, g, b, a = tga.split() 482 | tga = pillow_image.merge('RGBA', (b, g, r, a)) 483 | 484 | tga.save(new_path, format = format, compress_level=3) 485 | # ? optimize=True 486 | return new_path -------------------------------------------------------------------------------- /pose_mode_operator.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | import bpy 4 | 5 | from . import bl_utils 6 | from . 
class ATOOL_OT_select_action(bpy.types.Operator, Poll):
    """Activate an action on the posed object and frame its preview range."""
    bl_idname = "atool.select_action"
    bl_label = "Select Action"
    bl_description = "Select the action and set the animation preview range."

    # Name of the bpy.data.actions entry to activate.
    action_name: bpy.props.StringProperty()

    def execute(self, context):

        # Disable auto-keying so the transforms_clear calls below do not
        # insert keyframes; the original setting is restored at the end.
        init_use_keyframe_insert_auto = context.scene.tool_settings.use_keyframe_insert_auto
        context.scene.tool_settings.use_keyframe_insert_auto = False

        action = bpy.data.actions.get(self.action_name)

        # NOTE(review): assumes the object already has animation_data and that
        # `action_name` names an existing action — an unknown name would
        # assign None and the frame_range access below would fail; confirm.
        context.object.animation_data.action = action

        # Clamp the preview range to the action's own frame range.
        start, end = action.frame_range
        context.scene.use_preview_range = True
        context.scene.frame_preview_start = int(start)
        context.scene.frame_preview_end = int(end)

        # Clear transforms on the selected bones, then on the unselected ones:
        # invert, clear, invert back restores the original selection.
        bpy.ops.pose.transforms_clear()
        bpy.ops.pose.select_all(action='INVERT')
        bpy.ops.pose.transforms_clear()
        bpy.ops.pose.select_all(action='INVERT')

        context.scene.tool_settings.use_keyframe_insert_auto = init_use_keyframe_insert_auto

        return {'FINISHED'}
def get_cycles_world():
    """Create and return a new 'Default' world whose background is driven by
    a sky texture with a raised sun, intended for Cycles.
    """
    default_world = bpy.data.worlds.new(name='Default')
    default_world.use_nodes = True

    tree = default_world.node_tree
    background_node = [node for node in tree.nodes if node.type == 'BACKGROUND'][0]

    sky_node = tree.nodes.new('ShaderNodeTexSky')
    sky_node.sun_rotation = 2
    sky_node.sun_elevation = 0.7854  # ~45 degrees
    tree.links.new(sky_node.outputs[0], background_node.inputs[0])

    return default_world
class ATOOL_OT_set_world(bpy.types.Operator):
    bl_idname = "atool.set_world"
    bl_label = "Cycles World"
    bl_description = "Set a default world" # bug fix: was "Set a deafult world"
    bl_options = {'REGISTER', 'UNDO'}

    # engine: bpy.props.EnumProperty(
    #     items = [
    #         ('BLENDER_EEVEE', 'EEVEE', ''),
    #         ('CYCLES', 'Cycles', '')
    #     ],
    #     default = 'CYCLES'
    # )

    def execute(self, context: bpy.types.Context):
        """Assign the default Cycles sky world and matching view settings."""
        scene = context.scene

        # if self.engine == 'CYCLES':
        # Exposure and Filmic transform tuned for the bright sky texture.
        scene.view_settings.exposure = -3.5
        scene.view_settings.view_transform = 'Filmic'
        scene.view_settings.look = 'Medium Contrast'
        scene.world = get_cycles_world()
        # elif self.engine == 'BLENDER_EEVEE':
        #     scene.world = get_eevee_world()

        return {'FINISHED'}
False 114 | turned_off_modifiers.append(modifier) 115 | 116 | camera_translation, camera_rotation, camera_scale = camera_object.matrix_world.decompose() 117 | 118 | vectors = [mathutils.Vector(vector) for vector in camera_data.view_frame(scene = context.scene)] 119 | camera_plane_normals = [camera_rotation @ vectors[index].cross(vectors[index + 1]) for index in range(-2, 2)] 120 | 121 | object_matrix_world = object.matrix_world 122 | 123 | all_ver_indexes = range(len(object.data.vertices)) 124 | 125 | vertex_group = object.vertex_groups.get('camera_visibility') 126 | if not vertex_group: 127 | vertex_group = object.vertex_groups.new(name='camera_visibility') 128 | vertex_group.add(all_ver_indexes, 1, 'REPLACE') 129 | 130 | depsgraph = context.evaluated_depsgraph_get() 131 | object = object.evaluated_get(depsgraph) 132 | #mesh = object.to_mesh(preserve_all_data_layers = True, depsgraph = depsgraph) 133 | mesh = object.data 134 | 135 | origin = object_matrix_world.inverted() @ camera_translation 136 | hit_vertices = [] 137 | for vertex in mesh.vertices: 138 | 139 | target = vertex.co 140 | 141 | if any(normal.dot(object_matrix_world @ target - camera_translation) < 0 for normal in camera_plane_normals): 142 | continue 143 | 144 | result, location, normal, index = object.ray_cast(origin, target - origin) 145 | 146 | if result and (location - target).length <= 0.0001: 147 | hit_vertices.append(vertex.index) 148 | 149 | vert_to_poly = {index: [] for index in all_ver_indexes} 150 | for p in mesh.polygons: 151 | for v in p.vertices: 152 | vert_to_poly[v].append(p) 153 | 154 | final_vert_indexes = [] 155 | for hit_vert in hit_vertices: 156 | for poly in vert_to_poly[hit_vert]: 157 | final_vert_indexes.extend(poly.vertices) 158 | 159 | vertex_group.add(all_ver_indexes, 0, 'REPLACE') 160 | vertex_group.add(final_vert_indexes, 1, 'REPLACE') 161 | 162 | for modifier in turned_off_modifiers: 163 | modifier.show_viewport = True 164 | 165 | 
class ATOOL_OT_mix_vertex_groups(bpy.types.Operator):
    """Mix two vertex groups of the active object with a Vertex Weight Mix modifier."""
    bl_idname = "atool.mix_vertex_groups"
    bl_label = "Mix Vertex Groups"
    bl_description = "Mix vertex groups with the modifier"
    bl_options = {'REGISTER', 'UNDO'}

    # Vertex group A: receives the result.
    target: bpy.props.StringProperty()
    # Vertex group B: mixed into the target.
    source: bpy.props.StringProperty()
    # Apply the modifier immediately after setting it up.
    do_apply: bpy.props.BoolProperty(default = True)
    operation: bpy.props.EnumProperty(items = [
        ('SUB', "Subtract", "", 1),
        ('MUL', "Multiply", "", 2),
    ], name = 'Operation')

    def invoke(self, context, event):
        # Default the target to the active vertex group, then show the dialog.
        self.target = context.object.vertex_groups.active.name
        return context.window_manager.invoke_props_dialog(self, width = 300)

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.prop_search(self, "target", context.object, "vertex_groups", text = 'Target')
        layout.prop_search(self, "source", context.object, "vertex_groups", text = 'Source')
        layout.prop(self, "do_apply", text = 'Apply Modifier')
        layout.prop(self, "operation")

    def execute(self, context):

        if not self.target:
            self.report({'INFO'}, 'Specify the target')
            return {'CANCELLED'}

        if not self.source:
            self.report({'INFO'}, 'Specify the source')
            return {'CANCELLED'}

        object = context.object

        # mix_set = 'A' limits the mix to vertices already in the target group.
        modifier = object.modifiers.new(name = self.target, type='VERTEX_WEIGHT_MIX')
        modifier.mix_mode = self.operation
        modifier.vertex_group_a = self.target
        modifier.vertex_group_b = self.source
        modifier.mix_set = 'A'

        # The new modifier is appended at the bottom of the stack; move it up
        # until it reaches the top or sits directly below another
        # VERTEX_WEIGHT_MIX modifier (so successive mixes keep creation order).
        modifier_index = len(object.modifiers) - 1
        while 1:

            if modifier_index == 0:
                break

            if object.modifiers[modifier_index - 1].type == 'VERTEX_WEIGHT_MIX':
                break

            bpy.ops.object.modifier_move_up(modifier = modifier.name)
            modifier_index -= 1

        if self.do_apply:
            bpy.ops.object.modifier_apply(modifier = modifier.name)

        return {'FINISHED'}
# scripts/initialize_asset.py
# Ran headless inside Blender: links all objects into the scene, optionally
# gathers external textures into a local 'textures' folder, then re-saves.
import bpy
import os
import sys
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-atool_path')
parser.add_argument('-atool_library_path')
parser.add_argument('-move_textures', action='store_true')
parser.add_argument('-move_sub_assets', action='store_true')

# Only the arguments after the '--' separator belong to this script.
args = parser.parse_args(sys.argv[sys.argv.index('--') + 1:])

FILE_PATH = bpy.data.filepath
DIR_PATH = os.path.dirname(FILE_PATH)
TEXTURES_PATH = os.path.join(DIR_PATH, 'textures')

# Make sure every object is part of the active collection.
for blend_object in bpy.data.objects:
    bpy.context.collection.objects.link(blend_object)

def move_textures():
    """Move externally referenced image files into TEXTURES_PATH and repath them relatively."""
    file_images = [image for image in bpy.data.images if image.source == 'FILE' and os.path.exists(image.filepath)]
    if not file_images:
        return

    import site
    sys.path.insert(0, site.getusersitepackages())
    sys.path.insert(0, args.atool_path)
    import data
    import utils
    import bl_utils

    assets = data.AssetData(library=args.atool_library_path)
    assets.update_library()

    os.makedirs(TEXTURES_PATH, exist_ok = True)

    for image in file_images:
        abs_path = bl_utils.get_block_abspath(image)

        # Textures that belong to other library assets are left in place
        # unless -move_sub_assets was requested.
        if not args.move_sub_assets and assets.is_sub_asset(abs_path):
            image.filepath = bpy.path.relpath(abs_path)
            continue

        moved_path = utils.move_to_folder(abs_path, TEXTURES_PATH)
        image.filepath = bpy.path.relpath(moved_path)

if args.move_textures:
    move_textures()

bpy.ops.object.select_all(action='DESELECT')
bpy.context.preferences.filepaths.save_version = 0
bpy.ops.wm.save_as_mainfile(filepath=FILE_PATH)
bpy.ops.wm.quit_blender()
def get_world():
    """Create a simple sky-lit world used for default-world previews."""
    world = bpy.data.worlds.new(name='world')
    world.use_nodes = True
    nodes = world.node_tree.nodes
    background = [node for node in nodes if node.type == 'BACKGROUND'][0]
    sky = nodes.new('ShaderNodeTexSky')
    sky.sun_rotation = 2
    sky.sun_elevation = 0.7854
    world.node_tree.links.new(sky.outputs[0], background.inputs[0])
    return world

def set_render_settings(scene: bpy.types.Scene):
    """Configure *scene* for the preview render described by the global JOB dict."""
    scene.render.resolution_x = JOB['resolution']
    scene.render.resolution_y = JOB['resolution']
    scene.render.film_transparent = JOB['use_film_transparent']

    scene.render.use_crop_to_border = False
    scene.render.use_border = False

    scene.render.image_settings.file_format = 'PNG'
    scene.render.image_settings.color_mode = 'RGBA'

    scene.render.engine = 'CYCLES'
    scene.cycles.samples = JOB['samples']
    scene.cycles.use_square_samples = False
    scene.cycles.use_denoising = True
    # Denoiser attributes differ between Blender versions; apply best effort.
    # Was a bare `except:` which also swallowed SystemExit/KeyboardInterrupt.
    try:
        scene.cycles.denoiser = 'OPENIMAGEDENOISE'
        scene.cycles.denoising_input_passes = 'RGB_ALBEDO_NORMAL'
        scene.cycles.denoising_prefilter = 'ACCURATE'
    except Exception:
        pass
    scene.use_nodes = False

    if JOB['use_default_world']:
        scene.view_settings.exposure = -3.7
        scene.view_settings.view_transform = 'Filmic'

        scene.world = get_world()

bpy.ops.wm.open_mainfile(filepath=JOB['filepath'], load_ui=False, use_scripts=False, display_file_selector=False)

context = bpy.context
set_render_settings(context.scene)

for obj in bpy.data.objects:

    # Mirror viewport visibility in the render.
    for modifier in obj.modifiers:
        modifier.show_render = modifier.show_viewport

    obj.hide_render = not obj.visible_get()

    if JOB['is_local_view']:
        obj.hide_render = not obj.name in JOB['local_view_objects']

    if JOB['use_default_world']:
        # The default world brings its own sun; hide scene suns.
        if obj.type == 'LIGHT' and obj.data.type == 'SUN':
            obj.hide_render = True

camera_data = bpy.data.cameras.new("Camera")

camera = bpy.data.objects.new("Camera", camera_data)
context.collection.objects.link(camera)
context.scene.camera = camera

# Reproduce the viewport view the job was captured from.
camera.matrix_world = mathutils.Matrix(JOB['view_matrix'])
camera_data.lens = JOB['lens']
camera_data.clip_start = JOB['clip_start']
camera_data.clip_end = JOB['clip_end']

if 0: # debug
    filepath = os.path.join(get_desktop(), f"preview_render_test_{time.strftime('%y%m%d_%H%M%S')}.blend")
    bpy.ops.wm.save_as_mainfile(filepath=filepath)

bpy.ops.render.render()

image = bpy.data.images['Render Result']
render_path = os.path.join(get_desktop(), time.strftime('%y%m%d_%H%M%S') + '.png')
image.save_render(render_path)
def get_world():
    """Create a simple sky-lit world for icon renders."""
    world = bpy.data.worlds.new(name='world')
    world.use_nodes = True
    nodes = world.node_tree.nodes
    background = [node for node in nodes if node.type == 'BACKGROUND'][0]
    sky = nodes.new('ShaderNodeTexSky')
    sky.sun_rotation = 2
    sky.sun_elevation = 0.7854
    world.node_tree.links.new(sky.outputs[0], background.inputs[0])
    return world

def set_render_settings(scene: bpy.types.Scene):
    """Configure *scene* for a 256x256 transparent Cycles icon render."""
    scene.render.resolution_x = 256
    scene.render.resolution_y = 256
    scene.render.film_transparent = True

    scene.render.use_crop_to_border = False
    scene.render.use_border = False

    scene.render.image_settings.file_format = 'PNG'
    scene.render.image_settings.color_mode = 'RGBA'

    scene.render.engine = 'CYCLES'
    scene.cycles.samples = 10
    scene.cycles.use_square_samples = False
    scene.cycles.use_denoising = True
    # The denoiser enum is version dependent; apply best effort.
    # Was a bare `except:` which also swallowed SystemExit/KeyboardInterrupt.
    try:
        scene.cycles.denoiser = 'OPENIMAGEDENOISE'
    except Exception:
        pass
    scene.use_nodes = False

    scene.view_settings.exposure = -3.7
    scene.view_settings.view_transform = 'Filmic'
    scene.view_settings.look = 'Medium High Contrast'

    scene.world = get_world()


if material_jobs:

    import site
    sys.path.append(site.getusersitepackages())
    sys.path.append(ATOOL_PATH)

    import node_utils
    import type_definer

    filepath = os.path.join(ATOOL_PATH, 'scripts', 'render_icon.blend')
    bpy.ops.wm.open_mainfile(filepath=filepath, load_ui=False, use_scripts=False, display_file_selector=False)

    mat_sphere = bpy.data.objects['material_sphere']

    type_definer_config = type_definer.Filter_Config()
    type_definer_config.__dict__.update(jobs['type_definer_config'])

    context = bpy.context
    set_render_settings(context.scene)

    # Render every material onto the preview sphere.
    for job in material_jobs:
        type_definer_config.set_common_prefix_from_paths(job['files'])
        material = node_utils.get_material(job['files'], use_displacement = True, displacement_scale = job['displacement_scale'], invert_normal_y = job['invert_normal_y'], type_definer_config = type_definer_config)
        mat_sphere.material_slots[0].material = material

        bpy.ops.render.render()

        image = bpy.data.images['Render Result']
        image.save_render(job['result_path'])

if object_jobs:

    for job in object_jobs:

        bpy.ops.wm.open_mainfile(filepath=job['filepath'], load_ui=False, use_scripts=False, display_file_selector=False)

        context = bpy.context
        set_render_settings(context.scene)

        # Collect world-space bounding box corners of everything that will
        # actually be visible to the camera.
        coordinates = []
        for obj in bpy.data.objects:

            if not obj.visible_get():
                continue

            if obj.hide_render:
                continue

            # blender 3.0
            if hasattr(obj, 'is_shadow_catcher') and obj.is_shadow_catcher:
                continue

            # blender <3.0
            if hasattr(obj.cycles, 'is_shadow_catcher') and obj.cycles.is_shadow_catcher:
                continue

            # blender 3.0
            if hasattr(obj, 'visible_camera') and not obj.visible_camera:
                continue

            # blender <3.0
            if hasattr(obj, 'cycles_visibility') and not obj.cycles_visibility.camera:
                continue

            # The default world brings its own sun; hide scene suns.
            if obj.type == 'LIGHT' and obj.data.type == 'SUN':
                obj.hide_render = True
                continue

            if not obj.type in {'MESH', 'CURVE'}:
                continue

            matrix_world = obj.matrix_world
            for v in obj.bound_box:
                coordinates.extend(matrix_world @ mathutils.Vector(v))

        camera_data = bpy.data.cameras.new("Camera")
        camera_data.lens = 120

        camera = bpy.data.objects.new("Camera", camera_data)
        context.collection.objects.link(camera)
        camera.rotation_mode = 'XYZ'
        camera.rotation_euler = (1.178097, 0, 0.3926991)

        context.scene.camera = camera

        # Let Blender frame all collected corners, then place the camera there.
        depsgraph = context.evaluated_depsgraph_get()
        co_return, scale_return = camera.camera_fit_coords(depsgraph, coordinates)

        camera.location = co_return

        import math
        # Scale clipping to the camera's distance from the origin.
        dist = math.sqrt(sum(x*x for x in co_return))
        camera_data.clip_start *= dist
        camera_data.clip_end = dist * 2

        if 0: # debug
            def get_desktop():
                try:
                    import winreg
                    with winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\User Shell Folders") as key:
                        return winreg.QueryValueEx(key, "Desktop")[0]
                except Exception:
                    return os.path.expanduser("~/Desktop")

            filepath = os.path.join(get_desktop(), 'icon_test.blend')
            bpy.ops.wm.save_as_mainfile(filepath=filepath)

        bpy.ops.render.render()

        image = bpy.data.images['Render Result']
        image.save_render(job['result_path'])
# scripts/render_worker.py
# Drives a headless Blender that renders an image in tiles (render_partial.py),
# then stitches the tiles with OpenCV and writes the result to the desktop.
import subprocess
import os
import sys
import argparse
import tempfile
import json

DIR_PATH = os.path.dirname(__file__)

def get_desktop():
    """Best-effort desktop path: Windows registry value, else ~/Desktop."""
    try:
        import winreg
        with winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\User Shell Folders") as key:
            return winreg.QueryValueEx(key, "Desktop")[0]
    except Exception:  # was a bare `except:`; ImportError on non-Windows
        return os.path.expanduser("~/Desktop")

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-blender')
    parser.add_argument('-dicing')
    parser.add_argument('-file')

    args = parser.parse_args(sys.argv[1:])

    script = os.path.join(DIR_PATH, 'render_partial.py')

    # The original rebound `args` to this list, shadowing the parsed Namespace.
    command = [args.blender, '-b', '--factory-startup', args.file, '--python', script, '--', '-dicing', args.dicing]

    with tempfile.TemporaryDirectory() as temp_dir:

        command.extend(('-path', temp_dir))

        subprocess.run(command, check = True)

        with open(os.path.join(temp_dir, 'done.json'), 'r', encoding='utf-8') as json_file:
            columns = json.load(json_file)

        import site
        sys.path.insert(0, site.getusersitepackages())

        import cv2 as cv
        import numpy

        def get_image(path):
            # Preserve depth/color exactly as rendered.
            return cv.imread(path, cv.IMREAD_UNCHANGED | cv.IMREAD_ANYCOLOR | cv.IMREAD_ANYDEPTH)

        # Stack tiles into columns, then columns into the final image.
        render_columns = []
        for image_paths in columns:
            images = [get_image(path) for path in image_paths]
            render_columns.append(numpy.concatenate(images, axis=0))

        render = numpy.concatenate(render_columns, axis=1)

        from datetime import datetime

        ext = '.png'
        render_path = os.path.join(get_desktop(), f"render_{datetime.now().strftime('%y%m%d_%H%M%S')}{ext}")
        cv.imwrite(render_path, render)

    input('Press any key to exit...')

# Guarded entry point: previously all of the above ran at import time.
if __name__ == '__main__':
    main()
""" 3 | import unreal # type: ignore 4 | import os 5 | import json 6 | import datetime 7 | import tempfile 8 | 9 | def get_desktop(): 10 | try: 11 | import winreg 12 | with winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\User Shell Folders") as key: 13 | return winreg.QueryValueEx(key, "Desktop")[0] 14 | except: 15 | return os.path.expanduser("~/Desktop") 16 | 17 | 18 | def get_unique_key(dict, key, sentinel = object()): 19 | index = 2 20 | initial_key = key 21 | while dict.get(key, sentinel) is not sentinel: 22 | key = initial_key + f'_{index}' 23 | index += 1 24 | return key 25 | 26 | def find_key_by_value(dict, value_to_find, default = None): 27 | for key, value in dict.items(): 28 | if value == value_to_find: 29 | return key 30 | return None 31 | 32 | def map_unique(dict, traget_name, value): 33 | key = find_key_by_value(dict, value) 34 | if not key: 35 | key = get_unique_key(dict, traget_name) 36 | dict[key] = value 37 | return key 38 | 39 | 40 | def parse_attrs(string): 41 | attrs = {} 42 | for attr in string.strip().split(): 43 | name, value = attr.strip().split('=') 44 | attrs[name] = value.strip("\"'") 45 | return attrs 46 | 47 | def process_list(string): 48 | values = [] 49 | value = '' 50 | bracket_sum = 0 51 | for char in string: 52 | if char == '(': 53 | bracket_sum += 1 54 | elif char == ')': 55 | bracket_sum -= 1 56 | value += char 57 | 58 | if not bracket_sum and char == ',': 59 | value = value[:-1] 60 | values.append(parse_string_attrs(value)) 61 | value = '' 62 | values.append(parse_string_attrs(value)) 63 | return values 64 | 65 | def process_dict(string): 66 | values = {} 67 | name = '' 68 | value = '' 69 | reading_name = True 70 | bracket_sum = 0 71 | for char in string: 72 | if reading_name: 73 | if char == ',': 74 | return process_list(string) 75 | elif char != '=': 76 | name += char 77 | elif char == '=': 78 | reading_name = False 79 | else: 80 | if char == '(': 81 | bracket_sum += 1 82 | elif 
char == ')': 83 | bracket_sum -= 1 84 | value += char 85 | 86 | if not bracket_sum and char == ',': 87 | value = value[:-1] 88 | values[name] = parse_string_attrs(value) 89 | name = '' 90 | value = '' 91 | reading_name = True 92 | values[name] = parse_string_attrs(value) 93 | return values 94 | 95 | def parse_string_attrs(string, is_list = False): 96 | 97 | if not string.startswith('('): 98 | return string 99 | 100 | string = string[1:-1] 101 | 102 | if string.startswith('('): 103 | return process_list(string) 104 | else: 105 | return process_dict(string) 106 | 107 | def parse_t3d(lines, item = None): 108 | if item == None: 109 | item = {} 110 | item['subitems'] = [] 111 | 112 | reading_object = False 113 | 114 | for line in lines: 115 | line = line.rstrip('\r\n') 116 | 117 | if reading_object: 118 | if line.startswith(' '): 119 | subitem['lines'].append(line[3:]) 120 | continue 121 | else: 122 | subitem.update(parse_t3d(subitem.pop('lines'), item = subitem)) 123 | 124 | if line.startswith('Begin Object'): 125 | subitem = parse_attrs(line[13:]) 126 | subitem['lines'] = [] 127 | subitem['attrs'] = {} 128 | subitem['subitems'] = [] 129 | reading_object = True 130 | elif line.startswith('End Object'): 131 | 132 | for attr in ('subitems', 'attrs'): 133 | if not subitem.get(attr): 134 | subitem.pop(attr) 135 | 136 | item['subitems'].append(subitem) 137 | reading_object = False 138 | else: 139 | name, value = line.split('=', 1) 140 | item['attrs'][name] = parse_string_attrs(value) 141 | 142 | return item 143 | 144 | 145 | def get_fbx_export_option(): 146 | options = unreal.FbxExportOption() 147 | options.collision = False 148 | options.level_of_detail = False 149 | options.vertex_color = True 150 | return options 151 | 152 | def get_export_task(filename, object, options = None): 153 | task = unreal.AssetExportTask() 154 | task.automated = True 155 | task.replace_identical = True 156 | task.filename = filename 157 | task.object = object 158 | if options: 159 | 
def get_export_task(filename, object, options = None):
    """Build an automated AssetExportTask writing `object` to `filename`."""
    task = unreal.AssetExportTask()
    task.automated = True
    task.replace_identical = True
    task.filename = filename
    task.object = object
    if options:
        task.options = options
    return task

def get_textures(material):
    """Return the textures used by a material, material instance, or mesh material slot."""
    type = material.__class__.__name__

    # Mesh material slots wrap the actual material interface.
    if type in ('StaticMaterial', 'SkeletalMaterial'):
        material = material.material_interface
        type = material.__class__.__name__

    if type == 'MaterialInstanceConstant':
        textures = []
        for parameter_name in unreal.MaterialEditingLibrary.get_texture_parameter_names(material.get_base_material()):
            texture = unreal.MaterialEditingLibrary.get_material_instance_texture_parameter_value(material, parameter_name)
            if texture:
                textures.append(texture)
        return textures
    elif type == 'Material':
        return unreal.MaterialEditingLibrary.get_used_textures(material)
    else:
        raise BaseException(f"Material '{material.get_name()}' has unsupported type '{type}'.")


class Textures(dict):
    """Maps unique texture names to unreal texture objects."""

    def append(self, texture):
        # Reuses an existing key when the same texture was already added.
        return map_unique(self, texture.get_name(), texture)

    def export_iter(self, dir_path, texture_info):
        """Yield each exported file name while writing textures into `dir_path`."""
        for name, texture in self.items():

            info = texture_info[name]

            # Single-channel G8 textures go to BMP, everything else to TGA.
            if info['format'] == 'TSF_G8':
                format = '.bmp'
                exporter = unreal.TextureExporterBMP
            else:
                format = '.tga'
                exporter = unreal.TextureExporterTGA

            name = texture.get_name() + format
            yield name

            filename = os.path.join(dir_path, name)
            exporter.run_asset_export_task(get_export_task(filename, texture))

    def get_dict(self):
        """Return per-texture info gathered by round-tripping each texture through a T3D export."""
        info = {}
        with tempfile.TemporaryDirectory() as temp_dir:
            total_frames = len(self)
            with unreal.ScopedSlowTask(total_frames, "Getting Info...") as slow_task:
                slow_task.make_dialog(True)

                for name, texture in self.items():

                    slow_task.enter_progress_frame(1, 'Getting Info: ' + name)

                    if slow_task.should_cancel():
                        raise BaseException("Aborted.")

                    t3d_filename = os.path.join(temp_dir, name + '.t3d')
                    unreal.ObjectExporterT3D.run_asset_export_task(get_export_task(t3d_filename , texture))

                    with open(t3d_filename, 'r', encoding='utf-8') as t3d_file:
                        t3d = parse_t3d(t3d_file.readlines())

                    format = t3d['subitems'][0]['attrs']['Source']['Format']

                    info[name] = {
                        'flip_green_channel': texture.flip_green_channel,
                        'is_bugged_bgr': format == "TSF_RGBA16",
                        'format': format
                    }
        return info

class Materials(dict):
    """Maps unique material names to sets of texture keys."""

    def __init__(self, textures: Textures):
        self.textures = textures

    def append(self, material):
        textures = {self.textures.append(texture) for texture in get_textures(material)}
        return map_unique(self, material.get_name(), textures)

    def append_from_mesh(self, mesh):
        """Register all materials of a mesh; return a slot-name -> material-key mapping."""
        slot_to_material = {}

        type = mesh.__class__.__name__
        materials = mesh.static_materials if type == 'StaticMesh' else mesh.materials # 'SkeletalMesh'

        for material in materials:
            material = material.material_interface

            textures = {self.textures.append(texture) for texture in get_textures(material)}

            material_name = target_material = material.get_name()
            key = map_unique(self, material_name, textures)
            if key != material.get_name():
                target_material = key

            slot_to_material[material_name] = target_material

        return slot_to_material

    def get_dict(self, texture_info):
        """Return material-name -> list of exported texture file names."""
        result = {}  # was named `dict`, shadowing the builtin
        for name, textures in self.items():
            material_textures = []
            for texture in textures:
                info = texture_info[texture]

                if info['format'] == 'TSF_G8':
                    format = '.bmp'
                else:
                    format = '.tga'
                material_textures.append(texture + format)

            result[name] = material_textures
        return result

class Meshes(dict):
    """Maps unique mesh names to (mesh, slot-to-material) pairs."""

    def __init__(self, materials: Materials):
        self.materials = materials

    def append(self, mesh):
        materials = self.materials.append_from_mesh(mesh)
        return map_unique(self, mesh.get_name(), (mesh, materials))

    def export_iter(self, dir_path, options = None):
        """Yield each exported file name while writing meshes as FBX into `dir_path`."""
        # BUG FIX: the default was `options = get_fbx_export_option()`, which is
        # evaluated once at class-definition time — it called into Unreal on
        # import and shared a single options object. Resolve it lazily instead.
        if options is None:
            options = get_fbx_export_option()
        for name, (mesh, materials) in self.items():
            name = name + '.fbx'
            yield name

            file_name = os.path.join(dir_path, name)
            unreal.ExporterFBX.run_asset_export_task(get_export_task(file_name, mesh, options))

    def get_dict(self):
        """Return fbx-file-name -> slot-to-material mapping."""
        return {name + '.fbx': materials for name, (mesh, materials) in self.items()}


def export(assets, dir_path):
    """Export the given assets (meshes, materials, textures) plus a JSON manifest into `dir_path`."""
    textures = Textures()
    materials = Materials(textures)
    meshes = Meshes(materials)

    for asset in assets:

        type = asset.__class__.__name__

        if type in ('StaticMesh', 'SkeletalMesh'):
            meshes.append(asset)

        elif type in ('Material', 'MaterialInstanceConstant'):
            materials.append(asset)

        elif type in ('Texture2D', 'Texture'):
            textures.append(asset)

        else:
            print(f"Asset '{asset.get_name()}' has unsupported type '{type}'.")

    if not any((textures, materials, meshes)):
        return

    os.makedirs(dir_path, exist_ok = True)

    texture_info = textures.get_dict()
    info = {
        "meshes": meshes.get_dict(),
        "materials": materials.get_dict(texture_info),
        "textures": texture_info
    }

    info_path = os.path.join(dir_path, "__unreal_assets__.json")
    with open(info_path, 'w') as info_file:
        json.dump(info, info_file, indent = 4, ensure_ascii = False)

    total_frames = len(textures) + len(meshes)
    with unreal.ScopedSlowTask(total_frames, "Exporting...") as slow_task:
        slow_task.make_dialog(True)

        import itertools
        for name in itertools.chain(textures.export_iter(dir_path, texture_info), meshes.export_iter(dir_path)):
            if slow_task.should_cancel():
                break
            slow_task.enter_progress_frame(1, 'Exporting: ' + name)


# Guarded entry point: previously this ran unconditionally at import time.
# NOTE(review): assumes Unreal executes the script with __name__ == '__main__';
# confirm against the editor's python execution mode.
if __name__ == '__main__':
    utility_base = unreal.GlobalEditorUtilityBase.get_default_object()
    assets = list(utility_base.get_selected_assets())

    time_stamp = datetime.datetime.now().strftime('%y%m%d_%H%M%S')
    dir_path = os.path.join(get_desktop(), "unreal_assets_" + time_stamp)

    export(assets, dir_path)
import zipfile
import os
import tempfile
import subprocess
import sys
from datetime import datetime

BLENDER_PATH = "blender"

# Only these file kinds get shipped in the release archive.
FILE_EXTENSION = (".blend", ".py", ".md", ".json")

# Files that must never end up in the archive.
EXCLUDED_FILES = ("ship.py", "config.json")

current_dir = os.path.dirname(os.path.realpath(__file__))
dir_name = os.path.basename(current_dir)

files_to_pack = [file for file in os.scandir(current_dir) if file.is_file() and file.name.lower().endswith(FILE_EXTENSION)]


def _get_desktop():
    """Return the user's desktop folder, trying the most reliable source first.

    Order: win32com shell API, then the registry (Windows), then ~/Desktop.
    """
    try:
        from win32com.shell import shell, shellcon  # type: ignore
        return shell.SHGetFolderPath(0, shellcon.CSIDL_DESKTOP, 0, 0)
    except Exception:
        pass

    if sys.platform == "win32":
        try:
            command = r'reg query "HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Explorer\User Shell Folders" /v "Desktop"'
            result = subprocess.run(command, stdout=subprocess.PIPE, text = True)
            # NOTE(review): splitting on whitespace breaks for desktop paths that
            # contain spaces -- kept as the original behavior, confirm acceptable.
            return result.stdout.splitlines()[2].split()[2]
        except Exception:
            pass

    return os.path.expanduser("~/Desktop")


desktop = _get_desktop()

time_stamp = datetime.now().strftime('%y%m%d_%H%M%S')
zipfile_path = os.path.join(desktop, "atool_" + time_stamp + ".zip")


with tempfile.TemporaryDirectory() as temp_dir:
    temp_blend = os.path.join(temp_dir, "temp.blend")

    # Export only the public node groups (names not starting with '#')
    # from data.blend into a clean, compressed library file.
    script = "\n".join([
        "import bpy",
        "node_groups = {node_group for node_group in bpy.data.node_groups if not node_group.name.startswith('#')}",
        f"bpy.data.libraries.write(r'{temp_blend}', node_groups, compress=True, fake_user=True)",
        "bpy.ops.wm.quit_blender()"
    ])
    blend_data_path = os.path.join(current_dir, "data.blend")
    subprocess.run([BLENDER_PATH, "-b", blend_data_path, "--python-expr", script, "--factory-startup"], check = True)

    with zipfile.ZipFile(zipfile_path, 'w') as zip_file:
        for file in files_to_pack:
            if file.name in EXCLUDED_FILES:
                continue
            # Ship the stripped temp blend instead of the working data.blend.
            source = temp_blend if file.name == "data.blend" else file.path
            zip_file.write(source, arcname = os.path.join(dir_name, file.name), compress_type = zipfile.ZIP_DEFLATED)


print(zipfile_path)
print("Done")
import itertools
import json
import logging
import os
import re
import operator
import typing

log = logging.getLogger("atool")

SINGLE_CHANNEL_MAPS = ["metallic", "roughness", "displacement", "ambient_occlusion", "bump", "opacity", "gloss", "specular"]
TRIPLE_CHANNEL_MAPS = ["normal", "diffuse", "albedo", "emissive"]
# A run of non-alphanumeric characters, or the empty match at the end of the string.
SEPARATOR_PATTERN = re.compile(r"[^a-zA-Z0-9]+|$")

try:
    name_conventions_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "bitmap_type_name_conventions.json")
    with open(name_conventions_path, "r", encoding='utf-8') as f:
        CONVENTIONS: dict = json.load(f)
except Exception:  # narrowed from a bare except; still best-effort by design
    import traceback
    traceback.print_exc()
    CONVENTIONS = None


class Match:
    """One candidate interpretation of a bitmap file name as map-type tokens.

    `submatches` holds the `re.Match` objects found inside `string`;
    `reverse_dictionary` maps a matched token string back to its map type.
    """

    def __init__(self, string: str, reverse_dictionary: typing.Dict[str, str]):
        self.string = string
        self.string_length = len(string)
        self.reverse_dictionary = reverse_dictionary
        self.submatches = []  # type: typing.List[re.Match]
        self._type_list = None  # type: typing.List[str]

    def copy(self):
        """Return a copy with independent submatch and type lists."""
        match = Match(self.string, self.reverse_dictionary)
        match.submatches = self.submatches.copy()
        if self._type_list:
            match._type_list = self._type_list.copy()
        return match

    def __repr__(self):
        # Fix: the repr used to be an empty f-string, which made debugging
        # (e.g. the commented-out print in get_type) useless.
        return f"<Match {self.submatch_list} in {self.string!r}>"

    def append(self, submatch: 're.Match'):
        self.submatches.append(submatch)

    def remove(self, index: int):
        del self.submatches[index]

    @property
    def is_separated(self):
        """True when the whole match is delimited on both sides."""
        if not self.submatches:
            return False
        return self.is_pre_separated and self.is_post_separated

    @property
    def is_pre_separated(self):
        """True when the match starts the string or follows a separator."""
        if not self.submatches:
            return False
        first_char_index = self.submatches[0].start()
        if first_char_index == 0:
            return True
        return bool(SEPARATOR_PATTERN.match(self.string, pos=first_char_index - 1))

    @property
    def is_post_separated(self):
        """True when the match ends the string or is followed by a separator."""
        if not self.submatches:
            return False
        last_char_index = self.submatches[-1].end()
        if last_char_index == self.string_length:
            return True
        return bool(SEPARATOR_PATTERN.match(self.string, pos=last_char_index))

    @property
    def is_RGB_bitmap(self):
        """True when the first detected type is a triple-channel map."""
        if not self.submatches:
            return False
        return self.type_list[0] in TRIPLE_CHANNEL_MAPS

    @property
    def are_submatches_separated(self):
        """True when every adjacent pair of submatches has a gap between them."""
        if len(self.submatches) <= 1:
            return False
        return all(a.end() < b.start() for a, b in zip(self.submatches, self.submatches[1:]))

    @property
    def are_any_submatches_separated(self):
        """True when at least one adjacent pair of submatches has a gap."""
        if len(self.submatches) <= 1:
            return False
        return any(a.end() < b.start() for a, b in zip(self.submatches, self.submatches[1:]))

    @property
    def type_list(self) -> typing.List[str]:
        """Map types of the submatches; an explicitly set list takes precedence."""
        if self._type_list is not None:
            return self._type_list
        return [self.reverse_dictionary[submatch_str] for submatch_str in self.submatch_list]

    @type_list.setter
    def type_list(self, value):
        self._type_list = value

    @property
    def submatch_list(self) -> typing.List[str]:
        return [submatch.group(0) for submatch in self.submatches]

    @property
    def match_string(self):
        return ''.join(self.submatch_list)

    @property
    def length(self):
        # Whole span, including any gaps between submatches.
        return self.submatches[-1].end() - self.submatches[0].start()

    @property
    def is_one_letter_match(self):
        # An average token length of one means every token is a single letter.
        return self.length <= len(self.submatches)


class Filter_Config:
    """User-tweakable rules consumed by `get_type` and `filter_by_config`."""

    def __init__(self):
        self.ignore_type: typing.List[str] = []
        self.ignore_format: typing.List[typing.Tuple[str, typing.List[str]]] = []
        self.prefer_type: typing.List[typing.Tuple[str, str]] = []
        self.prefer_format: typing.List[typing.Tuple[str, str, typing.List[str]]] = []

        # Extra names the user assigns to a given type.
        self.custom: typing.Dict[str, typing.List[str]] = {}
        self.is_rgb_plus_alpha: bool = True

        self.is_strict: bool = True

        # A shared file-name prefix to strip before type detection.
        self.common_prefix: str = None
        self.common_prefix_len: int

        if not image_utils.OPENCV_IO_ENABLE_OPENEXR:
            self.ignore_format.append('.exr')

    def set_common_prefix_from_paths(self, paths: typing.Iterable[str]):
        """Derive the common prefix from the base names of `paths`."""
        self.set_common_prefix((os.path.splitext(os.path.basename(path))[0] for path in paths))

    def set_common_prefix(self, names: typing.Iterable[str]):
        """Store the longest shared leading substring of `names`, if any."""
        names = list(names)
        if len(names) <= 1:
            return

        common_prefix = utils.get_longest_substring(names, from_beginning = True)
        if not common_prefix:
            return

        self.common_prefix = common_prefix
        self.common_prefix_len = len(self.common_prefix)

    @property
    def dict(self):
        return {key: value for key, value in self.__dict__.items() if not key.startswith('__')}
def get_type(string: str, config: Filter_Config = None) -> typing.Optional[typing.List[str]]:
    """Guess the bitmap map types encoded in the file name `string`.

    Returns a list such as ['albedo'] or ['normal', 'gloss'], or None when no
    convincing match is found. Candidate matches are generated from every
    starting position, then filtered by separation heuristics; the longest
    surviving match wins. Raises when the conventions JSON failed to load.
    """
    if not CONVENTIONS:
        raise Exception("Cannot read file bitmap_type_name_conventions.json.")

    patterns = CONVENTIONS["bitmap"]["type"].copy() # type: dict

    is_strict = config.is_strict if config else True

    ignores = patterns.pop('ignore') # type: dict
    if is_strict: # temporally only for external testing
        # Drop every ignorable pseudo-type from the candidate patterns.
        for ignore in ignores:
            ignore = patterns.pop(ignore)

    if config:
        # Re-home user-customized names: remove them from every type's name
        # list, then append them to the type the user assigned them to.
        for customized_type, custom_names in config.custom.items():
            for type, names in patterns.items():
                patterns[type] = [name for name in names if name not in custom_names]
            patterns[customized_type].extend(custom_names)

    # name -> type lookup, and one alternation regex per type
    # (longest names first so the regex prefers the longest token).
    reverse_dictionary = {name: type for type, names in patterns.items() for name in names}
    patterns = {type: re.compile('|'.join(sorted(names, reverse=True, key=len))) for type, names in patterns.items()}

    if config and config.common_prefix:
        if string.startswith(config.common_prefix): # protect if config is reused
            string = string[config.common_prefix_len:]

    # CamelCase -> snake_case so separators become explicit underscores.
    string = inflection.underscore(string)
    string_length = len(string)

    def match_length(match):
        start, end = match.span()
        return end - start

    def get_submatch(starting_index, types_to_avoid):
        # Longest token match at `starting_index` over all remaining types.
        submatches = [] # type: typing.List[re.Match]
        for type, pattern in patterns.items():
            if type in types_to_avoid:
                continue
            submatch = pattern.match(string, pos=starting_index)
            if submatch:
                submatches.append(submatch)

        return max(submatches, key=match_length) if submatches else None

    def define_bitmap_type(starting_index):
        # Greedily collect up to 4 channel tokens starting at `starting_index`
        # (e.g. "normal" + "gloss" packed into one bitmap).

        match = Match(string, reverse_dictionary)

        for i in range(4):

            # Never repeat a type; a triple-channel map may only be the first
            # token, since it occupies the RGB channels.
            to_avoid = match.type_list
            if match.is_RGB_bitmap or i > 0:
                to_avoid += TRIPLE_CHANNEL_MAPS

            # Skip over a separator run before trying the next token.
            separator = SEPARATOR_PATTERN.match(string, pos=starting_index)
            if separator:
                starting_index = separator.end()

            submatch = get_submatch(starting_index, to_avoid)
            if not submatch:
                break
            match.append(submatch)
            starting_index = submatch.end()

            # RGB + one extra (alpha) channel is already a full bitmap.
            if i == 1 and match.is_RGB_bitmap:
                break
            elif starting_index == string_length:
                break

        if not match.submatches:
            return None

        return match

    # Generate a candidate match from every position in the string.
    matches = [] # type: typing.List[Match]
    point = 0
    while point <= string_length:
        result = define_bitmap_type(point)
        if result:
            # point = result.submatches[-1].end()
            point += 1
            matches.append(result)
        else:
            point += 1

    if not matches:
        return None

    def filter_strict(matches: typing.List[Match]):
        # Keep only matches that are cleanly delimited: trim trailing tokens
        # until the match is separator-bounded, and reject matches whose
        # tokens are only partially separated from each other.
        matches = matches.copy()

        for match in matches.copy():
            while not match.is_separated:

                if not match.submatches:
                    matches.remove(match)
                    break

                match.remove(-1)

        for match in matches.copy():
            if not match.are_submatches_separated and match.are_any_submatches_separated:
                matches.remove(match)

        return matches

    if is_strict:
        matches = filter_strict(matches)
        if not matches:
            return None
    else:
        # Non-strict mode: fall back to the unfiltered candidates when the
        # strict filter removes everything.
        not_strict_maches = [match.copy() for match in matches]
        matches = filter_strict(matches)

        if not not_strict_maches:
            return None

        if not matches:
            matches = not_strict_maches

    for match in matches.copy():

        # Internally separated tokens without outer separation look accidental.
        if match.are_submatches_separated and not match.is_separated:
            matches.remove(match)
            continue

        # All-single-letter matches are too ambiguous to trust.
        if match.is_one_letter_match:
            matches.remove(match)
            continue

    if not matches:
        return None

    for match in matches: # exceptions
        type_list = match.type_list

        if len(type_list) == 1:

            # CryEngine convention: DDNA = normal (RGB) + gloss (alpha).
            if match.match_string == 'ddna':
                match.type_list = ['normal', 'gloss']

            # elif match.match_string == 'arm':
            #     match.type_list = ['ambient_occlusion', 'roughness', 'metallic']

        # A lone trailing 'm' after diffuse/albedo conventionally means an
        # opacity mask in the alpha channel, not metallic.
        elif len(type_list) == 2 and type_list[0] in ('diffuse', 'albedo') and type_list[1] == 'metallic' and match.submatch_list[1].lower() == 'm':
            match.type_list = [type_list[0], 'opacity']

    # if not is_strict:
    #     for match in matches:
    #         for index, type in enumerate(type_list):
    #             if type in ignores:
    #                 new_type_list = match.type_list
    #                 new_type_list[index] = None
    #                 match.type_list = new_type_list

    if config and not config.is_rgb_plus_alpha:
        # Alpha is not wanted: keep only the second (alpha) token's type.
        for match in matches:
            if len(match.submatches) == 2:
                match.remove(0)

    # print(*matches, sep="\n")

    match = max(matches, key=operator.attrgetter('length'))
    return match.type_list

    # NOTE(review): everything below is unreachable dead code kept from
    # earlier selection strategies -- consider deleting.
    # variants = [match for match in matches if match.is_separated]
    # if variants:
    #     return variants[-1].type_list

    # variants = [match for match in matches if not match.is_one_letter_match]
    # if variants:
    #     return variants[-1].type_list

    return matches[-1].type_list

    # matches.reverse()
    # match = max(matches, key=operator.attrgetter('length'))
    # return match.type_list


def filter_by_config(images: typing.List[image_utils.Image], config: Filter_Config):
    """Filter `images` by the rules in `config`.

    Returns `(images, report)` where `images` is the surviving list of
    `image_utils.Image` objects (types may be rewritten in place) and
    `report` is a list of `(type_set, message)` tuples in Blender's
    `operator.report` style.
    """
    images = images.copy()
    report = []

    # Drop images whose file format is not accepted.
    extensions = set(CONVENTIONS["bitmap"]["extension"]).difference(set(config.ignore_format))
    for image in images.copy():
        if not image.extension in extensions:
            images.remove(image)
            report.append(({'INFO'},f"Image {image.basename} was excluded by file format."))


    # Drop untyped images and single-type images of an ignored type.
    ignore_type = set(config.ignore_type)
    for image in images.copy():
        type = image.type
        if not type:
            images.remove(image)
            report.append(({'INFO'}, f"Image {image.basename} has no type detected."))
            continue
        if len(type) == 1 and type[0] in ignore_type:
            images.remove(image)
            report.append(({'INFO'}, f"Image {image.basename} was excluded by type '{type[0]}'."))
            continue


    # When a preferred type is present, null out the ignored type everywhere;
    # images left with no usable subtype are dropped.
    for preferred, ignored in config.prefer_type:
        for image in images.copy():

            if not preferred in image.type:
                continue

            for _image in images.copy():

                type = [None if subtype == ignored else subtype for subtype in _image.type]

                if not any(type):
                    images.remove(_image)
                    report.append(({'INFO'}, f"Image {_image.basename} was excluded by preferring type '{preferred}' over '{ignored}'."))
                    continue

                _image.type = type


    # When a preferred format is present, drop single-type images of the
    # ignored format whose type is in the affected list.
    for preferred, ignored, types in config.prefer_format:
        for image in images.copy():

            if preferred != image.extension:
                continue

            for _image in images.copy():
                if _image.extension == ignored and len(_image.type) == 1 and _image.type[0] in types:
                    images.remove(_image)
                    report.append(({'INFO'}, f"Image {_image.basename} was excluded by preferring format '{preferred}' over '{ignored}'."))

    return images, report
import os
import sys
import json
import re
import functools
import pathlib
import typing

IMAGE_EXTENSIONS = { ".bmp", ".jpeg", ".jpg", ".jp2", ".j2c", ".tga", ".cin", ".dpx", ".exr", ".hdr", ".sgi", ".rgb", ".bw", ".png", ".tiff", ".tif", ".psd", ".dds"}
GEOMETRY_EXTENSIONS = {".obj", ".fbx"}
URL_EXTENSIONS = (".url", ".desktop", ".webloc")
META_FOLDERS = {'__gallery__', '__extra__', '__archive__'}
META_TYPES = {"__info__", "__icon__"} | META_FOLDERS

DIR_PATH = os.path.dirname(os.path.realpath(__file__))
PLATFORM = sys.platform


class PseudoDirEntry:
    """A minimal `os.DirEntry` stand-in for paths not produced by `os.scandir`."""

    def __init__(self, path):
        self.path = os.path.realpath(path)
        self.name = os.path.basename(self.path)

    def is_file(self):
        return os.path.isfile(self.path)

    def is_dir(self):
        return os.path.isdir(self.path)


def color_to_gray(color):
    """Return the Rec. 709 luma of an (r, g, b[, a]) color."""
    return 0.2126*color[0] + 0.7152*color[1] + 0.0722*color[2]


@property
@functools.lru_cache()
def data(self):
    """Parsed payload of a recognized meta file; None for other types.

    NOTE(review): lru_cache on a method keeps every Path instance (and its
    result) alive for the life of the process -- consider cached_property.
    """
    if self.type == "megascan_info":
        with self.open(encoding="utf-8") as json_file:
            return json.load(json_file)
    elif self.type == "url":
        if self.suffix == ".webloc":
            # macOS web location: URL lives in the first <string> plist tag.
            from xml.dom.minidom import parse as xml_parse
            tag = xml_parse(str(self)).getElementsByTagName("string")[0]
            return tag.firstChild.nodeValue
        else:
            # .url / .desktop files are INI-like.
            import configparser
            config = configparser.ConfigParser(interpolation=None)
            config.read(str(self))
            return config[config.sections()[0]].get("URL")
    elif self.type == "blendswap_info":
        with self.open(encoding="utf-8") as info_file:
            match = re.search(r"blendswap.com\/blends\/view\/\d+", info_file.read())
            if match:
                return match.group(0)
    elif self.type == "__info__":
        with self.open(encoding="utf-8") as json_file:
            return json.load(json_file)
    return None


@property
@functools.lru_cache()
def is_meta(self):
    """True when the path is ATool bookkeeping rather than asset content."""
    return self.type in META_TYPES


@property
@functools.lru_cache()
def file_type(self):
    """Classify the path: a meta name, 'image', 'geometry', 'url', ... or None."""
    name = self.name

    if not self.is_file():
        if name in META_FOLDERS:
            return name
        return None

    if name == "__info__.json":
        return "__info__"
    elif name == "__icon__.png":
        return "__icon__"
    elif name == "BLENDSWAP_LICENSE.txt":
        return "blendswap_info"
    elif name.lower().endswith("license.html"):
        with self.open(encoding="utf-8") as info_file:
            if re.search(r"blendswap.com\/blends\/view\/\d+", info_file.read()):
                return "blendswap_info"

    suffix = self.suffix
    if suffix == ".sbsar":
        return "sbsar"
    elif suffix == ".zip":
        return "zip"
    elif suffix == ".json":
        # Megascans metadata is recognized by its characteristic keys.
        with self.open(encoding="utf-8") as json_file:
            json_data = json.load(json_file)
            if type(json_data.get("id")) == str and type(json_data.get("meta")) == list and type(json_data.get("points")) == int:
                return "megascan_info"
    elif suffix in IMAGE_EXTENSIONS:
        return "image"
    elif suffix in GEOMETRY_EXTENSIONS:
        return "geometry"
    elif suffix in URL_EXTENSIONS:
        return "url"

    return None

# Extend pathlib.Path with cached asset-introspection attributes.
pathlib.Path.type = file_type
pathlib.Path.is_meta = is_meta
pathlib.Path.data = data


class File_Filter(typing.Dict[str, pathlib.Path], dict):
    """A name -> `pathlib.Path` mapping over a folder's content with type queries."""

    def __init__(self):
        self.ignore = set()  # file names skipped on (re)scans
        self.path = None     # backing folder; None when built from a file list

    @classmethod
    def from_dir(cls, path: os.DirEntry, ignore: typing.Union[str, typing.Iterable[str]] = None):
        """Build a filter from a directory entry, skipping names in `ignore`."""
        instance = cls()
        # Fix: `ignore` used to stay None when not supplied, so the
        # `item.name not in ignore` test below raised TypeError.
        if ignore:
            instance.ignore = ignore = {ignore} if isinstance(ignore, str) else set(ignore)
        else:
            ignore = set()

        instance.path = path.path
        for item in os.scandir(path.path):
            if item.name not in ignore:
                instance[item.name] = pathlib.Path(item.path)

        return instance

    @classmethod
    def from_files(cls, files: typing.Iterable[str]):
        """Build a filter from an explicit list of file paths."""
        instance = cls()
        for file in files:
            name = os.path.basename(file)
            instance[name] = pathlib.Path(file)
        return instance

    def update(self):
        """Re-scan the backing folder: drop vanished entries, add new ones."""
        if not self.path:
            return

        for name in list(self.keys()):
            if not self[name].exists():
                del self[name]

        # Known entries are treated as ignored so only new files get added.
        ignore = self.ignore | set(self.keys())
        for file in os.scandir(self.path):
            if file.name not in ignore:
                self[file.name] = pathlib.Path(file.path)

    def __iter__(self):
        # Iterating the filter yields its file Paths, not the key names.
        return iter(self.get_files())

    def get_files(self):
        return [item for item in self.values() if item.is_file()]

    def get_folders(self):
        return [item for item in self.values() if item.is_dir()]

    def get_by_type(self, type: typing.Union[str, typing.Iterable[str]]):
        type = {type} if isinstance(type, str) else set(type)
        return [item for item in self.values() if item.type in type]

    def get_by_name(self, name: typing.Union[str, typing.Iterable[str]]):
        name = {name} if isinstance(name, str) else set(name)
        return [item for item in self.values() if item.name in name]

    def get_by_extension(self, extension: typing.Union[str, typing.Iterable[str]]):
        extension = {extension} if isinstance(extension, str) else set(extension)
        return [item for item in self.values() if item.suffix in extension]
| return new_path 207 | 208 | 209 | def read_local_file(name, auto=True) -> typing.Union[str, dict]: 210 | path = os.path.join(DIR_PATH, name) 211 | 212 | if not os.path.exists(path): 213 | return None 214 | 215 | try: 216 | with open(path, 'r', encoding="utf-8") as file: 217 | if auto: 218 | if path.lower().endswith(".json"): 219 | return(json.load(file)) 220 | return file.read() 221 | except: 222 | import traceback 223 | traceback.print_exc() 224 | return None 225 | 226 | def get_script(name, read = False): 227 | path = os.path.join(DIR_PATH, 'scripts', name) 228 | 229 | if not read: 230 | return path 231 | 232 | with open(path, 'r', encoding="utf-8") as file: 233 | return file.read() 234 | 235 | def get_files(path, get_folders = False, recursively = True): 236 | list = [] 237 | for item in os.scandir(path): 238 | if item.is_file(): 239 | list.append(item) 240 | else: 241 | if get_folders: 242 | list.append(item) 243 | if recursively: 244 | list.extend(get_files(item.path, get_folders, recursively)) 245 | return list 246 | 247 | 248 | def deduplicate(list_to_deduplicate: list): 249 | return list(dict.fromkeys(list_to_deduplicate)) 250 | 251 | def remove_empty(iterable): 252 | if isinstance(iterable, dict): 253 | for key in list(iterable.keys()): 254 | if not iterable[key]: 255 | iterable.pop(key) 256 | elif isinstance(iterable, list): 257 | index = len(iterable) - 1 258 | for item in reversed(iterable): 259 | if not item: 260 | iterable.pop(index) 261 | index -= 1 262 | else: 263 | raise TypeError(f"The argument type should be \"dict\" or \"list\" not {type(iterable)}") 264 | 265 | def extract_zip(file: typing.Union[str, typing.IO[bytes]], path = None, extract = True, recursively = True): 266 | """ 267 | `file`: a path to a zip file \n 268 | `path`: a target root folder, if `None` the zip's folder is used \n 269 | `extract`: if `False` the function only returns the list of files without an extraction \n 270 | `recursively`: extract zips recursively 271 | """ 272 
import os
import io
import zipfile
import typing
import operator


def extract_zip(file: typing.Union[str, typing.IO[bytes]], path = None, extract = True, recursively = True):
    """Extract a zip archive, optionally recursing into nested zips.

    `file`: a path to a zip file (or a binary file object for nested calls)
    `path`: a target root folder, if `None` the zip's folder is used
    `extract`: if `False` the function only returns the list of files without an extraction
    `recursively`: extract zips recursively

    Returns the list of extracted (or would-be extracted) file paths.
    """
    extracted_files = []
    if path is None:
        path = os.path.splitext(file)[0]
    # `path` keeps forward slashes for recursion; `to_path` is OS-native.
    to_path = path.replace("/", os.sep)
    if extract:
        os.makedirs(to_path, exist_ok=True)
    with zipfile.ZipFile(file) as zip_file:
        for name in zip_file.namelist():
            if name.endswith(".zip") and recursively:
                # Nested zip: recurse on an in-memory copy, into a subfolder
                # named after the inner archive.
                inner_path = '/'.join((path, name[:-4]))
                extracted_files.extend(extract_zip(io.BytesIO(zip_file.read(name)), inner_path, extract, recursively))
            else:
                if extract:
                    extracted_files.append(zip_file.extract(name, to_path))
                else:
                    extracted_files.append(os.path.join(to_path, name.replace("/", os.sep)))
    return extracted_files


def get_last_file(path: str, type: typing.Union[str, typing.Tuple[str]], recursively = True) -> str:
    """Return the newest file under `path` whose lowercase name ends with `type`."""
    # Fix: `recursively` was passed positionally into `get_files`'s
    # `get_folders` parameter, which both included folders in the scan and
    # ignored the flag entirely.
    files = [file for file in get_files(path, recursively = recursively) if file.name.lower().endswith(type)]
    if not files:
        return None
    # Fix: the final tie-breaker compared DirEntry objects, which are not
    # orderable; compare their paths instead.
    return max(files, key=lambda x: (os.path.getmtime(x), os.path.getctime(x), x.path)).path


class Item_Location:
    """The location of a value inside a nested dict/list structure."""

    def __init__(self, path, iter):
        self.path = path  # list of keys/indexes from the root to the item
        self.iter = iter  # the root container

    @property
    def string(self):
        """A subscription expression string, e.g. "['a'][2]['b']"."""
        return "".join(("".join(("[", fragment.__repr__(),"]")) for fragment in self.path))

    @property
    def data(self):
        """The located value itself."""
        data = self.iter
        for fragment in self.path:
            data = data[fragment]
        return data

    @property
    def parent(self):
        """The container directly holding the located value."""
        parent = self.iter
        for fragment in self.path[:-1]:
            parent = parent[fragment]
        return parent

    def get_parent(self, level = 1):
        """The container `level` steps above the located value."""
        parent = self.iter
        for fragment in self.path[:-level]:
            parent = parent[fragment]
        return parent


def locate_item(iter, item, is_dict_key = False, return_as = None, mode = 'eq'):
    """Find every occurrence of `item` inside the nested structure `iter`.

    `item`: a value to find, a dict key when `is_dict_key`, or a
        `(key, value)` tuple to match both at once
    `return_as`: name of an `Item_Location` property ('data', 'parent',
        'string', ...) to return instead of the locations themselves
    `mode`: an `operator` comparison name; 'contains' does a
        case-insensitive substring test for strings
    """

    def contains(a, b):
        # Case-insensitive substring match for strings, equality otherwise.
        if type(a) == str and type(b) == str:
            return operator.contains(b.lower(), a.lower())
        else:
            return operator.eq(a, b)

    if mode == 'eq':
        comparison = operator.eq
    elif mode == 'contains':
        comparison = contains
    else:
        comparison = getattr(operator, mode)


    def locate_value(iter, item, path = []):
        # NOTE: `path` defaults are never mutated, only copied via `path + [...]`.
        if isinstance(iter, (list, tuple)):
            for index, value in enumerate(iter):
                if isinstance(value, (list, dict, tuple)):
                    yield from locate_value(value, item, path + [index])
                elif comparison(item, value):
                    yield path + [index]
        elif isinstance(iter, dict):
            for name, value in iter.items():
                if isinstance(value, (list, dict, tuple)):
                    yield from locate_value(value, item, path + [name])
                elif comparison(item, value):
                    yield path + [name]


    def locate_key(iter, item, path = []):
        if isinstance(iter, (list, tuple)):
            for index, value in enumerate(iter):
                yield from locate_key(value, item, path + [index])
        elif isinstance(iter, dict):
            for key, value in iter.items():
                if isinstance(value, (list, dict, tuple)):
                    yield from locate_key(value, item, path + [key])
                elif comparison(item, key):
                    yield path + [key]


    def locate_key_and_value(iter, item, path = []):
        if isinstance(iter, (list, tuple)):
            for index, value in enumerate(iter):
                yield from locate_key_and_value(value, item, path + [index])
        elif isinstance(iter, dict):
            for key, value in iter.items():
                if isinstance(value, (list, dict, tuple)):
                    yield from locate_key_and_value(value, item, path + [key])
                elif comparison(item[0], key) and comparison(item[1], value):
                    yield path + [key]


    if isinstance(item, tuple):
        locate = locate_key_and_value
    else:
        locate = locate_key if is_dict_key else locate_value

    if return_as:
        return [getattr(Item_Location(path, iter), return_as) for path in locate(iter, item)]
    else:
        return [Item_Location(path, iter) for path in locate(iter, item)]
import asset_parser 447 | else: 448 | import asset_parser 449 | 450 | is_success, zip = asset_parser.get_web_file(r"https://www.voidtools.com/ES-1.1.0.18.zip", content_folder = temp_dir) 451 | if not is_success: 452 | self.error_text = "Cannot download es.exe" 453 | self.is_initialized = True 454 | return 455 | 456 | for file in extract_zip(zip): 457 | if os.path.basename(file) == 'es.exe': 458 | move_to_folder(file, os.path.dirname(__file__), create = False) 459 | 460 | if not os.path.exists(es_exe): 461 | self.error_text = "Cannot find es.exe in downloads." 462 | self.is_initialized = True 463 | return None 464 | 465 | self.es_exe = es_exe 466 | self.is_initialized = True 467 | 468 | def find(self, names): 469 | 470 | if not self.is_available: 471 | raise BaseException(self.error_text) 472 | 473 | query = '|'.join(["*\\" + '"' + name + '"' for name in names]) 474 | 475 | with tempfile.TemporaryDirectory() as temp_dir: 476 | temp_file = os.path.join(temp_dir, "temp.txt") 477 | command = ' '.join(['"' + self.es_exe + '"', query, '-export-txt', '"' + temp_file + '"']) 478 | subprocess.run(command) 479 | with open(temp_file, encoding='utf-8') as text: 480 | paths = text.read().split("\n")[:-1] 481 | 482 | return paths 483 | 484 | def get_everything(self, query): 485 | 486 | if not self.is_available: 487 | raise BaseException(self.error_text) 488 | 489 | with tempfile.TemporaryDirectory() as temp_dir: 490 | temp_file = os.path.join(temp_dir, "temp.txt") 491 | subprocess.run([self.es_exe, query, '-export-txt', temp_file]) 492 | with open(temp_file, encoding='utf-8') as text: 493 | return text.read().split("\n")[:-1] 494 | 495 | EVERYTHING = Everything() 496 | 497 | 498 | def get_closest_path(lost_path, string_paths): 499 | 500 | lost_path = lost_path.lower().split(os.sep)[:-1] 501 | paths = [path.lower().split(os.sep)[:-1] for path in string_paths] 502 | 503 | if os.name == 'nt': 504 | lost_path.insert(0, 'root') 505 | for path in paths: 506 | path.insert(0, 'root') 
def os_open(operator, path):
    """Open `path` with the OS default application.

    `operator` is a Blender operator used only for error reporting.
    NOTE: the parameter shadows the stdlib `operator` module inside this function.
    """
    if PLATFORM == 'win32':
        os.startfile(path)
    elif PLATFORM == 'darwin':
        subprocess.Popen(['open', path])
    else:
        try:
            subprocess.Popen(['xdg-open', path])
        except OSError:
            operator.report({'INFO'}, "Current OS is not supported.")
            import traceback
            traceback.print_exc()

def os_show(operator, files: typing.Iterable[str]):
    """Reveal `files` in the system file browser, selecting them on Windows.

    On non-Windows systems the containing directories are simply opened.
    """
    if PLATFORM != 'win32':
        for directory in deduplicate([os.path.dirname(file) for file in files]):
            os_open(operator, directory)
        return

    files = [file.lower() for file in files]
    directories = list_by_key(files, os.path.dirname)

    import ctypes
    import ctypes.wintypes

    prototype = ctypes.WINFUNCTYPE(ctypes.POINTER(ctypes.c_int), ctypes.wintypes.LPCWSTR)
    paramflags = (1, "pszPath"),
    ILCreateFromPathW = prototype(("ILCreateFromPathW", ctypes.windll.shell32), paramflags)

    ctypes.windll.ole32.CoInitialize(None)

    try:
        for directory, directory_files in directories.items():

            directory_pidl = ILCreateFromPathW(directory)

            file_pidls = (ctypes.POINTER(ctypes.c_int) * len(directory_files))()
            try:
                for index, file in enumerate(directory_files):
                    file_pidls[index] = ILCreateFromPathW(file)

                ctypes.windll.shell32.SHOpenFolderAndSelectItems(directory_pidl, len(file_pidls), file_pidls, 0)
            finally:
                # Free the item ID lists even when the shell call raises,
                # otherwise the PIDLs leak.
                ctypes.windll.shell32.ILFree(directory_pidl)
                for file_pidl in file_pidls:
                    ctypes.windll.shell32.ILFree(file_pidl)
    finally:
        ctypes.windll.ole32.CoUninitialize()
def web_open(string, is_url = False):
    """Open `string` in the browser: as a URL when `is_url`, otherwise as a Google search."""

    starts_with_http = string.startswith("https://") or string.startswith("http://")

    if is_url:
        url = string if starts_with_http else "https://" + string
    else:
        url = string if starts_with_http else fr"https://www.google.com/search?q={string}"

    import webbrowser
    webbrowser.open(url, new=2, autoraise=True)


def list_by_key(items, key_func):
    """Group `items` into a dict mapping key_func(item) -> list of matching items."""
    groups = {}
    for item in items:
        # setdefault replaces the original get/append-or-create dance.
        groups.setdefault(key_func(item), []).append(item)
    return groups

def map_to_list(mapping: dict, key, value: typing.Union[list, dict, typing.Any]):
    """Append `value` to the list stored at `mapping[key]`, creating it if needed.

    Dict values are flattened: their own key/value pairs are merged recursively
    and `key` is then ignored. Falsy values are deliberately skipped.
    """
    if not value:
        return

    if isinstance(value, dict):
        # Renamed the loop variables: the original shadowed the `key`/`value` parameters.
        for sub_key, sub_value in value.items():
            map_to_list(mapping, sub_key, sub_value)
        return

    existing = mapping.get(key)
    if existing:
        if isinstance(value, list):
            existing.extend(value)
        else:
            existing.append(value)
    else:
        # Copy list values so the stored list does not alias the caller's.
        mapping[key] = value.copy() if isinstance(value, list) else [value]

def get_time_stamp():
    """Timestamp string such as '240131_235959' (yymmdd_HHMMSS)."""
    return datetime.now().strftime('%y%m%d_%H%M%S')


def get_longest_substring(strings: typing.Iterable[str], from_beginning = False):
    """Return the longest substring common to all `strings`.

    With `from_beginning` only common prefixes are considered. `strings` must
    be sized (len() is used). Returns "" when nothing is shared.
    """

    if len(strings) == 1:
        return list(strings)[0]

    sets = []
    if from_beginning:
        for string in strings:
            string_set = []
            string_len = len(string)
            for i in range(string_len):
                string_set.append(string[:i + 1])
            sets.append(set(string_set))
    else:
        # All substrings of every string; O(len^2) space per string,
        # acceptable for the short names this is used on.
        for string in strings:
            string_set = []
            string_len = len(string)
            for i in range(string_len):
                for j in range(i + 1, string_len + 1):
                    string_set.append(string[i:j])
            sets.append(set(string_set))

    mega_set = set().union(*sets)

    for string_set in sets:
        mega_set.intersection_update(string_set)

    if not mega_set:
        return ""

    return max(mega_set, key=len)
def get_slug(string):
    """Make a string safe for use as a file name: strip characters forbidden on
    Windows, trim spaces and replace space runs with underscores."""
    string = re.sub("[\\\\\/:*?\"<>|]", "", string)
    string = string.strip(" ")
    string = re.sub(" +", "_", string)
    return string


def get_desktop():
    """Return the user's desktop folder (registry on Windows, ~/Desktop otherwise)."""
    try:
        import winreg
        with winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\User Shell Folders") as key:
            return winreg.QueryValueEx(key, "Desktop")[0]
    except (ImportError, OSError):
        # Narrowed from a bare except. ImportError covers non-Windows systems
        # (no winreg module); OSError covers registry access failures.
        return os.path.expanduser("~/Desktop")

def ensure_unique_path(path):
    """Return `path`, or `path` with a numeric suffix (_2, _3, ...) when it already exists."""

    if not os.path.exists(path):
        return path

    dir = os.path.dirname(path)
    stem, ext = os.path.splitext(os.path.basename(path))
    number = 2

    new_path = os.path.join(dir, stem + f"_{number}" + ext)
    while os.path.exists(new_path):
        number += 1
        new_path = os.path.join(dir, stem + f"_{number}" + ext)

    return new_path

def get_path_list(path: str):
    """List of ancestor paths of `path`, shortest first, ending with `path` itself."""
    fragmented_path = path.split(os.sep)
    # Built in strictly increasing length, so the original sort was redundant.
    return [os.sep.join(fragmented_path[:i + 1]) for i in range(len(fragmented_path))]

def get_path_set(path: str):
    """Set of ancestor paths of `path`, including `path` itself."""
    fragmented_path = path.split(os.sep)
    return {os.sep.join(fragmented_path[:i + 1]) for i in range(len(fragmented_path))}

def synchronized(func):
    """Method decorator that serializes calls through the instance's `self.lock`."""
    def wrapper(*args, **kw):
        with args[0].lock: # self.lock
            return func(*args, **kw)
    return wrapper

import binascii
try:
    import xxhash
except ImportError:
    # Optional at import time so the module still loads without the vendored
    # dependency; get_file_hash() fails loudly if xxhash is actually missing.
    xxhash = None
SAMPLE_THRESHOLD = 256 * 1024  # files below this size are hashed in full
SAMPLE_SIZE = 32 * 1024  # bytes sampled at the start, middle and end of big files

def encode_leb128(n):
    """Encode a non-negative int as LEB128 bytes. https://en.wikipedia.org/wiki/LEB128"""
    result = []
    while n >= 128:
        result.append(n & 127 | 128)
        n >>= 7
    result.append(n)
    return bytes(result)

def get_file_hash(path):
    """Fast sampled file hash.

    Hashes the whole file when it is small, otherwise xxh3-128 over three
    SAMPLE_SIZE samples (start/middle/end), and mixes the LEB128-encoded file
    size into the digest for compatibility with the imohash scheme.
    """
    with open(path, 'rb') as f:

        size = os.fstat(f.fileno()).st_size

        if size < SAMPLE_THRESHOLD:
            data = f.read()
        else:
            data = f.read(SAMPLE_SIZE)

            f.seek(size//2)
            data += f.read(SAMPLE_SIZE)

            f.seek(-SAMPLE_SIZE, os.SEEK_END)
            data += f.read(SAMPLE_SIZE)

    digest = xxhash.xxh3_128_digest(data)

    # left for backward compatibility with imohash from https://pypi.org/project/imohash/ by kalafut using xxhash
    digest = digest[7::-1] + digest[16:7:-1]

    leb128_encoded_size = encode_leb128(size)
    digest = leb128_encoded_size + digest[len(leb128_encoded_size):]

    return binascii.hexlify(digest).decode()


def print_json(object):
    """Pretty-print `object` as JSON, falling back to repr() for non-serializable values."""
    # `default=repr` replaces the equivalent `lambda x: x.__repr__()`.
    print(json.dumps(object, indent=4, default=repr))

import inflection

PLURALS_SUB = [(re.compile(rule), replacement) for rule, replacement in inflection.PLURALS]
UNCOUNTABLES_PLURALIZE = inflection.UNCOUNTABLES

cache_pluralize = {}

def pluralize(word: str) -> str:
    """Return the plural form of an English word; memoized in `cache_pluralize`."""
    result = cache_pluralize.get(word)
    if result:
        return result

    if word in UNCOUNTABLES_PLURALIZE:
        cache_pluralize[word] = word
        return word

    for rule, replacement in PLURALS_SUB:
        result = rule.sub(replacement, word)
        if word != result:
            cache_pluralize[word] = result
            return result

    cache_pluralize[word] = word
    return word

# The loop variable is `uncountable`: the original comprehension named it
# `inflection`, shadowing the module it was iterating over.
UNCOUNTABLES_SINGULARIZE = [re.compile(r'(?i)\b(%s)\Z' % uncountable) for uncountable in inflection.UNCOUNTABLES]
SINGULARS_SUB = [(re.compile(rule), replacement) for rule, replacement in inflection.SINGULARS]
[(re.compile(rule), replacement) for rule, replacement in inflection.SINGULARS]

cache_singularize = {}

def singularize(word: str) -> str:
    """Return the singular form of an English word; memoized in `cache_singularize`."""
    result = cache_singularize.get(word)
    if result:
        return result

    # NOTE(review): the loop variable shadows the `inflection` module inside
    # this function; harmless here but easy to trip over.
    for inflection in UNCOUNTABLES_SINGULARIZE:
        if inflection.search(word):
            cache_singularize[word] = word
            return word

    for rule, replacement in SINGULARS_SUB:
        result = rule.sub(replacement, word)
        if word != result:
            cache_singularize[word] = result
            return result

    cache_singularize[word] = word
    return word

# Regexes for splitting camelCase and extracting word-like tags.
SUB_1 = re.compile(r"([A-Z]+)([A-Z][a-z])")
SUB_2 = re.compile(r"([a-z\d])([A-Z])")
TAGS = re.compile('[^\W_]+')

def split(word: str):
    """Split a camelCase/underscore/delimited word into its component words."""
    word = SUB_1.sub(r'\1 \2', word)
    word = SUB_2.sub(r'\1 \2', word)
    return TAGS.findall(word)

def get_most_common(items: typing.Iterable):
    """Return the most frequent item; ties go to the first encountered item."""
    dictionary = {}
    for item in items:
        dictionary[item] = dictionary.get(item, 0) + 1
    return sorted(dictionary.items(), key = operator.itemgetter(1), reverse = True)[0][0]


def timeit(text = None, digits = 2, average = False, timeit_average_dict = {}):
    """Decorator factory that prints how long the wrapped function took.

    For a running average, pass a `text` label that is unique per measured
    item and set `average=True`.

    NOTE: the mutable default `timeit_average_dict` is intentional — it is
    the shared store for the running averages across all decorated functions.
    """

    def timeit_decorator(func):

        @functools.wraps(func)
        def timeit_func(*args, **kwargs):

            start = default_timer()
            return_value = func(*args, **kwargs)
            end = default_timer()

            nonlocal text

            time = end - start
            if average and text:
                result = timeit_average_dict.get(text)
                if result:
                    count, func_time = result
                    timeit_average_dict[text] = (count + 1, func_time + time)
                    time = (func_time + time)/(count + 1)
                else:
                    timeit_average_dict[text] = (1, time)

            if not text:
                text = func.__name__ + ' took'

            if time >= 1:
                time = round(time, digits)
            else:
                # Round to `digits` significant decimals after the leading zeros.
                for index, digit in enumerate(format(time, 'f')[2:]):
                    if digit == '0':
                        continue
                    time = round(time, digits + index)
                    break

            # NOTE(review): rstrip('.0') strips trailing zeros AND dots, so a
            # value like 10.0 prints as '1' — confirm before relying on output.
            print(f"{text}: {format(time, 'f').rstrip('.0')} s")

            return return_value

        return timeit_func

    return timeit_decorator

# ---- file: view_3d_fur_operator.py ----

import bpy
import mathutils
import json
import subprocess
import os
import threading

from . import bl_utils
from . import utils

register = bl_utils.Register(globals())

class Poll():
    # Restricts operators to the 3D viewport in object or particle mode.
    @classmethod
    def poll(cls, context):
        return context.space_data and context.space_data.type == 'VIEW_3D' and context.mode in ('OBJECT', 'PARTICLE')

class ATOOL_OT_add_fur(bpy.types.Operator, bl_utils.Object_Mode_Poll):
    # Adds a preconfigured hair particle system to the active object.
    bl_idname = "atool.add_fur"
    bl_label = "Add Fur"
    bl_description = "Add fur particle system."
    bl_options = {'REGISTER', 'UNDO'}

    name: bpy.props.StringProperty(name = "Name", default = 'Fur')
    length: bpy.props.FloatProperty(name = "Length", default = 0.02)
    number: bpy.props.IntProperty(name = "Number", default = 500)

    vertex_group_density: bpy.props.StringProperty(name = "Density")
    invert_vertex_group_density: bpy.props.BoolProperty(name = "Invert Density")

    material: bpy.props.StringProperty(name = "Material")

    def draw(self, context):
        # Operator redo panel layout.
        layout = self.layout
        layout.use_property_split = True

        col = layout.column()
        col.prop(self, "name")
        col.prop(self, "length")
        col.prop(self, "number")

        row = col.row(align=True)
        row.prop_search(self, "vertex_group_density", context.object, "vertex_groups")
        row.prop(self, "invert_vertex_group_density", text = "", toggle = True, icon = 'ARROW_LEFTRIGHT')

        col.prop_search(self, "material", context.object, "material_slots")

    @property
    def random(self):
        # Fresh random particle seed in [0, 9998].
        return int(mathutils.noise.random() * 9999)

    def set_settings(self, settings: bpy.types.ParticleSettings):
        # Applies the fur preset, scaling values proportionally to the
        # requested hair length relative to the tuned base length below.

        settings.name = self.name

        base_length = 0.016
        def proportional(value):
            # Scale a preset value by length ratio, preserving int-ness.
            new_value = value * self.length / base_length
            if type(value) == int:
                return int(new_value)
            else:
                return new_value

        def proportional_step(value, multiplier = 0.01):
            # Dampened proportional scaling for step counts; never below 1.
            return max(1, value + int((proportional(value) - value) * multiplier))

        settings.count = self.number

        settings.hair_length = self.length
        settings.hair_step = min(proportional_step(5), 16)

        settings.factor_random = proportional(0.0002)
        settings.brownian_factor = proportional(0.0005)

        settings.render_step = min(proportional_step(5), 8)
        settings.display_step = min(proportional_step(3), 6)

        settings.length_random = 1

        settings.child_nbr = 100
        settings.rendered_child_count = 600

        settings.child_length = 0.666667
        settings.child_length_threshold = 0.333333

        # settings.virtual_parents = 1

        settings.clump_factor = 0.25
        settings.clump_shape = -0.1

        settings.child_parting_factor = 1

        settings.roughness_1 = proportional(0.003)
        settings.roughness_1_size = proportional(0.5)
        settings.roughness_endpoint = proportional(0.0022)
        settings.roughness_end_shape = 0
        settings.roughness_2 = proportional(0.007)
        settings.roughness_2_size = proportional(2)
        settings.roughness_2_threshold = 0.666667

        settings.kink = 'CURL'
        settings.kink_amplitude = proportional(0.0025)
        settings.kink_amplitude_clump = 0.5
        settings.kink_flat = 0
        settings.kink_frequency = 1
        settings.kink_shape = -0.333333

        settings.root_radius = 0.01

    def setup_system(self, object: bpy.types.Object):
        # Creates the particle system modifier and configures it as hair.
        particle_system_modifier = object.modifiers.new(name = self.name, type='PARTICLE_SYSTEM')
        particle_system = particle_system_modifier.particle_system # type: bpy.types.ParticleSystem

        particle_system.seed = self.seed
        particle_system.child_seed = self.child_seed

        # Drop a density vertex group name that does not exist on the object.
        if self.vertex_group_density not in (group.name for group in object.vertex_groups):
            self.vertex_group_density = ''

        particle_system.vertex_group_density = self.vertex_group_density
        particle_system.invert_vertex_group_density = self.invert_vertex_group_density

        settings = particle_system.settings # type: bpy.types.ParticleSettings

        settings.type = 'HAIR'
        settings.use_modifier_stack = True
        settings.use_advanced_hair = True
        settings.distribution = 'RAND'
        settings.use_hair_bspline = True
        settings.use_parent_particles = True
        settings.child_type = 'INTERPOLATED'

        # Drop a material slot name that does not exist on the object.
        if self.material not in (material_slot.name for material_slot in object.material_slots):
            self.material = ''

        if self.material:
            settings.material_slot = self.material

        return particle_system

    def invoke(self, context, event):

        selected_objects = context.selected_objects

        if not selected_objects:
            self.report({'INFO'}, f"No selected objects.")
            return {'CANCELLED'}

        target = context.object
        if not hasattr(target, 'modifiers'):
            self.report({'INFO'}, f"{target.name} cannot have particles.")
            return {'CANCELLED'}

        # Keep a safe reference: operator state must survive undo/redo.
        self.target = bl_utils.Reference(target)

        self.seed = self.random
        self.child_seed = self.random

        return self.execute(context)

    def execute(self, context):

        target = self.target.get() # type: bpy.types.Object
        self.object = target

        if tuple(target.scale) != (1.0, 1.0, 1.0):
            self.report({'WARNING'}, f"The object has a not applied scale. This shall effect the fur rending.")

        self.particle_system = self.setup_system(target)
        self.set_settings(self.particle_system.settings)

        return {'FINISHED'}


class ATOOL_OT_isolate_particle_system(bpy.types.Operator, Poll):
    # Shows only the named particle system in the viewport.
    bl_idname = "atool.isolate_particle_system"
    bl_label = "Isolate Particle System"
    bl_description = "Isolate the fur in the viewport display"
    bl_options = {'REGISTER', 'UNDO'}

    name: bpy.props.StringProperty(name = "Name")
    is_additive: bpy.props.BoolProperty(name = "Additive")

    def draw(self, context):
        layout = self.layout
        layout.prop_search(self, "name", context.object, "particle_systems", text = 'Name')
        layout.prop(self, 'is_additive')

    def invoke(self, context, event):
        # Holding Ctrl adds to the current isolation instead of replacing it.
        self.is_additive = event.ctrl
        return self.execute(context)

    def execute(self, context):

        object = context.object

        modifiers = [modifier for modifier in
 object.modifiers if modifier.type == 'PARTICLE_SYSTEM']
        for modifier in modifiers:
            particle_system = modifier.particle_system

            if self.name == particle_system.name:
                modifier.show_viewport = True
            elif not self.is_additive:
                modifier.show_viewport = False

        # Make the isolated system the active one in the particle UI.
        for index, particle_system in enumerate(object.particle_systems):
            if particle_system.name == self.name:
                object.particle_systems.active_index = index

        return {'FINISHED'}


class ATOOL_OT_show_all_particle_systems(bpy.types.Operator, Poll):
    # Restores viewport visibility for every hair particle system.
    bl_idname = "atool.show_all_particle_systems"
    bl_label = "Show All"
    bl_description = "Show all the fur in the viewport display"
    bl_options = {'REGISTER', 'UNDO'}

    show_viewport: bpy.props.BoolProperty(name = 'Show Viewport', default = True)

    def execute(self, context):

        object = context.object

        modifiers = [modifier for modifier in object.modifiers if modifier.type == 'PARTICLE_SYSTEM']
        for modifier in modifiers:
            particle_system = modifier.particle_system

            settings = particle_system.settings # type: bpy.types.ParticleSettings
            # Only hair systems are toggled; emitters are left alone.
            if settings.type == 'HAIR':
                modifier.show_viewport = self.show_viewport

        return {'FINISHED'}


class ATOOL_OT_render_view(bpy.types.Operator, bl_utils.Object_Mode_Poll):
    # Renders the current viewport camera in a background Blender process.
    bl_idname = "atool.render_view"
    bl_label = "Render View"
    bl_description = "Render in background the current camera position with respect to viewport object and modifier visibility and save in the Desktop folder"
    bl_options = {'REGISTER'}

    resolution: bpy.props.IntProperty(name = 'Resolution', default = 512)
    samples: bpy.props.IntProperty(name = 'Samples', default = 10)

    use_default_world: bpy.props.BoolProperty(name = 'Default World', default = False)
    use_film_transparent: bpy.props.BoolProperty(name = 'Film Transparent', default = False)

    def invoke(self, context, event):
        return context.window_manager.invoke_props_dialog(self, width = 300)

    def execute(self, context: bpy.context):

        space_view_3d = context.space_data
        region_3d = context.space_data.region_3d

        is_local_view = bool(space_view_3d.local_view)
        if is_local_view:
            local_view_objects = [object.name for object in bl_utils.get_local_view_objects(context)]
        else:
            local_view_objects = []

        # Viewport camera transform, converted to plain lists for JSON.
        view_matrix = region_3d.view_matrix.inverted()
        view_matrix_serializable = [list(row) for row in view_matrix]

        # Save a copy of the current file for the background render worker.
        filepath = os.path.join(bpy.app.tempdir, utils.get_time_stamp() + '.blend')
        bpy.ops.wm.save_as_mainfile(filepath = filepath, copy=True, compress = False, check_existing = False)

        data = {
            'resolution': self.resolution,
            'samples': self.samples,
            'use_default_world': self.use_default_world,
            'use_film_transparent': self.use_film_transparent,

            'view_matrix': view_matrix_serializable,
            'lens': context.space_data.lens,
            'clip_start': context.space_data.clip_start,
            'clip_end': context.space_data.clip_end,

            'is_local_view': is_local_view,
            'local_view_objects': local_view_objects,

            'filepath': filepath
        }

        render_preview = utils.get_script('preview.py')
        argv = ['-job', json.dumps(data)]

        # Run in a thread so the UI stays responsive while Blender renders.
        def run():
            bl_utils.run_blender(script = render_preview, argv = argv, use_atool = False) #, stdout = subprocess.DEVNULL)

        threading.Thread(target = run, args = ()).start()

        return {'FINISHED'}


class ATOOL_OT_rename_particle_system(bpy.types.Operator, Poll):
    # Renames the modifier, the system and its settings in one go.
    bl_idname = "atool.rename_particle_system"
    bl_label = "Rename Particle System"
    bl_description = "Rename the system, the modifier and the settings"
    bl_options = {'REGISTER', 'UNDO'}

    name: bpy.props.StringProperty()
    new_name: bpy.props.StringProperty()

    def invoke(self, context, event):
        self.new_name = self.name
        return context.window_manager.invoke_props_dialog(self, width = 300)

    def draw(self, context):
        layout = self.layout
        layout.prop(self, "new_name", text = 'New Name')

    def execute(self, context):

        object = context.object

        modifiers = [modifier for modifier in object.modifiers if modifier.type == 'PARTICLE_SYSTEM']
        for modifier in modifiers:
            particle_system = modifier.particle_system

            # Keep modifier, system and settings names in sync.
            if self.name == particle_system.name:
                modifier.name = self.new_name
                particle_system.name = self.new_name
                particle_system.settings.name = self.new_name

        return {'FINISHED'}


# Remembers an object's armature parent while the armature is disabled,
# so that it can be restored when re-enabled.
register.property(
    'atool_enable_armature_parent',
    bpy.props.PointerProperty(type = bpy.types.Object),
    bpy.types.Object
)

class ATOOL_OT_enable_armature(bpy.types.Operator, Poll):
    bl_idname = "atool.enable_armature"
    bl_label = "Enable Armature"
    bl_description = "Enable and parent the armature connected to the active and selected objects or if is an armature for all the users"
    bl_options = {'REGISTER', 'UNDO'}

    enable: bpy.props.BoolProperty(name = 'Enable', default = True)

    def execute(self, context):

        selected_objects = context.selected_objects
        if not context.object in selected_objects:
            selected_objects.append(context.object)

        # Collect armatures: selected armatures plus any referenced by
        # armature modifiers of the selected objects.
        armatures = set()

        for object in selected_objects:

            if object.type == 'ARMATURE':
                armatures.add(object)
                continue

            for modifier in object.modifiers:

                if modifier.type != 'ARMATURE':
                    continue

                armature = modifier.object
                if armature:
                    armatures.add(armature)

        for armature in armatures:
            armature.hide_set(not self.enable)

        # Toggle every user of the collected armatures scene-wide.
        for object in bpy.data.objects:
            for modifier in object.modifiers:

                if modifier.type != 'ARMATURE':
                    continue

                if modifier.object in armatures:
                    modifier.show_viewport = self.enable
                    modifier.show_render = self.enable

                    # NOTE(review): the indentation of this branch is ambiguous
                    # in the reviewed dump — confirm it sits at this level.
                    if self.enable:
                        if object.get('atool_enable_armature_parent'):
                            object.parent = object['atool_enable_armature_parent']
                    else:
                        if object.parent and object.parent in armatures:
                            object['atool_enable_armature_parent'] = object.parent
                            object.parent = None

        return {'FINISHED'}

# ---- file: view_3d_ui.py ----

import bpy

from . import utils
from . import bl_utils
from . import node_utils
from . import data
from . import shader_editor_operator

register = bl_utils.Register(globals())


class ATOOL_PT_search(bpy.types.Panel):
    # Popover with paging controls for the asset browser.
    bl_idname = "ATOOL_PT_search"
    bl_label = ""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'WINDOW'
    bl_context = "objectmode"

    def draw(self, context):
        column = self.layout.column()
        column.prop(context.window_manager, "at_current_page")
        column.prop(context.window_manager, "at_assets_per_page")

class ATOOL_PT_import_config(bpy.types.Panel):
    # Popover for the material import configuration.
    bl_idname = "ATOOL_PT_import_config"
    bl_label = "Material Import Config"
    bl_space_type = "VIEW_3D"
    bl_region_type = "WINDOW"

    def draw(self, context):
        layout = self.layout
        layout.alignment = 'LEFT'
        shader_editor_operator.draw_import_config(context, layout)


class ATOOL_MT_asset(bpy.types.Menu):
    # Per-asset actions menu.
    bl_idname = "ATOOL_MT_asset"
    bl_label = "Asset"

    def draw(self, context):
        layout = self.layout

        layout.operator("atool.open_info",
 icon='FILE_TEXT')
        layout.operator("atool.icon_from_clipboard", icon='IMAGE_DATA')
        layout.operator("atool.render_icon", icon='RESTRICT_RENDER_OFF')
        layout.operator("atool.reload_asset", text='Reload', icon='FILE_REFRESH').do_reimport = False
        layout.operator("atool.get_web_info", icon='INFO')
        layout.operator("atool.reload_asset", text='Reimport', icon='IMPORT').do_reimport = True

        layout.separator()
        layout.operator("atool.import_files", icon="SHADING_TEXTURE")
        layout.operator("atool.add_remote_asset", icon="FILEBROWSER")

        layout.separator()
        layout.operator("atool.get_web_asset", icon="URL")
        layout.menu('ATOOL_MT_urls', icon = 'URL')

        layout.separator()
        layout.operator("atool.delete_file_cache")
        layout.operator("atool.move_asset_to_desktop", icon='SCREEN_BACK')

class ATOOL_MT_urls(bpy.types.Menu):
    # Quick links to common asset sites.
    bl_idname = "ATOOL_MT_urls"
    bl_label = "URLs"

    def draw(self, context):
        layout = self.layout

        layout.operator("atool.open_url", text = "Poly Haven").url = 'https://polyhaven.com/textures'
        layout.operator("atool.open_url", text = "Ambient CG").url = 'https://ambientcg.com/list?type=Atlas,Decal,Material'

class ATOOL_MT_asset_library(bpy.types.Menu):
    # Library-wide maintenance actions.
    bl_idname = "ATOOL_MT_asset_library"
    bl_label = "Library"

    def draw(self, context):
        layout = self.layout

        layout.operator("atool.process_auto", text = "Process Auto Folder", icon="NEWFOLDER")
        layout.separator()
        layout.operator("atool.reload_addon", text = "Reload Addon")
        layout.separator()
        layout.operator("atool.delete_all_file_caches")

class ATOOL_MT_actions(bpy.types.Menu):
    # Top-level actions dropdown combining the menus above.
    bl_idname = "ATOOL_MT_actions"
    bl_label = "Actions"

    def draw(self, context):
        layout = self.layout
        info = context.window_manager.at_browser_asset_info

        layout.menu('ATOOL_MT_asset')
        layout.menu('ATOOL_MT_asset_library')

        layout.separator()
        layout.popover("ATOOL_PT_import_config")

        layout.separator()
        layout.prop(info, "is_shown", text = "Show Info")
        layout.prop(info, "is_id_shown", text = "Show ID Info")


class ATOOL_MT_unreal(bpy.types.Menu):
    # Unreal Engine import helpers.
    bl_idname = "ATOOL_MT_unreal"
    bl_label = "Unreal"

    def draw(self, context):
        layout = self.layout
        layout.operator("atool.import_unreal", icon="IMPORT")
        layout.operator("atool.copy_unreal_script", icon="COPYDOWN")
        layout.separator()
        layout.operator("atool.clear_custom_normals", icon="MOD_NORMALEDIT")
        layout.operator("atool.smooth_lowpoly", icon="MOD_SMOOTH")

# Window-manager state used by the browser panel below.
register.property(
    'current_browser_asset_id',
    bpy.props.StringProperty(options = {'HIDDEN', 'SKIP_SAVE'})
)

register.property(
    'import_button_icon',
    bpy.props.StringProperty(default='NONE', options = {'HIDDEN', 'SKIP_SAVE'})
)

register.property(
    'is_remote_atool_asset',
    bpy.props.BoolProperty(default = False, options = {'HIDDEN', 'SKIP_SAVE'})
)

class ATOOL_PT_panel(bpy.types.Panel):
    # Main asset browser panel in the 3D viewport sidebar.
    bl_idname = "ATOOL_PT_panel"
    bl_label = "Load Asset"
    bl_category = "AT"
    bl_space_type = 'VIEW_3D'
    bl_region_type = "UI"
    bl_context = "objectmode"

    def draw(self, context):

        wm = context.window_manager
        info = wm.at_browser_asset_info
        asset_data = wm.at_asset_data # type: data.AssetData

        # "Item: i/total Page: p/pages" header for the search popover button.
        item_and_page_info = ''.join((
            "Item: ",
            str(wm.get("at_asset_previews", 0) + 1 + (wm.at_current_page-1) * asset_data.assets_per_page),
            "/",
            str(len(asset_data.search_result)),
            " ",
            "Page: ",
            str(wm.at_current_page),
            "/",
            str(asset_data.number_of_pages)
        ))

        column = self.layout.column(align=False)
        column.prop(wm, "at_search")
        column.popover("ATOOL_PT_search", text=item_and_page_info)

        browser_and_navigation = column.column(align=True)

        previous_and_next_buttons = browser_and_navigation.row(align=True)
        previous_and_next_buttons.scale_x = 3
        previous_and_next_buttons.operator("atool.navigate", icon ='FRAME_PREV').button_index = 0
        previous_and_next_buttons.operator("atool.navigate", icon ='TRIA_LEFT').button_index = 1
        previous_and_next_buttons.operator("atool.open_gallery", text='', icon='FILE_IMAGE')
        previous_and_next_buttons.operator("atool.navigate", icon ='TRIA_RIGHT').button_index = 2
        previous_and_next_buttons.operator("atool.navigate", icon ='FRAME_NEXT').button_index = 3

        browser_and_side_buttons = browser_and_navigation.row(align=True)
        browser_and_side_buttons.template_icon_view(wm, "at_asset_previews", show_labels=True, scale=6.0, scale_popup=5.0)

        side_buttons = browser_and_side_buttons.column(align=True)
        side_buttons.operator("atool.pin_asset", text='', icon='PINNED')
        side_buttons.operator("atool.pin_active_asset", text='', icon='EYEDROPPER')
        side_buttons.operator("atool.open_asset_folder", text='', icon='FILE_FOLDER')
        side_buttons.operator("atool.open_blend", text='', icon='FILE_BLEND')
        side_buttons.operator("atool.render_icon", text='', icon='RESTRICT_RENDER_OFF')
        side_buttons.menu('ATOOL_MT_actions', text='', icon='DOWNARROW_HLT')

        # Sync the displayed info block with the currently browsed asset.
        library_browser_asset_id = wm.at_asset_previews
        if wm.current_browser_asset_id != library_browser_asset_id:
            wm.current_browser_asset_id = library_browser_asset_id

        try:
            asset = asset_data[library_browser_asset_id]

            wm.is_remote_atool_asset = asset.is_remote

            info["id"] = asset.id
            info["name"] = asset.info.get("name", "")
            info["url"] = asset.info.get("url", "")
            info["author"] = asset.info.get("author", "")
            info["author_url"] = asset.info.get("author_url", "")
            info["licence"] = asset.info.get("licence", "")
            info["licence_url"] = asset.info.get("licence_url", "")
            info["description"] = asset.info.get("description", "")
            info["tags"] = ' '.join(asset.info.get("tags", []))

            dimensions = asset.info.get("dimensions", {})
            info['x'] = dimensions.get("x", 1)
            info['y'] = dimensions.get("y", 1)
            info['z'] = dimensions.get("z", 0.1)

            # Pick the import button icon from the asset content type.
            if 'blend' in asset['system_tags']:
                wm.import_button_icon = 'BLENDER'
            elif 'image' in asset['system_tags']:
                wm.import_button_icon = 'SHADING_TEXTURE'
            else:
                wm.import_button_icon = 'GHOST_DISABLED'
        except:
            # No asset under the browser cursor (e.g. empty search): clear the info.
            info["id"] = ""
            info["name"] = ""
            info["url"] = ""
            info["author"] = ""
            info["author_url"] = ""
            info["licence"] = ""
            info["licence_url"] = ""
            info["description"] = ""
            info["tags"] = ""
            info['x'] = 1
            info['y'] = 1
            info['z'] = 0.1

            # import traceback
            # traceback.print_exc()

        column.separator()
        column.operator("atool.import_asset", icon = wm.import_button_icon)
        column.separator()

        if info.is_shown:
            if info.is_id_shown:
                row = column.row(align=True)
                row.operator("atool.open_asset_folder", text='', icon = 'FILE_FOLDER', emboss=False)
                if wm.is_remote_atool_asset:
                    row.label(text = "Remote Asset")
                else:
                    row.prop(info, "id", text="")

            row = column.row(align=True)
            row.operator("atool.open_attr", icon = 'SYNTAX_OFF', emboss=False).attr_name = "name"
            row.prop(info, "name", text="")

            row = column.row(align=True)
            row.operator("atool.open_attr", icon = 'FILTER', emboss=False).attr_name = "tags"
            row.prop(info, "tags", text="")

            row = column.row(align=True)
            row.operator("atool.open_attr", icon = 'URL', emboss=False).attr_name = "url"
            row.prop(info, "url", text="")

            row = column.row(align=True)
            row.operator("atool.open_attr", icon = 'TEXT', emboss=False).attr_name = "description"
            row.prop(info, "description", text="")

            row = column.row(align=True)
            row.operator("atool.open_attr", icon = 'USER', emboss=False).attr_name = "author"
            row.prop(info, "author", text="")

            row.operator("atool.open_attr", icon = 'LINKED', emboss=False).attr_name = "author_url"
            row.prop(info, "author_url", text="")

            row = column.row(align=True)
            row.operator("atool.open_attr", icon = 'COPY_ID', emboss=False).attr_name = "licence"
            row.prop(info, "licence", text="")

            row.operator("atool.open_attr", icon = 'LINKED', emboss=False).attr_name = "licence_url"
            row.prop(info, "licence_url", text="")

            row = column.column(align=True)
            row.prop(info, "x")
            row.prop(info, "y")
            row.prop(info, "z")



class ATOOL_PT_save_asset(bpy.types.Panel):
    # Panel for saving the current selection into the asset library.
    bl_idname = "ATOOL_PT_save_asset"
    bl_label = "Save Asset"
    bl_category = "AT"
    bl_space_type = 'VIEW_3D'
    bl_region_type = "UI"
    bl_context = "objectmode"
    bl_options = {'DEFAULT_CLOSED'}

    def draw(self, context):
        column = self.layout.column(align=False)

        self.asset_data = context.window_manager.at_asset_data # type: data.AssetData
        if not self.asset_data.library:
            column.label(text="No library folder specified.")
            return

        self.selected_objects = context.selected_objects
        if not self.selected_objects:
            column.label(text="No object selected.")
            return

        self.info = context.window_manager.at_template_info

        column.label(text=f"ID: {self.id}")
        column.label(text=f"Name: {self.id.replace('_', ' ')}")
        column.label(text=f"Objects: {len(self.selected_objects)}")
        column.prop(self.info, "do_move_images", text=f"Include images:
{len(self.used_images)}") 307 | column.prop(self.info, "do_move_sub_assets") 308 | 309 | column.prop(self.info, "name", icon='SYNTAX_OFF', icon_only=True) 310 | column.prop(self.info, "tags", icon='FILTER', icon_only=True) 311 | column.prop(self.info, "url", icon='URL', icon_only=True) 312 | column.prop(self.info, "description", icon='TEXT', icon_only=True) 313 | 314 | row = column.row(align=True) 315 | row.prop(self.info, "author", icon='USER', text="") 316 | row.prop(self.info, "author_url", icon='LINKED', text="") 317 | 318 | row = column.row(align=True) 319 | row.prop(self.info, "licence", icon='COPY_ID', text="") 320 | row.prop(self.info, "licence_url", icon='LINKED', text="") 321 | 322 | column.operator("atool.move_to_library", text="Save") 323 | 324 | @property 325 | def id(self): 326 | id = utils.get_slug(self.info.get("name", "")).strip('-_') 327 | if not id: 328 | id = utils.get_slug(utils.get_longest_substring([object.name for object in self.selected_objects])).strip('-_') 329 | if not id: 330 | id = "untitled_" + utils.get_time_stamp() 331 | id = self.asset_data.ensure_unique_id(id) 332 | return id 333 | 334 | @property 335 | def used_images(self): 336 | all_images = [] 337 | for object in self.selected_objects: 338 | if not (object.data and hasattr(object.data, 'materials')): 339 | continue 340 | for material in object.data.materials: 341 | if material: 342 | all_images.extend(node_utils.get_all_images(material.node_tree)) 343 | return utils.deduplicate(all_images) 344 | 345 | 346 | class ATOOL_MT_others(bpy.types.Menu): 347 | bl_idname = "ATOOL_MT_others" 348 | bl_label = "Others" 349 | 350 | def draw(self, context): 351 | layout = self.layout 352 | 353 | layout.operator("atool.show_current_blend") 354 | layout.operator('atool.cap_resolution') 355 | layout.operator('atool.setup_adaptive_subdivision', icon = 'EXPERIMENTAL') 356 | layout.operator('atool.import_sketchfab_zip_caller', icon = 'EXPERIMENTAL') 357 | layout.operator('atool.copy_attribution', 
icon = 'EXPERIMENTAL') 358 | layout.operator('atool.render_partial', icon = 'EXPERIMENTAL') 359 | 360 | layout.separator() 361 | layout.menu('ATOOL_MT_unreal') 362 | 363 | layout.separator() 364 | layout.operator("atool.split_blend_file") 365 | 366 | 367 | class ATOOL_PT_view_3d_tools(bpy.types.Panel): 368 | bl_idname = "ATOOL_PT_view_3d_tools" 369 | bl_label = "Tools" 370 | bl_category = "AT" 371 | bl_space_type = 'VIEW_3D' 372 | bl_region_type = "UI" 373 | bl_context = "objectmode" 374 | 375 | def draw(self, context): 376 | 377 | column = self.layout.column() 378 | column.label(text='Dependency', icon='OUTLINER') 379 | subcolumn = column.column(align=True) 380 | subcolumn.operator("atool.os_open", text = "Show") 381 | subcolumn.operator("atool.reload_dependency", text = "Reload") 382 | subcolumn.operator("atool.find_missing", text = "Find") 383 | subcolumn.operator("atool.remap_paths", text = "Repath") 384 | subcolumn.operator("atool.select_linked") 385 | column.separator() 386 | subcolumn = column.column(align=True) 387 | subcolumn.operator("atool.distribute") 388 | subcolumn.operator("atool.match_displacement") 389 | column.separator() 390 | column.operator("atool.dolly_zoom") 391 | column.operator("atool.unrotate") 392 | column.operator("atool.arrange_by_materials") 393 | column.operator("atool.replace_objects_with_active") 394 | column.separator() 395 | column.menu('ATOOL_MT_others') 396 | 397 | 398 | class ATOOL_PT_view_3d_fur_tools(bpy.types.Panel): 399 | bl_idname = "ATOOL_PT_view_3d_fur_tools" 400 | bl_label = "Fur Tools" 401 | bl_category = "AT" 402 | bl_space_type = 'VIEW_3D' 403 | bl_region_type = "UI" 404 | bl_options = {'DEFAULT_CLOSED'} 405 | 406 | @classmethod 407 | def poll(cls, context): 408 | return context.space_data and context.space_data.type == 'VIEW_3D' and context.mode in ('OBJECT', 'PARTICLE') 409 | 410 | def draw(self, context): 411 | 412 | column = self.layout.column() 413 | 414 | column.operator("atool.add_fur") 415 | 416 | 
column.separator() 417 | 418 | subcolumn = column.column(align=True) 419 | subcolumn.operator("atool.show_all_particle_systems").show_viewport = True 420 | subcolumn.operator("atool.show_all_particle_systems", text = 'Hide All').show_viewport = False 421 | 422 | subcolumn = column.column(align=True) 423 | subcolumn.operator("atool.enable_armature", text = "Enable Armature").enable = True 424 | subcolumn.operator("atool.enable_armature", text = "Disable Armature").enable = False 425 | 426 | column.separator() 427 | 428 | render = context.scene.render 429 | row = column.row(align = True) 430 | row.prop(render, "use_simplify", text="") 431 | 432 | row = row.row(align = True) 433 | row.active = render.use_simplify 434 | row.prop(render, "simplify_child_particles", text="Child Particles") 435 | 436 | box = column.box().column(align=True) 437 | object = context.object 438 | if object: 439 | if object.particle_systems: 440 | modifier_by_particle_system = {modifier.particle_system: modifier for modifier in object.modifiers if modifier.type == 'PARTICLE_SYSTEM'} 441 | active_particle_system = object.particle_systems.active 442 | for particle_system in object.particle_systems: 443 | row = box.row(align = True) 444 | 445 | emboss = particle_system == active_particle_system 446 | icon = 'RESTRICT_VIEW_OFF' if modifier_by_particle_system[particle_system].show_viewport else 'RESTRICT_VIEW_ON' 447 | row.operator("atool.isolate_particle_system", icon = icon, text = str(particle_system.name), depress = True, emboss = emboss).name = particle_system.name 448 | 449 | row.operator("atool.rename_particle_system", text = '', icon = 'FONT_DATA').name = particle_system.name 450 | else: 451 | row = box.row(align = True) 452 | row.label(text = 'No Particle Systems') 453 | 454 | column.operator("atool.render_view") --------------------------------------------------------------------------------