├── assets ├── models │ └── readme.md └── texture │ └── readme.md ├── src ├── i18n │ ├── translations │ │ ├── zh_CN.py │ │ └── zh_HANS.py │ ├── __init__.py │ └── loader.py ├── server │ ├── tools │ │ ├── modifier_tools.py │ │ ├── __init__.py │ │ ├── common.py │ │ ├── common_tools.py │ │ ├── material_tools.py │ │ ├── asset_tools.py │ │ ├── object_tools.py │ │ └── polyhaven_tools.py │ ├── __init__.py │ ├── executor.py │ ├── utils.py │ └── server.py ├── client │ ├── __init__.py │ ├── openrouter.py │ ├── deepseek.py │ ├── oneapi.py │ ├── siliconflow.py │ ├── ollama.py │ ├── claude.py │ ├── openai.py │ └── base.py ├── props.py ├── timer.py ├── __init__.py ├── ui.py ├── logger.py ├── utils.py ├── operator.py ├── watcher.py ├── icon.py └── preference.py ├── .gitignore ├── __init__.py ├── README_CN.md └── README.md /assets/models/readme.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /assets/texture/readme.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/i18n/translations/zh_CN.py: -------------------------------------------------------------------------------- 1 | from .zh_HANS import translations 2 | -------------------------------------------------------------------------------- /src/server/tools/modifier_tools.py: -------------------------------------------------------------------------------- 1 | from .common import ToolsPackageBase 2 | 3 | 4 | class ModifierTools(ToolsPackageBase): 5 | """ 6 | Modifier tools for blender. 7 | """ 8 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | *.pyc 3 | *.cache 4 | *.DS_Store 5 | test_config.json 6 | log.log 7 | *.log* 8 | .idea 9 | config_cache.json 10 | assets/models 11 | !assets/models/README.md 12 | assets/texture 13 | !assets/texture/README.md -------------------------------------------------------------------------------- /src/server/__init__.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | 3 | reg_modules = [ 4 | "server", 5 | "tools", 6 | ] 7 | 8 | reg, unreg = bpy.utils.register_submodule_factory(__package__, reg_modules) 9 | 10 | 11 | # Registration functions 12 | def register(): 13 | reg() 14 | 15 | 16 | def unregister(): 17 | unreg() 18 | -------------------------------------------------------------------------------- /src/i18n/__init__.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | from .loader import load_translations 3 | 4 | 5 | def _T(text, ctx=None) -> str: 6 | return bpy.app.translations.pgettext(text, ctx) 7 | 8 | 9 | def register(): 10 | translations = load_translations() 11 | bpy.app.translations.register(__name__, translations) 12 | 13 | 14 | def unregister(): 15 | bpy.app.translations.unregister(__name__) 16 | -------------------------------------------------------------------------------- /src/client/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import MCPClientBase 2 | from .openai import MCPClientOpenAI 3 | from .deepseek import MCPClientDeepSeek 4 | from .siliconflow import MCPClientSiliconflow 5 | from .ollama import MCPClientLocalOllama 6 | from .claude import 
MCPClientClaude 7 | from .openrouter import MCPClientOpenRouter 8 | 9 | def register(): 10 | pass 11 | 12 | 13 | def unregister(): 14 | pass 15 | -------------------------------------------------------------------------------- /src/server/tools/__init__.py: -------------------------------------------------------------------------------- 1 | from .common import ToolsPackageBase 2 | from .asset_tools import AssetTools 3 | from .common_tools import CommonTools 4 | from .material_tools import MaterialTools 5 | from .object_tools import ObjectTools 6 | from .polyhaven_tools import PolyhavenTools 7 | # from .modifier_tools import ModifierTools 8 | 9 | def register(): 10 | ToolsPackageBase.register() 11 | 12 | def unregister(): 13 | ToolsPackageBase.unregister() -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | # type: ignore 2 | bl_info = { 3 | "name": "GenesisCore", 4 | "author": "幻之境开发小组-会飞的键盘侠(KarryCharon)、只剩一瓶辣椒酱", 5 | "version": (0, 0, 1), 6 | "blender": (4, 0, 0), 7 | "location": "3DView > UI > GenesisCore", 8 | "category": "AI", 9 | } 10 | 11 | import bpy 12 | 13 | reg_modules = [ 14 | "src", 15 | ] 16 | 17 | reg, unreg = bpy.utils.register_submodule_factory(__package__, reg_modules) 18 | 19 | 20 | def register(): 21 | reg() 22 | 23 | 24 | def unregister(): 25 | unreg() 26 | -------------------------------------------------------------------------------- /src/props.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | from .i18n.translations.zh_HANS import PROP_TCTX 3 | 4 | 5 | class McpProps(bpy.types.PropertyGroup): 6 | command: bpy.props.StringProperty(default="", name="Command", translation_context=PROP_TCTX) 7 | image: bpy.props.PointerProperty(type=bpy.types.Image, name="Image") 8 | use_viewport_image: bpy.props.BoolProperty(default=False, name="Use Viewport Image") 9 | 10 | 11 | def register(): 12 | bpy.utils.register_class(McpProps) 13 | bpy.types.Scene.mcp_props = bpy.props.PointerProperty(type=McpProps) 14 | 15 | 16 | def unregister(): 17 | del bpy.types.Scene.mcp_props 18 | bpy.utils.unregister_class(McpProps) 19 | -------------------------------------------------------------------------------- /src/client/openrouter.py: -------------------------------------------------------------------------------- 1 | from .openai import MCPClientOpenAI 2 | 3 | 4 | class MCPClientOpenRouter(MCPClientOpenAI): 5 | @classmethod 6 | def info(cls): 7 | return { 8 | "name": "OpenRouter", 9 | "description": "OpenRouter API", 10 | "version": "0.0.1", 11 | } 12 | 13 | @classmethod 14 | def default_config(cls): 15 | return { 16 | "base_url": "https://openrouter.ai/api", 17 | "api_key": "", 18 | "model": "anthropic/claude-3.5-haiku", 19 | } 20 | 21 | def __init__(self, base_url="https://openrouter.ai/api", api_key="", model="", stream=True): 22 | super().__init__(base_url, api_key, model, stream) 23 | -------------------------------------------------------------------------------- /src/client/deepseek.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from .base import logger 3 | from .openai import MCPClientOpenAI 4 | 5 | 6 | class MCPClientDeepSeek(MCPClientOpenAI): 7 | @classmethod 8 | def info(cls): 9 | return { 10 | "name": "DeepSeek", 11 | "description": "A client that uses DeepSeek for the rendering.", 12 | "version": "0.0.1", 13 | } 14 | 15 | @classmethod 16 | 
def default_config(cls): 17 | return { 18 | "base_url": "https://api.deepseek.com", 19 | "api_key": "", 20 | "model": "deepseek-chat", 21 | } 22 | 23 | def __init__(self, base_url="https://api.deepseek.com", api_key="", model="", stream=True): 24 | super().__init__(base_url, api_key, model, stream) 25 | -------------------------------------------------------------------------------- /src/i18n/loader.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | import importlib 3 | 4 | 5 | def compile_translation(translations: tuple[tuple[str, str]]): 6 | t = {} 7 | for item in translations: 8 | if len(item) < 2: 9 | continue 10 | context = None if len(item) == 2 else item[2] 11 | source, translation = item[:2] 12 | t[(context, source)] = translation 13 | return t 14 | 15 | 16 | def load_translations(): 17 | translations_dir = Path(__file__).parent / "translations" 18 | translations_dict = {} 19 | 20 | for translation_file in translations_dir.glob("*.py"): 21 | if translation_file.stem == "__init__": 22 | continue 23 | language_code = translation_file.stem 24 | locale = language_code.replace('-', '_') 25 | translation_module = importlib.import_module(f".translations.{language_code}", package=__package__) 26 | if not hasattr(translation_module, "translations"): 27 | continue 28 | translations = getattr(translation_module, "translations") 29 | translations_dict[locale] = compile_translation(translations) 30 | 31 | return translations_dict 32 | -------------------------------------------------------------------------------- /src/client/oneapi.py: -------------------------------------------------------------------------------- 1 | import json 2 | import requests 3 | from .openai import MCPClientOpenAI, logger 4 | 5 | 6 | class MCPClientOneAPI(MCPClientOpenAI): 7 | @classmethod 8 | def info(cls): 9 | return { 10 | "name": "OneAPI", 11 | "description": "OneAPI", 12 | "version": "0.0.1", 13 | } 14 | 15 | @classmethod 16 | def default_config(cls): 17 | return { 18 | "base_url": "https://openai.justsong.cn", 19 | "api_key": "", 20 | "model": "Qwen/Qwen2.5-7B-Instruct", 21 | } 22 | 23 | def __init__(self, base_url="https://openai.justsong.cn", api_key="", model="", stream=True): 24 | super().__init__(base_url, api_key, model, stream) 25 | 26 | def response_raise_status(self, response): 27 | try: 28 | response.raise_for_status() 29 | except requests.exceptions.HTTPError: 30 | try: 31 | json_data = response.json() 32 | if message := json_data.get("message"): 33 | if message == "Function call is not supported for this model": 34 | logger.error("此模型不支持工具调用") 35 | raise Exception(message) 36 | except json.JSONDecodeError: 37 | ... 
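# --- Illustrative sketch (not part of oneapi.py) --------------------------------
# Every provider client under src/client/ follows the same pattern as
# MCPClientOneAPI above: subclass MCPClientOpenAI, report info(), supply a
# default_config(), and forward the constructor. The provider below is only a
# sketch -- its name, URL and model are hypothetical placeholders -- and a real
# client would also need to be imported in src/client/__init__.py alongside the
# other clients.
from .openai import MCPClientOpenAI


class MCPClientExampleProvider(MCPClientOpenAI):
    @classmethod
    def info(cls):
        return {
            "name": "ExampleProvider",
            "description": "Hypothetical OpenAI-compatible provider",
            "version": "0.0.1",
        }

    @classmethod
    def default_config(cls):
        return {
            "base_url": "https://api.example-provider.invalid",
            "api_key": "",
            "model": "example-model",
        }

    def __init__(self, base_url="https://api.example-provider.invalid", api_key="", model="", stream=True):
        super().__init__(base_url, api_key, model, stream)
# ---------------------------------------------------------------------------------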
38 | -------------------------------------------------------------------------------- /src/client/siliconflow.py: -------------------------------------------------------------------------------- 1 | import json 2 | import requests 3 | from .openai import MCPClientOpenAI, logger 4 | 5 | 6 | class MCPClientSiliconflow(MCPClientOpenAI): 7 | @classmethod 8 | def info(cls): 9 | return { 10 | "name": "Siliconflow", 11 | "description": "Siliconflow API client", 12 | "version": "0.0.1", 13 | } 14 | 15 | @classmethod 16 | def default_config(cls): 17 | return { 18 | "base_url": "https://api.siliconflow.cn", 19 | "api_key": "", 20 | "model": "Qwen/Qwen2.5-7B-Instruct", 21 | } 22 | 23 | def __init__(self, base_url="https://api.siliconflow.cn", api_key="", model="", stream=True): 24 | super().__init__(base_url, api_key, model, stream) 25 | 26 | def response_raise_status(self, response): 27 | try: 28 | response.raise_for_status() 29 | except requests.exceptions.HTTPError: 30 | try: 31 | json_data = response.json() 32 | if message := json_data.get("message"): 33 | if message == "Function call is not supported for this model": 34 | logger.error("此模型不支持工具调用") 35 | raise Exception(message) 36 | except json.JSONDecodeError: 37 | ... 38 | -------------------------------------------------------------------------------- /src/client/ollama.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | import requests 3 | import json 4 | from copy import deepcopy 5 | from .openai import MCPClientOpenAI, logger 6 | 7 | 8 | class MCPClientLocalOllama(MCPClientOpenAI): 9 | @classmethod 10 | def info(cls): 11 | return { 12 | "name": "LocalOllama", 13 | "description": "Local Ollama client", 14 | "version": "1.0.0", 15 | } 16 | 17 | @classmethod 18 | def default_config(cls): 19 | return { 20 | "base_url": "http://localhost:11434", 21 | "api_key": "", 22 | "model": "llama3.2:3b", 23 | } 24 | 25 | def __init__(self, base_url="http://localhost:11434", api_key="ollama", model="", stream=True): 26 | super().__init__(base_url, api_key=api_key, model=model, stream=stream) 27 | 28 | def response_raise_status(self, response: requests.Response): 29 | try: 30 | response.raise_for_status() 31 | except requests.exceptions.HTTPError: 32 | try: 33 | json_data = response.json() 34 | error = json_data.get("error", "") 35 | if message := error.get("message", ""): 36 | if "does not support tools" in message: 37 | logger.error("当前模型不支持工具调用, 请更换模型") 38 | raise Exception(message) 39 | print(json_data) 40 | except json.JSONDecodeError: 41 | raise 42 | -------------------------------------------------------------------------------- /src/client/claude.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | from .openai import MCPClientOpenAI, logger 4 | 5 | 6 | class MCPClientClaude(MCPClientOpenAI): 7 | @classmethod 8 | def info(cls): 9 | return { 10 | "name": "Claude", 11 | "description": "Claude is a powerful language model that can be used to generate text.", 12 | "version": "0.0.1", 13 | } 14 | 15 | @classmethod 16 | def default_config(cls): 17 | return { 18 | "base_url": "https://api.anthropic.com", 19 | "api_key": "", 20 | "model": "claude-3-5-haiku-20241022", 21 | } 22 | 23 | def __init__(self, base_url="https://api.anthropic.com", api_key="", model="", stream=True): 24 | super().__init__(base_url, api_key, model, stream) 25 | 26 | def get_chat_url(self): 27 | return f"{self.base_url}/v1/chat/completions" 28 | 29 | def 
fetch_models_ex(self): 30 | headers = { 31 | "Content-Type": "application/json", 32 | "Accept": "application/json", 33 | "anthropic-version": "2023-06-01", 34 | "x-api-key": self.api_key, 35 | } 36 | 37 | model_url = f"{self.base_url}/v1/models" 38 | if not self.api_key: 39 | logger.error("API密钥不能为空") 40 | return [] 41 | try: 42 | response = requests.get(model_url, headers=headers) 43 | json_data = response.json() 44 | error = json_data.get("error", {}) 45 | if error: 46 | raise Exception(error.get("message", "Unknown error")) 47 | 48 | models = response.json().get("data", []) 49 | self.models = [model["id"] for model in models] 50 | except Exception as e: 51 | logger.error(f"获取模型列表失败, 请检查大模型服务商, API密钥及base url是否正确: {e}") 52 | return self.models 53 | -------------------------------------------------------------------------------- /src/server/executor.py: -------------------------------------------------------------------------------- 1 | import functools 2 | import logging 3 | import json 4 | from ..timer import Timer 5 | from .utils import rounding_dumps 6 | from ..logger import getLogger 7 | 8 | logger = getLogger("BlenderExecutor") 9 | 10 | 11 | class BlenderExecutor: 12 | instance = None 13 | 14 | @classmethod 15 | def get(cls) -> "BlenderExecutor": 16 | return cls.instance or cls() 17 | 18 | @classmethod 19 | def __new__(cls, *args, **kwargs): 20 | if cls.instance is None: 21 | cls.instance = super().__new__(cls) 22 | return cls.instance 23 | 24 | def send_function_call(self, func, params): 25 | name = func.__name__ 26 | command = {"func": func, "name": name,"params": params or {}} 27 | 28 | logger.info(f"收到命令: {name} 参数: {params}") 29 | response = Timer.wait_run_with_context(self.execute_function)(command) 30 | logger.info(f"执行状态: {response.get('status', 'unknown')}") 31 | 32 | if response.get("status") == "error": 33 | logger.error(f"Blender error: {response.get('message')}") 34 | raise Exception(response.get("message", "Unknown error from Blender")) 35 | result_str = rounding_dumps(response.get("result", {}), ensure_ascii=False) 36 | print("\n--------------------------------", flush=True) 37 | print(f"\t所选工具: {name}") 38 | print(f"\t执行结果: {result_str}") 39 | print("--------------------------------\n", flush=True) 40 | return result_str 41 | 42 | def execute_function(self, command): 43 | func = command.get("func") 44 | name = command.get("name") or func.__name__ 45 | try: 46 | params = command.get("params", {}) 47 | logger.info(f"命令执行: {name} 参数: {params}") 48 | result = func(**params) 49 | return {"status": "success", "result": result} 50 | except Exception as e: 51 | logger.error(f"Error execute {name}: {str(e)}") 52 | return {"status": "error", "message": str(e)} 53 | -------------------------------------------------------------------------------- /src/i18n/translations/zh_HANS.py: -------------------------------------------------------------------------------- 1 | PROP_TCTX = "GenesisCorePropTCTX" 2 | PANEL_TCTX = "GenesisCorePanelTCTX" 3 | OPS_TCTX = "GenesisCoreOpsTCTX" 4 | 5 | translations = ( 6 | ("Command", "命令", PROP_TCTX), 7 | ("Model", "模型", PROP_TCTX), 8 | ("LLM Provider", "大模型服务", PROP_TCTX), 9 | ("Host", "主机", PROP_TCTX), 10 | ("Port", "端口", PROP_TCTX), 11 | ("OpenAI Compatible", "OpenAI兼容", PROP_TCTX), 12 | ("Siliconflow", "硅基流动", PROP_TCTX), 13 | ("ToolPackage", "工具模块", PROP_TCTX), 14 | ("CommonTools", "通用工具模块", PROP_TCTX), 15 | ("Common tools for Blender.", "Blender的通用工具"), 16 | ("AssetTools", "资产工具模块", PROP_TCTX), 17 | ("Custom Asset Tools.", "自定义资产的相关工具"), 18 | 
("ObjectTools", "物体工具模块", PROP_TCTX), 19 | ("Object tools for the Blender scene.", "Blender场景中的物体工具"), 20 | ("PolyhavenTools", "Polyhaven资产模块", PROP_TCTX), 21 | ("Polyhaven tools. Use the api to download assets.(For commercial use see https://polyhaven.com/our-api)", "Polyhaven工具。使用API下载资产。(商业用途请见https://polyhaven.com/our-api)"), 22 | ("MaterialTools", "材质工具模块", PROP_TCTX), 23 | ("Material tools for Blender.", "Blender的材质工具"), 24 | ("ModifierTools", "修改器工具模块", PROP_TCTX), 25 | ("Modifier tools for blender.", "Blender的修改器工具"), 26 | ("Use History Message", "启用历史消息(否则, 每次执行命令都会清空历史消息). 注意: 启用后Token消耗会显著增加", PROP_TCTX), 27 | ("API Settings", "API设置", PANEL_TCTX), 28 | ("Genesis Core", "创世核心Alpha", PANEL_TCTX), 29 | ("Genesis Core", "创世核心"), 30 | ("Processing...", "处理中..."), 31 | ("Run", "执行", OPS_TCTX), 32 | ("Run the command", "执行命令"), 33 | ("Skip Current Command", "跳过当前命令", OPS_TCTX), 34 | ("Skip the current command", "跳过当前轮次的命令循环, 如果AI正在循环执行, 则会停止循环"), 35 | ("Mark Clean Message", "清理历史消息", OPS_TCTX), 36 | ("Mark the current message as clean", "将当前消息标记为清理, 将在下轮任务前自动清理"), 37 | ("Save Config", "保存配置", OPS_TCTX), 38 | ("Refresh Models", "刷新模型列表", OPS_TCTX), 39 | ("Open Log Window", "打开日志窗口", OPS_TCTX), 40 | ("Open a big text editor window to show the log", "打开文本编辑器窗口来显示日志"), 41 | ) 42 | -------------------------------------------------------------------------------- /src/server/tools/common.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | 3 | 4 | class ToolsPackageBase: 5 | "Base class for all tools" 6 | 7 | __tools__: dict[str, "ToolsPackageBase"] = {} 8 | __exclude_tool_names__: set[str] = {"draw_pref_props", "register", "unregister"} 9 | __pref_props__: dict = {} 10 | 11 | @classmethod 12 | def get_all_tool_packages(cls) -> list["ToolsPackageBase"]: 13 | return cls.__subclasses__() 14 | 15 | @classmethod 16 | def get_all_tool_packages_names(cls) -> list[str]: 17 | return [t.__name__ for t in cls.get_all_tool_packages()] 18 | 19 | @classmethod 20 | def get_package(cls, name: str) -> "ToolsPackageBase": 21 | if name not in cls.__tools__: 22 | for t in cls.get_all_tool_packages(): 23 | cls.__tools__[t.__name__] = t 24 | return cls.__tools__.get(name) 25 | 26 | @classmethod 27 | def get_enum_items(cls): 28 | return [(t.__name__, t.__name__, t.__doc__.strip() or "", 1 << i) for i, t in enumerate(cls.get_all_tool_packages())] 29 | 30 | @classmethod 31 | def get_tool_pref_props(cls): 32 | props = {} 33 | for t in cls.get_all_tool_packages(): 34 | props.update(t.__pref_props__) 35 | return props 36 | 37 | @classmethod 38 | def draw_pref_props(cls, pref, layout: bpy.types.UILayout): 39 | pass 40 | 41 | @classmethod 42 | def get_pref(cls): 43 | from ...preference import get_pref 44 | return get_pref() 45 | 46 | @classmethod 47 | def get_all_tools(cls): 48 | tools = [] 49 | for pname in cls.__dict__: 50 | if pname.startswith("__"): 51 | continue 52 | if pname in cls.__exclude_tool_names__: 53 | continue 54 | p = getattr(cls, pname) 55 | if not callable(p): 56 | continue 57 | # 只添加函数 58 | tools.append(p) 59 | return tools 60 | 61 | @classmethod 62 | def register(cls): 63 | for t in cls.get_all_tool_packages(): 64 | try: 65 | t.register() 66 | except Exception as e: 67 | print(e) 68 | 69 | @classmethod 70 | def unregister(cls): 71 | for t in cls.get_all_tool_packages(): 72 | try: 73 | t.unregister() 74 | except Exception as e: 75 | print(e) -------------------------------------------------------------------------------- 
/src/server/tools/common_tools.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | import traceback 3 | from .common import ToolsPackageBase 4 | 5 | 6 | class CommonTools(ToolsPackageBase): 7 | """ 8 | Common tools for Blender. 9 | """ 10 | 11 | def get_simple_info() -> dict: 12 | """Get basic Blender information""" 13 | return {"blender_version": bpy.app.version, "scene_name": bpy.context.scene.name, "object_count": len(bpy.context.scene.objects)} 14 | 15 | def get_scene_info() -> dict: 16 | """Get information about the current Blender scene""" 17 | try: 18 | print("Getting scene info...") 19 | # Simplify the scene info to reduce data size 20 | scene_info = { 21 | "name": bpy.context.scene.name, 22 | "object_count": len(bpy.context.scene.objects), 23 | "objects": [], 24 | "materials_count": len(bpy.data.materials), 25 | } 26 | 27 | # Collect minimal object information (limit to first 10 objects) 28 | for obj in bpy.context.scene.objects: 29 | obj_info = { 30 | "name": obj.name, 31 | "type": obj.type, 32 | "location": tuple(obj.location), 33 | } 34 | scene_info["objects"].append(obj_info) 35 | 36 | print(f"Scene info collected: {len(scene_info['objects'])} objects") 37 | return scene_info 38 | except Exception as e: 39 | print(f"Error in get_scene_info: {str(e)}") 40 | traceback.print_exc() 41 | return {"error": str(e)} 42 | 43 | def get_active_object_name() -> dict: 44 | """ 45 | Get the name of active object in the Blender scene. 46 | """ 47 | obj = bpy.context.view_layer.objects.active 48 | if not obj: 49 | raise ValueError("No active object found") 50 | return {"name": obj.name} 51 | 52 | def get_selected_objects_names() -> dict: 53 | """ 54 | Get the names of selected objects in the Blender scene. 55 | """ 56 | return {"names": [obj.name for obj in bpy.context.selected_objects]} 57 | 58 | def execute_blender_code(code: str) -> dict: 59 | """ 60 | Execute arbitrary Blender's Python Api code in Blender. 61 | 62 | Args: 63 | - code: The Python code to execute 64 | """ 65 | # This is powerful but potentially dangerous - use with caution 66 | try: 67 | # Create a local namespace for execution 68 | namespace = {"bpy": bpy} 69 | exec(code, namespace) 70 | return {"executed": True} 71 | except Exception as e: 72 | raise Exception(f"Code execution error: {str(e)}") 73 | -------------------------------------------------------------------------------- /src/timer.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | import traceback 3 | from queue import Queue 4 | from typing import Any 5 | 6 | 7 | class Timer: 8 | TimerQueue = Queue() 9 | 10 | @classmethod 11 | def put(cls, delegate: Any): 12 | cls.TimerQueue.put(delegate) 13 | 14 | @classmethod 15 | def executor(cls, t): 16 | if type(t) in {list, tuple}: 17 | t[0](*t[1:]) 18 | else: 19 | t() 20 | 21 | @classmethod 22 | def run(cls): 23 | return cls.run_ex(cls.TimerQueue) 24 | 25 | @classmethod 26 | def run_ex(cls, queue: Queue): 27 | while not queue.empty(): 28 | t = queue.get() 29 | try: 30 | cls.executor(t) 31 | except Exception: 32 | traceback.print_exc() 33 | except KeyboardInterrupt: 34 | ... 
35 | return 0.016666666666666666 36 | 37 | @classmethod 38 | def clear(cls): 39 | while not cls.TimerQueue.empty(): 40 | cls.TimerQueue.get() 41 | 42 | @classmethod 43 | def wait_run(cls, func): 44 | def wrap(*args, **kwargs): 45 | q = Queue() 46 | 47 | def wrap_job(q): 48 | try: 49 | res = func(*args, **kwargs) 50 | q.put(res) 51 | except Exception as e: 52 | q.put(e) 53 | 54 | cls.put((wrap_job, q)) 55 | res = q.get() 56 | if isinstance(res, Exception): 57 | raise res 58 | return res 59 | 60 | return wrap 61 | 62 | @classmethod 63 | def wait_run_with_context(cls, func): 64 | def wrap(*args, **kwargs): 65 | q = Queue() 66 | 67 | def wrap_job(q): 68 | try: 69 | override = bpy.context.copy() 70 | override["area"] = [area for area in bpy.context.screen.areas if area.type == "VIEW_3D"][0] 71 | with bpy.context.temp_override(**override): 72 | res = func(*args, **kwargs) 73 | q.put(res) 74 | except Exception as e: 75 | q.put(e) 76 | 77 | cls.put((wrap_job, q)) 78 | res = q.get() 79 | if isinstance(res, Exception): 80 | raise res 81 | return res 82 | 83 | return wrap 84 | 85 | @classmethod 86 | def reg(cls): 87 | bpy.app.timers.register(cls.run, persistent=True) 88 | 89 | @classmethod 90 | def unreg(cls): 91 | cls.clear() 92 | try: 93 | bpy.app.timers.unregister(cls.run) 94 | except Exception: 95 | ... 96 | 97 | 98 | def register(): 99 | Timer.reg() 100 | 101 | 102 | def unregister(): 103 | Timer.unreg() 104 | -------------------------------------------------------------------------------- /README_CN.md: -------------------------------------------------------------------------------- 1 | # 创世核心 2 | 3 | ## 简介 4 | 5 | 基于大语言模型(比如Deepseek,Claude)驱动Blender自动化制作的插件. 6 | 使用MCP协议标准化接口, 支持多种大模型提供商, 如DeepSeek, OpenAI, Anthropic, OpenRouter, 硅基流动等. 7 | 8 | ## 手册 / Manuals 9 | 10 | * [中文](./README_CN.md) 11 | * [English](./README.md) 12 | 13 | ## 特性 14 | 15 | * 内置MCP Client实现, 无须借助外部MCP Host 16 | * 支持多个大模型提供商, 如DeepSeek, OpenAI, Anthropic, OpenRouter, 硅基流动等 17 | * 接入Polyhaven在线资产系统(模型、HDRI) 18 | * 支持本地模型库 19 | * 支持历史消息记录控制 20 | * 一键切换模型提供商, 配置自动加载 21 | * 支持sse外部MCP Host连接 22 | * Tools工具模块化, 可扩展 23 | * Client对接模块化, 可扩展 24 | 25 | ## 安装 26 | 27 | ### Blender 28 | 29 | > 下载并安装 [Blender](https://www.blender.org/download/)(建议版本 4.0+). 30 | 31 | ### WINDOWS 32 | 33 | * 方式1: 使用zip压缩包 34 | 1. 下载压缩包: 35 | 2. Blender -> 偏好设置 -> 插件: 从zip安装 36 | 3. 或者直接将压缩包拖拽到blender窗口中, 并根据提示完成安装 37 | 38 | * 方式2: 手动安装(确保你已经安装了git) 39 | ```shell 40 | cd %USERPROFILE%\AppData\Roaming\Blender Foundation\blender\%blender_version%\scripts\addons 41 | git clone https://github.com/AIGODLIKE/GenesisCore.git 42 | ``` 43 | * 然后你可以在Blender的插件菜单中看到插件,在节点分类下搜索`GenesisCore`并启用 44 | 45 | ### Linux 46 | 47 | > 如果你是Linux用户,假设你有一些经验: 48 | 49 | ```bash 50 | cd /home/**YOU**/.config/blender/**BLENDER.VERSION**/scripts/addons 51 | git clone https://github.com/AIGODLIKE/GenesisCore.git 52 | ``` 53 | 54 | * 然后你可以在Blender的插件菜单中看到插件,在节点分类下搜索`GenesisCore`并启用 55 | 56 | ## 使用 57 | 58 | ### 基本使用 59 | 60 | 1. 在3DVeiewport中打开UI面板(N面板), 进入创世核心面板 61 | 2. 选择大模型提供商(如DeepSeek, OpenAI, Anthropic等) 62 | 3. 获取对应的API Key 63 | 4. 在插件设置中输入API Key 64 | 5. 获取支持的模型列表 65 | 6. 选择模型 66 | 7. 选择使用的工具模块(若无自定义资产建议关闭`资产工具模块`) 67 | 1. 按住 shift 点击, 可以选择多个工具模块 68 | 8. 输入命令 69 | 9. 运行命令 70 | 71 | ### 高级 72 | 73 | 1. 历史消息 74 | 1. 当开启历史记录功能时会消耗更多tokens, 但AI能够结合上次的对话做出响应 75 | 2. 当关闭历史记录功能时, token消耗更少, 但每次对话将是独立的(AI会忘记自己说过什么及做过什么) 76 | 3. 使用`清理历史消息`可以在下一次对话时清空历史消息 77 | 2. 配置存储 78 | 1. 每次刷新模型列表时会默认保存配置一次 79 | 2. 当调整模型时, 可以点击`保存配置`按钮保存当前配置 80 | 3. 
切换大模型服务时, 不同的大模型服务配置独立存储(无须切换模型时重新配置api及选择模型) 81 | 3. polyhaven 82 | 1. 需要开启polyhaven资产模块 83 | 2. AI会智能分析任务并决定是否使用polyhaven 84 | 3. polyhaven下载的资产会缓存到`临时目录/polyhaven_{资产类型}` 文件夹中 85 | 1. 临时目录对于于windows用户是`C:\Users\{用户名}\AppData\Local\Temp` 86 | 2. 临时目录对于于linux用户是`/tmp` 87 | 3. 资产类型包括`models`, `hdris` 88 | 4. 已缓存的资产会自动加载, 不会重复下载 89 | 4. 外部MCP Host连接, 使用端口45677即可 90 | ```json 91 | { 92 | "mcpServers": { 93 | "BlenderGenesis": { 94 | "url": "http://localhost:45677" 95 | } 96 | } 97 | } 98 | ``` 99 | 5. 编写自定义工具模块 100 | 1. 参考 `src/tools/` 下的 `common_tools` 等模块 101 | 2. 注意, 编写完成后需要在`src/tools/__init__.py` 中导入, 导入顺序会影响UI显示的工具模块的顺序 102 | 6. 编写自定义Client 103 | 1. 参考 `src/client/openai.py` 下的 `MCPClientOpenAI` 模块 104 | 105 | ## 链接 106 | 107 | ### 致谢 108 | 109 | 灵感来自 [BlenderMCP - Blender Model Context Protocol Integration](https://github.com/ahujasid/blender-mcp) 110 | 111 | ### Our AI website 112 | 113 | [AIGODLIKE Community](https://www.aigodlike.com/) 114 | -------------------------------------------------------------------------------- /src/server/tools/material_tools.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | import traceback 3 | from uuid import uuid4 4 | from typing import List 5 | from .common import ToolsPackageBase 6 | from ..utils import ensure_material_by_name, NodeTreeUtil 7 | 8 | 9 | class MaterialTools(ToolsPackageBase): 10 | """ 11 | Material tools for Blender. 12 | """ 13 | 14 | def set_material(object_name: str, material_name: str = None, color: List[float] = None) -> dict: 15 | """ 16 | Set or create a material for an object. 17 | 18 | Args: 19 | - object_name: Name of the object to apply the material to 20 | - material_name: Optional name of the material to use or create 21 | - color: Optional [R, G, B] color values (0.0-1.0) 22 | """ 23 | try: 24 | # Get the object 25 | obj = bpy.data.objects.get(object_name) 26 | if not obj: 27 | raise ValueError(f"Object not found: {object_name}") 28 | 29 | material_name = material_name or f"{object_name}_{uuid4().hex}" 30 | 31 | # Make sure object can accept materials 32 | if not hasattr(obj, "data") or not hasattr(obj.data, "materials"): 33 | raise ValueError(f"Object {object_name} cannot accept materials") 34 | 35 | mat = ensure_material_by_name(obj, material_name) 36 | # Get or create Principled BSDF 37 | principled = mat.node_tree.nodes.get("Principled BSDF") 38 | if not principled: 39 | principled = mat.node_tree.nodes.new("ShaderNodeBsdfPrincipled") 40 | principled.name = "Principled BSDF" 41 | # Get or create Material Output 42 | output = NodeTreeUtil.find_node_by_type(mat.node_tree, "OUTPUT_MATERIAL") 43 | if not output: 44 | output = mat.node_tree.nodes.new("ShaderNodeOutputMaterial") 45 | output.name = "Material Output" 46 | mat.node_tree.links.new(principled.outputs[0], output.inputs[0]) 47 | color = color or (1, 1, 1, 1) 48 | color = color if len(color) == 4 else (*color, 1.0) 49 | # Set color if provided 50 | if color and len(color) >= 3: 51 | principled.inputs["Base Color"].default_value = color 52 | print(f"Set material color to {color}") 53 | 54 | # Assign material to object if not already assigned 55 | if not obj.data.materials: 56 | obj.data.materials.append(mat) 57 | else: 58 | # Only modify first material slot 59 | obj.data.materials[0] = mat 60 | print(f"Assigned material {mat.name} to object {object_name}") 61 | return {"status": "success", "object": object_name, "material": mat.name, "color": color if color else None} 62 | except Exception as e: 63 | 
print(f"Error in set_material: {str(e)}") 64 | traceback.print_exc() 65 | return {"status": "error", "message": str(e), "object": object_name, "material": material_name if "material_name" in locals() else None} 66 | -------------------------------------------------------------------------------- /src/__init__.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | 3 | reg_modules = [ 4 | "preference", 5 | "i18n", 6 | "server", 7 | "timer", 8 | "client", 9 | "operator", 10 | "props", 11 | "ui", 12 | "utils", 13 | "watcher", 14 | ] 15 | 16 | reg, unreg = bpy.utils.register_submodule_factory(__package__, reg_modules) 17 | 18 | 19 | class PkgInstaller: 20 | source = [ 21 | "https://mirrors.aliyun.com/pypi/simple/", 22 | "https://pypi.tuna.tsinghua.edu.cn/simple/", 23 | "https://pypi.mirrors.ustc.edu.cn/simple/", 24 | "https://pypi.python.org/simple/", 25 | "https://pypi.org/simple", 26 | ] 27 | fast_url = "" 28 | 29 | @staticmethod 30 | def select_pip_source(): 31 | if not PkgInstaller.fast_url: 32 | import requests 33 | 34 | t, PkgInstaller.fast_url = 999, PkgInstaller.source[0] 35 | for url in PkgInstaller.source: 36 | try: 37 | tping = requests.get(url, timeout=1).elapsed.total_seconds() 38 | except Exception: 39 | continue 40 | if tping < 0.1: 41 | PkgInstaller.fast_url = url 42 | break 43 | if tping < t: 44 | t, PkgInstaller.fast_url = tping, url 45 | return PkgInstaller.fast_url 46 | 47 | @staticmethod 48 | def is_installed(package): 49 | import importlib 50 | 51 | try: 52 | return importlib.import_module(package) 53 | except ModuleNotFoundError: 54 | return False 55 | 56 | @staticmethod 57 | def prepare_pip(): 58 | import ensurepip 59 | 60 | if PkgInstaller.is_installed("pip"): 61 | return True 62 | try: 63 | ensurepip.bootstrap() 64 | return True 65 | except BaseException: 66 | ... 
67 | return False 68 | 69 | @staticmethod 70 | def try_install(*packages): 71 | if not PkgInstaller.prepare_pip(): 72 | return False 73 | need = [pkg for pkg in packages if not PkgInstaller.is_installed(pkg)] 74 | from pip._internal import main 75 | 76 | if need: 77 | url = PkgInstaller.select_pip_source() 78 | for pkg in need: 79 | try: 80 | from urllib.parse import urlparse 81 | 82 | site = urlparse(url) 83 | # 避免build 84 | command = ["install", pkg, "-i", url, "--prefer-binary"] 85 | command.append("--trusted-host") 86 | command.append(site.netloc) 87 | main(command) 88 | if not PkgInstaller.is_installed(pkg): 89 | return False 90 | except Exception: 91 | return False 92 | return True 93 | 94 | 95 | # Registration functions 96 | def register(): 97 | PkgInstaller.try_install( 98 | "mcp", 99 | "websockets", 100 | ) 101 | reg() 102 | 103 | 104 | def unregister(): 105 | unreg() 106 | -------------------------------------------------------------------------------- /src/server/utils.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | import json 3 | 4 | 5 | def rounding_dumps(obj, *args, precision=2, **kwargs): 6 | d1 = json.dumps(obj, *args, **kwargs) 7 | l1 = json.loads(d1, parse_float=lambda x: round(float(x), precision)) 8 | return json.dumps(l1, *args, **kwargs) 9 | 10 | 11 | def ensure_material_by_name(obj: bpy.types.Object, mat_name: str): 12 | """ 13 | 保证对象有指定的材质,如果没有则创建它。 14 | Args: 15 | - obj: Blender对象 16 | - mat_name: 材质名称 17 | """ 18 | if mat_name not in bpy.data.materials: 19 | mat = bpy.data.materials.new(name=mat_name) 20 | mat = bpy.data.materials.get(mat_name) 21 | mat.use_nodes = True 22 | if mat.name not in obj.data.materials: 23 | obj.data.materials.append(mat) 24 | return mat 25 | 26 | 27 | class NodeTreeUtil: 28 | @classmethod 29 | def find_node(cls, nt: bpy.types.NodeTree, cb, filter_cb=None): 30 | if not nt: 31 | return None 32 | for node in filter(filter_cb, nt.nodes): 33 | if cb(node): 34 | return node 35 | if node.type == "GROUP": 36 | if node := cls.find_node(node.node_tree, cb, filter_cb): 37 | return node 38 | return None 39 | 40 | @classmethod 41 | def find_nodes(cls, nt: bpy.types.NodeTree, cb, filter_cb=None): 42 | nodes = [] 43 | if not nt: 44 | return nodes 45 | fnodes = filter(filter_cb, nt.nodes) 46 | for node in fnodes: 47 | if cb(node): 48 | nodes.append(node) 49 | if node.type == "GROUP": 50 | nodes.extend(cls.find_nodes(node.node_tree, cb, filter_cb)) 51 | return nodes 52 | 53 | @classmethod 54 | def find_node_by_name(cls, nt: bpy.types.NodeTree, nname, filter_cb=None): 55 | return cls.find_node(nt, lambda n: n.name == nname, filter_cb) 56 | 57 | @classmethod 58 | def find_nodes_by_name(cls, nt: bpy.types.NodeTree, nname, filter_cb=None): 59 | return cls.find_nodes(nt, lambda n: n.name == nname, filter_cb) 60 | 61 | @classmethod 62 | def find_node_by_label(cls, nt: bpy.types.NodeTree, label, filter_cb=None): 63 | return cls.find_node(nt, lambda n: n.label == label, filter_cb) 64 | 65 | @classmethod 66 | def find_nodes_by_label(cls, nt: bpy.types.NodeTree, label, filter_cb=None): 67 | return cls.find_nodes(nt, lambda n: n.label == label, filter_cb) 68 | 69 | @classmethod 70 | def find_node_by_type(cls, nt: bpy.types.NodeTree, ntype, filter_cb=None): 71 | return cls.find_node(nt, lambda n: n.type == ntype, filter_cb) 72 | 73 | @classmethod 74 | def find_nodes_by_type(cls, nt: bpy.types.NodeTree, ntype, filter_cb=None): 75 | return cls.find_nodes(nt, lambda n: n.type == ntype, filter_cb) 76 | 77 | 
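    # Illustrative usage sketch (this helper is not part of the original class):
    # material_tools.py calls find_node_by_type(mat.node_tree, "OUTPUT_MATERIAL");
    # the same pattern locates a Principled BSDF even when it sits inside a node
    # group, because find_node() descends into GROUP nodes recursively.
    @classmethod
    def find_principled_bsdf_example(cls, nt: bpy.types.NodeTree):
        return cls.find_node_by_type(nt, "BSDF_PRINCIPLED")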
@classmethod 78 | def find_node_by_idname(cls, nt: bpy.types.NodeTree, idname, filter_cb=None): 79 | return cls.find_node(nt, lambda n: n.bl_idname == idname, filter_cb) 80 | 81 | @classmethod 82 | def find_nodes_by_idname(cls, nt: bpy.types.NodeTree, idname, filter_cb=None): 83 | return cls.find_nodes(nt, lambda n: n.bl_idname == idname, filter_cb) 84 | -------------------------------------------------------------------------------- /src/server/tools/asset_tools.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | try: 4 | from .common import ToolsPackageBase 5 | except ImportError: 6 | ToolsPackageBase = object 7 | 8 | 9 | class AssetHelper: 10 | assets_dir = Path(__file__).parent.parent.parent.parent / "assets" 11 | 12 | @classmethod 13 | def list_local_model_assets(cls) -> dict[str]: 14 | assets = {} 15 | assets_dir = cls.assets_dir / "models" 16 | for asset_cat in assets_dir.iterdir(): 17 | if not asset_cat.is_dir(): 18 | continue 19 | for asset in asset_cat.glob("*.blend"): 20 | assets.setdefault(asset_cat.stem, []).append(asset.stem) 21 | return assets 22 | 23 | @classmethod 24 | def load_model_into(cls, asset_path: str) -> dict: 25 | import bpy 26 | from mathutils import Vector 27 | 28 | old_objects = set(bpy.data.objects) 29 | with bpy.data.libraries.load(asset_path) as (data_from, data_to): 30 | data_to.objects = data_from.objects 31 | new_objects = set(bpy.data.objects) - old_objects 32 | loaded_object_info = {} 33 | for o in new_objects: 34 | bpy.context.scene.collection.objects.link(o) 35 | loaded_object_info[o.name] = { 36 | "translation": tuple(o.location), 37 | "bound_box": [tuple(o.matrix_world @ Vector(v)) for v in o.bound_box], 38 | } 39 | return { 40 | "asset_name": asset_path, 41 | "load_status": "success", 42 | "loaded_objects": [o.name for o in new_objects], 43 | "loaded_object_info": loaded_object_info, 44 | } 45 | 46 | 47 | class AssetTools(ToolsPackageBase): 48 | """ 49 | Custom Asset Tools. 50 | """ 51 | 52 | def list_local_model_assets() -> dict[str, list[str]]: 53 | """ 54 | List local model assets, descripted by their category and name. 55 | If user wants to create scene or object, you should use custom assets first if possible. 56 | 57 | Returns: dict[asset_cat, list[asset_name]] like this 58 | { 59 | "Building": ["School", "House"], 60 | "Plant": ["Tree", "Bush"], 61 | } 62 | """ 63 | assets = AssetHelper.list_local_model_assets() 64 | return assets 65 | 66 | # def get_model_info(asset_name: str) -> dict[str]: 67 | # """ 68 | # Get model Info, such as bound box, file path 69 | 70 | # Args: 71 | # - asset_name: Asset Name to Query 72 | # """ 73 | # object_info = AssetHelper.load_model_into(asset_name) 74 | 75 | # return object_info.get("loaded_object_info") 76 | 77 | def load_model(asset_cat: str, asset_name: str) -> dict: 78 | """ 79 | Load Asset to Blender. 80 | 81 | Args: 82 | - asset_cat: Asset Category. 
83 | - asset_name: Asset Name to Load 84 | """ 85 | asset_path = AssetHelper.assets_dir / "models" / asset_cat / f"{asset_name}.blend" 86 | if not asset_path.exists(): 87 | raise FileNotFoundError(f"Asset {asset_name} not found in {asset_cat}") 88 | return AssetHelper.load_model_into(asset_path.as_posix()) 89 | 90 | 91 | if __name__ == "__main__": 92 | assets = AssetTools.list_local_model_assets() 93 | print(assets) 94 | -------------------------------------------------------------------------------- /src/server/server.py: -------------------------------------------------------------------------------- 1 | import re 2 | import asyncio 3 | 4 | from functools import update_wrapper 5 | from typing import Callable 6 | from threading import Thread 7 | 8 | from mcp.server.fastmcp import FastMCP 9 | from .executor import BlenderExecutor 10 | from ..logger import getLogger 11 | 12 | logger = getLogger("BlenderMCPServer") 13 | 14 | 15 | class MakeTool: 16 | def __init__(self, func): 17 | self.executor = BlenderExecutor.get() 18 | update_wrapper(self, func) 19 | self.func = func 20 | 21 | def __call__(self, *args, **kwargs): 22 | return self.executor.send_function_call(self.func, kwargs) 23 | 24 | 25 | class BlenderMCPServer(FastMCP): 26 | def __init__(self, *args, **settings): 27 | super().__init__(*args, **settings) 28 | self.make_tool = MakeTool 29 | 30 | def add_tool(self, *arg, **kwargs): 31 | res = super().add_tool(*arg, **kwargs) 32 | # self.list_tools 是异步方法 33 | tool = asyncio.run(self.list_tools())[-1] 34 | try: 35 | properties = tool.inputSchema["properties"] 36 | description = tool.description 37 | for name, info in properties.items(): 38 | # - name: description.....\n 39 | find_description = re.search(f"- {name}: (.*)\n", description) 40 | if not find_description: 41 | continue 42 | info["description"] = find_description.group(1) 43 | logger.debug(f"添加描述 - {name}: {info['description']}") 44 | # 从description中获取属性描述 45 | except Exception as e: 46 | logger.warning(f"Build property description failed: {e}") 47 | return res 48 | 49 | 50 | class Server: 51 | host: str = "localhost" 52 | port: int = 45677 53 | server: "BlenderMCPServer" = None 54 | tools: dict[Callable, None] = {} 55 | make_tool = MakeTool 56 | tool_wraper: None 57 | 58 | @classmethod 59 | def init(cls): 60 | cls.server = BlenderMCPServer(name="BlenderMCPServer", host=cls.host, port=cls.port) 61 | cls.tool_wraper = cls.server.tool() 62 | 63 | @classmethod 64 | def register_tool(cls, tool: Callable) -> None: 65 | if tool in cls.tools: 66 | return 67 | t = cls.make_tool(tool) 68 | cls.tools[tool] = t 69 | cls.tool_wraper(t) 70 | 71 | @classmethod 72 | def register_tools(cls, tools: list[Callable]) -> None: 73 | for tool in tools: 74 | cls.register_tool(tool) 75 | 76 | @classmethod 77 | def unregister_tool(cls, tool: Callable) -> None: 78 | if tool not in cls.tools: 79 | return 80 | try: 81 | t = cls.tools.pop(tool, None) 82 | cls.server._tool_manager._tools.pop(t.__name__) 83 | except Exception as e: 84 | logger.warning(f"Unregister tool failed: {e}") 85 | 86 | @classmethod 87 | def unregister_tools(cls, tools: list[Callable]) -> None: 88 | for tool in tools: 89 | cls.unregister_tool(tool) 90 | 91 | @classmethod 92 | def main(cls): 93 | if not cls.server: 94 | cls.init() 95 | logger.info("创世核心正在运转...") 96 | cls.server.run(transport="sse") 97 | 98 | @classmethod 99 | def run(cls): 100 | """Run the MCP server""" 101 | job = Thread(target=cls.main, daemon=True) 102 | job.start() 103 | 104 | 105 | def register(): 106 | Server.init() 
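    # Server.run() (defined above) starts FastMCP's SSE transport on a daemon
    # thread, so Blender's UI thread is never blocked; external MCP hosts connect
    # to http://localhost:45677 as shown in the README.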
107 | Server.run() 108 | 109 | 110 | def unregister(): 111 | pass 112 | -------------------------------------------------------------------------------- /src/ui.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | from .operator import RunCommand, SkipCurrentCommand, MarkCleanMessage, OpenLogWindow 3 | from .i18n.translations.zh_HANS import PANEL_TCTX, OPS_TCTX, PROP_TCTX 4 | from .preference import get_pref 5 | from .icon import Icon 6 | 7 | 8 | class MCP_PT_Client(bpy.types.Panel): 9 | bl_label = "Genesis Core" 10 | bl_idname = "MCP_PT_Client" 11 | bl_space_type = "VIEW_3D" 12 | bl_region_type = "UI" 13 | bl_category = "Genesis Core" 14 | bl_translation_context = PANEL_TCTX 15 | record_count = 0 16 | direction = 1 17 | record_width = 16 18 | 19 | @classmethod 20 | def update_record_count(cls): 21 | cls.record_count += cls.direction 22 | if cls.record_count >= (cls.record_width - 1) or cls.record_count <= 0: 23 | cls.direction *= -1 24 | 25 | def draw(self, context): 26 | try: 27 | layout = self.layout 28 | pref = get_pref() 29 | mcp_props = bpy.context.scene.mcp_props 30 | box = layout.box() 31 | box.column().prop(pref, "tools", expand=True) 32 | row = box.row(align=True) 33 | row.prop(mcp_props, "command") 34 | row.operator(OpenLogWindow.bl_idname, text="", icon="LINENUMBERS_ON", text_ctxt=OPS_TCTX) 35 | self.show_image_box(box) 36 | col = box.column() 37 | # 如果正在执行命令,则禁用命令输入框 38 | client = pref.get_client_by_name(pref.provider) 39 | processing = False 40 | if client and (c := client.get()): 41 | processing = c.command_processing 42 | if processing: 43 | col.label(text="Processing...") 44 | row = col.row(align=True) 45 | self.update_record_count() 46 | # 绘制图标行 47 | for i in range(self.record_width): 48 | if self.record_count - 1 <= i <= self.record_count + 1: 49 | row.label(text="", icon="RADIOBUT_ON") 50 | else: 51 | row.label(text="", icon="RADIOBUT_OFF") 52 | col = box.column() 53 | col.scale_y = 2 54 | col.scale_x = 1.5 55 | col.enabled = bool(bpy.context.scene.mcp_props.command) 56 | row = col.row(align=True) 57 | row.operator(RunCommand.bl_idname, text_ctxt=OPS_TCTX) 58 | row.operator(SkipCurrentCommand.bl_idname, icon="PAUSE", text="", text_ctxt=OPS_TCTX) 59 | row.prop(pref, "use_history_message", text="", icon="WORDWRAP_ON", text_ctxt=PROP_TCTX) 60 | row.operator(MarkCleanMessage.bl_idname, icon="TRASH", text="", text_ctxt=OPS_TCTX) 61 | 62 | box = layout.box() 63 | pref.draw_ex(box) 64 | except Exception as e: 65 | print(e) 66 | 67 | def show_image_box(self, layout: bpy.types.UILayout): 68 | mcp_props = bpy.context.scene.mcp_props 69 | row = layout.row(align=True) 70 | row.template_ID(mcp_props, "image", new="image.new", open="image.open") 71 | row.prop(mcp_props, "use_viewport_image", text="", icon="RESTRICT_VIEW_OFF", text_ctxt=PROP_TCTX) 72 | if not mcp_props.image: 73 | return 74 | box = layout.box() 75 | prev: bpy.types.Image = mcp_props.image 76 | # 显示高清大图 77 | if not prev: 78 | return 79 | if prev.name not in Icon: 80 | Icon.reg_icon_by_pixel(prev, prev.name) 81 | icon_id = Icon[prev.name] 82 | box.template_icon(icon_value=icon_id, scale=12) 83 | 84 | 85 | clss = [ 86 | MCP_PT_Client, 87 | ] 88 | 89 | register, unregister = bpy.utils.register_classes_factory(clss) 90 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # GenesisCore - Blender AI Automation Addon 2 | 3 | ## Introduction 4 | 5 | A 
Blender automation addon driven by large language models (e.g. Deepseek, Claude).\ 6 | Using MCP protocol standardized interface, supports multiple LLM providers including DeepSeek, OpenAI, Anthropic, OpenRouter, SilicorFlow, etc. 7 | 8 | ## Manuals / 手册 9 | 10 | * [中文](./README_CN.md) 11 | * [English](./README.md) 12 | 13 | ## Features 14 | 15 | * Built-in MCP Client implementation (No external MCP Host required) 16 | * Supports multiple LLM providers: DeepSeek, OpenAI, Anthropic, OpenRouter, SilicorFlow, etc. 17 | * Integrated Polyhaven online asset system (Models/HDRI) 18 | * Supports local model libraries 19 | * Conversational history control 20 | * One-click provider switching with auto-loaded configurations 21 | * SSE external MCP Host connection support 22 | * Modular Tools system (Extendable) 23 | * Modular Client integration (Extendable) 24 | 25 | ## Installation 26 | 27 | ### Blender 28 | 29 | > Download and install [Blender](https://www.blender.org/download/) (Recommended version 4.0+) 30 | 31 | ### Windows 32 | 33 | * Method 1: Using ZIP package 34 | 1. Download package: 35 | 2. Blender -> Preferences -> Add-ons: Install from ZIP 36 | 3. Or drag ZIP file directly into Blender window and follow prompts 37 | 38 | * Method 2: Manual install (Requires Git) 39 | ```shell 40 | cd %USERPROFILE%\AppData\Roaming\Blender Foundation\blender\%blender_version%\scripts\addons 41 | git clone https://github.com/AIGODLIKE/GenesisCore.git 42 | ``` 43 | * Enable addon via Blender Preferences -> Add-ons -> Search "GenesisCore" 44 | 45 | ### Linux 46 | 47 | > For Linux users (Assumes basic proficiency): 48 | 49 | ```bash 50 | cd /home/**USER**/.config/blender/**BLENDER_VERSION**/scripts/addons 51 | git clone https://github.com/AIGODLIKE/GenesisCore.git 52 | ``` 53 | 54 | * Enable addon via Blender Preferences -> Add-ons -> Search "GenesisCore" 55 | 56 | ## Usage 57 | 58 | ### Basic Usage 59 | 60 | 1. Open UI panel in 3DViewport (N-Panel) -> GenesisCore panel 61 | 2. Select LLM provider (DeepSeek/OpenAI/Anthropic etc.) 62 | 3. Obtain corresponding API Key 63 | 4. Enter API Key in addon settings 64 | 5. Fetch supported model list 65 | 6. Select model 66 | 7. Choose tool modules (Disable "Asset Tools" if no custom assets needed) 67 | 1. Hold shift to select multiple modules 68 | 8. Enter command 69 | 9. Execute command 70 | 71 | ### Advanced 72 | 73 | 1. Conversation History 74 | * Enabled: Consumes more tokens but maintains context 75 | * Disabled: Lower token usage, each command is isolated 76 | * Use "Clear History" to reset conversation context 77 | 78 | 2. Configuration Management 79 | * Config auto-saves when refreshing model list 80 | * Click "Save Config" to manually save current settings 81 | * Each provider maintains independent configurations 82 | 83 | 3. Polyhaven Integration 84 | * Requires enabling "Asset Tools" module 85 | * AI intelligently decides when to use Polyhaven assets 86 | * Downloaded assets cache to: 87 | * Windows: `C:\Users\{USER}\AppData\Local\Temp\polyhaven_{asset_type}` 88 | * Linux: `/tmp/polyhaven_{asset_type}` (I guess, caz I'm not a Linux user) 89 | * Asset types: `models`, `hdris` 90 | * Cached assets auto-load without re-downloading 91 | 92 | 4. External MCP Host Connection (Port 45677) 93 | ```json 94 | { 95 | "mcpServers": { 96 | "BlenderGenesis": { 97 | "url": "http://localhost:45677" 98 | } 99 | } 100 | } 101 | ``` 102 | 103 | 5. 
Custom Tool Development 104 | * Reference existing modules in `src/tools/` 105 | * Note: Import new modules in `src/tools/__init__.py` (Order affects UI display) 106 | 107 | 6. Custom Client Development 108 | * Reference `src/client/openai.py` (MCPClientOpenAI implementation) 109 | 110 | ## Links 111 | 112 | ### Acknowledgements 113 | 114 | Inspired by [BlenderMCP - Blender Model Context Protocol Integration](https://github.com/ahujasid/blender-mcp) 115 | 116 | ### Our AI Platform 117 | 118 | [AIGODLIKE Community](https://www.aigodlike.com/) 119 | -------------------------------------------------------------------------------- /src/logger.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from logging import handlers 3 | from pathlib import Path 4 | from .utils import BTextHandler 5 | DEBUG = True 6 | LOGFILE = Path(__file__).parent.joinpath("logs", "runtime.log") 7 | NAME = "创世核心" 8 | 9 | L = logging.WARNING 10 | if DEBUG: 11 | L = logging.DEBUG 12 | 13 | FMTDICT = { 14 | 'DEBUG': ["[36m", "DBG"], 15 | 'INFO': ["[37m", "INF"], 16 | 'WARN': ["[33m", "WRN"], 17 | 'WARNING': ["[33m", "WRN"], 18 | 'ERROR': ["[31m", "ERR"], 19 | 'CRITICAL': ["[35m", "CRT"], 20 | } 21 | 22 | 23 | class KcHandler(logging.StreamHandler): 24 | with_same_line = False 25 | 26 | def emit(self, record): 27 | try: 28 | msg = self.format(record) 29 | stream = self.stream 30 | 31 | is_same_line = getattr(record, "same_line", False) 32 | was_same_line = self.with_same_line 33 | self.with_same_line = is_same_line 34 | # 上次不是 这次是 则打印到新行, 但下次打印到同一行(除非再次设置为False) 35 | 36 | if was_same_line and not is_same_line: 37 | # 上次是 sameline 但这次不是 则补换行 38 | stream.write(self.terminator) 39 | 40 | end = "" if is_same_line else self.terminator 41 | stream.write(msg + end) 42 | self.flush() 43 | except RecursionError: 44 | raise 45 | except Exception: 46 | self.handleError(record) 47 | 48 | 49 | class KcFilter(logging.Filter): 50 | def __init__(self, **kwargs) -> None: 51 | super().__init__(**kwargs) 52 | self.translate_func = lambda _: _ 53 | 54 | def fill_color(self, color_code="[37m", msg=""): 55 | return f'\033{color_code}{msg}\033[0m' 56 | 57 | def filter(self, record: logging.LogRecord) -> bool: 58 | # 颜色map 59 | color_code, level_shortname = FMTDICT.get(record.levelname, ["[37m", "UN"]) 60 | record.msg = self.translate_func(record.msg) 61 | record.msg = self.fill_color(color_code, record.msg) 62 | record.levelname = self.fill_color(color_code, level_shortname) 63 | return True 64 | 65 | 66 | class KcLogger(logging.Logger): 67 | def __init__(self, name, level=logging.NOTSET): 68 | self.closed = False 69 | super().__init__(name, level) 70 | 71 | def set_translate(self, translate_func): 72 | for handler in self.handlers: 73 | for filter in handler.filters: 74 | if not isinstance(filter, KcFilter): 75 | continue 76 | filter.translate_func = translate_func 77 | 78 | def close(self): 79 | if self.closed: 80 | return 81 | self.closed = True 82 | for h in reversed(self.handlers[:]): 83 | try: 84 | try: 85 | h.acquire() 86 | h.flush() 87 | h.close() 88 | except (OSError, ValueError): 89 | pass 90 | finally: 91 | h.release() 92 | except BaseException: 93 | ... 
94 | 95 | def __del__(self): 96 | self.close() 97 | 98 | 99 | def getLogger(name="CLOG", level=logging.INFO, fmt='[%(name)s-%(levelname)s]: %(message)s', fmt_date="%H:%M:%S") -> KcLogger: 100 | fmter = logging.Formatter('[%(levelname)s]:%(filename)s>%(lineno)s: %(message)s') 101 | # 按 D/H/M 天时分 保存日志, backupcount 为保留数量 102 | if not LOGFILE.exists(): 103 | LOGFILE.parent.mkdir(parents=True, exist_ok=True) 104 | LOGFILE.touch() 105 | dfh = handlers.TimedRotatingFileHandler(filename=LOGFILE, when='D', backupCount=2) 106 | dfh.setLevel(logging.DEBUG) 107 | dfh.setFormatter(fmter) 108 | bth = BTextHandler() 109 | # 命令行打印 110 | filter = KcFilter() 111 | fmter = logging.Formatter(fmt, fmt_date) 112 | ch = KcHandler() 113 | ch.setLevel(level) 114 | ch.setFormatter(fmter) 115 | ch.addFilter(filter) 116 | 117 | l = KcLogger(name) 118 | l.setLevel(level) 119 | # 防止卸载模块后重新加载导致 重复打印 120 | if not l.hasHandlers(): 121 | # 注意添加顺序, ch有filter, 如果fh后添加 则会默认带上ch的filter 122 | l.addHandler(dfh) 123 | l.addHandler(bth) 124 | l.addHandler(ch) 125 | return l 126 | 127 | 128 | logger = getLogger(NAME, L) -------------------------------------------------------------------------------- /src/utils.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | import logging 3 | 4 | 5 | class BTextWriter: 6 | _text_name = "GenesisCore.log.py" 7 | _instance = None 8 | 9 | @classmethod 10 | def get(cls): 11 | if cls._instance is None: 12 | cls._instance = cls() 13 | try: 14 | cls._instance.ensure_text() 15 | except AttributeError: 16 | pass 17 | return cls._instance 18 | 19 | def __init__(self): 20 | self.text: bpy.types.Text = None 21 | self.should_flush = False 22 | self.prev_role = "user" 23 | self.prev_index = 0 24 | self.messages = [] 25 | 26 | def ensure_text(self): 27 | if self._text_name not in bpy.data.texts: 28 | self.text = bpy.data.texts.new(self._text_name) 29 | self.text = bpy.data.texts[self._text_name] 30 | 31 | def push(self, message): 32 | self.should_flush = True 33 | self.messages.append(message) 34 | 35 | def flush1(self): 36 | if self.prev_index == len(self.messages): 37 | return 38 | # 先将光标移到末尾 39 | self.text.cursor_set(len(self.text.lines), character=len(self.text.lines[-1].body) * 2) 40 | for message in self.messages[self.prev_index :]: 41 | role = message.get("role", "user") 42 | content = message.get("content", "") 43 | line = "" 44 | # 收到streaming 45 | if role == "streaming": 46 | line = "" if self.prev_role == "streaming" else "\n" # streaming开始时换行 47 | line += content 48 | elif role != "streaming": 49 | line = "" if self.prev_role != "streaming" else "\n" # streaming结束时换行 50 | line += f"{role}:\n{content}\n" 51 | self.prev_role = role 52 | if not content: 53 | continue 54 | self.text.write(line) 55 | self.prev_index = len(self.messages) 56 | 57 | def flush(self): 58 | if self.prev_index == len(self.messages): 59 | return 60 | # 先将光标移到末尾 61 | lines = [] 62 | for message in self.messages[self.prev_index :]: 63 | role = message.get("role", "user") 64 | content = message.get("content", "") 65 | line = "" 66 | # 收到streaming 67 | if role == "streaming": 68 | line = "" if self.prev_role == "streaming" else "\n" # streaming开始时换行 69 | line += content 70 | elif role != "streaming": 71 | line = "" if self.prev_role != "streaming" else "\n" # streaming结束时换行 72 | line += f"{role}:\n{content}\n" 73 | self.prev_role = role 74 | if not content: 75 | continue 76 | lines.append(line) 77 | self.text.from_string("".join(lines)) 78 | self.prev_index = len(self.messages) 79 
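    # Note on the writers above: flush1() appends only the newly pushed messages at
    # the text cursor, while flush() rebuilds the datablock in one go (refresh()
    # below resets prev_index to 0 before calling it). Consecutive "streaming"
    # chunks are joined onto a single line, with a newline only when a streaming
    # run starts or ends, which keeps token-by-token output readable in the editor.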
| 80 | def refresh(self): 81 | # 重新加载文本数据 82 | self.text.clear() 83 | self.prev_role = "user" 84 | self.prev_index = 0 85 | self.flush() 86 | 87 | def clear(self): 88 | self.text.clear() 89 | self.messages.clear() 90 | self.prev_role = "user" 91 | self.prev_index = 0 92 | 93 | 94 | class BTextHandler(logging.StreamHandler): 95 | def emit(self, record): 96 | try: 97 | msg = self.format(record) 98 | stream = BTextWriter.get() 99 | stream.push({"role": logging.getLevelName(record.levelno), "content": msg}) 100 | except RecursionError: 101 | raise 102 | except Exception: 103 | self.handleError(record) 104 | 105 | 106 | def update_screen(): 107 | try: 108 | for area in bpy.context.screen.areas: 109 | if area not in {"VIEW_3D", "TEXT_EDITOR"}: 110 | continue 111 | area.tag_redraw() 112 | except Exception: 113 | ... 114 | 115 | 116 | def update_text(): 117 | try: 118 | text_writer = BTextWriter.get() 119 | if not text_writer.should_flush: 120 | return 121 | text_writer.refresh() 122 | text_writer.should_flush = False 123 | except Exception: 124 | ... 125 | 126 | 127 | def update_timer(): 128 | update_screen() 129 | update_text() 130 | return 1 / 30 131 | 132 | 133 | def register(): 134 | bpy.app.timers.register(update_timer, persistent=True) 135 | 136 | 137 | def unregister(): 138 | bpy.app.timers.unregister(update_timer) 139 | -------------------------------------------------------------------------------- /src/operator.py: -------------------------------------------------------------------------------- 1 | import queue 2 | import bpy 3 | import json 4 | import asyncio 5 | from queue import Queue 6 | from threading import Thread 7 | from pathlib import Path 8 | from .client import MCPClientBase 9 | from .server.server import Server 10 | from .server.tools import ToolsPackageBase 11 | from .i18n.translations.zh_HANS import OPS_TCTX 12 | from .logger import logger 13 | from .preference import get_pref 14 | from .utils import BTextWriter 15 | 16 | 17 | test_config_path = Path(__file__).parent.parent / "test_config.json" 18 | test_config = {} 19 | if test_config_path.exists(): 20 | test_config.update(json.loads(test_config_path.read_text(encoding="utf-8"))) 21 | 22 | 23 | class RunCommand(bpy.types.Operator): 24 | bl_idname = "mcp.run" 25 | bl_label = "Run" 26 | bl_description = "Run the command" 27 | bl_translation_context = OPS_TCTX 28 | 29 | @classmethod 30 | def poll(cls, context): 31 | pref = get_pref() 32 | client = pref.get_client_by_name(pref.provider) 33 | processing = False 34 | if client and (c := client.get()): 35 | processing = c.command_processing 36 | return not processing 37 | 38 | def execute(self, context): 39 | pref = get_pref() 40 | for tname in ToolsPackageBase.get_all_tool_packages_names(): 41 | tp = ToolsPackageBase.get_package(tname) 42 | if not tp: 43 | continue 44 | tools = tp.get_all_tools() 45 | if tname in pref.tools: 46 | # 注册工具 47 | Server.register_tools(tools) 48 | else: 49 | # 注销工具 50 | Server.unregister_tools(tools) 51 | selected_client = pref.provider 52 | for clientclass in MCPClientBase.get_all_clients(): 53 | cname = clientclass.__name__ 54 | client: MCPClientBase = pref.get_client_by_name(cname) 55 | if not client: 56 | continue 57 | if cname != selected_client: 58 | client.stop_client() 59 | continue 60 | 61 | client: MCPClientBase = pref.get_client_by_name(selected_client) 62 | if not client: 63 | self.report({"ERROR"}, "No client selected") 64 | return {"FINISHED"} 65 | client.try_start_client() 66 | # print(all_clients) 67 | command = 
bpy.context.scene.mcp_props.command 68 | if not command: 69 | return {"FINISHED"} 70 | client.get().command_queue.put(command) 71 | if bpy.context.scene.mcp_props.image: 72 | from tempfile import gettempdir 73 | # 保存图片 -> 图片路径 -> client.get().image_queue.put(图片路径) 74 | image: bpy.types.Image = bpy.context.scene.mcp_props.image 75 | image_path = Path(gettempdir(), f"{image.name}.png").as_posix() 76 | image.save_render(image_path) 77 | client.get().image_queue.put(image_path) 78 | return {"FINISHED"} 79 | 80 | 81 | class SkipCurrentCommand(bpy.types.Operator): 82 | bl_idname = "mcp.skip_current_command" 83 | bl_label = "Skip Current Command" 84 | bl_description = "Skip the current command" 85 | bl_translation_context = OPS_TCTX 86 | 87 | def execute(self, context): 88 | pref = get_pref() 89 | client: MCPClientBase = pref.get_client_by_name(pref.provider) 90 | if not client: 91 | self.report({"ERROR"}, "No client selected") 92 | return {"FINISHED"} 93 | instance = client.get() 94 | if instance: 95 | instance.skip_current_command = True 96 | return {"FINISHED"} 97 | 98 | 99 | class MarkCleanMessage(bpy.types.Operator): 100 | bl_idname = "mcp.mark_clean_message" 101 | bl_label = "Mark Clean Message" 102 | bl_description = "Mark the current message as clean" 103 | bl_translation_context = OPS_TCTX 104 | 105 | def execute(self, context): 106 | pref = get_pref() 107 | client: MCPClientBase = pref.get_client_by_name(pref.provider) 108 | if not client: 109 | return {"FINISHED"} 110 | instance = client.get() 111 | if instance: 112 | instance.should_clear_messages = True 113 | return {"FINISHED"} 114 | 115 | 116 | class OpenLogWindow(bpy.types.Operator): 117 | bl_idname = "mcp.open_log_window" 118 | bl_label = "Open Log Window" 119 | bl_description = "Open a big text editor window to show the log" 120 | bl_translation_context = OPS_TCTX 121 | 122 | def execute(self, context): 123 | bpy.ops.screen.area_dupli("INVOKE_DEFAULT") 124 | area = bpy.context.window_manager.windows[-1].screen.areas[0] 125 | area.ui_type = "TEXT_EDITOR" 126 | tw = BTextWriter.get() 127 | for i in area.spaces: 128 | if i.type != "TEXT_EDITOR": 129 | continue 130 | i.text = tw.text 131 | i.show_word_wrap = True 132 | return {"FINISHED"} 133 | 134 | 135 | clss = [ 136 | RunCommand, 137 | SkipCurrentCommand, 138 | MarkCleanMessage, 139 | OpenLogWindow, 140 | ] 141 | 142 | 143 | reg, unreg = bpy.utils.register_classes_factory(clss) 144 | 145 | 146 | def register(): 147 | reg() 148 | 149 | 150 | def unregister(): 151 | unreg() 152 | -------------------------------------------------------------------------------- /src/watcher.py: -------------------------------------------------------------------------------- 1 | import queue 2 | import time 3 | import platform 4 | from pathlib import Path 5 | from functools import lru_cache, partial 6 | from .logger import logger 7 | 8 | 9 | class FSWatcher: 10 | """ 11 | 监听文件/文件夹变化的工具类 12 | register: 注册监听, 传入路径和回调函数(可空) 13 | unregister: 注销监听 14 | run: 监听循环, 使用单例,只在第一次初始化时调用 15 | stop: 停止监听, 释放资源 16 | consume_change: 消费变化, 当监听对象发生变化时记录为changed, 主动消费后置False, 用于自定义回调函数 17 | """ 18 | _watcher_path: dict[Path, bool] = {} 19 | _watcher_stat = {} 20 | _watcher_callback = {} 21 | _watcher_queue = queue.Queue() 22 | _running = False 23 | _use_threading = False 24 | 25 | @classmethod 26 | def init(cls) -> None: 27 | cls._run() 28 | 29 | @classmethod 30 | def register(cls, path, callback=None): 31 | path = cls.to_path(path) 32 | if path in cls._watcher_path: 33 | return 34 | cls._watcher_path[path] = False 35 | 
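        # The optional callback is stored per path; once a change is detected, the consumer loop
        # (_run_ex_one) invokes it with that path.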
cls._watcher_callback[path] = callback 36 | 37 | @classmethod 38 | def unregister(cls, path): 39 | path = cls.to_path(path) 40 | cls._watcher_path.pop(path) 41 | cls._watcher_callback.pop(path) 42 | 43 | @classmethod 44 | def _run(cls): 45 | if cls._running: 46 | return 47 | cls._running = True 48 | if cls._use_threading: 49 | # use threading 50 | from threading import Thread 51 | Thread(target=cls._loop, daemon=True).start() 52 | Thread(target=cls._run_ex, daemon=True).start() 53 | else: 54 | # use timer 55 | import bpy 56 | cls._ploop_timer = partial(cls._loop_timer) 57 | cls._prun_ex_timer = partial(cls._run_ex_timer) 58 | bpy.app.timers.register(cls._ploop_timer, persistent=True) 59 | bpy.app.timers.register(cls._prun_ex_timer, persistent=True) 60 | 61 | @classmethod 62 | def _run_ex_timer(cls): 63 | cls._run_ex_one() 64 | return 0.5 65 | 66 | @classmethod 67 | def _run_ex(cls): 68 | while cls._running: 69 | cls._run_ex_one() 70 | time.sleep(0.1) 71 | 72 | @classmethod 73 | def _run_ex_one(cls): 74 | if not cls._running: 75 | return 76 | while not cls._watcher_queue.empty(): 77 | path = cls._watcher_queue.get() 78 | if path not in cls._watcher_path: 79 | continue 80 | if callback := cls._watcher_callback[path]: 81 | callback(path) 82 | 83 | @classmethod 84 | def _loop_timer(cls): 85 | cls._loop_one() 86 | return 1 87 | 88 | @classmethod 89 | def _loop(cls): 90 | """ 91 | 监听所有注册的路径, 有变化时记录为changed 92 | """ 93 | while cls._running: 94 | cls._loop_one() 95 | time.sleep(0.5) 96 | 97 | @classmethod 98 | def _loop_one(cls): 99 | if not cls._running: 100 | return 101 | for path, changed in list(cls._watcher_path.items()): 102 | if changed: 103 | continue 104 | if not path.exists(): 105 | continue 106 | mtime = path.stat().st_mtime_ns 107 | if cls._watcher_stat.get(path, None) == mtime: 108 | continue 109 | cls._watcher_stat[path] = mtime 110 | cls._watcher_path[path] = True 111 | cls._watcher_queue.put(path) 112 | 113 | @classmethod 114 | def stop(cls): 115 | cls._watcher_queue.put(None) 116 | cls._running = False 117 | if not cls._use_threading: 118 | import bpy 119 | bpy.app.timers.unregister(cls._ploop_timer) 120 | bpy.app.timers.unregister(cls._prun_ex_timer) 121 | 122 | @classmethod 123 | def consume_change(cls, path) -> bool: 124 | path = cls.to_path(path) 125 | if path in cls._watcher_path and cls._watcher_path[path]: 126 | cls._watcher_path[path] = False 127 | return True 128 | return False 129 | 130 | @classmethod 131 | @lru_cache(maxsize=1024) 132 | def get_nas_mapping(cls): 133 | if platform.system() != "Windows": 134 | return {} 135 | import subprocess 136 | try: 137 | result = subprocess.run("net use", capture_output=True, text=True, encoding="gbk", check=True) 138 | except subprocess.CalledProcessError as e: 139 | logger.warning(e) 140 | return {} 141 | if result.returncode != 0 or result.stdout is None: 142 | return {} 143 | nas_mapping = {} 144 | try: 145 | lines = result.stdout.strip().split("\n")[4:] 146 | for line in lines: 147 | columns = line.split() 148 | if len(columns) < 3: 149 | continue 150 | local_drive = columns[1] + "/" 151 | nas_path = Path(columns[2]).resolve().as_posix() 152 | nas_mapping[local_drive] = nas_path 153 | except Exception: 154 | ... 
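        # nas_mapping maps mounted drive roots (e.g. "Z:/") to their resolved network paths as parsed
        # from `net use`; to_str() below uses it to rewrite resolved NAS paths back to drive-letter form.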
155 | return nas_mapping 156 | 157 | @classmethod 158 | @lru_cache(maxsize=1024) 159 | def to_str(cls, path: Path): 160 | p = Path(path) 161 | try: 162 | res_str = p.resolve().as_posix() 163 | except FileNotFoundError as e: 164 | res_str = p.as_posix() 165 | logger.warning(e) 166 | # 处理nas路径 167 | for local_drive, nas_path in cls.get_nas_mapping().items(): 168 | if not res_str.startswith(nas_path): 169 | continue 170 | return res_str.replace(nas_path, local_drive) 171 | return res_str 172 | 173 | @classmethod 174 | @lru_cache(maxsize=1024) 175 | def to_path(cls, path: Path): 176 | if not path: 177 | return "" 178 | return Path(path) 179 | 180 | 181 | def register(): 182 | FSWatcher.init() 183 | 184 | 185 | def unregister(): 186 | FSWatcher.stop() 187 | 188 | -------------------------------------------------------------------------------- /src/icon.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | from .watcher import FSWatcher 3 | 4 | from .timer import Timer 5 | from pathlib import Path 6 | 7 | IMG_SUFFIX = {".png", ".jpg", ".jpeg"} 8 | 9 | 10 | class PrevMgr: 11 | __PREV__: dict[int, "bpy.utils.previews.ImagePreviewCollection"] = {} 12 | 13 | @staticmethod 14 | def new() -> "bpy.utils.previews.ImagePreviewCollection": 15 | import bpy.utils.previews 16 | import random 17 | 18 | prev = bpy.utils.previews.new() 19 | while (i := random.randint(0, 999999999)) in PrevMgr.__PREV__: 20 | continue 21 | PrevMgr.__PREV__[i] = prev 22 | return prev 23 | 24 | @staticmethod 25 | def remove(prev): 26 | import bpy.utils.previews 27 | 28 | bpy.utils.previews.remove(prev) 29 | 30 | @staticmethod 31 | def clear(): 32 | for prev in PrevMgr.__PREV__.values(): 33 | prev.clear() 34 | prev.close() 35 | PrevMgr.__PREV__.clear() 36 | 37 | 38 | class MetaIn(type): 39 | def __contains__(cls, name): 40 | return cls.__contains__(cls, name) 41 | 42 | 43 | class Icon(metaclass=MetaIn): 44 | PREV_DICT = PrevMgr.new() 45 | NONE_IMAGE = "" 46 | IMG_STATUS = {} 47 | PIX_STATUS = {} 48 | PATH2BPY = {} 49 | ENABLE_HQ_PREVIEW = True 50 | INSTANCE = None 51 | 52 | def __init__(self) -> None: 53 | if Icon.NONE_IMAGE and Icon.NONE_IMAGE not in Icon: 54 | Icon.NONE_IMAGE = FSWatcher.to_str(Icon.NONE_IMAGE) 55 | self.reg_icon(Icon.NONE_IMAGE) 56 | 57 | def __new__(cls, *args, **kwargs): 58 | if cls.INSTANCE is None: 59 | cls.INSTANCE = object.__new__(cls, *args, **kwargs) 60 | return cls.INSTANCE 61 | 62 | @staticmethod 63 | def update_path2bpy(): 64 | import bpy 65 | 66 | Icon.PATH2BPY.clear() 67 | for i in bpy.data.images: 68 | Icon.PATH2BPY[FSWatcher.to_str(i.filepath)] = i 69 | 70 | @staticmethod 71 | def apply_alpha(img): 72 | if img.file_format != "PNG" or img.channels < 4: 73 | return 74 | # 预乘alpha 到rgb 75 | import numpy as np 76 | 77 | pixels = np.zeros(img.size[0] * img.size[1] * 4, dtype=np.float32) 78 | img.pixels.foreach_get(pixels) 79 | sized_pixels = pixels.reshape(-1, 4) 80 | sized_pixels[:, :3] *= sized_pixels[:, 3].reshape(-1, 1) 81 | img.pixels.foreach_set(pixels) 82 | 83 | @staticmethod 84 | def clear(): 85 | Icon.PREV_DICT.clear() 86 | Icon.IMG_STATUS.clear() 87 | Icon.PIX_STATUS.clear() 88 | Icon.PATH2BPY.clear() 89 | Icon.reg_icon(Icon.NONE_IMAGE) 90 | 91 | @staticmethod 92 | def try_mark_image(path) -> bool: 93 | p = FSWatcher.to_path(path) 94 | path = FSWatcher.to_str(path) 95 | if not p.exists(): 96 | return False 97 | if Icon.IMG_STATUS.get(path, -1) == p.stat().st_mtime_ns: 98 | return False 99 | return True 100 | 101 | @staticmethod 102 | def 
can_mark_image(path) -> bool: 103 | p = FSWatcher.to_path(path) 104 | path = FSWatcher.to_str(path) 105 | if not Icon.try_mark_image(p): 106 | return False 107 | Icon.IMG_STATUS[path] = p.stat().st_mtime_ns 108 | return True 109 | 110 | @staticmethod 111 | def can_mark_pixel(prev, name) -> bool: 112 | name = FSWatcher.to_str(name) 113 | if Icon.PIX_STATUS.get(name) == hash(prev.pixels): 114 | return False 115 | Icon.PIX_STATUS[name] = hash(prev.pixels) 116 | return True 117 | 118 | @staticmethod 119 | def remove_mark(name) -> bool: 120 | name = FSWatcher.to_str(name) 121 | Icon.IMG_STATUS.pop(name) 122 | Icon.PIX_STATUS.pop(name) 123 | Icon.PREV_DICT.pop(name) 124 | return True 125 | 126 | @staticmethod 127 | def reg_none(none: Path): 128 | none = FSWatcher.to_str(none) 129 | if none in Icon: 130 | return 131 | Icon.NONE_IMAGE = none 132 | Icon.reg_icon(Icon.NONE_IMAGE) 133 | 134 | @staticmethod 135 | def reg_icon(path, reload=False, hq=False): 136 | path = FSWatcher.to_str(path) 137 | if not Icon.can_mark_image(path): 138 | return Icon[path] 139 | if Icon.ENABLE_HQ_PREVIEW and hq: 140 | try: 141 | Icon.reg_icon_hq(path) 142 | except BaseException: 143 | Timer.put((Icon.reg_icon_hq, path)) 144 | return Icon[path] 145 | else: 146 | if path not in Icon: 147 | Icon.PREV_DICT.load(path, path, "IMAGE") 148 | if reload: 149 | Timer.put(Icon.PREV_DICT[path].reload) 150 | return Icon[path] 151 | 152 | @staticmethod 153 | def reg_icon_hq(path): 154 | import bpy 155 | 156 | p = FSWatcher.to_path(path) 157 | path = FSWatcher.to_str(path) 158 | if path in Icon: 159 | return 160 | if p.exists() and p.suffix.lower() in IMG_SUFFIX: 161 | img = bpy.data.images.load(path) 162 | Icon.apply_alpha(img) 163 | Icon.reg_icon_by_pixel(img, path) 164 | Timer.put((bpy.data.images.remove, img)) # 直接使用 bpy.data.images.remove 会导致卡死 165 | 166 | @staticmethod 167 | def find_image(path): 168 | img = Icon.PATH2BPY.get(FSWatcher.to_str(path), None) 169 | if not img: 170 | return None 171 | try: 172 | _ = img.name # hack ref detect 173 | return img 174 | except ReferenceError: 175 | Icon.update_path2bpy() 176 | return None 177 | 178 | @staticmethod 179 | def load_icon(path): 180 | import bpy 181 | 182 | p = FSWatcher.to_path(path) 183 | path = FSWatcher.to_str(path) 184 | 185 | if not Icon.can_mark_image(path): 186 | return 187 | 188 | if img := Icon.find_image(path): 189 | Icon.update_icon_pixel(path, img) 190 | return img 191 | elif p.suffix.lower() in IMG_SUFFIX: 192 | img = bpy.data.images.load(path) 193 | img.filepath = path 194 | Icon.apply_alpha(img) 195 | Icon.update_path2bpy() 196 | # img.name = path 197 | return img 198 | 199 | @staticmethod 200 | def reg_icon_by_pixel(prev, name): 201 | name = FSWatcher.to_str(name) 202 | if not Icon.can_mark_pixel(prev, name): 203 | return 204 | if name in Icon: 205 | return 206 | p = Icon.PREV_DICT.new(name) 207 | p.icon_size = (32, 32) 208 | p.image_size = (prev.size[0], prev.size[1]) 209 | p.image_pixels_float[:] = prev.pixels[:] 210 | 211 | @staticmethod 212 | def get_icon_id(name: Path): 213 | import bpy 214 | 215 | p: bpy.types.ImagePreview = Icon.PREV_DICT.get(FSWatcher.to_str(name), None) 216 | if not p: 217 | p = Icon.PREV_DICT.get(FSWatcher.to_str(Icon.NONE_IMAGE), None) 218 | return p.icon_id if p else 0 219 | 220 | @staticmethod 221 | def update_icon_pixel(name, prev): 222 | """ 223 | 更新bpy.data.image 时一并更新(因为pixel 的hash 不变) 224 | """ 225 | prev.reload() 226 | p = Icon.PREV_DICT.get(name, None) 227 | if not p: 228 | # logger.error("No") 229 | return 230 | p.icon_size 
= (32, 32) 231 | p.image_size = (prev.size[0], prev.size[1]) 232 | p.image_pixels_float[:] = prev.pixels[:] 233 | 234 | def __getitem__(self, name): 235 | return Icon.get_icon_id(name) 236 | 237 | def __contains__(self, name): 238 | return FSWatcher.to_str(name) in Icon.PREV_DICT 239 | 240 | def __class_getitem__(cls, name): 241 | return cls.__getitem__(cls, name) 242 | -------------------------------------------------------------------------------- /src/server/tools/object_tools.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | from mathutils import Vector 3 | from typing import List 4 | from .common import ToolsPackageBase 5 | 6 | 7 | class ObjectTools(ToolsPackageBase): 8 | """ 9 | Object tools for the Blender scene. 10 | """ 11 | 12 | def get_object_info(object_name: str) -> dict: 13 | """ 14 | Get detailed information about a specific object in the Blender scene. 15 | 16 | Args: 17 | - object_name: The name of the object to get information about 18 | """ 19 | obj = bpy.data.objects.get(object_name) 20 | if not obj: 21 | raise ValueError(f"Object not found: {object_name}") 22 | 23 | # Basic object info 24 | obj_info = { 25 | "name": obj.name, 26 | "type": obj.type, 27 | "location": tuple(obj.location), 28 | "rotation": tuple(obj.rotation_euler), 29 | "scale": tuple(obj.scale), 30 | "visible": obj.visible_get(), 31 | "bound_box": [tuple(obj.matrix_world @ Vector(v)) for v in obj.bound_box], 32 | "materials": [], 33 | } 34 | 35 | obj_info["materials"] = [slot.material.name for slot in obj.material_slots if slot.material] 36 | 37 | # Add mesh data if applicable 38 | if obj.type == "MESH" and obj.data: 39 | mesh = obj.data 40 | obj_info["mesh"] = { 41 | "vertices": len(mesh.vertices), 42 | "edges": len(mesh.edges), 43 | "polygons": len(mesh.polygons), 44 | } 45 | 46 | return obj_info 47 | 48 | def create_object( 49 | entity_type: str = "CUBE", 50 | name: str = "New Object", 51 | location: List[float] = (0, 0, 0), 52 | rotation: List[float] = (0, 0, 0), 53 | scale: List[float] = (1, 1, 1), 54 | align: str = "WORLD", 55 | major_segments: int = 48, 56 | minor_segments: int = 12, 57 | mode: str = "MAJOR_MINOR", 58 | major_radius: float = 1.0, 59 | minor_radius: float = 0.25, 60 | abso_major_rad: float = 1.25, 61 | abso_minor_rad: float = 0.75, 62 | generate_uvs: bool = True, 63 | ) -> dict: 64 | """ 65 | Create a new object in the Blender scene. 66 | 67 | Args: 68 | - entity_type: Entity type to create (CUBE, SPHERE, CYLINDER, PLANE, CONE, TORUS, EMPTY, CAMERA, LIGHT) 69 | - name: Optional name for the object 70 | - location: Optional [x, y, z] location coordinates 71 | - rotation: Optional [x, y, z] rotation in radians 72 | - scale: Optional [x, y, z] scale factors (not used for TORUS) 73 | - align: How to align the torus ('WORLD', 'VIEW', or 'CURSOR') 74 | - major_segments: Number of segments for the main ring 75 | - minor_segments: Number of segments for the cross-section 76 | - mode: Dimension mode ('MAJOR_MINOR' or 'EXT_INT') 77 | - major_radius: Radius from the origin to the center of the cross sections 78 | - minor_radius: Radius of the torus' cross section 79 | - abso_major_rad: Total exterior radius of the torus 80 | - abso_minor_rad: Total interior radius of the torus 81 | - generate_uvs: Whether to generate a default UV map 82 | 83 | Returns: 84 | A message indicating the created object name. 
85 | """ 86 | old_objects = set(bpy.data.objects) 87 | # Deselect all objects 88 | bpy.ops.object.select_all(action="DESELECT") 89 | if entity_type == "CUBE": 90 | bpy.ops.mesh.primitive_cube_add(location=location, rotation=rotation, scale=scale) 91 | elif entity_type == "SPHERE": 92 | bpy.ops.mesh.primitive_uv_sphere_add(location=location, rotation=rotation, scale=scale) 93 | elif entity_type == "CYLINDER": 94 | bpy.ops.mesh.primitive_cylinder_add(location=location, rotation=rotation, scale=scale) 95 | elif entity_type == "PLANE": 96 | bpy.ops.mesh.primitive_plane_add(location=location, rotation=rotation, scale=scale) 97 | elif entity_type == "CONE": 98 | bpy.ops.mesh.primitive_cone_add(location=location, rotation=rotation, scale=scale) 99 | elif entity_type == "TORUS": 100 | bpy.ops.mesh.primitive_torus_add( 101 | align=align, 102 | location=location, 103 | rotation=rotation, 104 | major_segments=major_segments, 105 | minor_segments=minor_segments, 106 | mode=mode, 107 | major_radius=major_radius, 108 | minor_radius=minor_radius, 109 | abso_major_rad=abso_major_rad, 110 | abso_minor_rad=abso_minor_rad, 111 | generate_uvs=generate_uvs, 112 | ) 113 | elif entity_type == "EMPTY": 114 | bpy.ops.object.empty_add(location=location, rotation=rotation, scale=scale) 115 | elif entity_type == "CAMERA": 116 | bpy.ops.object.camera_add(location=location, rotation=rotation) 117 | elif entity_type == "LIGHT": 118 | bpy.ops.object.light_add(type="POINT", location=location, rotation=rotation, scale=scale) 119 | else: 120 | raise ValueError(f"Unsupported entity type: {entity_type}") 121 | new_objects = set(bpy.data.objects) - old_objects 122 | if len(new_objects) == 0: 123 | raise Exception(f"Failed to create entity: {entity_type} {name}") 124 | # Get the created object 125 | obj = list(new_objects)[0] 126 | # Rename the object if a name is provided 127 | if name: 128 | obj.name = name 129 | 130 | return { 131 | "name": obj.name, 132 | "type": obj.type, 133 | "location": tuple(obj.location), 134 | "rotation": tuple(obj.rotation_euler), 135 | "bound_box": [tuple(obj.matrix_world @ Vector(v)) for v in obj.bound_box], 136 | "scale": tuple(obj.scale), 137 | } 138 | 139 | def modify_object(name: str, location: List[float] = None, rotation: List[float] = None, scale: List[float] = None, visible: bool = None) -> dict: 140 | """ 141 | Modify an existing object in the Blender scene. 142 | 143 | Args: 144 | - name: Name of the object to modify 145 | - location: Optional [x, y, z] location coordinates 146 | - rotation: Optional [x, y, z] rotation in radians 147 | - scale: Optional [x, y, z] scale factors 148 | - visible: Optional boolean to set visibility 149 | """ 150 | # Find the object by name 151 | obj = bpy.data.objects.get(name) 152 | if not obj: 153 | raise ValueError(f"Object not found: {name}") 154 | 155 | obj.location = location or obj.location 156 | obj.rotation_euler = rotation or obj.rotation_euler 157 | obj.scale = scale or obj.scale 158 | 159 | if visible is not None: 160 | obj.hide_viewport = not visible 161 | obj.hide_render = not visible 162 | 163 | return { 164 | "name": obj.name, 165 | "type": obj.type, 166 | "location": tuple(obj.location), 167 | "rotation": tuple(obj.rotation_euler), 168 | "scale": tuple(obj.scale), 169 | "bound_box": [tuple(obj.matrix_world @ Vector(v)) for v in obj.bound_box], 170 | "visible": obj.visible_get(), 171 | } 172 | 173 | def delete_object(object_name: str) -> dict: 174 | """ 175 | Delete an object from the Blender scene by object name. 
176 | 177 | Args: 178 | - object_name: Name of the object to delete 179 | """ 180 | obj = bpy.data.objects.get(object_name) 181 | if not obj: 182 | raise ValueError(f"Object not found: {object_name}") 183 | with bpy.context.temp_override(selected_objects=[obj]): 184 | bpy.ops.object.delete() 185 | return {"deleted": object_name} 186 | -------------------------------------------------------------------------------- /src/preference.py: -------------------------------------------------------------------------------- 1 | # type: ignore 2 | import bpy 3 | import json 4 | import traceback 5 | from pathlib import Path 6 | from .i18n.translations.zh_HANS import PROP_TCTX, PANEL_TCTX, OPS_TCTX 7 | from .server.tools import ToolsPackageBase 8 | 9 | 10 | class AddonPreferences(bpy.types.AddonPreferences): 11 | __annotations__ = ToolsPackageBase.get_tool_pref_props() 12 | bl_idname = __package__.split(".")[0] 13 | config = {} 14 | config_cache = {} 15 | 16 | def load_cache(self): 17 | cache_file = Path(__file__).parent / "config_cache.json" 18 | if not cache_file.exists(): 19 | return 20 | try: 21 | json_data = json.loads(cache_file.read_text(encoding="utf-8")) 22 | self.config_cache.update(json_data) 23 | self.update_provider(None) 24 | except Exception: 25 | traceback.print_exc() 26 | 27 | def save_cache(self): 28 | try: 29 | current_config = self.dump_base_config() 30 | current_config["model"] = self.model 31 | old_config = self.config_cache.get(self.provider, {}) 32 | old_config.update(current_config) 33 | self.config_cache[self.provider] = old_config 34 | cache_file = Path(__file__).parent / "config_cache.json" 35 | cache_file.write_text(json.dumps(self.config_cache, indent=4, ensure_ascii=False), encoding="utf-8") 36 | except Exception: 37 | traceback.print_exc() 38 | 39 | def refresh_cache(self): 40 | if not self.provider: 41 | return 42 | old_config = self.config_cache.get(self.provider) 43 | new_config = self.dump_all_config() 44 | if old_config == new_config: 45 | return 46 | self.config_cache[self.provider] = new_config 47 | self.save_cache() 48 | 49 | def dump_all_config(self): 50 | client = self.get_client_by_name(self.provider) 51 | models = client.get().models if client.get() else [] 52 | return { 53 | "provider": self.provider, 54 | "api_key": self.api_key, 55 | "base_url": self.base_url, 56 | "model": self.model, 57 | "models": models, 58 | } 59 | 60 | def refresh_models_check(self): 61 | config = self.dump_base_config() 62 | if config != self.config: 63 | self.config.update(config) 64 | self.should_refresh_models = True 65 | 66 | use_history_message: bpy.props.BoolProperty(default=False, name="Use History Message", translation_context=PROP_TCTX) 67 | 68 | def get_tools_items(self, context): 69 | from .server.tools import ToolsPackageBase 70 | 71 | return ToolsPackageBase.get_enum_items() 72 | 73 | tools: bpy.props.EnumProperty( 74 | items=get_tools_items, 75 | name="ToolPackage", 76 | options={"ENUM_FLAG"}, 77 | default=0b11110, 78 | translation_context=PROP_TCTX, 79 | ) 80 | 81 | def get_provider_items(self, context): 82 | from .client import MCPClientBase 83 | 84 | return MCPClientBase.get_enum_items() 85 | 86 | def update_provider(self, context): 87 | # 从config_cache加载配置 88 | config = self.config_cache.get(self.provider, {}) 89 | if not config: 90 | # 从MCP Client中加载 91 | client = self.get_client_by_name(self.provider) 92 | config = client.default_config() 93 | self.api_key = config.get("api_key", "") 94 | self.base_url = config.get("base_url", "") 95 | try: 96 | self.model = 
config.get("model", "") 97 | except Exception: 98 | pass 99 | 100 | provider: bpy.props.EnumProperty( 101 | items=get_provider_items, 102 | name="LLM Provider", 103 | update=update_provider, 104 | translation_context=PROP_TCTX, 105 | ) 106 | 107 | api_key: bpy.props.StringProperty(default="", name="API Key", translation_context=PROP_TCTX) 108 | 109 | base_url: bpy.props.StringProperty( 110 | default="https://api.deepseek.com", 111 | name="Base URL", 112 | translation_context=PROP_TCTX, 113 | ) 114 | 115 | def dump_base_config(self): 116 | return { 117 | "provider": self.provider, 118 | "api_key": self.api_key, 119 | "base_url": self.base_url, 120 | } 121 | 122 | def get_model_items(self, context): 123 | client = self.get_client_by_name(self.provider) 124 | models = client.get().models if client.get() else [] 125 | models = [(m, m, "") for m in models] 126 | if not models: 127 | models = self.config_cache.get(self.provider, {}).get("models", []) 128 | return models or [("None", "None", "")] 129 | 130 | model: bpy.props.EnumProperty(items=get_model_items, name="Model", translation_context=PROP_TCTX) 131 | 132 | def search_model(self, context, text): 133 | client = self.get_client_by_name(self.provider) 134 | models = client.get().models if client.get() else [] 135 | if not models: 136 | models = self.config_cache.get(self.provider, {}).get("models", []) 137 | t = text.lower() 138 | return [m for m in models if t in m.lower()] 139 | 140 | model: bpy.props.StringProperty( 141 | default="", 142 | name="Model", 143 | search=search_model, 144 | search_options={"SORT"}, 145 | translation_context=PROP_TCTX, 146 | ) 147 | 148 | should_refresh_models: bpy.props.BoolProperty(default=True, name="Should Refresh Models", translation_context=PROP_TCTX) 149 | 150 | def get_client_by_name(self, name): 151 | from .client import MCPClientBase 152 | 153 | return MCPClientBase.get_client_by_name(name) 154 | 155 | def draw(self, context): 156 | layout = self.layout 157 | layout.column().prop(self, "tools", expand=True) 158 | box = layout.box() 159 | self.draw_ex(box) 160 | self.draw_tools_props(box) 161 | 162 | def draw_ex(self, layout: bpy.types.UILayout): 163 | row = layout.row(align=True) 164 | row.label(text="API Settings", text_ctxt=PANEL_TCTX) 165 | row.alert = self.should_refresh_models 166 | row.operator(RefreshModels.bl_idname, text="", icon="FILE_REFRESH", text_ctxt=OPS_TCTX) 167 | row.alert = False 168 | row.operator(SaveConfig.bl_idname, text="", icon="FILE_TICK", text_ctxt=OPS_TCTX) 169 | layout.prop(self, "provider") 170 | provider = self.get_client_by_name(self.provider) 171 | if not provider: 172 | return 173 | provider.draw(layout) 174 | 175 | def draw_tools_props(self, layout: bpy.types.UILayout): 176 | for t in ToolsPackageBase.get_all_tool_packages(): 177 | t.draw_pref_props(self, layout) 178 | 179 | 180 | class RefreshModels(bpy.types.Operator): 181 | bl_idname = "mcp.refresh_models" 182 | bl_label = "Refresh Models" 183 | bl_description = "Refresh Models" 184 | bl_translation_context = OPS_TCTX 185 | 186 | def execute(self, context): 187 | from .client import MCPClientBase 188 | 189 | pref = get_pref() 190 | # 先停止所有非当前客户端 191 | for clientclass in MCPClientBase.get_all_clients(): 192 | cname = clientclass.__name__ 193 | client: MCPClientBase = pref.get_client_by_name(cname) 194 | if not client: 195 | continue 196 | if cname != pref.provider: 197 | client.stop_client() 198 | continue 199 | client = pref.get_client_by_name(pref.provider) 200 | if not client.get(): 201 | 
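            # Lazily start the selected provider's client thread so fetch_models() below has a live instance.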
client.try_start_client() 202 | if not client.get(): 203 | return {"FINISHED"} 204 | models = client.get().fetch_models(force=True) 205 | pref.refresh_cache() 206 | if models: 207 | pref.should_refresh_models = False 208 | return {"FINISHED"} 209 | 210 | 211 | class SaveConfig(bpy.types.Operator): 212 | bl_idname = "mcp.save_config" 213 | bl_label = "Save Config" 214 | bl_description = "Save Config" 215 | bl_translation_context = OPS_TCTX 216 | 217 | def execute(self, context): 218 | pref = get_pref() 219 | pref.save_cache() 220 | return {"FINISHED"} 221 | 222 | 223 | def get_pref() -> AddonPreferences: 224 | return bpy.context.preferences.addons[AddonPreferences.bl_idname].preferences 225 | 226 | 227 | @bpy.app.handlers.persistent 228 | def init_config(scene): 229 | pref = get_pref() 230 | pref.load_cache() 231 | 232 | 233 | def config_checker(): 234 | pref = get_pref() 235 | pref.refresh_models_check() 236 | return 1 237 | 238 | 239 | clss = [ 240 | RefreshModels, 241 | SaveConfig, 242 | AddonPreferences, 243 | ] 244 | 245 | 246 | reg, unreg = bpy.utils.register_classes_factory(clss) 247 | 248 | 249 | def register(): 250 | reg() 251 | bpy.app.handlers.load_post.append(init_config) 252 | bpy.app.timers.register(config_checker, first_interval=1, persistent=True) 253 | 254 | 255 | def unregister(): 256 | unreg() 257 | bpy.app.timers.unregister(config_checker) 258 | bpy.app.handlers.load_post.remove(init_config) 259 | -------------------------------------------------------------------------------- /src/client/openai.py: -------------------------------------------------------------------------------- 1 | import json 2 | import bpy 3 | import base64 4 | import requests 5 | import re 6 | from copy import deepcopy 7 | from pathlib import Path 8 | 9 | from .base import MCPClientBase, logger 10 | 11 | 12 | class MCPClientOpenAI(MCPClientBase): 13 | @classmethod 14 | def info(cls): 15 | return { 16 | "name": "OpenAI Compatible", 17 | "description": "A client that uses OpenAI for the rendering.", 18 | "version": "0.0.1", 19 | } 20 | 21 | @classmethod 22 | def default_config(cls): 23 | return { 24 | "base_url": "https://api.openai.com", 25 | "api_key": "", 26 | "model": "gpt-4o-mini", 27 | } 28 | 29 | def __init__(self, base_url="https://api.openai.com", api_key="", model="", stream=True): 30 | super().__init__(base_url, api_key, model, stream) 31 | 32 | def get_chat_url(self): 33 | return f"{self.base_url}/v1/chat/completions" 34 | 35 | def fetch_models_ex(self): 36 | headers = { 37 | "Content-Type": "application/json", 38 | "Accept": "application/json", 39 | "Authorization": f"Bearer {self.api_key}", 40 | } 41 | 42 | model_url = f"{self.base_url}/v1/models" 43 | try: 44 | response = requests.get(model_url, headers=headers) 45 | models = response.json().get("data", []) 46 | self.models = [model["id"] for model in models] 47 | except Exception: 48 | logger.error("获取模型列表失败, 请检查大模型服务商, API密钥及base url是否正确") 49 | return self.models 50 | 51 | async def prepare_tools(self): 52 | response = await self.session.list_tools() 53 | tools = [] 54 | for tool in response.tools: 55 | tool_info = { 56 | "type": "function", 57 | "function": { 58 | "name": tool.name, 59 | "description": tool.description, 60 | # "parameters": { 61 | # "type": "object", 62 | # "properties": { 63 | # "city": { 64 | # "type": "string", 65 | # "description": "The name of the city", 66 | # }, 67 | # }, 68 | # "required": ["city"], 69 | # }, 70 | }, 71 | } 72 | parameters = deepcopy(tool.inputSchema) 73 | tool_info["function"]["parameters"] = 
parameters 74 | # region 简化函数描述 75 | description = tool.description 76 | description = description.replace("Args:", "") 77 | for name, info in parameters.get("properties", {}).items(): 78 | find_description = re.search(f"- {name}: (.*)\n", description) 79 | if not find_description: 80 | continue 81 | description = description.replace(find_description.group(0), "") 82 | description = description.replace("\n", "").strip() 83 | while " " in description: 84 | description = description.replace(" ", " ") 85 | # endregion 简化函数描述 86 | tool_info["function"]["description"] = description 87 | tools.append(tool_info) 88 | return tools 89 | 90 | def response_raise_status(self, response: requests.Response): 91 | try: 92 | response.raise_for_status() 93 | except requests.exceptions.HTTPError: 94 | try: 95 | json_data = response.json() 96 | error = json_data.get("error", {}) 97 | if message := error.get("message"): 98 | if "tools is not supported" in message: 99 | logger.error("此模型不支持工具调用") 100 | # for deepseek 101 | if "does not support Function Calling" in message: 102 | logger.error("此模型不支持工具调用") 103 | raise Exception(message) 104 | print(json_data) 105 | except json.JSONDecodeError: 106 | ... 107 | 108 | async def process_query(self, query: str) -> list: 109 | headers = { 110 | "Content-Type": "application/json", 111 | "Accept": "application/json", 112 | "Authorization": f"Bearer {self.api_key}", 113 | } 114 | 115 | data = { 116 | "model": self.model, 117 | "messages": self.messages, 118 | "tools": None, 119 | "stream": self.stream, 120 | } 121 | if not self.use_history: 122 | self.clear_messages() 123 | # messages.append({"role": "system", "content": self.system_prompt()}) 124 | user_content = [{"type": "text", "text": query}] 125 | while not self.image_queue.empty(): 126 | image_path = Path(self.image_queue.get()) 127 | base64_image = base64.b64encode(image_path.read_bytes()).decode() 128 | image_content = { 129 | "type": "image_url", 130 | "image_url": { 131 | "url": f"data:image/jpeg;base64,{base64_image}", 132 | }, 133 | } 134 | user_content.append(image_content) 135 | 136 | self.push_message({"role": "user", "content": user_content}) 137 | data["tools"] = await self.prepare_tools() 138 | with requests.Session() as session: 139 | session.headers.update(headers) 140 | session.stream = self.stream 141 | while not self.should_skip(): 142 | last_call_index = -1 143 | self.tool_calls.clear() 144 | response = session.post(self.get_chat_url(), json=data) 145 | self.response_raise_status(response) 146 | response.encoding = "utf-8" 147 | # print("---------------------------------------START---------------------------------------") 148 | 149 | for line in response.iter_lines(): 150 | if not line: 151 | continue 152 | if self.should_skip(): 153 | break 154 | # print("原始数据:", line) 155 | if not (json_data := self.parse_line(line)): 156 | # print("无法解析原始数据:", line) 157 | continue 158 | choice = json_data.get("choices", [{}])[0] 159 | delta = choice.get("delta", {}) 160 | finish_reason = choice.get("finish_reason", "") 161 | if finish_reason in {"stop", "tool_calls"}: 162 | continue 163 | if json_data.get("type", "") == "ping": 164 | # for claude openai compatible 165 | continue 166 | if error := self.parse_error(json_data): 167 | logger.error(error) 168 | break 169 | if not delta: 170 | logger.warning(f"delta数据缺失: {line}") 171 | continue 172 | # print("delta原始数据:", delta) 173 | # ---------------------------1.文本输出--------------------------- 174 | # 原始数据 {"choices": [{"index": 0, "delta": {"role": "assistant", 
"content": ""}}]} 175 | if (content := delta.get("content")) or (content := delta.get("reasoning_content")): 176 | self.push_stream_message({"role": "streaming", "content": content}) 177 | print(content, end="", flush=True) 178 | 179 | # ---------------------------2.工具调用--------------------------- 180 | # 原始数据 {"choices": [{"index": 0, "delta": {"role": "assistant", "tool_calls": [{"index": 0, "id": "XXX", "type": "function", "function": {"name": "get_scene_info", "arguments": ""}}]}}]} 181 | if not (tool_call := delta.get("tool_calls", [{}])[0]): 182 | continue 183 | index = tool_call["index"] 184 | fn_name = tool_call.get("function", {}).get("name", "") 185 | # 工具调用的第一条数据 186 | if fn_name and index not in self.tool_calls: 187 | last_call_index = index 188 | self.tool_calls[index] = tool_call 189 | print(f"\n选择工具: {fn_name} 参数: ", end="", flush=True) 190 | # 过滤无效的tool_call(小模型生成的多余arguments) 191 | if index not in self.tool_calls: 192 | continue 193 | # 流式输出拼接arguments 194 | if arguments := tool_call.get("function", {}).get("arguments", ""): 195 | self.tool_calls[index]["function"]["arguments"] += arguments 196 | print(arguments, end="", flush=True) 197 | # 每轮只允许一个工具调用( 当存在连续调用时, 每当tryjson 成功时就调用) 198 | if self.ensure_tool_call(index): 199 | await self.call_tool(index) 200 | # print("----------------------------------------END-----------------------------------------") 201 | if self.should_skip(): 202 | break 203 | if last_call_index == -1: 204 | break 205 | # 保证执行最后一个工具调用 206 | for index in list(self.tool_calls): 207 | # 最后强制调用一次, 如果有报错信息会写入messages 208 | await self.call_tool(index) 209 | return "" 210 | -------------------------------------------------------------------------------- /src/client/base.py: -------------------------------------------------------------------------------- 1 | import json 2 | import bpy 3 | import math 4 | import random 5 | import queue 6 | import asyncio 7 | import requests 8 | from threading import Thread 9 | from copy import deepcopy 10 | from dataclasses import dataclass 11 | from typing import Union, Literal 12 | from contextlib import AsyncExitStack 13 | from mcp import ClientSession 14 | from mcp.client.sse import sse_client 15 | from ..timer import Timer 16 | from ..logger import getLogger 17 | from ..utils import BTextWriter 18 | 19 | logger = getLogger(" BlenderClient") 20 | 21 | 22 | @dataclass 23 | class ContentEmpty: 24 | rtype: Literal["empty"] 25 | text: str 26 | tool_calls: list 27 | error: str 28 | 29 | 30 | @dataclass 31 | class ContentText: 32 | rtype: Literal["text"] 33 | text: str 34 | 35 | 36 | @dataclass 37 | class ContentTool: 38 | rtype: Literal["tool"] 39 | tool_calls: list 40 | arguments: str 41 | 42 | 43 | ContentType = Union[ContentEmpty, ContentText, ContentTool] 44 | 45 | 46 | class ResponseParser: 47 | @staticmethod 48 | def parse_response(response: str) -> ContentType: 49 | try: 50 | data = json.loads(response) 51 | return data 52 | except Exception as e: 53 | print(f"Error parsing response: {e}") 54 | return None 55 | 56 | 57 | class MCPClientBase: 58 | client_pools: dict[object, "MCPClientBase"] = {} 59 | __clients__: dict[str, "MCPClientBase"] = {} 60 | 61 | def __init__(self, base_url="https://api.deepseek.com", api_key="", model="", stream=True): 62 | self._base_url = "" 63 | self.base_url = base_url 64 | self.api_key = api_key 65 | self.model = model 66 | self.stream = stream 67 | self.session: ClientSession = None 68 | self.messages = [] 69 | self.tool_calls: dict[str, dict] = {} 70 | self.should_clear_messages = False 71 
| self.command_processing = False 72 | self.use_history = False 73 | self.models = [] 74 | self.exit_stack = AsyncExitStack() 75 | self.should_stop = False 76 | self.skip_current_command = False 77 | self.command_queue = queue.Queue() 78 | self.image_queue = queue.Queue() 79 | self.is_running = False 80 | self.push_instance(self) 81 | # self.response_parser = ResponseParser() 82 | # s = self.response_parser.parse_response("S") 83 | self.reset_config() 84 | self.clear_messages() 85 | 86 | @property 87 | def base_url(self): 88 | return self._base_url 89 | 90 | @base_url.setter 91 | def base_url(self, value): 92 | self._base_url = value[:-1] if value.endswith("/") else value 93 | 94 | def push_stream_message(self, message): 95 | BTextWriter.get().push(deepcopy(message)) 96 | 97 | def push_message(self, message): 98 | BTextWriter.get().push(deepcopy(message)) 99 | self.messages.append(message) 100 | 101 | def clear_messages(self): 102 | self.messages.clear() 103 | BTextWriter.get().clear() 104 | 105 | def update(self): 106 | if self.should_clear_messages: 107 | self.clear_messages() 108 | self.should_clear_messages = False 109 | Timer.put(self.reset_config) 110 | 111 | def reset_config(self): 112 | from ..preference import get_pref 113 | 114 | pref = get_pref() 115 | self.base_url = pref.base_url 116 | self.api_key = pref.api_key 117 | self.model = pref.model 118 | self.use_history = pref.use_history_message 119 | 120 | def get_chat_url(self): 121 | return "" 122 | 123 | def fetch_models(self, force=False) -> list: 124 | if self.models and not force: 125 | return self.models 126 | logger.info("正在获取模型列表...") 127 | self.models = sorted(self.fetch_models_ex()) 128 | return self.models 129 | 130 | def fetch_models_ex(self): 131 | return [] 132 | 133 | @classmethod 134 | def info(cls): 135 | return { 136 | "name": "MCPClientBase", 137 | "description": "A base class for MCP clients", 138 | "version": "0.1.0", 139 | } 140 | 141 | @classmethod 142 | def get(cls): 143 | return cls.client_pools.get(cls, None) 144 | 145 | @classmethod 146 | def push_instance(cls, self): 147 | cls.client_pools[cls] = self 148 | 149 | @classmethod 150 | def pop_instance(cls): 151 | return cls.client_pools.pop(cls, None) 152 | 153 | @classmethod 154 | def get_all_clients(cls): 155 | clients = [] 156 | for c in cls.__subclasses__(): 157 | clients.append(c) 158 | clients += c.get_all_clients() 159 | return clients 160 | 161 | @classmethod 162 | def get_enum_items(cls): 163 | return [(c.__name__, c.info()["name"], c.info()["description"]) for c in cls.get_all_clients()] 164 | 165 | @classmethod 166 | def default_config(cls): 167 | return {} 168 | 169 | @classmethod 170 | def get_client_by_name(cls, name: str) -> "MCPClientBase": 171 | if name not in cls.__clients__: 172 | for c in cls.get_all_clients(): 173 | cls.__clients__[c.__name__] = c 174 | return cls.__clients__[name] 175 | 176 | @classmethod 177 | def stop_client(cls): 178 | if not (instance := cls.pop_instance()): 179 | return 180 | instance.should_stop = True 181 | 182 | @classmethod 183 | def try_start_client(cls): 184 | instance = cls.get() 185 | if not instance or instance.should_stop: 186 | cls.push_instance(cls()) 187 | instance = cls.get() 188 | if instance.is_running: 189 | instance.reset_config() 190 | return 191 | instance.is_running = True 192 | 193 | def run_client(): 194 | asyncio.run(instance.main()) 195 | logger.info(f"客户端 {cls.__name__} 结束运行!") 196 | 197 | job = Thread(target=run_client, daemon=True) 198 | job.start() 199 | 200 | @classmethod 201 | def 
draw(cls, layout: bpy.types.UILayout): 202 | from ..preference import get_pref 203 | 204 | pref = get_pref() 205 | layout.prop(pref, "api_key") 206 | layout.prop(pref, "base_url") 207 | layout.prop(pref, "model") 208 | 209 | def should_skip(self): 210 | return self.skip_current_command or self.should_stop 211 | 212 | def system_prompt(self): 213 | prompt_cn = """ 214 | 你擅长对用户的问题进行分析, 并选择合适的工具来解决用户的问题. 215 | 逐步思考, 每个思考步骤只保留最少的草稿, 最终选择合适的工具来解决问题. 216 | 注意: 仅能选择提供的可用工具 217 | """ 218 | prompt_en = """ 219 | You are good at analyzing user questions and choosing the appropriate tools to solve user problems. 220 | Think step by step, but keep only a minimum draft for each thinking step, and finally choose the appropriate tool to solve the problem. 221 | Note: Only use the tools you have been provided with. 222 | """ 223 | # prompt_en_path = Path(__file__).parent / "prompt_en.txt" 224 | # prompt_en = prompt_en_path.read_text(encoding="utf-8") 225 | return prompt_en.strip() 226 | 227 | async def connect_to_server(self): 228 | """连接到MCP服务器""" 229 | headers = { 230 | "Content-Type": "application/json", 231 | "Accept": "application/json", 232 | } 233 | stdio_transport = await self.exit_stack.enter_async_context(sse_client(url="http://localhost:45677/sse", headers=headers)) 234 | self.stdio, self.write = stdio_transport 235 | self.session = await self.exit_stack.enter_async_context(ClientSession(self.stdio, self.write)) 236 | 237 | await self.session.initialize() 238 | 239 | def parse_line(self, line: str) -> dict: 240 | if not line: 241 | return {} 242 | line = line.decode("utf-8").replace("data:", "").strip() 243 | # print("收到消息:", line) 244 | if line.endswith(("[DONE]", "PROCESSING")): 245 | return {} 246 | if line.endswith("[ERROR]"): 247 | print(line) 248 | return {} 249 | try: 250 | return json.loads(line) 251 | except Exception: 252 | if "PROCESSING" in line: 253 | return {} 254 | print("Json解析错误", line) 255 | return {} 256 | 257 | def parse_error(self, error: dict): 258 | return error.get("error", {}).get("message", "") 259 | 260 | def response_raise_status(self, response): 261 | pass 262 | 263 | def parse_arguments(self, arguments: str): 264 | arguments = arguments.strip() 265 | # 基本的json格式校验 266 | if not arguments.startswith("{") or not arguments.endswith("}"): 267 | raise json.JSONDecodeError("参数格式错误", arguments, 0) 268 | try: 269 | try: 270 | return eval(arguments, {"math": math, "random": random}) 271 | except Exception: 272 | pass 273 | return json.loads(arguments) 274 | except Exception as e: 275 | logger.info(f"\n错误参数: {arguments}\n") 276 | raise e 277 | 278 | def ensure_tool_call(self, index: int): 279 | tool_call = self.tool_calls[index] 280 | func = tool_call.get("function", {}) 281 | try: 282 | arguments = func.get("arguments", "").strip() 283 | self.parse_arguments(arguments) 284 | except Exception: 285 | return False 286 | return True 287 | 288 | async def call_tool(self, index: int): 289 | """调用工具""" 290 | tool_call = self.tool_calls[index] 291 | func = tool_call.get("function", {}) 292 | fn_name = func.get("name") 293 | arguments = func.get("arguments", "").strip() or "{}" 294 | print() # 每次调用工具时,打印一个空行,方便查看日志 295 | logger.info(f"尝试工具: {fn_name} 参数: {arguments}") 296 | results = await self.call_tool_ex(fn_name, arguments) 297 | self.tool_calls.pop(index) 298 | self.push_message({"role": "assistant", "content": "", "tool_calls": [tool_call]}) 299 | for rtype, result in results: 300 | final_result = f"Selected tool: {fn_name}\nResult: {result}" 301 | tool_call_result = {"role": 
"tool", "content": final_result, "tool_call_id": tool_call["id"], "name": fn_name} 302 | self.push_message(tool_call_result) 303 | 304 | async def call_tool_ex(self, fn_name: str, arguments: str | dict) -> tuple[str, str]: 305 | try: 306 | arguments = self.parse_arguments(arguments) 307 | except Exception as e: 308 | logger.info(f"参数解析错误:\n{arguments}\n{e}") 309 | return [("error", f"Argument parsing error: {e}")] 310 | res = await self.session.call_tool(fn_name, arguments) 311 | results = [] 312 | for res_content in res.content: 313 | result = "" 314 | rtype = res_content.type 315 | if rtype == "text": 316 | result = res_content.text 317 | elif rtype == "image": 318 | result = res_content.data 319 | elif rtype == "resource": 320 | result = res_content.resource 321 | if isinstance(result, str) and result.startswith("Error"): 322 | rtype = "error" 323 | logger.error(result) 324 | results.append((rtype, result)) 325 | return results 326 | 327 | async def process_query(self, query: str) -> str: 328 | """Process a query using Claude and available tools""" 329 | return "" 330 | 331 | async def main(self): 332 | try: 333 | logger.info("尝试连接到创世核心...") 334 | await self.connect_to_server() 335 | logger.info("创世核心已连接!") 336 | while True: 337 | try: 338 | self.command_processing = False 339 | self.update() 340 | if self.should_stop: 341 | break 342 | try: 343 | query = self.command_queue.get_nowait() 344 | except queue.Empty: 345 | await asyncio.sleep(0.2) 346 | continue 347 | logger.info(f"当前命令: {query}") 348 | self.skip_current_command = False 349 | self.command_processing = True 350 | response = await self.process_query(query) 351 | print() 352 | if self.skip_current_command: 353 | logger.info(f"跳过命令: {query}") 354 | continue 355 | logger.info(f"处理完成: {query}") 356 | # client.session.call_tool 357 | # print(response) 358 | except requests.exceptions.HTTPError as e: 359 | logger.warning(f"HTTP错误(请检查api_key, 模型使用情况或额度): {e}") 360 | except Exception: 361 | import traceback 362 | 363 | traceback.print_exc() 364 | except Exception as e: 365 | logger.error(f"连接失败: {e}") 366 | logger.error("请尝试改变网络环境(开关代理等)") 367 | finally: 368 | await self.cleanup() 369 | 370 | async def cleanup(self): 371 | """清理资源""" 372 | self.command_processing = False 373 | await self.exit_stack.aclose() 374 | -------------------------------------------------------------------------------- /src/server/tools/polyhaven_tools.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import time 3 | import json 4 | from hashlib import md5 5 | from tempfile import gettempdir 6 | from copy import deepcopy 7 | from pathlib import Path 8 | 9 | try: 10 | from .common import ToolsPackageBase 11 | except ImportError: 12 | ToolsPackageBase = object # for quick testing 13 | 14 | 15 | class PolyhavenHelper: 16 | url = "https://api.polyhaven.com" 17 | assets_cache = {} 18 | tags_cache = {} 19 | categories_cache = {} 20 | files_cache = {} 21 | 22 | @classmethod 23 | def fetch_assets_by_type(cls, asset_type: str) -> dict: 24 | asset_list = cls.fetch_assets_by_type_ex(asset_type) 25 | return deepcopy(asset_list) 26 | 27 | @classmethod 28 | def fetch_assets_by_type_ex(cls, asset_type: str) -> dict: 29 | # 优先从本地缓存中获取(缓存时间戳判定间隔为两天) 30 | cache_file = Path(gettempdir()) / f"polyhaven_{asset_type}_cache.json" 31 | cache_file.parent.mkdir(parents=True, exist_ok=True) 32 | if cache_file.exists(): 33 | # TODO: 判定缓存时间戳 34 | time_stamp = cache_file.stat().st_mtime 35 | if time_stamp > (time.time() - 60 * 60 * 
24 * 2): 36 | if asset_type not in cls.assets_cache: 37 | with open(cache_file, "r") as f: 38 | data = json.load(f) 39 | cls.assets_cache[asset_type] = data 40 | return cls.assets_cache[asset_type] 41 | try: 42 | response = requests.get(f"{cls.url}/assets?t={asset_type}") 43 | response.raise_for_status() 44 | json_data = response.json() 45 | cache_file.write_text(json.dumps(json_data)) 46 | cls.assets_cache[asset_type] = json_data 47 | return json_data 48 | except requests.exceptions.RequestException as e: 49 | print(f"Error fetching assets: {e}") 50 | return {} 51 | 52 | @classmethod 53 | def fetch_tags(cls, asset_type: str) -> dict: 54 | if asset_type not in cls.tags_cache: 55 | assets_list = cls.fetch_assets_by_type(asset_type) 56 | tags = set() 57 | for asset in assets_list.values(): 58 | tags.update(asset["tags"]) 59 | cls.tags_cache[asset_type] = tags 60 | return cls.tags_cache[asset_type] # models: 848 61 | 62 | @classmethod 63 | def fetch_categories(cls, asset_type: str) -> dict: 64 | if asset_type not in cls.categories_cache: 65 | assets_list = cls.fetch_assets_by_type(asset_type) 66 | categories = set() 67 | for asset in assets_list.values(): 68 | categories.update(asset["categories"]) 69 | cls.categories_cache[asset_type] = categories 70 | return cls.categories_cache[asset_type] # models: 40 71 | 72 | @classmethod 73 | def fetch_model_files(cls, asset_id: str, expected_resolution: str = "1k") -> dict: 74 | if asset_id not in cls.files_cache: 75 | try: 76 | response = requests.get(f"{cls.url}/files/{asset_id}") 77 | json_data = response.json() 78 | config = json_data.get("blend", {}) 79 | cls.files_cache[asset_id] = config 80 | except Exception: 81 | return {} 82 | expected_resolution_int = int(expected_resolution[:-1]) 83 | last_suport_resolution = "1k" 84 | for resolution in sorted(cls.files_cache[asset_id], key=lambda x: int(x[:-1])): 85 | resolution_int = int(resolution[:-1]) 86 | if resolution_int <= expected_resolution_int: 87 | last_suport_resolution = resolution 88 | files = cls.files_cache[asset_id][last_suport_resolution].get("blend", {}) 89 | return files 90 | 91 | @classmethod 92 | def fetch_hdri_file(cls, asset_id: str, expected_resolution: str = "1k") -> dict: 93 | if asset_id not in cls.files_cache: 94 | try: 95 | response = requests.get(f"{cls.url}/files/{asset_id}") 96 | json_data = response.json() 97 | config = json_data.get("hdri", {}) 98 | cls.files_cache[asset_id] = config 99 | except Exception: 100 | return {} 101 | expected_resolution_int = int(expected_resolution[:-1]) 102 | last_suport_resolution = "1k" 103 | for resolution in sorted(cls.files_cache[asset_id], key=lambda x: int(x[:-1])): 104 | resolution_int = int(resolution[:-1]) 105 | if resolution_int <= expected_resolution_int: 106 | last_suport_resolution = resolution 107 | file = cls.files_cache[asset_id][last_suport_resolution] 108 | return file 109 | 110 | @classmethod 111 | def download_hdri_file(cls, asset_id: str, expected_resolution: str = "1k") -> str: 112 | files = cls.fetch_hdri_file(asset_id, expected_resolution) 113 | if not files: 114 | return {} 115 | { 116 | "hdr": { 117 | "size": 102973096, 118 | "url": "https://dl.polyhaven.org/file/ph-assets/HDRIs/hdr/8k/abandoned_bakery_8k.hdr", 119 | "md5": "87ee21eb003d29103f5fe1720f64ec6d", 120 | }, 121 | "exr": { 122 | "size": 94040932, 123 | "url": "https://dl.polyhaven.org/file/ph-assets/HDRIs/exr/8k/abandoned_bakery_8k.exr", 124 | "md5": "e678e3e3924ade087f1ea7f795e26bf2", 125 | }, 126 | } 127 | file = files.get("hdr") 128 | if "exr" in 
files: 129 | file = files.get("exr") 130 | hdri_cache_dir = Path(gettempdir()) / f"polyhaven_hdris/{asset_id}/{expected_resolution}" 131 | hdri_cache_dir.mkdir(parents=True, exist_ok=True) 132 | hdri_cache_path = hdri_cache_dir.joinpath(f"{asset_id}.hdr").as_posix() 133 | 134 | url = file["url"] 135 | size = int(file["size"]) 136 | md5_hash = file["md5"] 137 | hdri_file = cls.download_file(url, asset_id, size, hdri_cache_path, md5_hash) 138 | 139 | return {"hdri": hdri_file} 140 | 141 | @classmethod 142 | def download_file(cls, url: str, name: str, size: int, file_path: str, md5_hash: str = None) -> str: 143 | if Path(file_path).exists(): 144 | print(f"{name} already exists at {file_path}") 145 | return file_path 146 | # 下载文件 147 | print(f"Downloading {name} from {url}") 148 | response = requests.get(url, stream=True) 149 | data = b"" 150 | # 百分比 进度条 151 | for chunk in response.iter_content(chunk_size=1024): 152 | if not chunk: 153 | break 154 | data += chunk 155 | print(f"\rDownloading {name} ({len(data)}/{size} bytes)", end="") 156 | print(f"\nDownload {name} complete") 157 | # 检查文件MD5 158 | print(f"Checking MD5 hash for {name}") 159 | file_md5 = md5(data).hexdigest() 160 | if file_md5 != md5_hash: 161 | print(f"MD5 hash mismatch for {name}") 162 | return "" 163 | print(f"Saving {name} to {file_path}") 164 | with open(file_path, "wb") as f: 165 | f.write(data) 166 | return file_path 167 | 168 | @classmethod 169 | def download_model_files(cls, asset_id: str, expected_resolution: str = "1k") -> dict: 170 | files = cls.fetch_model_files(asset_id, expected_resolution) 171 | if not files: 172 | return {} 173 | blend_cache_dir = Path(gettempdir()) / f"polyhaven_models/{asset_id}/{expected_resolution}" 174 | blend_cache_dir.mkdir(parents=True, exist_ok=True) 175 | blend_cache_path = blend_cache_dir.joinpath(f"{asset_id}.blend").as_posix() 176 | url = files["url"] 177 | size = int(files["size"]) 178 | md5_hash = files["md5"] 179 | blend_file = cls.download_file(url, asset_id, size, blend_cache_path, md5_hash) 180 | out_files = { 181 | "blend": blend_file, 182 | } 183 | 184 | included_files: dict = files.get("include", {}) 185 | for file_name, info in included_files.items(): 186 | # 'textures/Armchair_01_nor_gl_4k.exr' 187 | if not file_name.startswith("textures/"): 188 | print(f"Skipping file {file_name} as it is not in the textures folder.") 189 | continue 190 | save_path = blend_cache_dir.joinpath(file_name) 191 | save_path.parent.mkdir(parents=True, exist_ok=True) 192 | url = info["url"] 193 | size = int(info["size"]) 194 | md5_hash = info["md5"] 195 | texture_file = cls.download_file(url, file_name, size, save_path.as_posix(), md5_hash) 196 | out_files[file_name] = texture_file 197 | return out_files 198 | 199 | 200 | class PolyhavenTools(ToolsPackageBase): 201 | """ 202 | Polyhaven tools. Use the api to download assets.(For commercial use see https://polyhaven.com/our-api) 203 | """ 204 | 205 | # def polyhaven_list_assets(asset_type: str) -> list: 206 | # """ 207 | # List all assets of a given type. 208 | 209 | # Args: 210 | # - asset_type: The type of asset to list. Can be "models", "texture", or "hdris". 211 | # """ 212 | # if asset_type not in ["models", "texture", "hdris"]: 213 | # raise ValueError("Invalid asset type. 
Must be 'model', 'texture', or 'hdris'.") 214 | # assets_list = PolyhavenHelper.fetch_assets_by_type(asset_type) 215 | # for asset in assets_list.values(): 216 | # asset.pop("date_published", "") 217 | # asset.pop("type", "") 218 | # asset.pop("authors", "") 219 | # asset.pop("files_hash", "") 220 | # asset.pop("sponsors", "") 221 | # asset.pop("polycount", "") 222 | # asset.pop("texel_density", "") 223 | # asset.pop("download_count", "") 224 | # asset.pop("thumbnail_url", "") 225 | # asset["bound_box"] = asset.pop("dimensions") 226 | # asset["supported_resolutions"] = [] 227 | # # supported_resolutions = ["1k", "2k", "4k", "8k"] 228 | # max_resolution = asset.pop("max_resolution") 229 | # if isinstance(max_resolution, list): 230 | # max_resolution = min(max_resolution) 231 | # if max_resolution >= 1024: 232 | # asset["supported_resolutions"].append("1k") 233 | # if max_resolution >= 2048: 234 | # asset["supported_resolutions"].append("2k") 235 | # if max_resolution >= 4096: 236 | # asset["supported_resolutions"].append("4k") 237 | # if max_resolution >= 8192: 238 | # asset["supported_resolutions"].append("8k") 239 | 240 | # { 241 | # "name": "Arm Chair 01", 242 | # "categories": ["furniture", "seating"], 243 | # "tags": ["gothic", "vintage", "chair", "furniture", "victorian", "couch", "wood", "varnished", "classic"], 244 | # "max_resolution": [4096, 4096], 245 | # "dimensions": [848.4309017658234, 765.7602727413177, 1065.087635157397], 246 | # # "date_published": 1585605600, 247 | # # "type": 2, 248 | # # "authors": {"Kirill Sannikov": "All"}, 249 | # # "files_hash": "d47080c2004a8b2a222ee7edca7a458dc0cbbecb", 250 | # # "sponsors": ["66627515", "4047949"], 251 | # # "polycount": 5626, 252 | # # "texel_density": 1972.3631956341671, 253 | # # "download_count": 26610, 254 | # # "thumbnail_url": "https://cdn.polyhaven.com/asset_img/thumbs/ArmChair_01.png?width=256&height=256", 255 | # } 256 | # return assets_list 257 | 258 | def polyhaven_search_models(names: list[str] = None, tags: list[str] = None, categories: list[str] = None) -> dict: 259 | """ 260 | Search Polyhaven Online Asset Library for models. If you want to use polyhaven asset, you should search first, then try to fetch it, unless user specifies the name. 261 | 262 | Args: 263 | - names: The names of the models to search for. 264 | - tags: The tags of the models to search for. 265 | - categories: The categories of the models to search for. 266 | 267 | Returns: 268 | - A dictionary containing the results of the search. 
269 |         """
270 |
271 |         assets_list = PolyhavenHelper.fetch_assets_by_type("models")
272 |         names = names or []
273 |         tags = tags or []
274 |         categories = categories or []
275 |         results = {
276 |             "names_query": {
277 |                 "query": names,
278 |                 "results": [],
279 |             },
280 |             "tags_query": {
281 |                 "query": tags,
282 |                 "results": [],
283 |             },
284 |             "categories_query": {
285 |                 "query": categories,
286 |                 "results": [],
287 |             },
288 |         }
289 |
290 |         for search_name in names:
291 |             query = search_name.lower()
292 |             for name, asset in assets_list.items():
293 |                 if query in asset["name"].lower():
294 |                     results["names_query"]["results"].append(name)
295 |         for search_tag in tags:
296 |             query = search_tag.lower()
297 |             for name, asset in assets_list.items():
298 |                 if query in asset["tags"]:
299 |                     results["tags_query"]["results"].append(name)
300 |         for search_category in categories:
301 |             query = search_category.lower()
302 |             for name, asset in assets_list.items():
303 |                 if query in asset["categories"]:
304 |                     results["categories_query"]["results"].append(name)
305 |         if not names:
306 |             results.pop("names_query")
307 |         if not tags:
308 |             results.pop("tags_query")
309 |         if not categories:
310 |             results.pop("categories_query")
311 |         return results
312 |
313 |     def polyhaven_fetch_model_info(asset_id: str) -> dict:
314 |         """
315 |         Before you use a model asset, use this function to get the model info, then determine how or whether to use it.
316 |
317 |         Args:
318 |         - asset_id: The id of the asset to fetch info for.
319 |         """
320 |         assets_list = PolyhavenHelper.fetch_assets_by_type("models")
321 |         asset = assets_list.get(asset_id)
322 |         if not asset:
323 |             raise ValueError(f"Asset with id {asset_id} not found")
324 |         asset.pop("date_published", "")
325 |         asset.pop("type", "")
326 |         asset.pop("authors", "")
327 |         asset.pop("files_hash", "")
328 |         asset.pop("sponsors", "")
329 |         asset.pop("polycount", "")
330 |         asset.pop("texel_density", "")
331 |         asset.pop("download_count", "")
332 |         asset.pop("thumbnail_url", "")
333 |         asset["bound_box"] = asset.pop("dimensions")
334 |         asset["supported_resolutions"] = []
335 |         # supported_resolutions = ["1k", "2k", "4k", "8k"]
336 |         max_resolution = asset.pop("max_resolution")
337 |         if isinstance(max_resolution, list):
338 |             max_resolution = min(max_resolution)
339 |         if max_resolution >= 1024:
340 |             asset["supported_resolutions"].append("1k")
341 |         if max_resolution >= 2048:
342 |             asset["supported_resolutions"].append("2k")
343 |         if max_resolution >= 4096:
344 |             asset["supported_resolutions"].append("4k")
345 |         if max_resolution >= 8192:
346 |             asset["supported_resolutions"].append("8k")
347 |         if max_resolution >= 16384:
348 |             asset["supported_resolutions"].append("16k")
349 |         # Reference: shape of a raw Polyhaven model entry before the cleanup above:
350 |         #     "name": "Arm Chair 01",
351 |         #     "categories": ["furniture", "seating"],
352 |         #     "tags": ["gothic", "vintage", "chair", "furniture", "victorian", "couch", "wood", "varnished", "classic"],
353 |         #     "max_resolution": [4096, 4096],
354 |         #     "dimensions": [848.4309017658234, 765.7602727413177, 1065.087635157397],
355 |         #     "date_published": 1585605600,
356 |         #     "type": 2,
357 |         #     "authors": {"Kirill Sannikov": "All"},
358 |         #     "files_hash": "d47080c2004a8b2a222ee7edca7a458dc0cbbecb",
359 |         #     "sponsors": ["66627515", "4047949"],
360 |         #     "polycount": 5626,
361 |         #     "texel_density": 1972.3631956341671,
362 |         #     "download_count": 26610,
363 |         #     "thumbnail_url": "https://cdn.polyhaven.com/asset_img/thumbs/ArmChair_01.png?width=256&height=256",
364 |
365 |         return asset
366 |
367 |     def polyhaven_use_model_asset(asset_id: str, expected_resolution: str = "1k") -> dict:
368 |         """
369 |         If you choose one model from the search results, you can use this tool to load it into the scene.
370 |
371 |         Args:
372 |         - asset_id: The asset id of the model you want to use.
373 |         - expected_resolution: The expected resolution of the model. Can be "1k", "2k", "4k", "8k", or "16k".
374 |         """
375 |         files = PolyhavenHelper.download_model_files(asset_id, expected_resolution)
376 |         # Reference: shape of the files dict returned by download_model_files:
377 |         #     "blend": "xxx/ArmChair_01.blend",
378 |         #     "textures/Armchair_01_diff_1k.jpg": "xxx/ArmChair_01/textures/Armchair_01_diff_1k.jpg",
379 |         #     "textures/Armchair_01_metallic_1k.exr": "xxx/ArmChair_01/textures/Armchair_01_metallic_1k.exr",
380 |         #     "textures/Armchair_01_nor_gl_1k.exr": "xxx/ArmChair_01/textures/Armchair_01_nor_gl_1k.exr",
381 |         #     "textures/Armchair_01_roughness_1k.jpg": "xxx/ArmChair_01/textures/Armchair_01_roughness_1k.jpg",
382 |
383 |         if "blend" not in files:
384 |             raise ValueError("No blend file found for asset id: " + asset_id)
385 |
386 |         blend_file_path = files["blend"]
387 |         loaded_object_names = []
388 |         import bpy
389 |
390 |         old_objects = set(bpy.data.objects)
391 |
392 |         with bpy.data.libraries.load(blend_file_path) as (data_from, data_to):
393 |             loaded_object_names = list(data_from.objects)
394 |             data_to.objects = data_from.objects
395 |
396 |         new_objects = set(bpy.data.objects) - old_objects
397 |         for obj in new_objects:
398 |             bpy.context.collection.objects.link(obj)
399 |         return {
400 |             "loaded_asset_id": asset_id,
401 |             "resolution": expected_resolution,
402 |             "loaded_objects": loaded_object_names,
403 |         }
404 |
405 |     def polyhaven_search_hdris(names: list[str] = None, tags: list[str] = None, categories: list[str] = None) -> dict:
406 |         """
407 |         Search the Polyhaven Online Asset Library for HDRIs. If you want to use a Polyhaven asset, you should search first, then try to fetch it, unless the user specifies the name.
408 |
409 |         Args:
410 |         - names: The names of the HDRIs to search for.
411 |         - tags: The tags of the HDRIs to search for.
412 |         - categories: The categories of the HDRIs to search for.
413 |
414 |         Returns:
415 |         - A dictionary containing the results of the search.
416 |         """
417 |
418 |         assets_list = PolyhavenHelper.fetch_assets_by_type("hdris")
419 |         names = names or []
420 |         tags = tags or []
421 |         categories = categories or []
422 |         results = {
423 |             "names_query": {
424 |                 "query": names,
425 |                 "results": [],
426 |             },
427 |             "tags_query": {
428 |                 "query": tags,
429 |                 "results": [],
430 |             },
431 |             "categories_query": {
432 |                 "query": categories,
433 |                 "results": [],
434 |             },
435 |         }
436 |
437 |         for search_name in names:
438 |             query = search_name.lower()
439 |             for name, asset in assets_list.items():
440 |                 if query in asset["name"].lower():
441 |                     results["names_query"]["results"].append(name)
442 |         for search_tag in tags:
443 |             query = search_tag.lower()
444 |             for name, asset in assets_list.items():
445 |                 if query in asset["tags"]:
446 |                     results["tags_query"]["results"].append(name)
447 |         for search_category in categories:
448 |             query = search_category.lower()
449 |             for name, asset in assets_list.items():
450 |                 if query in asset["categories"]:
451 |                     results["categories_query"]["results"].append(name)
452 |         if not names:
453 |             results.pop("names_query")
454 |         if not tags:
455 |             results.pop("tags_query")
456 |         if not categories:
457 |             results.pop("categories_query")
458 |         return results
459 |
460 |     def polyhaven_fetch_hdri_info(asset_id: str) -> dict:
461 |         """
462 |         Before you use an HDRI asset, use this function to get the HDRI info, then determine how or whether to use it.
463 |
464 |         Args:
465 |         - asset_id: The id of the asset to fetch info for.
466 |         """
467 |         assets_list = PolyhavenHelper.fetch_assets_by_type("hdris")
468 |         asset = assets_list.get(asset_id)
469 |         if not asset:
470 |             raise ValueError(f"Asset with id {asset_id} not found")
471 |
472 |         max_resolution = asset.pop("max_resolution")
473 |         asset = {
474 |             "name": asset.get("name"),
475 |             "tags": asset.get("tags", []),
476 |             "categories": asset.get("categories", []),
477 |             "supported_resolutions": [],
478 |         }
479 |         if isinstance(max_resolution, list):
480 |             max_resolution = max(max_resolution)  # Unlike models, this takes the maximum value. Intentional?
481 |         if max_resolution >= 1024:
482 |             asset["supported_resolutions"].append("1k")
483 |         if max_resolution >= 2048:
484 |             asset["supported_resolutions"].append("2k")
485 |         if max_resolution >= 4096:
486 |             asset["supported_resolutions"].append("4k")
487 |         if max_resolution >= 8192:
488 |             asset["supported_resolutions"].append("8k")
489 |         if max_resolution >= 16384:
490 |             asset["supported_resolutions"].append("16k")
491 |         if max_resolution >= 32768:
492 |             asset["supported_resolutions"].append("32k")
493 |         # Reference: shape of a raw Polyhaven HDRI entry before the cleanup above:
494 |         #     "name": "Abandoned Bakery",
495 |         #     "tags": ["abandoned", "empty", "industrial", "windows", "bare", "rubble", "brick", "concrete", "backplates"],
496 |         #     "categories": ["natural light", "artificial light", "urban", "indoor", "high contrast"],
497 |         #     "max_resolution": [16384, 8192],
498 |         #     "evs_cap": 16,
499 |         #     "type": 0,
500 |         #     "whitebalance": 4950,
501 |         #     "backplates": True,
502 |         #     "date_taken": 1662805680,
503 |         #     "coords": [50.786873, 34.774073],
504 |         #     "info": None,
505 |         #     "authors": {"Sergej Majboroda": "All"},
506 |         #     "date_published": 1663804800,
507 |         #     "files_hash": "d81af70dd51ebb704af086506e0a9b92bb5d7b84",
508 |         #     "download_count": 15640,
509 |         #     "thumbnail_url": "https://cdn.polyhaven.com/asset_img/thumbs/abandoned_bakery.png?width=256&height=256",
510 |
511 |         return asset
512 |
513 |     def polyhaven_use_hdri(asset_id: str, expected_resolution: str) -> dict:
514 |         """
515 |         If you choose one HDRI from the search results, you can use this tool to load it into the scene.
516 |         If the user does not specify a resolution and does not hint at HQ or LQ, use 1k or 2k by default.
517 |
518 |         Args:
519 |         - asset_id: The asset id of the HDRI you want to use.
520 |         - expected_resolution: The expected resolution of the HDRI. Can be "1k", "2k", "4k", "8k", "16k", or "32k".
521 |         """
522 |         if expected_resolution not in ["1k", "2k", "4k", "8k", "16k", "32k"]:
523 |             raise ValueError(f"Invalid resolution {expected_resolution}. Expected one of ['1k', '2k', '4k', '8k', '16k', '32k']")
524 |         file = PolyhavenHelper.download_hdri_file(asset_id, expected_resolution)
525 |         if not file:
526 |             raise ValueError(f"File for asset {asset_id} and resolution {expected_resolution} not found")
527 |         hdri_file = file["hdri"]
528 |         if not Path(hdri_file).exists():
529 |             raise ValueError(f"HDRI file {hdri_file} not found")
530 |         # Load the HDRI as the world environment texture
531 |         import bpy
532 |
533 |         world = bpy.data.worlds.new(name=asset_id)
534 |         world.use_nodes = True
535 |         from ..utils import NodeTreeUtil
536 |
537 |         output = NodeTreeUtil.find_node_by_type(world.node_tree, "OUTPUT_WORLD")
538 |         if not output:
539 |             output = world.node_tree.nodes.new("ShaderNodeOutputWorld")
540 |         background = NodeTreeUtil.find_node_by_type(world.node_tree, "BACKGROUND")
541 |         if not background:
542 |             background = world.node_tree.nodes.new("ShaderNodeBackground")
543 |         world.node_tree.links.new(background.outputs["Background"], output.inputs["Surface"])
544 |         hdri = NodeTreeUtil.find_node_by_type(world.node_tree, "TEX_ENVIRONMENT")
545 |         if not hdri:
546 |             hdri = world.node_tree.nodes.new("ShaderNodeTexEnvironment")
547 |         world.node_tree.links.new(hdri.outputs["Color"], background.inputs["Color"])
548 |         hdri.image = bpy.data.images.load(filepath=hdri_file)
549 |         bpy.context.scene.world = world
550 |         return file
551 |
552 |     # TODO
553 |     # def polyhaven_get_all_tags(asset_type: str) -> list:
554 |     #     assets_list = PolyhavenHelper.fetch_assets_by_type(asset_type)
555 |     #     tags = set()
556 |     #     for asset in assets_list.values():
557 |     #         tags.update(asset["tags"])
558 |     #     return list(tags) # models: 848
559 |
560 |     # def polyhaven_get_all_categories(asset_type: str) -> list:
561 |     #     assets_list = PolyhavenHelper.fetch_assets_by_type(asset_type)
562 |     #     categories = set()
563 |     #     for asset in assets_list.values():
564 |     #         categories.update(asset["categories"])
565 |     #     return list(categories) # models: 40
566 |
--------------------------------------------------------------------------------
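
One pattern worth noting in polyhaven_tools.py: the "supported_resolutions" if-ladder is repeated in polyhaven_fetch_model_info, polyhaven_fetch_hdri_info, and the commented-out polyhaven_list_assets, with one behavioral difference flagged by the inline comment: models take the smaller axis of max_resolution, HDRIs the larger one. Below is a minimal sketch of that rule as a standalone helper; the function name supported_resolutions and the RESOLUTION_STEPS table are illustrative assumptions, not part of the add-on.

RESOLUTION_STEPS = [("1k", 1024), ("2k", 2048), ("4k", 4096), ("8k", 8192), ("16k", 16384), ("32k", 32768)]


def supported_resolutions(max_resolution, use_max: bool = False) -> list[str]:
    # Polyhaven reports max_resolution as [width, height]; models use the smaller
    # axis (min), HDRIs the larger one (max), mirroring the ladders above.
    if isinstance(max_resolution, list):
        max_resolution = max(max_resolution) if use_max else min(max_resolution)
    return [label for label, pixels in RESOLUTION_STEPS if max_resolution >= pixels]


# Example: a 4096x4096 model supports 1k/2k/4k; a 16384x8192 HDRI supports up to 16k.
assert supported_resolutions([4096, 4096]) == ["1k", "2k", "4k"]
assert supported_resolutions([16384, 8192], use_max=True) == ["1k", "2k", "4k", "8k", "16k"]

Collapsing the three copies into a helper like this would keep the model/HDRI min-vs-max distinction in one auditable place.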