from blender_mcp.server import main as _run_server


def main():
    """Entry point for the blender-mcp package: delegate to the MCP server."""
    _run_server()


if __name__ == "__main__":
    main()
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Python-generated files 2 | __pycache__/ 3 | *.py[oc] 4 | build/ 5 | dist/ 6 | wheels/ 7 | *.egg-info 8 | 9 | # Virtual environments 10 | .venv 11 | 12 | # macOS 13 | .DS_Store 14 | 15 | # Local config secrets 16 | src/blender_mcp/config.py 17 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "blender-mcp" 3 | version = "1.4.0" 4 | description = "Blender integration through the Model Context Protocol" 5 | readme = "README.md" 6 | requires-python = ">=3.10" 7 | authors = [ 8 | {name = "Your Name", email = "your.email@example.com"} 9 | ] 10 | license = {text = "MIT"} 11 | classifiers = [ 12 | "Programming Language :: Python :: 3", 13 | "License :: OSI Approved :: MIT License", 14 | "Operating System :: OS Independent", 15 | ] 16 | dependencies = [ 17 | "mcp[cli]>=1.3.0", 18 | "supabase>=2.0.0", 19 | "tomli>=2.0.0", 20 | ] 21 | 22 | [project.scripts] 23 | blender-mcp = "blender_mcp.server:main" 24 | 25 | [build-system] 26 | requires = ["setuptools>=61.0", "wheel"] 27 | build-backend = "setuptools.build_meta" 28 | 29 | [tool.setuptools] 30 | package-dir = {"" = "src"} 31 | 32 | [project.urls] 33 | "Homepage" = "https://github.com/yourusername/blender-mcp" 34 | "Bug Tracker" = "https://github.com/yourusername/blender-mcp/issues" 35 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Siddharth Ahuja 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the 
Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /src/blender_mcp/telemetry_decorator.py: -------------------------------------------------------------------------------- 1 | """ 2 | Telemetry decorator for Blender MCP tools 3 | """ 4 | 5 | import functools 6 | import inspect 7 | import logging 8 | import time 9 | from typing import Callable, Any 10 | 11 | from .telemetry import record_tool_usage 12 | 13 | logger = logging.getLogger("blender-mcp-telemetry") 14 | 15 | 16 | def telemetry_tool(tool_name: str): 17 | """Decorator to add telemetry tracking to MCP tools""" 18 | def decorator(func: Callable) -> Callable: 19 | @functools.wraps(func) 20 | def sync_wrapper(*args, **kwargs) -> Any: 21 | start_time = time.time() 22 | success = False 23 | error = None 24 | 25 | try: 26 | result = func(*args, **kwargs) 27 | success = True 28 | return result 29 | except Exception as e: 30 | error = str(e) 31 | raise 32 | finally: 33 | duration_ms = (time.time() - start_time) * 1000 34 | try: 35 | record_tool_usage(tool_name, success, 
duration_ms, error) 36 | except Exception as log_error: 37 | logger.debug(f"Failed to record telemetry: {log_error}") 38 | 39 | @functools.wraps(func) 40 | async def async_wrapper(*args, **kwargs) -> Any: 41 | start_time = time.time() 42 | success = False 43 | error = None 44 | 45 | try: 46 | result = await func(*args, **kwargs) 47 | success = True 48 | return result 49 | except Exception as e: 50 | error = str(e) 51 | raise 52 | finally: 53 | duration_ms = (time.time() - start_time) * 1000 54 | try: 55 | record_tool_usage(tool_name, success, duration_ms, error) 56 | except Exception as log_error: 57 | logger.debug(f"Failed to record telemetry: {log_error}") 58 | 59 | # Return appropriate wrapper based on function type 60 | if inspect.iscoroutinefunction(func): 61 | return async_wrapper 62 | else: 63 | return sync_wrapper 64 | 65 | return decorator 66 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # BlenderMCP - Blender Model Context Protocol Integration 4 | 5 | BlenderMCP connects Blender to Claude AI through the Model Context Protocol (MCP), allowing Claude to directly interact with and control Blender. This integration enables prompt assisted 3D modeling, scene creation, and manipulation. 6 | 7 | **We have no official website. Any website you see online is unofficial and has no affiliation with this project. 
Use them at your own risk.** 8 | 9 | [Full tutorial](https://www.youtube.com/watch?v=lCyQ717DuzQ) 10 | 11 | ### Join the Community 12 | 13 | Give feedback, get inspired, and build on top of the MCP: [Discord](https://discord.gg/z5apgR8TFU) 14 | 15 | ### Supporters 16 | 17 | [CodeRabbit](https://www.coderabbit.ai/) 18 | 19 | [Satish Goda](https://github.com/satishgoda) 20 | 21 | **All supporters:** 22 | 23 | [Support this project](https://github.com/sponsors/ahujasid) 24 | 25 | ## Release notes (1.4.0) 26 | - Added Hunyuan3D support 27 | 28 | 29 | ### Previously added features: 30 | - View screenshots for Blender viewport to better understand the scene 31 | - Search and download Sketchfab models 32 | - Support for Poly Haven assets through their API 33 | - Support to generate 3D models using Hyper3D Rodin 34 | - Run Blender MCP on a remote host 35 | - Telemetry for tools executed (completely anonymous) 36 | 37 | ### Installating a new version (existing users) 38 | - For newcomers, you can go straight to Installation. For existing users, see the points below 39 | - Download the latest addon.py file and replace the older one, then add it to Blender 40 | - Delete the MCP server from Claude and add it back again, and you should be good to go! 41 | 42 | 43 | ## Features 44 | 45 | - **Two-way communication**: Connect Claude AI to Blender through a socket-based server 46 | - **Object manipulation**: Create, modify, and delete 3D objects in Blender 47 | - **Material control**: Apply and modify materials and colors 48 | - **Scene inspection**: Get detailed information about the current Blender scene 49 | - **Code execution**: Run arbitrary Python code in Blender from Claude 50 | 51 | ## Components 52 | 53 | The system consists of two main components: 54 | 55 | 1. **Blender Addon (`addon.py`)**: A Blender addon that creates a socket server within Blender to receive and execute commands 56 | 2. 
**MCP Server (`src/blender_mcp/server.py`)**: A Python server that implements the Model Context Protocol and connects to the Blender addon 57 | 58 | ## Installation 59 | 60 | 61 | ### Prerequisites 62 | 63 | - Blender 3.0 or newer 64 | - Python 3.10 or newer 65 | - uv package manager: 66 | 67 | **If you're on Mac, please install uv as** 68 | ```bash 69 | brew install uv 70 | ``` 71 | **On Windows** 72 | ```powershell 73 | powershell -c "irm https://astral.sh/uv/install.ps1 | iex" 74 | ``` 75 | and then add uv to the user path in Windows (you may need to restart Claude Desktop after): 76 | ```powershell 77 | $localBin = "$env:USERPROFILE\.local\bin" 78 | $userPath = [Environment]::GetEnvironmentVariable("Path", "User") 79 | [Environment]::SetEnvironmentVariable("Path", "$userPath;$localBin", "User") 80 | ``` 81 | 82 | Otherwise installation instructions are on their website: [Install uv](https://docs.astral.sh/uv/getting-started/installation/) 83 | 84 | **⚠️ Do not proceed before installing UV** 85 | 86 | ### Environment Variables 87 | 88 | The following environment variables can be used to configure the Blender connection: 89 | 90 | - `BLENDER_HOST`: Host address for Blender socket server (default: "localhost") 91 | - `BLENDER_PORT`: Port number for Blender socket server (default: 9876) 92 | 93 | Example: 94 | ```bash 95 | export BLENDER_HOST='host.docker.internal' 96 | export BLENDER_PORT=9876 97 | ``` 98 | 99 | ### Claude for Desktop Integration 100 | 101 | [Watch the setup instruction video](https://www.youtube.com/watch?v=neoK_WMq92g) (Assuming you have already installed uv) 102 | 103 | Go to Claude > Settings > Developer > Edit Config > claude_desktop_config.json to include the following: 104 | 105 | ```json 106 | { 107 | "mcpServers": { 108 | "blender": { 109 | "command": "uvx", 110 | "args": [ 111 | "blender-mcp" 112 | ] 113 | } 114 | } 115 | } 116 | ``` 117 | 118 | ### Cursor integration 119 | 120 | [![Install MCP 
Server](https://cursor.com/deeplink/mcp-install-dark.svg)](https://cursor.com/install-mcp?name=blender&config=eyJjb21tYW5kIjoidXZ4IGJsZW5kZXItbWNwIn0%3D) 121 | 122 | For Mac users, go to Settings > MCP and paste the following 123 | 124 | - To use as a global server, use "add new global MCP server" button and paste 125 | - To use as a project specific server, create `.cursor/mcp.json` in the root of the project and paste 126 | 127 | 128 | ```json 129 | { 130 | "mcpServers": { 131 | "blender": { 132 | "command": "uvx", 133 | "args": [ 134 | "blender-mcp" 135 | ] 136 | } 137 | } 138 | } 139 | ``` 140 | 141 | For Windows users, go to Settings > MCP > Add Server, add a new server with the following settings: 142 | 143 | ```json 144 | { 145 | "mcpServers": { 146 | "blender": { 147 | "command": "cmd", 148 | "args": [ 149 | "/c", 150 | "uvx", 151 | "blender-mcp" 152 | ] 153 | } 154 | } 155 | } 156 | ``` 157 | 158 | [Cursor setup video](https://www.youtube.com/watch?v=wgWsJshecac) 159 | 160 | **⚠️ Only run one instance of the MCP server (either on Cursor or Claude Desktop), not both** 161 | 162 | ### Visual Studio Code Integration 163 | 164 | _Prerequisites_: Make sure you have [Visual Studio Code](https://code.visualstudio.com/) installed before proceeding. 165 | 166 | [![Install in VS Code](https://img.shields.io/badge/VS_Code-Install_blender--mcp_server-0098FF?style=flat-square&logo=visualstudiocode&logoColor=ffffff)](vscode:mcp/install?%7B%22name%22%3A%22blender-mcp%22%2C%22type%22%3A%22stdio%22%2C%22command%22%3A%22uvx%22%2C%22args%22%3A%5B%22blender-mcp%22%5D%7D) 167 | 168 | ### Installing the Blender Addon 169 | 170 | 1. Download the `addon.py` file from this repo 171 | 1. Open Blender 172 | 2. Go to Edit > Preferences > Add-ons 173 | 3. Click "Install..." and select the `addon.py` file 174 | 4. 
Enable the addon by checking the box next to "Interface: Blender MCP" 175 | 176 | 177 | ## Usage 178 | 179 | ### Starting the Connection 180 | ![BlenderMCP in the sidebar](assets/addon-instructions.png) 181 | 182 | 1. In Blender, go to the 3D View sidebar (press N if not visible) 183 | 2. Find the "BlenderMCP" tab 184 | 3. Turn on the Poly Haven checkbox if you want assets from their API (optional) 185 | 4. Click "Connect to Claude" 186 | 5. Make sure the MCP server is running in your terminal 187 | 188 | ### Using with Claude 189 | 190 | Once the config file has been set on Claude, and the addon is running on Blender, you will see a hammer icon with tools for the Blender MCP. 191 | 192 | ![BlenderMCP in the sidebar](assets/hammer-icon.png) 193 | 194 | #### Capabilities 195 | 196 | - Get scene and object information 197 | - Create, delete and modify shapes 198 | - Apply or create materials for objects 199 | - Execute any Python code in Blender 200 | - Download the right models, assets and HDRIs through [Poly Haven](https://polyhaven.com/) 201 | - AI generated 3D models through [Hyper3D Rodin](https://hyper3d.ai/) 202 | 203 | 204 | ### Example Commands 205 | 206 | Here are some examples of what you can ask Claude to do: 207 | 208 | - "Create a low poly scene in a dungeon, with a dragon guarding a pot of gold" [Demo](https://www.youtube.com/watch?v=DqgKuLYUv00) 209 | - "Create a beach vibe using HDRIs, textures, and models like rocks and vegetation from Poly Haven" [Demo](https://www.youtube.com/watch?v=I29rn92gkC4) 210 | - Give a reference image, and create a Blender scene out of it [Demo](https://www.youtube.com/watch?v=FDRb03XPiRo) 211 | - "Generate a 3D model of a garden gnome through Hyper3D" 212 | - "Get information about the current scene, and make a threejs sketch from it" [Demo](https://www.youtube.com/watch?v=jxbNI5L7AH8) 213 | - "Make this car red and metallic" 214 | - "Create a sphere and place it above the cube" 215 | - "Make the lighting like a studio" 
216 | - "Point the camera at the scene, and make it isometric" 217 | 218 | ## Hyper3D integration 219 | 220 | Hyper3D's free trial key allows you to generate a limited number of models per day. If the daily limit is reached, you can wait for the next day's reset or obtain your own key from hyper3d.ai and fal.ai. 221 | 222 | ## Troubleshooting 223 | 224 | - **Connection issues**: Make sure the Blender addon server is running, and the MCP server is configured on Claude, DO NOT run the uvx command in the terminal. Sometimes, the first command won't go through but after that it starts working. 225 | - **Timeout errors**: Try simplifying your requests or breaking them into smaller steps 226 | - **Poly Haven integration**: Claude is sometimes erratic with its behaviour 227 | - **Have you tried turning it off and on again?**: If you're still having connection errors, try restarting both Claude and the Blender server 228 | 229 | 230 | ## Technical Details 231 | 232 | ### Communication Protocol 233 | 234 | The system uses a simple JSON-based protocol over TCP sockets: 235 | 236 | - **Commands** are sent as JSON objects with a `type` and optional `params` 237 | - **Responses** are JSON objects with a `status` and `result` or `message` 238 | 239 | ## Limitations & Security Considerations 240 | 241 | - The `execute_blender_code` tool allows running arbitrary Python code in Blender, which can be powerful but potentially dangerous. Use with caution in production environments. ALWAYS save your work before using it. 242 | - Poly Haven requires downloading models, textures, and HDRI images. If you do not want to use it, please turn it off in the checkbox in Blender. 243 | - Complex operations might need to be broken down into smaller steps 244 | 245 | 246 | ## Contributing 247 | 248 | Contributions are welcome! Please feel free to submit a Pull Request. 249 | 250 | ## Disclaimer 251 | 252 | This is a third-party integration and not made by Blender. 
def get_package_version() -> str:
    """Return the package version declared in pyproject.toml.

    Resolves the repository root relative to this file and reads
    ``[project].version``. Falls back to ``"unknown"`` when pyproject.toml
    is absent (e.g. when installed as a wheel), no TOML parser is
    available, or the file cannot be parsed — version reporting must
    never break startup.
    """
    try:
        # This file lives at <root>/src/blender_mcp/telemetry.py, so the repo
        # root is parents[2]. The previous four chained `.parent` hops landed
        # one directory *above* the repository, so the file was never found.
        pyproject_path = Path(__file__).resolve().parents[2] / "pyproject.toml"
        if tomli and pyproject_path.exists():
            with pyproject_path.open("rb") as f:
                data = tomli.load(f)
            return data["project"]["version"]
    except Exception:
        # Best-effort only; fall through to the sentinel below.
        pass
    return "unknown"
class TelemetryCollector:
    """Collects anonymous telemetry events and ships them to Supabase.

    Events are queued and delivered from a single daemon worker thread so
    telemetry can never block or crash the MCP server; every failure along
    the way is logged at DEBUG and otherwise ignored.
    """

    def __init__(self):
        """Initialize the collector, its identifiers, and the send worker."""
        # Import config here to avoid circular imports
        from .config import telemetry_config
        self.config = telemetry_config

        # Environment variables override the packaged configuration.
        if self._is_disabled():
            self.config.enabled = False
            logger.info("Telemetry disabled via environment variable")

        # Anonymous, persistent per-install ID plus a per-process session ID.
        self._customer_uuid: str = self._get_or_create_uuid()
        self._session_id: str = str(uuid.uuid4())

        # Rate limiting tracking (reserved; not consulted anywhere yet).
        self._event_timestamps: list[float] = []
        self._rate_limit_lock = threading.Lock()

        # Supabase client is created lazily — and reused — by the worker.
        self._client: "Client | None" = None

        # Bounded queue keeps record_event() non-blocking; a daemon worker
        # cannot keep the interpreter alive on shutdown.
        self._queue: "queue.Queue[TelemetryEvent]" = queue.Queue(maxsize=1000)
        self._worker: threading.Thread = threading.Thread(
            target=self._worker_loop, daemon=True
        )
        self._worker.start()

        # Routine init belongs at INFO, not WARNING, and the customer UUID —
        # anonymous but stable — is deliberately kept out of the logs.
        logger.info(
            f"Telemetry initialized (enabled={self.config.enabled}, "
            f"has_supabase={HAS_SUPABASE})"
        )

    def _is_disabled(self) -> bool:
        """Return True if any supported opt-out environment variable is set."""
        disable_vars = [
            "DISABLE_TELEMETRY",
            "BLENDER_MCP_DISABLE_TELEMETRY",
            "MCP_DISABLE_TELEMETRY"
        ]

        for var in disable_vars:
            if os.environ.get(var, "").lower() in ("true", "1", "yes", "on"):
                return True
        return False

    def _get_data_directory(self) -> Path:
        """Return (creating if needed) the per-OS directory for telemetry state."""
        if sys.platform == "win32":
            base_dir = Path(os.environ.get('APPDATA', Path.home() / 'AppData' / 'Roaming'))
        elif sys.platform == "darwin":
            base_dir = Path.home() / 'Library' / 'Application Support'
        else:  # Linux and other POSIX
            base_dir = Path(os.environ.get('XDG_DATA_HOME', Path.home() / '.local' / 'share'))

        data_dir = base_dir / 'BlenderMCP'
        data_dir.mkdir(parents=True, exist_ok=True)
        return data_dir

    def _get_or_create_uuid(self) -> str:
        """Load the persisted anonymous customer UUID, creating one if needed.

        Falls back to an ephemeral (per-process) UUID when the data directory
        is not writable, so telemetry keeps working without persistence.
        """
        try:
            data_dir = self._get_data_directory()
            uuid_file = data_dir / "customer_uuid.txt"

            if uuid_file.exists():
                customer_uuid = uuid_file.read_text(encoding="utf-8").strip()
                if customer_uuid:
                    return customer_uuid

            # Create new UUID
            customer_uuid = str(uuid.uuid4())
            uuid_file.write_text(customer_uuid, encoding="utf-8")

            # Owner-only permissions on Unix: the ID is anonymous but stable.
            if sys.platform != "win32":
                os.chmod(uuid_file, 0o600)

            return customer_uuid
        except Exception as e:
            logger.debug(f"Failed to persist UUID: {e}")
            return str(uuid.uuid4())

    def record_event(
        self,
        event_type: EventType,
        tool_name: str | None = None,
        prompt_text: str | None = None,
        success: bool = True,
        duration_ms: float | None = None,
        error_message: str | None = None,
        blender_version: str | None = None,
        metadata: dict[str, Any] | None = None
    ):
        """Record a telemetry event (non-blocking).

        Silently drops the event when telemetry is disabled, Supabase is not
        installed, or the background queue is full.
        """
        if not self.config.enabled:
            logger.debug(f"Telemetry disabled, skipping event: {event_type}")
            return
        if not HAS_SUPABASE:
            logger.debug(f"Supabase not available, skipping event: {event_type}")
            return

        # Per-event bookkeeping belongs at DEBUG, not WARNING.
        logger.debug(f"Recording telemetry event: {event_type}, tool={tool_name}")

        # Respect the prompt-collection opt-in and truncate long prompts.
        if prompt_text and not self.config.collect_prompts:
            prompt_text = None  # Don't collect prompts unless explicitly enabled
        elif prompt_text and len(prompt_text) > self.config.max_prompt_length:
            prompt_text = prompt_text[:self.config.max_prompt_length] + "..."

        # Truncate error messages to bound stored payload size.
        if error_message and len(error_message) > 200:
            error_message = error_message[:200] + "..."

        event = TelemetryEvent(
            event_type=event_type,
            customer_uuid=self._customer_uuid,
            session_id=self._session_id,
            timestamp=time.time(),
            version=MCP_VERSION,
            platform=platform.system().lower(),
            tool_name=tool_name,
            prompt_text=prompt_text,
            success=success,
            duration_ms=duration_ms,
            error_message=error_message,
            blender_version=blender_version,
            metadata=metadata
        )

        # Enqueue for the background worker; drop rather than block.
        try:
            self._queue.put_nowait(event)
        except queue.Full:
            logger.debug("Telemetry queue full, dropping event")

    def _worker_loop(self):
        """Background worker: drain the queue and send events forever."""
        while True:
            event = self._queue.get()
            try:
                self._send_event(event)
            except Exception as e:
                logger.debug(f"Telemetry send failed: {e}")
            finally:
                with contextlib.suppress(Exception):
                    self._queue.task_done()

    def _send_event(self, event: TelemetryEvent):
        """Send one event to Supabase (called from the worker thread only)."""
        if not HAS_SUPABASE:
            return

        try:
            if self._client is None:
                # Build the client once and reuse it for subsequent events;
                # previously a fresh client was constructed for every send.
                from supabase import ClientOptions

                options = ClientOptions(
                    auto_refresh_token=False,
                    persist_session=False
                )
                self._client = create_client(
                    self.config.supabase_url,
                    self.config.supabase_anon_key,
                    options=options
                )

            # Prepare data for insertion
            data = {
                "customer_uuid": event.customer_uuid,
                "session_id": event.session_id,
                "event_type": event.event_type.value,
                "tool_name": event.tool_name,
                "prompt_text": event.prompt_text,
                "success": event.success,
                "duration_ms": event.duration_ms,
                "error_message": event.error_message,
                "version": event.version,
                "platform": event.platform,
                "blender_version": event.blender_version,
                "metadata": event.metadata or {},
                "event_timestamp": int(event.timestamp),
            }

            self._client.table("telemetry_events").insert(data, returning="minimal").execute()
            logger.debug(f"Telemetry sent: {event.event_type}")

        except Exception as e:
            logger.debug(f"Failed to send telemetry: {e}")
            # Discard a possibly-broken client so the next event reconnects.
            self._client = None
from mcp.server.fastmcp import FastMCP, Context, Image 3 | import socket 4 | import json 5 | import asyncio 6 | import logging 7 | import tempfile 8 | from dataclasses import dataclass 9 | from contextlib import asynccontextmanager 10 | from typing import AsyncIterator, Dict, Any, List 11 | import os 12 | from pathlib import Path 13 | import base64 14 | from urllib.parse import urlparse 15 | 16 | # Import telemetry 17 | from .telemetry import record_startup, get_telemetry 18 | from .telemetry_decorator import telemetry_tool 19 | 20 | # Configure logging 21 | logging.basicConfig(level=logging.INFO, 22 | format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') 23 | logger = logging.getLogger("BlenderMCPServer") 24 | 25 | # Default configuration 26 | DEFAULT_HOST = "localhost" 27 | DEFAULT_PORT = 9876 28 | 29 | @dataclass 30 | class BlenderConnection: 31 | host: str 32 | port: int 33 | sock: socket.socket = None # Changed from 'socket' to 'sock' to avoid naming conflict 34 | 35 | def connect(self) -> bool: 36 | """Connect to the Blender addon socket server""" 37 | if self.sock: 38 | return True 39 | 40 | try: 41 | self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 42 | self.sock.connect((self.host, self.port)) 43 | logger.info(f"Connected to Blender at {self.host}:{self.port}") 44 | return True 45 | except Exception as e: 46 | logger.error(f"Failed to connect to Blender: {str(e)}") 47 | self.sock = None 48 | return False 49 | 50 | def disconnect(self): 51 | """Disconnect from the Blender addon""" 52 | if self.sock: 53 | try: 54 | self.sock.close() 55 | except Exception as e: 56 | logger.error(f"Error disconnecting from Blender: {str(e)}") 57 | finally: 58 | self.sock = None 59 | 60 | def receive_full_response(self, sock, buffer_size=8192): 61 | """Receive the complete response, potentially in multiple chunks""" 62 | chunks = [] 63 | # Use a consistent timeout value that matches the addon's timeout 64 | sock.settimeout(180.0) # Match the addon's 
timeout 65 | 66 | try: 67 | while True: 68 | try: 69 | chunk = sock.recv(buffer_size) 70 | if not chunk: 71 | # If we get an empty chunk, the connection might be closed 72 | if not chunks: # If we haven't received anything yet, this is an error 73 | raise Exception("Connection closed before receiving any data") 74 | break 75 | 76 | chunks.append(chunk) 77 | 78 | # Check if we've received a complete JSON object 79 | try: 80 | data = b''.join(chunks) 81 | json.loads(data.decode('utf-8')) 82 | # If we get here, it parsed successfully 83 | logger.info(f"Received complete response ({len(data)} bytes)") 84 | return data 85 | except json.JSONDecodeError: 86 | # Incomplete JSON, continue receiving 87 | continue 88 | except socket.timeout: 89 | # If we hit a timeout during receiving, break the loop and try to use what we have 90 | logger.warning("Socket timeout during chunked receive") 91 | break 92 | except (ConnectionError, BrokenPipeError, ConnectionResetError) as e: 93 | logger.error(f"Socket connection error during receive: {str(e)}") 94 | raise # Re-raise to be handled by the caller 95 | except socket.timeout: 96 | logger.warning("Socket timeout during chunked receive") 97 | except Exception as e: 98 | logger.error(f"Error during receive: {str(e)}") 99 | raise 100 | 101 | # If we get here, we either timed out or broke out of the loop 102 | # Try to use what we have 103 | if chunks: 104 | data = b''.join(chunks) 105 | logger.info(f"Returning data after receive completion ({len(data)} bytes)") 106 | try: 107 | # Try to parse what we have 108 | json.loads(data.decode('utf-8')) 109 | return data 110 | except json.JSONDecodeError: 111 | # If we can't parse it, it's incomplete 112 | raise Exception("Incomplete JSON response received") 113 | else: 114 | raise Exception("No data received") 115 | 116 | def send_command(self, command_type: str, params: Dict[str, Any] = None) -> Dict[str, Any]: 117 | """Send a command to Blender and return the response""" 118 | if not self.sock 
and not self.connect(): 119 | raise ConnectionError("Not connected to Blender") 120 | 121 | command = { 122 | "type": command_type, 123 | "params": params or {} 124 | } 125 | 126 | try: 127 | # Log the command being sent 128 | logger.info(f"Sending command: {command_type} with params: {params}") 129 | 130 | # Send the command 131 | self.sock.sendall(json.dumps(command).encode('utf-8')) 132 | logger.info(f"Command sent, waiting for response...") 133 | 134 | # Set a timeout for receiving - use the same timeout as in receive_full_response 135 | self.sock.settimeout(180.0) # Match the addon's timeout 136 | 137 | # Receive the response using the improved receive_full_response method 138 | response_data = self.receive_full_response(self.sock) 139 | logger.info(f"Received {len(response_data)} bytes of data") 140 | 141 | response = json.loads(response_data.decode('utf-8')) 142 | logger.info(f"Response parsed, status: {response.get('status', 'unknown')}") 143 | 144 | if response.get("status") == "error": 145 | logger.error(f"Blender error: {response.get('message')}") 146 | raise Exception(response.get("message", "Unknown error from Blender")) 147 | 148 | return response.get("result", {}) 149 | except socket.timeout: 150 | logger.error("Socket timeout while waiting for response from Blender") 151 | # Don't try to reconnect here - let the get_blender_connection handle reconnection 152 | # Just invalidate the current socket so it will be recreated next time 153 | self.sock = None 154 | raise Exception("Timeout waiting for Blender response - try simplifying your request") 155 | except (ConnectionError, BrokenPipeError, ConnectionResetError) as e: 156 | logger.error(f"Socket connection error: {str(e)}") 157 | self.sock = None 158 | raise Exception(f"Connection to Blender lost: {str(e)}") 159 | except json.JSONDecodeError as e: 160 | logger.error(f"Invalid JSON response from Blender: {str(e)}") 161 | # Try to log what was received 162 | if 'response_data' in locals() and 
response_data: 163 | logger.error(f"Raw response (first 200 bytes): {response_data[:200]}") 164 | raise Exception(f"Invalid response from Blender: {str(e)}") 165 | except Exception as e: 166 | logger.error(f"Error communicating with Blender: {str(e)}") 167 | # Don't try to reconnect here - let the get_blender_connection handle reconnection 168 | self.sock = None 169 | raise Exception(f"Communication error with Blender: {str(e)}") 170 | 171 | @asynccontextmanager 172 | async def server_lifespan(server: FastMCP) -> AsyncIterator[Dict[str, Any]]: 173 | """Manage server startup and shutdown lifecycle""" 174 | # We don't need to create a connection here since we're using the global connection 175 | # for resources and tools 176 | 177 | try: 178 | # Just log that we're starting up 179 | logger.info("BlenderMCP server starting up") 180 | 181 | # Record startup event for telemetry 182 | try: 183 | record_startup() 184 | except Exception as e: 185 | logger.debug(f"Failed to record startup telemetry: {e}") 186 | 187 | # Try to connect to Blender on startup to verify it's available 188 | try: 189 | # This will initialize the global connection if needed 190 | blender = get_blender_connection() 191 | logger.info("Successfully connected to Blender on startup") 192 | except Exception as e: 193 | logger.warning(f"Could not connect to Blender on startup: {str(e)}") 194 | logger.warning("Make sure the Blender addon is running before using Blender resources or tools") 195 | 196 | # Return an empty context - we're using the global connection 197 | yield {} 198 | finally: 199 | # Clean up the global connection on shutdown 200 | global _blender_connection 201 | if _blender_connection: 202 | logger.info("Disconnecting from Blender on shutdown") 203 | _blender_connection.disconnect() 204 | _blender_connection = None 205 | logger.info("BlenderMCP server shut down") 206 | 207 | # Create the MCP server with lifespan support 208 | mcp = FastMCP( 209 | "BlenderMCP", 210 | 
lifespan=server_lifespan 211 | ) 212 | 213 | # Resource endpoints 214 | 215 | # Global connection for resources (since resources can't access context) 216 | _blender_connection = None 217 | _polyhaven_enabled = False # Add this global variable 218 | 219 | def get_blender_connection(): 220 | """Get or create a persistent Blender connection""" 221 | global _blender_connection, _polyhaven_enabled # Add _polyhaven_enabled to globals 222 | 223 | # If we have an existing connection, check if it's still valid 224 | if _blender_connection is not None: 225 | try: 226 | # First check if PolyHaven is enabled by sending a ping command 227 | result = _blender_connection.send_command("get_polyhaven_status") 228 | # Store the PolyHaven status globally 229 | _polyhaven_enabled = result.get("enabled", False) 230 | return _blender_connection 231 | except Exception as e: 232 | # Connection is dead, close it and create a new one 233 | logger.warning(f"Existing connection is no longer valid: {str(e)}") 234 | try: 235 | _blender_connection.disconnect() 236 | except: 237 | pass 238 | _blender_connection = None 239 | 240 | # Create a new connection if needed 241 | if _blender_connection is None: 242 | host = os.getenv("BLENDER_HOST", DEFAULT_HOST) 243 | port = int(os.getenv("BLENDER_PORT", DEFAULT_PORT)) 244 | _blender_connection = BlenderConnection(host=host, port=port) 245 | if not _blender_connection.connect(): 246 | logger.error("Failed to connect to Blender") 247 | _blender_connection = None 248 | raise Exception("Could not connect to Blender. 
Make sure the Blender addon is running.") 249 | logger.info("Created new persistent connection to Blender") 250 | 251 | return _blender_connection 252 | 253 | 254 | @telemetry_tool("get_scene_info") 255 | @mcp.tool() 256 | def get_scene_info(ctx: Context) -> str: 257 | """Get detailed information about the current Blender scene""" 258 | try: 259 | blender = get_blender_connection() 260 | result = blender.send_command("get_scene_info") 261 | 262 | # Just return the JSON representation of what Blender sent us 263 | return json.dumps(result, indent=2) 264 | except Exception as e: 265 | logger.error(f"Error getting scene info from Blender: {str(e)}") 266 | return f"Error getting scene info: {str(e)}" 267 | 268 | @telemetry_tool("get_object_info") 269 | @mcp.tool() 270 | def get_object_info(ctx: Context, object_name: str) -> str: 271 | """ 272 | Get detailed information about a specific object in the Blender scene. 273 | 274 | Parameters: 275 | - object_name: The name of the object to get information about 276 | """ 277 | try: 278 | blender = get_blender_connection() 279 | result = blender.send_command("get_object_info", {"name": object_name}) 280 | 281 | # Just return the JSON representation of what Blender sent us 282 | return json.dumps(result, indent=2) 283 | except Exception as e: 284 | logger.error(f"Error getting object info from Blender: {str(e)}") 285 | return f"Error getting object info: {str(e)}" 286 | 287 | @telemetry_tool("get_viewport_screenshot") 288 | @mcp.tool() 289 | def get_viewport_screenshot(ctx: Context, max_size: int = 800) -> Image: 290 | """ 291 | Capture a screenshot of the current Blender 3D viewport. 292 | 293 | Parameters: 294 | - max_size: Maximum size in pixels for the largest dimension (default: 800) 295 | 296 | Returns the screenshot as an Image. 
297 | """ 298 | try: 299 | blender = get_blender_connection() 300 | 301 | # Create temp file path 302 | temp_dir = tempfile.gettempdir() 303 | temp_path = os.path.join(temp_dir, f"blender_screenshot_{os.getpid()}.png") 304 | 305 | result = blender.send_command("get_viewport_screenshot", { 306 | "max_size": max_size, 307 | "filepath": temp_path, 308 | "format": "png" 309 | }) 310 | 311 | if "error" in result: 312 | raise Exception(result["error"]) 313 | 314 | if not os.path.exists(temp_path): 315 | raise Exception("Screenshot file was not created") 316 | 317 | # Read the file 318 | with open(temp_path, 'rb') as f: 319 | image_bytes = f.read() 320 | 321 | # Delete the temp file 322 | os.remove(temp_path) 323 | 324 | return Image(data=image_bytes, format="png") 325 | 326 | except Exception as e: 327 | logger.error(f"Error capturing screenshot: {str(e)}") 328 | raise Exception(f"Screenshot failed: {str(e)}") 329 | 330 | 331 | @telemetry_tool("execute_blender_code") 332 | @mcp.tool() 333 | def execute_blender_code(ctx: Context, code: str) -> str: 334 | """ 335 | Execute arbitrary Python code in Blender. Make sure to do it step-by-step by breaking it into smaller chunks. 336 | 337 | Parameters: 338 | - code: The Python code to execute 339 | """ 340 | try: 341 | # Get the global connection 342 | blender = get_blender_connection() 343 | result = blender.send_command("execute_code", {"code": code}) 344 | return f"Code executed successfully: {result.get('result', '')}" 345 | except Exception as e: 346 | logger.error(f"Error executing code: {str(e)}") 347 | return f"Error executing code: {str(e)}" 348 | 349 | @telemetry_tool("get_polyhaven_categories") 350 | @mcp.tool() 351 | def get_polyhaven_categories(ctx: Context, asset_type: str = "hdris") -> str: 352 | """ 353 | Get a list of categories for a specific asset type on Polyhaven. 
354 | 355 | Parameters: 356 | - asset_type: The type of asset to get categories for (hdris, textures, models, all) 357 | """ 358 | try: 359 | blender = get_blender_connection() 360 | if not _polyhaven_enabled: 361 | return "PolyHaven integration is disabled. Select it in the sidebar in BlenderMCP, then run it again." 362 | result = blender.send_command("get_polyhaven_categories", {"asset_type": asset_type}) 363 | 364 | if "error" in result: 365 | return f"Error: {result['error']}" 366 | 367 | # Format the categories in a more readable way 368 | categories = result["categories"] 369 | formatted_output = f"Categories for {asset_type}:\n\n" 370 | 371 | # Sort categories by count (descending) 372 | sorted_categories = sorted(categories.items(), key=lambda x: x[1], reverse=True) 373 | 374 | for category, count in sorted_categories: 375 | formatted_output += f"- {category}: {count} assets\n" 376 | 377 | return formatted_output 378 | except Exception as e: 379 | logger.error(f"Error getting Polyhaven categories: {str(e)}") 380 | return f"Error getting Polyhaven categories: {str(e)}" 381 | 382 | @telemetry_tool("search_polyhaven_assets") 383 | @mcp.tool() 384 | def search_polyhaven_assets( 385 | ctx: Context, 386 | asset_type: str = "all", 387 | categories: str = None 388 | ) -> str: 389 | """ 390 | Search for assets on Polyhaven with optional filtering. 391 | 392 | Parameters: 393 | - asset_type: Type of assets to search for (hdris, textures, models, all) 394 | - categories: Optional comma-separated list of categories to filter by 395 | 396 | Returns a list of matching assets with basic information. 
397 | """ 398 | try: 399 | blender = get_blender_connection() 400 | result = blender.send_command("search_polyhaven_assets", { 401 | "asset_type": asset_type, 402 | "categories": categories 403 | }) 404 | 405 | if "error" in result: 406 | return f"Error: {result['error']}" 407 | 408 | # Format the assets in a more readable way 409 | assets = result["assets"] 410 | total_count = result["total_count"] 411 | returned_count = result["returned_count"] 412 | 413 | formatted_output = f"Found {total_count} assets" 414 | if categories: 415 | formatted_output += f" in categories: {categories}" 416 | formatted_output += f"\nShowing {returned_count} assets:\n\n" 417 | 418 | # Sort assets by download count (popularity) 419 | sorted_assets = sorted(assets.items(), key=lambda x: x[1].get("download_count", 0), reverse=True) 420 | 421 | for asset_id, asset_data in sorted_assets: 422 | formatted_output += f"- {asset_data.get('name', asset_id)} (ID: {asset_id})\n" 423 | formatted_output += f" Type: {['HDRI', 'Texture', 'Model'][asset_data.get('type', 0)]}\n" 424 | formatted_output += f" Categories: {', '.join(asset_data.get('categories', []))}\n" 425 | formatted_output += f" Downloads: {asset_data.get('download_count', 'Unknown')}\n\n" 426 | 427 | return formatted_output 428 | except Exception as e: 429 | logger.error(f"Error searching Polyhaven assets: {str(e)}") 430 | return f"Error searching Polyhaven assets: {str(e)}" 431 | 432 | @telemetry_tool("download_polyhaven_asset") 433 | @mcp.tool() 434 | def download_polyhaven_asset( 435 | ctx: Context, 436 | asset_id: str, 437 | asset_type: str, 438 | resolution: str = "1k", 439 | file_format: str = None 440 | ) -> str: 441 | """ 442 | Download and import a Polyhaven asset into Blender. 
443 | 444 | Parameters: 445 | - asset_id: The ID of the asset to download 446 | - asset_type: The type of asset (hdris, textures, models) 447 | - resolution: The resolution to download (e.g., 1k, 2k, 4k) 448 | - file_format: Optional file format (e.g., hdr, exr for HDRIs; jpg, png for textures; gltf, fbx for models) 449 | 450 | Returns a message indicating success or failure. 451 | """ 452 | try: 453 | blender = get_blender_connection() 454 | result = blender.send_command("download_polyhaven_asset", { 455 | "asset_id": asset_id, 456 | "asset_type": asset_type, 457 | "resolution": resolution, 458 | "file_format": file_format 459 | }) 460 | 461 | if "error" in result: 462 | return f"Error: {result['error']}" 463 | 464 | if result.get("success"): 465 | message = result.get("message", "Asset downloaded and imported successfully") 466 | 467 | # Add additional information based on asset type 468 | if asset_type == "hdris": 469 | return f"{message}. The HDRI has been set as the world environment." 470 | elif asset_type == "textures": 471 | material_name = result.get("material", "") 472 | maps = ", ".join(result.get("maps", [])) 473 | return f"{message}. Created material '{material_name}' with maps: {maps}." 474 | elif asset_type == "models": 475 | return f"{message}. The model has been imported into the current scene." 476 | else: 477 | return message 478 | else: 479 | return f"Failed to download asset: {result.get('message', 'Unknown error')}" 480 | except Exception as e: 481 | logger.error(f"Error downloading Polyhaven asset: {str(e)}") 482 | return f"Error downloading Polyhaven asset: {str(e)}" 483 | 484 | @telemetry_tool("set_texture") 485 | @mcp.tool() 486 | def set_texture( 487 | ctx: Context, 488 | object_name: str, 489 | texture_id: str 490 | ) -> str: 491 | """ 492 | Apply a previously downloaded Polyhaven texture to an object. 
493 | 494 | Parameters: 495 | - object_name: Name of the object to apply the texture to 496 | - texture_id: ID of the Polyhaven texture to apply (must be downloaded first) 497 | 498 | Returns a message indicating success or failure. 499 | """ 500 | try: 501 | # Get the global connection 502 | blender = get_blender_connection() 503 | result = blender.send_command("set_texture", { 504 | "object_name": object_name, 505 | "texture_id": texture_id 506 | }) 507 | 508 | if "error" in result: 509 | return f"Error: {result['error']}" 510 | 511 | if result.get("success"): 512 | material_name = result.get("material", "") 513 | maps = ", ".join(result.get("maps", [])) 514 | 515 | # Add detailed material info 516 | material_info = result.get("material_info", {}) 517 | node_count = material_info.get("node_count", 0) 518 | has_nodes = material_info.get("has_nodes", False) 519 | texture_nodes = material_info.get("texture_nodes", []) 520 | 521 | output = f"Successfully applied texture '{texture_id}' to {object_name}.\n" 522 | output += f"Using material '{material_name}' with maps: {maps}.\n\n" 523 | output += f"Material has nodes: {has_nodes}\n" 524 | output += f"Total node count: {node_count}\n\n" 525 | 526 | if texture_nodes: 527 | output += "Texture nodes:\n" 528 | for node in texture_nodes: 529 | output += f"- {node['name']} using image: {node['image']}\n" 530 | if node['connections']: 531 | output += " Connections:\n" 532 | for conn in node['connections']: 533 | output += f" {conn}\n" 534 | else: 535 | output += "No texture nodes found in the material.\n" 536 | 537 | return output 538 | else: 539 | return f"Failed to apply texture: {result.get('message', 'Unknown error')}" 540 | except Exception as e: 541 | logger.error(f"Error applying texture: {str(e)}") 542 | return f"Error applying texture: {str(e)}" 543 | 544 | @telemetry_tool("get_polyhaven_status") 545 | @mcp.tool() 546 | def get_polyhaven_status(ctx: Context) -> str: 547 | """ 548 | Check if PolyHaven integration is 
enabled in Blender. 549 | Returns a message indicating whether PolyHaven features are available. 550 | """ 551 | try: 552 | blender = get_blender_connection() 553 | result = blender.send_command("get_polyhaven_status") 554 | enabled = result.get("enabled", False) 555 | message = result.get("message", "") 556 | if enabled: 557 | message += "PolyHaven is good at Textures, and has a wider variety of textures than Sketchfab." 558 | return message 559 | except Exception as e: 560 | logger.error(f"Error checking PolyHaven status: {str(e)}") 561 | return f"Error checking PolyHaven status: {str(e)}" 562 | 563 | @telemetry_tool("get_hyper3d_status") 564 | @mcp.tool() 565 | def get_hyper3d_status(ctx: Context) -> str: 566 | """ 567 | Check if Hyper3D Rodin integration is enabled in Blender. 568 | Returns a message indicating whether Hyper3D Rodin features are available. 569 | 570 | Don't emphasize the key type in the returned message, but sliently remember it. 571 | """ 572 | try: 573 | blender = get_blender_connection() 574 | result = blender.send_command("get_hyper3d_status") 575 | enabled = result.get("enabled", False) 576 | message = result.get("message", "") 577 | if enabled: 578 | message += "" 579 | return message 580 | except Exception as e: 581 | logger.error(f"Error checking Hyper3D status: {str(e)}") 582 | return f"Error checking Hyper3D status: {str(e)}" 583 | 584 | @telemetry_tool("get_sketchfab_status") 585 | @mcp.tool() 586 | def get_sketchfab_status(ctx: Context) -> str: 587 | """ 588 | Check if Sketchfab integration is enabled in Blender. 589 | Returns a message indicating whether Sketchfab features are available. 590 | """ 591 | try: 592 | blender = get_blender_connection() 593 | result = blender.send_command("get_sketchfab_status") 594 | enabled = result.get("enabled", False) 595 | message = result.get("message", "") 596 | if enabled: 597 | message += "Sketchfab is good at Realistic models, and has a wider variety of models than PolyHaven." 
598 | return message 599 | except Exception as e: 600 | logger.error(f"Error checking Sketchfab status: {str(e)}") 601 | return f"Error checking Sketchfab status: {str(e)}" 602 | 603 | @telemetry_tool("search_sketchfab_models") 604 | @mcp.tool() 605 | def search_sketchfab_models( 606 | ctx: Context, 607 | query: str, 608 | categories: str = None, 609 | count: int = 20, 610 | downloadable: bool = True 611 | ) -> str: 612 | """ 613 | Search for models on Sketchfab with optional filtering. 614 | 615 | Parameters: 616 | - query: Text to search for 617 | - categories: Optional comma-separated list of categories 618 | - count: Maximum number of results to return (default 20) 619 | - downloadable: Whether to include only downloadable models (default True) 620 | 621 | Returns a formatted list of matching models. 622 | """ 623 | try: 624 | blender = get_blender_connection() 625 | logger.info(f"Searching Sketchfab models with query: {query}, categories: {categories}, count: {count}, downloadable: {downloadable}") 626 | result = blender.send_command("search_sketchfab_models", { 627 | "query": query, 628 | "categories": categories, 629 | "count": count, 630 | "downloadable": downloadable 631 | }) 632 | 633 | if "error" in result: 634 | logger.error(f"Error from Sketchfab search: {result['error']}") 635 | return f"Error: {result['error']}" 636 | 637 | # Safely get results with fallbacks for None 638 | if result is None: 639 | logger.error("Received None result from Sketchfab search") 640 | return "Error: Received no response from Sketchfab search" 641 | 642 | # Format the results 643 | models = result.get("results", []) or [] 644 | if not models: 645 | return f"No models found matching '{query}'" 646 | 647 | formatted_output = f"Found {len(models)} models matching '{query}':\n\n" 648 | 649 | for model in models: 650 | if model is None: 651 | continue 652 | 653 | model_name = model.get("name", "Unnamed model") 654 | model_uid = model.get("uid", "Unknown ID") 655 | 
formatted_output += f"- {model_name} (UID: {model_uid})\n" 656 | 657 | # Get user info with safety checks 658 | user = model.get("user") or {} 659 | username = user.get("username", "Unknown author") if isinstance(user, dict) else "Unknown author" 660 | formatted_output += f" Author: {username}\n" 661 | 662 | # Get license info with safety checks 663 | license_data = model.get("license") or {} 664 | license_label = license_data.get("label", "Unknown") if isinstance(license_data, dict) else "Unknown" 665 | formatted_output += f" License: {license_label}\n" 666 | 667 | # Add face count and downloadable status 668 | face_count = model.get("faceCount", "Unknown") 669 | is_downloadable = "Yes" if model.get("isDownloadable") else "No" 670 | formatted_output += f" Face count: {face_count}\n" 671 | formatted_output += f" Downloadable: {is_downloadable}\n\n" 672 | 673 | return formatted_output 674 | except Exception as e: 675 | logger.error(f"Error searching Sketchfab models: {str(e)}") 676 | import traceback 677 | logger.error(traceback.format_exc()) 678 | return f"Error searching Sketchfab models: {str(e)}" 679 | 680 | @telemetry_tool("download_sketchfab_model") 681 | @mcp.tool() 682 | def download_sketchfab_model( 683 | ctx: Context, 684 | uid: str 685 | ) -> str: 686 | """ 687 | Download and import a Sketchfab model by its UID. 688 | 689 | Parameters: 690 | - uid: The unique identifier of the Sketchfab model 691 | 692 | Returns a message indicating success or failure. 693 | The model must be downloadable and you must have proper access rights. 
694 | """ 695 | try: 696 | 697 | blender = get_blender_connection() 698 | logger.info(f"Attempting to download Sketchfab model with UID: {uid}") 699 | 700 | result = blender.send_command("download_sketchfab_model", { 701 | "uid": uid 702 | }) 703 | 704 | if result is None: 705 | logger.error("Received None result from Sketchfab download") 706 | return "Error: Received no response from Sketchfab download request" 707 | 708 | if "error" in result: 709 | logger.error(f"Error from Sketchfab download: {result['error']}") 710 | return f"Error: {result['error']}" 711 | 712 | if result.get("success"): 713 | imported_objects = result.get("imported_objects", []) 714 | object_names = ", ".join(imported_objects) if imported_objects else "none" 715 | return f"Successfully imported model. Created objects: {object_names}" 716 | else: 717 | return f"Failed to download model: {result.get('message', 'Unknown error')}" 718 | except Exception as e: 719 | logger.error(f"Error downloading Sketchfab model: {str(e)}") 720 | import traceback 721 | logger.error(traceback.format_exc()) 722 | return f"Error downloading Sketchfab model: {str(e)}" 723 | 724 | def _process_bbox(original_bbox: list[float] | list[int] | None) -> list[int] | None: 725 | if original_bbox is None: 726 | return None 727 | if all(isinstance(i, int) for i in original_bbox): 728 | return original_bbox 729 | if any(i<=0 for i in original_bbox): 730 | raise ValueError("Incorrect number range: bbox must be bigger than zero!") 731 | return [int(float(i) / max(original_bbox) * 100) for i in original_bbox] if original_bbox else None 732 | 733 | @telemetry_tool("generate_hyper3d_model_via_text") 734 | @mcp.tool() 735 | def generate_hyper3d_model_via_text( 736 | ctx: Context, 737 | text_prompt: str, 738 | bbox_condition: list[float]=None 739 | ) -> str: 740 | """ 741 | Generate 3D asset using Hyper3D by giving description of the desired asset, and import the asset into Blender. 742 | The 3D asset has built-in materials. 
743 | The generated model has a normalized size, so re-scaling after generation can be useful. 744 | 745 | Parameters: 746 | - text_prompt: A short description of the desired model in **English**. 747 | - bbox_condition: Optional. If given, it has to be a list of floats of length 3. Controls the ratio between [Length, Width, Height] of the model. 748 | 749 | Returns a message indicating success or failure. 750 | """ 751 | try: 752 | blender = get_blender_connection() 753 | result = blender.send_command("create_rodin_job", { 754 | "text_prompt": text_prompt, 755 | "images": None, 756 | "bbox_condition": _process_bbox(bbox_condition), 757 | }) 758 | succeed = result.get("submit_time", False) 759 | if succeed: 760 | return json.dumps({ 761 | "task_uuid": result["uuid"], 762 | "subscription_key": result["jobs"]["subscription_key"], 763 | }) 764 | else: 765 | return json.dumps(result) 766 | except Exception as e: 767 | logger.error(f"Error generating Hyper3D task: {str(e)}") 768 | return f"Error generating Hyper3D task: {str(e)}" 769 | 770 | @telemetry_tool("generate_hyper3d_model_via_images") 771 | @mcp.tool() 772 | def generate_hyper3d_model_via_images( 773 | ctx: Context, 774 | input_image_paths: list[str]=None, 775 | input_image_urls: list[str]=None, 776 | bbox_condition: list[float]=None 777 | ) -> str: 778 | """ 779 | Generate 3D asset using Hyper3D by giving images of the wanted asset, and import the generated asset into Blender. 780 | The 3D asset has built-in materials. 781 | The generated model has a normalized size, so re-scaling after generation can be useful. 782 | 783 | Parameters: 784 | - input_image_paths: The **absolute** paths of input images. Even if only one image is provided, wrap it into a list. Required if Hyper3D Rodin in MAIN_SITE mode. 785 | - input_image_urls: The URLs of input images. Even if only one image is provided, wrap it into a list. Required if Hyper3D Rodin in FAL_AI mode. 786 | - bbox_condition: Optional. 
If given, it has to be a list of ints of length 3. Controls the ratio between [Length, Width, Height] of the model. 787 | 788 | Only one of {input_image_paths, input_image_urls} should be given at a time, depending on the Hyper3D Rodin's current mode. 789 | Returns a message indicating success or failure. 790 | """ 791 | if input_image_paths is not None and input_image_urls is not None: 792 | return f"Error: Conflict parameters given!" 793 | if input_image_paths is None and input_image_urls is None: 794 | return f"Error: No image given!" 795 | if input_image_paths is not None: 796 | if not all(os.path.exists(i) for i in input_image_paths): 797 | return "Error: not all image paths are valid!" 798 | images = [] 799 | for path in input_image_paths: 800 | with open(path, "rb") as f: 801 | images.append( 802 | (Path(path).suffix, base64.b64encode(f.read()).decode("ascii")) 803 | ) 804 | elif input_image_urls is not None: 805 | if not all(urlparse(i) for i in input_image_paths): 806 | return "Error: not all image URLs are valid!" 807 | images = input_image_urls.copy() 808 | try: 809 | blender = get_blender_connection() 810 | result = blender.send_command("create_rodin_job", { 811 | "text_prompt": None, 812 | "images": images, 813 | "bbox_condition": _process_bbox(bbox_condition), 814 | }) 815 | succeed = result.get("submit_time", False) 816 | if succeed: 817 | return json.dumps({ 818 | "task_uuid": result["uuid"], 819 | "subscription_key": result["jobs"]["subscription_key"], 820 | }) 821 | else: 822 | return json.dumps(result) 823 | except Exception as e: 824 | logger.error(f"Error generating Hyper3D task: {str(e)}") 825 | return f"Error generating Hyper3D task: {str(e)}" 826 | 827 | @telemetry_tool("poll_rodin_job_status") 828 | @mcp.tool() 829 | def poll_rodin_job_status( 830 | ctx: Context, 831 | subscription_key: str=None, 832 | request_id: str=None, 833 | ): 834 | """ 835 | Check if the Hyper3D Rodin generation task is completed. 
836 | 837 | For Hyper3D Rodin mode MAIN_SITE: 838 | Parameters: 839 | - subscription_key: The subscription_key given in the generate model step. 840 | 841 | Returns a list of status. The task is done if all status are "Done". 842 | If "Failed" showed up, the generating process failed. 843 | This is a polling API, so only proceed if the status are finally determined ("Done" or "Canceled"). 844 | 845 | For Hyper3D Rodin mode FAL_AI: 846 | Parameters: 847 | - request_id: The request_id given in the generate model step. 848 | 849 | Returns the generation task status. The task is done if status is "COMPLETED". 850 | The task is in progress if status is "IN_PROGRESS". 851 | If status other than "COMPLETED", "IN_PROGRESS", "IN_QUEUE" showed up, the generating process might be failed. 852 | This is a polling API, so only proceed if the status are finally determined ("COMPLETED" or some failed state). 853 | """ 854 | try: 855 | blender = get_blender_connection() 856 | kwargs = {} 857 | if subscription_key: 858 | kwargs = { 859 | "subscription_key": subscription_key, 860 | } 861 | elif request_id: 862 | kwargs = { 863 | "request_id": request_id, 864 | } 865 | result = blender.send_command("poll_rodin_job_status", kwargs) 866 | return result 867 | except Exception as e: 868 | logger.error(f"Error generating Hyper3D task: {str(e)}") 869 | return f"Error generating Hyper3D task: {str(e)}" 870 | 871 | @telemetry_tool("import_generated_asset") 872 | @mcp.tool() 873 | def import_generated_asset( 874 | ctx: Context, 875 | name: str, 876 | task_uuid: str=None, 877 | request_id: str=None, 878 | ): 879 | """ 880 | Import the asset generated by Hyper3D Rodin after the generation task is completed. 881 | 882 | Parameters: 883 | - name: The name of the object in scene 884 | - task_uuid: For Hyper3D Rodin mode MAIN_SITE: The task_uuid given in the generate model step. 885 | - request_id: For Hyper3D Rodin mode FAL_AI: The request_id given in the generate model step. 
886 | 887 | Only give one of {task_uuid, request_id} based on the Hyper3D Rodin Mode! 888 | Return if the asset has been imported successfully. 889 | """ 890 | try: 891 | blender = get_blender_connection() 892 | kwargs = { 893 | "name": name 894 | } 895 | if task_uuid: 896 | kwargs["task_uuid"] = task_uuid 897 | elif request_id: 898 | kwargs["request_id"] = request_id 899 | result = blender.send_command("import_generated_asset", kwargs) 900 | return result 901 | except Exception as e: 902 | logger.error(f"Error generating Hyper3D task: {str(e)}") 903 | return f"Error generating Hyper3D task: {str(e)}" 904 | 905 | @mcp.tool() 906 | def get_hunyuan3d_status(ctx: Context) -> str: 907 | """ 908 | Check if Hunyuan3D integration is enabled in Blender. 909 | Returns a message indicating whether Hunyuan3D features are available. 910 | 911 | Don't emphasize the key type in the returned message, but silently remember it. 912 | """ 913 | try: 914 | blender = get_blender_connection() 915 | result = blender.send_command("get_hunyuan3d_status") 916 | message = result.get("message", "") 917 | return message 918 | except Exception as e: 919 | logger.error(f"Error checking Hunyuan3D status: {str(e)}") 920 | return f"Error checking Hunyuan3D status: {str(e)}" 921 | 922 | @mcp.tool() 923 | def generate_hunyuan3d_model( 924 | ctx: Context, 925 | text_prompt: str = None, 926 | input_image_url: str = None 927 | ) -> str: 928 | """ 929 | Generate 3D asset using Hunyuan3D by providing either text description, image reference, 930 | or both for the desired asset, and import the asset into Blender. 931 | The 3D asset has built-in materials. 932 | 933 | Parameters: 934 | - text_prompt: (Optional) A short description of the desired model in English/Chinese. 935 | - input_image_url: (Optional) The local or remote url of the input image. Accepts None if only using text prompt. 
936 | 937 | Returns: 938 | - When successful, returns a JSON with job_id (format: "job_xxx") indicating the task is in progress 939 | - When the job completes, the status will change to "DONE" indicating the model has been imported 940 | - Returns error message if the operation fails 941 | """ 942 | try: 943 | blender = get_blender_connection() 944 | result = blender.send_command("create_hunyuan_job", { 945 | "text_prompt": text_prompt, 946 | "image": input_image_url, 947 | }) 948 | if "JobId" in result.get("Response", {}): 949 | job_id = result["Response"]["JobId"] 950 | formatted_job_id = f"job_{job_id}" 951 | return json.dumps({ 952 | "job_id": formatted_job_id, 953 | }) 954 | return json.dumps(result) 955 | except Exception as e: 956 | logger.error(f"Error generating Hunyuan3D task: {str(e)}") 957 | return f"Error generating Hunyuan3D task: {str(e)}" 958 | 959 | @mcp.tool() 960 | def poll_hunyuan_job_status( 961 | ctx: Context, 962 | job_id: str=None, 963 | ): 964 | """ 965 | Check if the Hunyuan3D generation task is completed. 966 | 967 | For Hunyuan3D: 968 | Parameters: 969 | - job_id: The job_id given in the generate model step. 970 | 971 | Returns the generation task status. The task is done if status is "DONE". 972 | The task is in progress if status is "RUN". 973 | If status is "DONE", returns ResultFile3Ds, which is the generated ZIP model path 974 | When the status is "DONE", the response includes a field named ResultFile3Ds that contains the generated ZIP file path of the 3D model in OBJ format. 975 | This is a polling API, so only proceed if the status are finally determined ("DONE" or some failed state). 
976 | """ 977 | try: 978 | blender = get_blender_connection() 979 | kwargs = { 980 | "job_id": job_id, 981 | } 982 | result = blender.send_command("poll_hunyuan_job_status", kwargs) 983 | return result 984 | except Exception as e: 985 | logger.error(f"Error generating Hunyuan3D task: {str(e)}") 986 | return f"Error generating Hunyuan3D task: {str(e)}" 987 | 988 | @mcp.tool() 989 | def import_generated_asset_hunyuan( 990 | ctx: Context, 991 | name: str, 992 | zip_file_url: str, 993 | ): 994 | """ 995 | Import the asset generated by Hunyuan3D after the generation task is completed. 996 | 997 | Parameters: 998 | - name: The name of the object in scene 999 | - zip_file_url: The zip_file_url given in the generate model step. 1000 | 1001 | Return if the asset has been imported successfully. 1002 | """ 1003 | try: 1004 | blender = get_blender_connection() 1005 | kwargs = { 1006 | "name": name 1007 | } 1008 | if zip_file_url: 1009 | kwargs["zip_file_url"] = zip_file_url 1010 | result = blender.send_command("import_generated_asset_hunyuan", kwargs) 1011 | return result 1012 | except Exception as e: 1013 | logger.error(f"Error generating Hunyuan3D task: {str(e)}") 1014 | return f"Error generating Hunyuan3D task: {str(e)}" 1015 | 1016 | 1017 | @mcp.prompt() 1018 | def asset_creation_strategy() -> str: 1019 | """Defines the preferred strategy for creating assets in Blender""" 1020 | return """When creating 3D content in Blender, always start by checking if integrations are available: 1021 | 1022 | 0. Before anything, always check the scene from get_scene_info() 1023 | 1. First use the following tools to verify if the following integrations are enabled: 1024 | 1. 
PolyHaven 1025 | Use get_polyhaven_status() to verify its status 1026 | If PolyHaven is enabled: 1027 | - For objects/models: Use download_polyhaven_asset() with asset_type="models" 1028 | - For materials/textures: Use download_polyhaven_asset() with asset_type="textures" 1029 | - For environment lighting: Use download_polyhaven_asset() with asset_type="hdris" 1030 | 2. Sketchfab 1031 | Sketchfab is good at Realistic models, and has a wider variety of models than PolyHaven. 1032 | Use get_sketchfab_status() to verify its status 1033 | If Sketchfab is enabled: 1034 | - For objects/models: First search using search_sketchfab_models() with your query 1035 | - Then download specific models using download_sketchfab_model() with the UID 1036 | - Note that only downloadable models can be accessed, and API key must be properly configured 1037 | - Sketchfab has a wider variety of models than PolyHaven, especially for specific subjects 1038 | 3. Hyper3D(Rodin) 1039 | Hyper3D Rodin is good at generating 3D models for single item. 1040 | So don't try to: 1041 | 1. Generate the whole scene with one shot 1042 | 2. Generate ground using Hyper3D 1043 | 3. Generate parts of the items separately and put them together afterwards 1044 | 1045 | Use get_hyper3d_status() to verify its status 1046 | If Hyper3D is enabled: 1047 | - For objects/models, do the following steps: 1048 | 1. Create the model generation task 1049 | - Use generate_hyper3d_model_via_images() if image(s) is/are given 1050 | - Use generate_hyper3d_model_via_text() if generating 3D asset using text prompt 1051 | If key type is free_trial and insufficient balance error returned, tell the user that the free trial key can only generated limited models everyday, they can choose to: 1052 | - Wait for another day and try again 1053 | - Go to hyper3d.ai to find out how to get their own API key 1054 | - Go to fal.ai to get their own private API key 1055 | 2. 
Poll the status 1056 | - Use poll_rodin_job_status() to check if the generation task has completed or failed 1057 | 3. Import the asset 1058 | - Use import_generated_asset() to import the generated GLB model the asset 1059 | 4. After importing the asset, ALWAYS check the world_bounding_box of the imported mesh, and adjust the mesh's location and size 1060 | Adjust the imported mesh's location, scale, rotation, so that the mesh is on the right spot. 1061 | 1062 | You can reuse assets previous generated by running python code to duplicate the object, without creating another generation task. 1063 | 4. Hunyuan3D 1064 | Hunyuan3D is good at generating 3D models for single item. 1065 | So don't try to: 1066 | 1. Generate the whole scene with one shot 1067 | 2. Generate ground using Hunyuan3D 1068 | 3. Generate parts of the items separately and put them together afterwards 1069 | 1070 | Use get_hunyuan3d_status() to verify its status 1071 | If Hunyuan3D is enabled: 1072 | if Hunyuan3D mode is "OFFICIAL_API": 1073 | - For objects/models, do the following steps: 1074 | 1. Create the model generation task 1075 | - Use generate_hunyuan3d_model by providing either a **text description** OR an **image(local or urls) reference**. 1076 | - Go to cloud.tencent.com out how to get their own SecretId and SecretKey 1077 | 2. Poll the status 1078 | - Use poll_hunyuan_job_status() to check if the generation task has completed or failed 1079 | 3. Import the asset 1080 | - Use import_generated_asset_hunyuan() to import the generated OBJ model the asset 1081 | if Hunyuan3D mode is "LOCAL_API": 1082 | - For objects/models, do the following steps: 1083 | 1. Create the model generation task 1084 | - Use generate_hunyuan3d_model if image (local or urls) or text prompt is given and import the asset 1085 | 1086 | You can reuse assets previous generated by running python code to duplicate the object, without creating another generation task. 1087 | 1088 | 3. 
Always check the world_bounding_box for each item so that: 1089 | - Ensure that all objects that should not be clipping are not clipping. 1090 | - Items have right spatial relationship. 1091 | 1092 | 4. Recommended asset source priority: 1093 | - For specific existing objects: First try Sketchfab, then PolyHaven 1094 | - For generic objects/furniture: First try PolyHaven, then Sketchfab 1095 | - For custom or unique items not available in libraries: Use Hyper3D Rodin or Hunyuan3D 1096 | - For environment lighting: Use PolyHaven HDRIs 1097 | - For materials/textures: Use PolyHaven textures 1098 | 1099 | Only fall back to scripting when: 1100 | - PolyHaven, Sketchfab, Hyper3D, and Hunyuan3D are all disabled 1101 | - A simple primitive is explicitly requested 1102 | - No suitable asset exists in any of the libraries 1103 | - Hyper3D Rodin or Hunyuan3D failed to generate the desired asset 1104 | - The task specifically requires a basic material/color 1105 | """ 1106 | 1107 | # Main execution 1108 | 1109 | def main(): 1110 | """Run the MCP server""" 1111 | mcp.run() 1112 | 1113 | if __name__ == "__main__": 1114 | main() -------------------------------------------------------------------------------- /addon.py: -------------------------------------------------------------------------------- 1 | # Code created by Siddharth Ahuja: www.github.com/ahujasid © 2025 2 | 3 | import re 4 | import bpy 5 | import mathutils 6 | import json 7 | import threading 8 | import socket 9 | import time 10 | import requests 11 | import tempfile 12 | import traceback 13 | import os 14 | import shutil 15 | import zipfile 16 | from bpy.props import IntProperty 17 | import io 18 | from datetime import datetime 19 | import hashlib, hmac, base64 20 | import os.path as osp 21 | from contextlib import redirect_stdout, suppress 22 | 23 | bl_info = { 24 | "name": "Blender MCP", 25 | "author": "BlenderMCP", 26 | "version": (1, 2), 27 | "blender": (3, 0, 0), 28 | "location": "View3D > Sidebar > 
BlenderMCP", 29 | "description": "Connect Blender to Claude via MCP", 30 | "category": "Interface", 31 | } 32 | 33 | RODIN_FREE_TRIAL_KEY = "k9TcfFoEhNd9cCPP2guHAHHHkctZHIRhZDywZ1euGUXwihbYLpOjQhofby80NJez" 34 | 35 | # Add User-Agent as required by Poly Haven API 36 | REQ_HEADERS = requests.utils.default_headers() 37 | REQ_HEADERS.update({"User-Agent": "blender-mcp"}) 38 | 39 | class BlenderMCPServer: 40 | def __init__(self, host='localhost', port=9876): 41 | self.host = host 42 | self.port = port 43 | self.running = False 44 | self.socket = None 45 | self.server_thread = None 46 | 47 | def start(self): 48 | if self.running: 49 | print("Server is already running") 50 | return 51 | 52 | self.running = True 53 | 54 | try: 55 | # Create socket 56 | self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 57 | self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) 58 | self.socket.bind((self.host, self.port)) 59 | self.socket.listen(1) 60 | 61 | # Start server thread 62 | self.server_thread = threading.Thread(target=self._server_loop) 63 | self.server_thread.daemon = True 64 | self.server_thread.start() 65 | 66 | print(f"BlenderMCP server started on {self.host}:{self.port}") 67 | except Exception as e: 68 | print(f"Failed to start server: {str(e)}") 69 | self.stop() 70 | 71 | def stop(self): 72 | self.running = False 73 | 74 | # Close socket 75 | if self.socket: 76 | try: 77 | self.socket.close() 78 | except: 79 | pass 80 | self.socket = None 81 | 82 | # Wait for thread to finish 83 | if self.server_thread: 84 | try: 85 | if self.server_thread.is_alive(): 86 | self.server_thread.join(timeout=1.0) 87 | except: 88 | pass 89 | self.server_thread = None 90 | 91 | print("BlenderMCP server stopped") 92 | 93 | def _server_loop(self): 94 | """Main server loop in a separate thread""" 95 | print("Server thread started") 96 | self.socket.settimeout(1.0) # Timeout to allow for stopping 97 | 98 | while self.running: 99 | try: 100 | # Accept new connection 101 | 
try: 102 | client, address = self.socket.accept() 103 | print(f"Connected to client: {address}") 104 | 105 | # Handle client in a separate thread 106 | client_thread = threading.Thread( 107 | target=self._handle_client, 108 | args=(client,) 109 | ) 110 | client_thread.daemon = True 111 | client_thread.start() 112 | except socket.timeout: 113 | # Just check running condition 114 | continue 115 | except Exception as e: 116 | print(f"Error accepting connection: {str(e)}") 117 | time.sleep(0.5) 118 | except Exception as e: 119 | print(f"Error in server loop: {str(e)}") 120 | if not self.running: 121 | break 122 | time.sleep(0.5) 123 | 124 | print("Server thread stopped") 125 | 126 | def _handle_client(self, client): 127 | """Handle connected client""" 128 | print("Client handler started") 129 | client.settimeout(None) # No timeout 130 | buffer = b'' 131 | 132 | try: 133 | while self.running: 134 | # Receive data 135 | try: 136 | data = client.recv(8192) 137 | if not data: 138 | print("Client disconnected") 139 | break 140 | 141 | buffer += data 142 | try: 143 | # Try to parse command 144 | command = json.loads(buffer.decode('utf-8')) 145 | buffer = b'' 146 | 147 | # Execute command in Blender's main thread 148 | def execute_wrapper(): 149 | try: 150 | response = self.execute_command(command) 151 | response_json = json.dumps(response) 152 | try: 153 | client.sendall(response_json.encode('utf-8')) 154 | except: 155 | print("Failed to send response - client disconnected") 156 | except Exception as e: 157 | print(f"Error executing command: {str(e)}") 158 | traceback.print_exc() 159 | try: 160 | error_response = { 161 | "status": "error", 162 | "message": str(e) 163 | } 164 | client.sendall(json.dumps(error_response).encode('utf-8')) 165 | except: 166 | pass 167 | return None 168 | 169 | # Schedule execution in main thread 170 | bpy.app.timers.register(execute_wrapper, first_interval=0.0) 171 | except json.JSONDecodeError: 172 | # Incomplete data, wait for more 173 | pass 
174 | except Exception as e: 175 | print(f"Error receiving data: {str(e)}") 176 | break 177 | except Exception as e: 178 | print(f"Error in client handler: {str(e)}") 179 | finally: 180 | try: 181 | client.close() 182 | except: 183 | pass 184 | print("Client handler stopped") 185 | 186 | def execute_command(self, command): 187 | """Execute a command in the main Blender thread""" 188 | try: 189 | return self._execute_command_internal(command) 190 | 191 | except Exception as e: 192 | print(f"Error executing command: {str(e)}") 193 | traceback.print_exc() 194 | return {"status": "error", "message": str(e)} 195 | 196 | def _execute_command_internal(self, command): 197 | """Internal command execution with proper context""" 198 | cmd_type = command.get("type") 199 | params = command.get("params", {}) 200 | 201 | # Add a handler for checking PolyHaven status 202 | if cmd_type == "get_polyhaven_status": 203 | return {"status": "success", "result": self.get_polyhaven_status()} 204 | 205 | # Base handlers that are always available 206 | handlers = { 207 | "get_scene_info": self.get_scene_info, 208 | "get_object_info": self.get_object_info, 209 | "get_viewport_screenshot": self.get_viewport_screenshot, 210 | "execute_code": self.execute_code, 211 | "get_polyhaven_status": self.get_polyhaven_status, 212 | "get_hyper3d_status": self.get_hyper3d_status, 213 | "get_sketchfab_status": self.get_sketchfab_status, 214 | "get_hunyuan3d_status": self.get_hunyuan3d_status, 215 | } 216 | 217 | # Add Polyhaven handlers only if enabled 218 | if bpy.context.scene.blendermcp_use_polyhaven: 219 | polyhaven_handlers = { 220 | "get_polyhaven_categories": self.get_polyhaven_categories, 221 | "search_polyhaven_assets": self.search_polyhaven_assets, 222 | "download_polyhaven_asset": self.download_polyhaven_asset, 223 | "set_texture": self.set_texture, 224 | } 225 | handlers.update(polyhaven_handlers) 226 | 227 | # Add Hyper3d handlers only if enabled 228 | if 
bpy.context.scene.blendermcp_use_hyper3d: 229 | polyhaven_handlers = { 230 | "create_rodin_job": self.create_rodin_job, 231 | "poll_rodin_job_status": self.poll_rodin_job_status, 232 | "import_generated_asset": self.import_generated_asset, 233 | } 234 | handlers.update(polyhaven_handlers) 235 | 236 | # Add Sketchfab handlers only if enabled 237 | if bpy.context.scene.blendermcp_use_sketchfab: 238 | sketchfab_handlers = { 239 | "search_sketchfab_models": self.search_sketchfab_models, 240 | "download_sketchfab_model": self.download_sketchfab_model, 241 | } 242 | handlers.update(sketchfab_handlers) 243 | 244 | # Add Hunyuan3d handlers only if enabled 245 | if bpy.context.scene.blendermcp_use_hunyuan3d: 246 | hunyuan_handlers = { 247 | "create_hunyuan_job": self.create_hunyuan_job, 248 | "poll_hunyuan_job_status": self.poll_hunyuan_job_status, 249 | "import_generated_asset_hunyuan": self.import_generated_asset_hunyuan 250 | } 251 | handlers.update(hunyuan_handlers) 252 | 253 | handler = handlers.get(cmd_type) 254 | if handler: 255 | try: 256 | print(f"Executing handler for {cmd_type}") 257 | result = handler(**params) 258 | print(f"Handler execution complete") 259 | return {"status": "success", "result": result} 260 | except Exception as e: 261 | print(f"Error in handler: {str(e)}") 262 | traceback.print_exc() 263 | return {"status": "error", "message": str(e)} 264 | else: 265 | return {"status": "error", "message": f"Unknown command type: {cmd_type}"} 266 | 267 | 268 | 269 | def get_scene_info(self): 270 | """Get information about the current Blender scene""" 271 | try: 272 | print("Getting scene info...") 273 | # Simplify the scene info to reduce data size 274 | scene_info = { 275 | "name": bpy.context.scene.name, 276 | "object_count": len(bpy.context.scene.objects), 277 | "objects": [], 278 | "materials_count": len(bpy.data.materials), 279 | } 280 | 281 | # Collect minimal object information (limit to first 10 objects) 282 | for i, obj in 
enumerate(bpy.context.scene.objects): 283 | if i >= 10: # Reduced from 20 to 10 284 | break 285 | 286 | obj_info = { 287 | "name": obj.name, 288 | "type": obj.type, 289 | # Only include basic location data 290 | "location": [round(float(obj.location.x), 2), 291 | round(float(obj.location.y), 2), 292 | round(float(obj.location.z), 2)], 293 | } 294 | scene_info["objects"].append(obj_info) 295 | 296 | print(f"Scene info collected: {len(scene_info['objects'])} objects") 297 | return scene_info 298 | except Exception as e: 299 | print(f"Error in get_scene_info: {str(e)}") 300 | traceback.print_exc() 301 | return {"error": str(e)} 302 | 303 | @staticmethod 304 | def _get_aabb(obj): 305 | """ Returns the world-space axis-aligned bounding box (AABB) of an object. """ 306 | if obj.type != 'MESH': 307 | raise TypeError("Object must be a mesh") 308 | 309 | # Get the bounding box corners in local space 310 | local_bbox_corners = [mathutils.Vector(corner) for corner in obj.bound_box] 311 | 312 | # Convert to world coordinates 313 | world_bbox_corners = [obj.matrix_world @ corner for corner in local_bbox_corners] 314 | 315 | # Compute axis-aligned min/max coordinates 316 | min_corner = mathutils.Vector(map(min, zip(*world_bbox_corners))) 317 | max_corner = mathutils.Vector(map(max, zip(*world_bbox_corners))) 318 | 319 | return [ 320 | [*min_corner], [*max_corner] 321 | ] 322 | 323 | 324 | 325 | def get_object_info(self, name): 326 | """Get detailed information about a specific object""" 327 | obj = bpy.data.objects.get(name) 328 | if not obj: 329 | raise ValueError(f"Object not found: {name}") 330 | 331 | # Basic object info 332 | obj_info = { 333 | "name": obj.name, 334 | "type": obj.type, 335 | "location": [obj.location.x, obj.location.y, obj.location.z], 336 | "rotation": [obj.rotation_euler.x, obj.rotation_euler.y, obj.rotation_euler.z], 337 | "scale": [obj.scale.x, obj.scale.y, obj.scale.z], 338 | "visible": obj.visible_get(), 339 | "materials": [], 340 | } 341 | 342 | if 
obj.type == "MESH": 343 | bounding_box = self._get_aabb(obj) 344 | obj_info["world_bounding_box"] = bounding_box 345 | 346 | # Add material slots 347 | for slot in obj.material_slots: 348 | if slot.material: 349 | obj_info["materials"].append(slot.material.name) 350 | 351 | # Add mesh data if applicable 352 | if obj.type == 'MESH' and obj.data: 353 | mesh = obj.data 354 | obj_info["mesh"] = { 355 | "vertices": len(mesh.vertices), 356 | "edges": len(mesh.edges), 357 | "polygons": len(mesh.polygons), 358 | } 359 | 360 | return obj_info 361 | 362 | def get_viewport_screenshot(self, max_size=800, filepath=None, format="png"): 363 | """ 364 | Capture a screenshot of the current 3D viewport and save it to the specified path. 365 | 366 | Parameters: 367 | - max_size: Maximum size in pixels for the largest dimension of the image 368 | - filepath: Path where to save the screenshot file 369 | - format: Image format (png, jpg, etc.) 370 | 371 | Returns success/error status 372 | """ 373 | try: 374 | if not filepath: 375 | return {"error": "No filepath provided"} 376 | 377 | # Find the active 3D viewport 378 | area = None 379 | for a in bpy.context.screen.areas: 380 | if a.type == 'VIEW_3D': 381 | area = a 382 | break 383 | 384 | if not area: 385 | return {"error": "No 3D viewport found"} 386 | 387 | # Take screenshot with proper context override 388 | with bpy.context.temp_override(area=area): 389 | bpy.ops.screen.screenshot_area(filepath=filepath) 390 | 391 | # Load and resize if needed 392 | img = bpy.data.images.load(filepath) 393 | width, height = img.size 394 | 395 | if max(width, height) > max_size: 396 | scale = max_size / max(width, height) 397 | new_width = int(width * scale) 398 | new_height = int(height * scale) 399 | img.scale(new_width, new_height) 400 | 401 | # Set format and save 402 | img.file_format = format.upper() 403 | img.save() 404 | width, height = new_width, new_height 405 | 406 | # Cleanup Blender image data 407 | bpy.data.images.remove(img) 408 | 409 
| return { 410 | "success": True, 411 | "width": width, 412 | "height": height, 413 | "filepath": filepath 414 | } 415 | 416 | except Exception as e: 417 | return {"error": str(e)} 418 | 419 | def execute_code(self, code): 420 | """Execute arbitrary Blender Python code""" 421 | # This is powerful but potentially dangerous - use with caution 422 | try: 423 | # Create a local namespace for execution 424 | namespace = {"bpy": bpy} 425 | 426 | # Capture stdout during execution, and return it as result 427 | capture_buffer = io.StringIO() 428 | with redirect_stdout(capture_buffer): 429 | exec(code, namespace) 430 | 431 | captured_output = capture_buffer.getvalue() 432 | return {"executed": True, "result": captured_output} 433 | except Exception as e: 434 | raise Exception(f"Code execution error: {str(e)}") 435 | 436 | 437 | 438 | def get_polyhaven_categories(self, asset_type): 439 | """Get categories for a specific asset type from Polyhaven""" 440 | try: 441 | if asset_type not in ["hdris", "textures", "models", "all"]: 442 | return {"error": f"Invalid asset type: {asset_type}. Must be one of: hdris, textures, models, all"} 443 | 444 | response = requests.get(f"https://api.polyhaven.com/categories/{asset_type}", headers=REQ_HEADERS) 445 | if response.status_code == 200: 446 | return {"categories": response.json()} 447 | else: 448 | return {"error": f"API request failed with status code {response.status_code}"} 449 | except Exception as e: 450 | return {"error": str(e)} 451 | 452 | def search_polyhaven_assets(self, asset_type=None, categories=None): 453 | """Search for assets from Polyhaven with optional filtering""" 454 | try: 455 | url = "https://api.polyhaven.com/assets" 456 | params = {} 457 | 458 | if asset_type and asset_type != "all": 459 | if asset_type not in ["hdris", "textures", "models"]: 460 | return {"error": f"Invalid asset type: {asset_type}. 
Must be one of: hdris, textures, models, all"} 461 | params["type"] = asset_type 462 | 463 | if categories: 464 | params["categories"] = categories 465 | 466 | response = requests.get(url, params=params, headers=REQ_HEADERS) 467 | if response.status_code == 200: 468 | # Limit the response size to avoid overwhelming Blender 469 | assets = response.json() 470 | # Return only the first 20 assets to keep response size manageable 471 | limited_assets = {} 472 | for i, (key, value) in enumerate(assets.items()): 473 | if i >= 20: # Limit to 20 assets 474 | break 475 | limited_assets[key] = value 476 | 477 | return {"assets": limited_assets, "total_count": len(assets), "returned_count": len(limited_assets)} 478 | else: 479 | return {"error": f"API request failed with status code {response.status_code}"} 480 | except Exception as e: 481 | return {"error": str(e)} 482 | 483 | def download_polyhaven_asset(self, asset_id, asset_type, resolution="1k", file_format=None): 484 | try: 485 | # First get the files information 486 | files_response = requests.get(f"https://api.polyhaven.com/files/{asset_id}", headers=REQ_HEADERS) 487 | if files_response.status_code != 200: 488 | return {"error": f"Failed to get asset files: {files_response.status_code}"} 489 | 490 | files_data = files_response.json() 491 | 492 | # Handle different asset types 493 | if asset_type == "hdris": 494 | # For HDRIs, download the .hdr or .exr file 495 | if not file_format: 496 | file_format = "hdr" # Default format for HDRIs 497 | 498 | if "hdri" in files_data and resolution in files_data["hdri"] and file_format in files_data["hdri"][resolution]: 499 | file_info = files_data["hdri"][resolution][file_format] 500 | file_url = file_info["url"] 501 | 502 | # For HDRIs, we need to save to a temporary file first 503 | # since Blender can't properly load HDR data directly from memory 504 | with tempfile.NamedTemporaryFile(suffix=f".{file_format}", delete=False) as tmp_file: 505 | # Download the file 506 | response = 
requests.get(file_url, headers=REQ_HEADERS) 507 | if response.status_code != 200: 508 | return {"error": f"Failed to download HDRI: {response.status_code}"} 509 | 510 | tmp_file.write(response.content) 511 | tmp_path = tmp_file.name 512 | 513 | try: 514 | # Create a new world if none exists 515 | if not bpy.data.worlds: 516 | bpy.data.worlds.new("World") 517 | 518 | world = bpy.data.worlds[0] 519 | world.use_nodes = True 520 | node_tree = world.node_tree 521 | 522 | # Clear existing nodes 523 | for node in node_tree.nodes: 524 | node_tree.nodes.remove(node) 525 | 526 | # Create nodes 527 | tex_coord = node_tree.nodes.new(type='ShaderNodeTexCoord') 528 | tex_coord.location = (-800, 0) 529 | 530 | mapping = node_tree.nodes.new(type='ShaderNodeMapping') 531 | mapping.location = (-600, 0) 532 | 533 | # Load the image from the temporary file 534 | env_tex = node_tree.nodes.new(type='ShaderNodeTexEnvironment') 535 | env_tex.location = (-400, 0) 536 | env_tex.image = bpy.data.images.load(tmp_path) 537 | 538 | # Use a color space that exists in all Blender versions 539 | if file_format.lower() == 'exr': 540 | # Try to use Linear color space for EXR files 541 | try: 542 | env_tex.image.colorspace_settings.name = 'Linear' 543 | except: 544 | # Fallback to Non-Color if Linear isn't available 545 | env_tex.image.colorspace_settings.name = 'Non-Color' 546 | else: # hdr 547 | # For HDR files, try these options in order 548 | for color_space in ['Linear', 'Linear Rec.709', 'Non-Color']: 549 | try: 550 | env_tex.image.colorspace_settings.name = color_space 551 | break # Stop if we successfully set a color space 552 | except: 553 | continue 554 | 555 | background = node_tree.nodes.new(type='ShaderNodeBackground') 556 | background.location = (-200, 0) 557 | 558 | output = node_tree.nodes.new(type='ShaderNodeOutputWorld') 559 | output.location = (0, 0) 560 | 561 | # Connect nodes 562 | node_tree.links.new(tex_coord.outputs['Generated'], mapping.inputs['Vector']) 563 | 
node_tree.links.new(mapping.outputs['Vector'], env_tex.inputs['Vector']) 564 | node_tree.links.new(env_tex.outputs['Color'], background.inputs['Color']) 565 | node_tree.links.new(background.outputs['Background'], output.inputs['Surface']) 566 | 567 | # Set as active world 568 | bpy.context.scene.world = world 569 | 570 | # Clean up temporary file 571 | try: 572 | tempfile._cleanup() # This will clean up all temporary files 573 | except: 574 | pass 575 | 576 | return { 577 | "success": True, 578 | "message": f"HDRI {asset_id} imported successfully", 579 | "image_name": env_tex.image.name 580 | } 581 | except Exception as e: 582 | return {"error": f"Failed to set up HDRI in Blender: {str(e)}"} 583 | else: 584 | return {"error": f"Requested resolution or format not available for this HDRI"} 585 | 586 | elif asset_type == "textures": 587 | if not file_format: 588 | file_format = "jpg" # Default format for textures 589 | 590 | downloaded_maps = {} 591 | 592 | try: 593 | for map_type in files_data: 594 | if map_type not in ["blend", "gltf"]: # Skip non-texture files 595 | if resolution in files_data[map_type] and file_format in files_data[map_type][resolution]: 596 | file_info = files_data[map_type][resolution][file_format] 597 | file_url = file_info["url"] 598 | 599 | # Use NamedTemporaryFile like we do for HDRIs 600 | with tempfile.NamedTemporaryFile(suffix=f".{file_format}", delete=False) as tmp_file: 601 | # Download the file 602 | response = requests.get(file_url, headers=REQ_HEADERS) 603 | if response.status_code == 200: 604 | tmp_file.write(response.content) 605 | tmp_path = tmp_file.name 606 | 607 | # Load image from temporary file 608 | image = bpy.data.images.load(tmp_path) 609 | image.name = f"{asset_id}_{map_type}.{file_format}" 610 | 611 | # Pack the image into .blend file 612 | image.pack() 613 | 614 | # Set color space based on map type 615 | if map_type in ['color', 'diffuse', 'albedo']: 616 | try: 617 | image.colorspace_settings.name = 'sRGB' 618 | 
except: 619 | pass 620 | else: 621 | try: 622 | image.colorspace_settings.name = 'Non-Color' 623 | except: 624 | pass 625 | 626 | downloaded_maps[map_type] = image 627 | 628 | # Clean up temporary file 629 | try: 630 | os.unlink(tmp_path) 631 | except: 632 | pass 633 | 634 | if not downloaded_maps: 635 | return {"error": f"No texture maps found for the requested resolution and format"} 636 | 637 | # Create a new material with the downloaded textures 638 | mat = bpy.data.materials.new(name=asset_id) 639 | mat.use_nodes = True 640 | nodes = mat.node_tree.nodes 641 | links = mat.node_tree.links 642 | 643 | # Clear default nodes 644 | for node in nodes: 645 | nodes.remove(node) 646 | 647 | # Create output node 648 | output = nodes.new(type='ShaderNodeOutputMaterial') 649 | output.location = (300, 0) 650 | 651 | # Create principled BSDF node 652 | principled = nodes.new(type='ShaderNodeBsdfPrincipled') 653 | principled.location = (0, 0) 654 | links.new(principled.outputs[0], output.inputs[0]) 655 | 656 | # Add texture nodes based on available maps 657 | tex_coord = nodes.new(type='ShaderNodeTexCoord') 658 | tex_coord.location = (-800, 0) 659 | 660 | mapping = nodes.new(type='ShaderNodeMapping') 661 | mapping.location = (-600, 0) 662 | mapping.vector_type = 'TEXTURE' # Changed from default 'POINT' to 'TEXTURE' 663 | links.new(tex_coord.outputs['UV'], mapping.inputs['Vector']) 664 | 665 | # Position offset for texture nodes 666 | x_pos = -400 667 | y_pos = 300 668 | 669 | # Connect different texture maps 670 | for map_type, image in downloaded_maps.items(): 671 | tex_node = nodes.new(type='ShaderNodeTexImage') 672 | tex_node.location = (x_pos, y_pos) 673 | tex_node.image = image 674 | 675 | # Set color space based on map type 676 | if map_type.lower() in ['color', 'diffuse', 'albedo']: 677 | try: 678 | tex_node.image.colorspace_settings.name = 'sRGB' 679 | except: 680 | pass # Use default if sRGB not available 681 | else: 682 | try: 683 | 
tex_node.image.colorspace_settings.name = 'Non-Color' 684 | except: 685 | pass # Use default if Non-Color not available 686 | 687 | links.new(mapping.outputs['Vector'], tex_node.inputs['Vector']) 688 | 689 | # Connect to appropriate input on Principled BSDF 690 | if map_type.lower() in ['color', 'diffuse', 'albedo']: 691 | links.new(tex_node.outputs['Color'], principled.inputs['Base Color']) 692 | elif map_type.lower() in ['roughness', 'rough']: 693 | links.new(tex_node.outputs['Color'], principled.inputs['Roughness']) 694 | elif map_type.lower() in ['metallic', 'metalness', 'metal']: 695 | links.new(tex_node.outputs['Color'], principled.inputs['Metallic']) 696 | elif map_type.lower() in ['normal', 'nor']: 697 | # Add normal map node 698 | normal_map = nodes.new(type='ShaderNodeNormalMap') 699 | normal_map.location = (x_pos + 200, y_pos) 700 | links.new(tex_node.outputs['Color'], normal_map.inputs['Color']) 701 | links.new(normal_map.outputs['Normal'], principled.inputs['Normal']) 702 | elif map_type in ['displacement', 'disp', 'height']: 703 | # Add displacement node 704 | disp_node = nodes.new(type='ShaderNodeDisplacement') 705 | disp_node.location = (x_pos + 200, y_pos - 200) 706 | links.new(tex_node.outputs['Color'], disp_node.inputs['Height']) 707 | links.new(disp_node.outputs['Displacement'], output.inputs['Displacement']) 708 | 709 | y_pos -= 250 710 | 711 | return { 712 | "success": True, 713 | "message": f"Texture {asset_id} imported as material", 714 | "material": mat.name, 715 | "maps": list(downloaded_maps.keys()) 716 | } 717 | 718 | except Exception as e: 719 | return {"error": f"Failed to process textures: {str(e)}"} 720 | 721 | elif asset_type == "models": 722 | # For models, prefer glTF format if available 723 | if not file_format: 724 | file_format = "gltf" # Default format for models 725 | 726 | if file_format in files_data and resolution in files_data[file_format]: 727 | file_info = files_data[file_format][resolution][file_format] 728 | file_url 
= file_info["url"] 729 | 730 | # Create a temporary directory to store the model and its dependencies 731 | temp_dir = tempfile.mkdtemp() 732 | main_file_path = "" 733 | 734 | try: 735 | # Download the main model file 736 | main_file_name = file_url.split("/")[-1] 737 | main_file_path = os.path.join(temp_dir, main_file_name) 738 | 739 | response = requests.get(file_url, headers=REQ_HEADERS) 740 | if response.status_code != 200: 741 | return {"error": f"Failed to download model: {response.status_code}"} 742 | 743 | with open(main_file_path, "wb") as f: 744 | f.write(response.content) 745 | 746 | # Check for included files and download them 747 | if "include" in file_info and file_info["include"]: 748 | for include_path, include_info in file_info["include"].items(): 749 | # Get the URL for the included file - this is the fix 750 | include_url = include_info["url"] 751 | 752 | # Create the directory structure for the included file 753 | include_file_path = os.path.join(temp_dir, include_path) 754 | os.makedirs(os.path.dirname(include_file_path), exist_ok=True) 755 | 756 | # Download the included file 757 | include_response = requests.get(include_url, headers=REQ_HEADERS) 758 | if include_response.status_code == 200: 759 | with open(include_file_path, "wb") as f: 760 | f.write(include_response.content) 761 | else: 762 | print(f"Failed to download included file: {include_path}") 763 | 764 | # Import the model into Blender 765 | if file_format == "gltf" or file_format == "glb": 766 | bpy.ops.import_scene.gltf(filepath=main_file_path) 767 | elif file_format == "fbx": 768 | bpy.ops.import_scene.fbx(filepath=main_file_path) 769 | elif file_format == "obj": 770 | bpy.ops.import_scene.obj(filepath=main_file_path) 771 | elif file_format == "blend": 772 | # For blend files, we need to append or link 773 | with bpy.data.libraries.load(main_file_path, link=False) as (data_from, data_to): 774 | data_to.objects = data_from.objects 775 | 776 | # Link the objects to the scene 777 | 
for obj in data_to.objects: 778 | if obj is not None: 779 | bpy.context.collection.objects.link(obj) 780 | else: 781 | return {"error": f"Unsupported model format: {file_format}"} 782 | 783 | # Get the names of imported objects 784 | imported_objects = [obj.name for obj in bpy.context.selected_objects] 785 | 786 | return { 787 | "success": True, 788 | "message": f"Model {asset_id} imported successfully", 789 | "imported_objects": imported_objects 790 | } 791 | except Exception as e: 792 | return {"error": f"Failed to import model: {str(e)}"} 793 | finally: 794 | # Clean up temporary directory 795 | with suppress(Exception): 796 | shutil.rmtree(temp_dir) 797 | else: 798 | return {"error": f"Requested format or resolution not available for this model"} 799 | 800 | else: 801 | return {"error": f"Unsupported asset type: {asset_type}"} 802 | 803 | except Exception as e: 804 | return {"error": f"Failed to download asset: {str(e)}"} 805 | 806 | def set_texture(self, object_name, texture_id): 807 | """Apply a previously downloaded Polyhaven texture to an object by creating a new material""" 808 | try: 809 | # Get the object 810 | obj = bpy.data.objects.get(object_name) 811 | if not obj: 812 | return {"error": f"Object not found: {object_name}"} 813 | 814 | # Make sure object can accept materials 815 | if not hasattr(obj, 'data') or not hasattr(obj.data, 'materials'): 816 | return {"error": f"Object {object_name} cannot accept materials"} 817 | 818 | # Find all images related to this texture and ensure they're properly loaded 819 | texture_images = {} 820 | for img in bpy.data.images: 821 | if img.name.startswith(texture_id + "_"): 822 | # Extract the map type from the image name 823 | map_type = img.name.split('_')[-1].split('.')[0] 824 | 825 | # Force a reload of the image 826 | img.reload() 827 | 828 | # Ensure proper color space 829 | if map_type.lower() in ['color', 'diffuse', 'albedo']: 830 | try: 831 | img.colorspace_settings.name = 'sRGB' 832 | except: 833 | pass 
834 | else: 835 | try: 836 | img.colorspace_settings.name = 'Non-Color' 837 | except: 838 | pass 839 | 840 | # Ensure the image is packed 841 | if not img.packed_file: 842 | img.pack() 843 | 844 | texture_images[map_type] = img 845 | print(f"Loaded texture map: {map_type} - {img.name}") 846 | 847 | # Debug info 848 | print(f"Image size: {img.size[0]}x{img.size[1]}") 849 | print(f"Color space: {img.colorspace_settings.name}") 850 | print(f"File format: {img.file_format}") 851 | print(f"Is packed: {bool(img.packed_file)}") 852 | 853 | if not texture_images: 854 | return {"error": f"No texture images found for: {texture_id}. Please download the texture first."} 855 | 856 | # Create a new material 857 | new_mat_name = f"{texture_id}_material_{object_name}" 858 | 859 | # Remove any existing material with this name to avoid conflicts 860 | existing_mat = bpy.data.materials.get(new_mat_name) 861 | if existing_mat: 862 | bpy.data.materials.remove(existing_mat) 863 | 864 | new_mat = bpy.data.materials.new(name=new_mat_name) 865 | new_mat.use_nodes = True 866 | 867 | # Set up the material nodes 868 | nodes = new_mat.node_tree.nodes 869 | links = new_mat.node_tree.links 870 | 871 | # Clear default nodes 872 | nodes.clear() 873 | 874 | # Create output node 875 | output = nodes.new(type='ShaderNodeOutputMaterial') 876 | output.location = (600, 0) 877 | 878 | # Create principled BSDF node 879 | principled = nodes.new(type='ShaderNodeBsdfPrincipled') 880 | principled.location = (300, 0) 881 | links.new(principled.outputs[0], output.inputs[0]) 882 | 883 | # Add texture nodes based on available maps 884 | tex_coord = nodes.new(type='ShaderNodeTexCoord') 885 | tex_coord.location = (-800, 0) 886 | 887 | mapping = nodes.new(type='ShaderNodeMapping') 888 | mapping.location = (-600, 0) 889 | mapping.vector_type = 'TEXTURE' # Changed from default 'POINT' to 'TEXTURE' 890 | links.new(tex_coord.outputs['UV'], mapping.inputs['Vector']) 891 | 892 | # Position offset for texture nodes 893 | 
x_pos = -400 894 | y_pos = 300 895 | 896 | # Connect different texture maps 897 | for map_type, image in texture_images.items(): 898 | tex_node = nodes.new(type='ShaderNodeTexImage') 899 | tex_node.location = (x_pos, y_pos) 900 | tex_node.image = image 901 | 902 | # Set color space based on map type 903 | if map_type.lower() in ['color', 'diffuse', 'albedo']: 904 | try: 905 | tex_node.image.colorspace_settings.name = 'sRGB' 906 | except: 907 | pass # Use default if sRGB not available 908 | else: 909 | try: 910 | tex_node.image.colorspace_settings.name = 'Non-Color' 911 | except: 912 | pass # Use default if Non-Color not available 913 | 914 | links.new(mapping.outputs['Vector'], tex_node.inputs['Vector']) 915 | 916 | # Connect to appropriate input on Principled BSDF 917 | if map_type.lower() in ['color', 'diffuse', 'albedo']: 918 | links.new(tex_node.outputs['Color'], principled.inputs['Base Color']) 919 | elif map_type.lower() in ['roughness', 'rough']: 920 | links.new(tex_node.outputs['Color'], principled.inputs['Roughness']) 921 | elif map_type.lower() in ['metallic', 'metalness', 'metal']: 922 | links.new(tex_node.outputs['Color'], principled.inputs['Metallic']) 923 | elif map_type.lower() in ['normal', 'nor', 'dx', 'gl']: 924 | # Add normal map node 925 | normal_map = nodes.new(type='ShaderNodeNormalMap') 926 | normal_map.location = (x_pos + 200, y_pos) 927 | links.new(tex_node.outputs['Color'], normal_map.inputs['Color']) 928 | links.new(normal_map.outputs['Normal'], principled.inputs['Normal']) 929 | elif map_type.lower() in ['displacement', 'disp', 'height']: 930 | # Add displacement node 931 | disp_node = nodes.new(type='ShaderNodeDisplacement') 932 | disp_node.location = (x_pos + 200, y_pos - 200) 933 | disp_node.inputs['Scale'].default_value = 0.1 # Reduce displacement strength 934 | links.new(tex_node.outputs['Color'], disp_node.inputs['Height']) 935 | links.new(disp_node.outputs['Displacement'], output.inputs['Displacement']) 936 | 937 | y_pos -= 250 
938 | 939 | # Second pass: Connect nodes with proper handling for special cases 940 | texture_nodes = {} 941 | 942 | # First find all texture nodes and store them by map type 943 | for node in nodes: 944 | if node.type == 'TEX_IMAGE' and node.image: 945 | for map_type, image in texture_images.items(): 946 | if node.image == image: 947 | texture_nodes[map_type] = node 948 | break 949 | 950 | # Now connect everything using the nodes instead of images 951 | # Handle base color (diffuse) 952 | for map_name in ['color', 'diffuse', 'albedo']: 953 | if map_name in texture_nodes: 954 | links.new(texture_nodes[map_name].outputs['Color'], principled.inputs['Base Color']) 955 | print(f"Connected {map_name} to Base Color") 956 | break 957 | 958 | # Handle roughness 959 | for map_name in ['roughness', 'rough']: 960 | if map_name in texture_nodes: 961 | links.new(texture_nodes[map_name].outputs['Color'], principled.inputs['Roughness']) 962 | print(f"Connected {map_name} to Roughness") 963 | break 964 | 965 | # Handle metallic 966 | for map_name in ['metallic', 'metalness', 'metal']: 967 | if map_name in texture_nodes: 968 | links.new(texture_nodes[map_name].outputs['Color'], principled.inputs['Metallic']) 969 | print(f"Connected {map_name} to Metallic") 970 | break 971 | 972 | # Handle normal maps 973 | for map_name in ['gl', 'dx', 'nor']: 974 | if map_name in texture_nodes: 975 | normal_map_node = nodes.new(type='ShaderNodeNormalMap') 976 | normal_map_node.location = (100, 100) 977 | links.new(texture_nodes[map_name].outputs['Color'], normal_map_node.inputs['Color']) 978 | links.new(normal_map_node.outputs['Normal'], principled.inputs['Normal']) 979 | print(f"Connected {map_name} to Normal") 980 | break 981 | 982 | # Handle displacement 983 | for map_name in ['displacement', 'disp', 'height']: 984 | if map_name in texture_nodes: 985 | disp_node = nodes.new(type='ShaderNodeDisplacement') 986 | disp_node.location = (300, -200) 987 | disp_node.inputs['Scale'].default_value = 0.1 # 
Reduce displacement strength 988 | links.new(texture_nodes[map_name].outputs['Color'], disp_node.inputs['Height']) 989 | links.new(disp_node.outputs['Displacement'], output.inputs['Displacement']) 990 | print(f"Connected {map_name} to Displacement") 991 | break 992 | 993 | # Handle ARM texture (Ambient Occlusion, Roughness, Metallic) 994 | if 'arm' in texture_nodes: 995 | separate_rgb = nodes.new(type='ShaderNodeSeparateRGB') 996 | separate_rgb.location = (-200, -100) 997 | links.new(texture_nodes['arm'].outputs['Color'], separate_rgb.inputs['Image']) 998 | 999 | # Connect Roughness (G) if no dedicated roughness map 1000 | if not any(map_name in texture_nodes for map_name in ['roughness', 'rough']): 1001 | links.new(separate_rgb.outputs['G'], principled.inputs['Roughness']) 1002 | print("Connected ARM.G to Roughness") 1003 | 1004 | # Connect Metallic (B) if no dedicated metallic map 1005 | if not any(map_name in texture_nodes for map_name in ['metallic', 'metalness', 'metal']): 1006 | links.new(separate_rgb.outputs['B'], principled.inputs['Metallic']) 1007 | print("Connected ARM.B to Metallic") 1008 | 1009 | # For AO (R channel), multiply with base color if we have one 1010 | base_color_node = None 1011 | for map_name in ['color', 'diffuse', 'albedo']: 1012 | if map_name in texture_nodes: 1013 | base_color_node = texture_nodes[map_name] 1014 | break 1015 | 1016 | if base_color_node: 1017 | mix_node = nodes.new(type='ShaderNodeMixRGB') 1018 | mix_node.location = (100, 200) 1019 | mix_node.blend_type = 'MULTIPLY' 1020 | mix_node.inputs['Fac'].default_value = 0.8 # 80% influence 1021 | 1022 | # Disconnect direct connection to base color 1023 | for link in base_color_node.outputs['Color'].links: 1024 | if link.to_socket == principled.inputs['Base Color']: 1025 | links.remove(link) 1026 | 1027 | # Connect through the mix node 1028 | links.new(base_color_node.outputs['Color'], mix_node.inputs[1]) 1029 | links.new(separate_rgb.outputs['R'], mix_node.inputs[2]) 1030 | 
links.new(mix_node.outputs['Color'], principled.inputs['Base Color']) 1031 | print("Connected ARM.R to AO mix with Base Color") 1032 | 1033 | # Handle AO (Ambient Occlusion) if separate 1034 | if 'ao' in texture_nodes: 1035 | base_color_node = None 1036 | for map_name in ['color', 'diffuse', 'albedo']: 1037 | if map_name in texture_nodes: 1038 | base_color_node = texture_nodes[map_name] 1039 | break 1040 | 1041 | if base_color_node: 1042 | mix_node = nodes.new(type='ShaderNodeMixRGB') 1043 | mix_node.location = (100, 200) 1044 | mix_node.blend_type = 'MULTIPLY' 1045 | mix_node.inputs['Fac'].default_value = 0.8 # 80% influence 1046 | 1047 | # Disconnect direct connection to base color 1048 | for link in base_color_node.outputs['Color'].links: 1049 | if link.to_socket == principled.inputs['Base Color']: 1050 | links.remove(link) 1051 | 1052 | # Connect through the mix node 1053 | links.new(base_color_node.outputs['Color'], mix_node.inputs[1]) 1054 | links.new(texture_nodes['ao'].outputs['Color'], mix_node.inputs[2]) 1055 | links.new(mix_node.outputs['Color'], principled.inputs['Base Color']) 1056 | print("Connected AO to mix with Base Color") 1057 | 1058 | # CRITICAL: Make sure to clear all existing materials from the object 1059 | while len(obj.data.materials) > 0: 1060 | obj.data.materials.pop(index=0) 1061 | 1062 | # Assign the new material to the object 1063 | obj.data.materials.append(new_mat) 1064 | 1065 | # CRITICAL: Make the object active and select it 1066 | bpy.context.view_layer.objects.active = obj 1067 | obj.select_set(True) 1068 | 1069 | # CRITICAL: Force Blender to update the material 1070 | bpy.context.view_layer.update() 1071 | 1072 | # Get the list of texture maps 1073 | texture_maps = list(texture_images.keys()) 1074 | 1075 | # Get info about texture nodes for debugging 1076 | material_info = { 1077 | "name": new_mat.name, 1078 | "has_nodes": new_mat.use_nodes, 1079 | "node_count": len(new_mat.node_tree.nodes), 1080 | "texture_nodes": [] 1081 | } 
            # Second pass over the finished node tree: record, for each image
            # texture node, where its outputs ended up connected.  This is
            # returned to the caller purely as debugging/inspection data.
            for node in new_mat.node_tree.nodes:
                if node.type == 'TEX_IMAGE' and node.image:
                    connections = []
                    for output in node.outputs:
                        for link in output.links:
                            connections.append(f"{output.name} → {link.to_node.name}.{link.to_socket.name}")

                    material_info["texture_nodes"].append({
                        "name": node.name,
                        "image": node.image.name,
                        "colorspace": node.image.colorspace_settings.name,
                        "connections": connections
                    })

            return {
                "success": True,
                "message": f"Created new material and applied texture {texture_id} to {object_name}",
                "material": new_mat.name,
                "maps": texture_maps,
                "material_info": material_info
            }

        except Exception as e:
            print(f"Error in set_texture: {str(e)}")
            traceback.print_exc()
            return {"error": f"Failed to apply texture: {str(e)}"}

    def get_polyhaven_status(self):
        """Get the current status of PolyHaven integration.

        Returns:
            dict: ``{"enabled": bool, "message": str}``; when disabled the
            message explains how to turn the feature on from the UI panel.
        """
        # Scene property registered by the addon, toggled from the N-panel.
        enabled = bpy.context.scene.blendermcp_use_polyhaven
        if enabled:
            return {"enabled": True, "message": "PolyHaven integration is enabled and ready to use."}
        else:
            return {
                "enabled": False,
                "message": """PolyHaven integration is currently disabled. To enable it:
                            1. In the 3D Viewport, find the BlenderMCP panel in the sidebar (press N if hidden)
                            2. Check the 'Use assets from Poly Haven' checkbox
                            3. Restart the connection to Claude"""
            }

    #region Hyper3D
    def get_hyper3d_status(self):
        """Get the current status of Hyper3D Rodin integration.

        Returns:
            dict: ``{"enabled": bool, "message": str}``.  ``enabled`` is True
            only when the feature checkbox is on AND an API key is present.
        """
        enabled = bpy.context.scene.blendermcp_use_hyper3d
        if enabled:
            # Feature switched on but unusable without an API key.
            if not bpy.context.scene.blendermcp_hyper3d_api_key:
                return {
                    "enabled": False,
                    "message": """Hyper3D Rodin integration is currently enabled, but API key is not given. To enable it:
                                1. In the 3D Viewport, find the BlenderMCP panel in the sidebar (press N if hidden)
                                2. Keep the 'Use Hyper3D Rodin 3D model generation' checkbox checked
                                3. Choose the right plaform and fill in the API Key
                                4. Restart the connection to Claude"""
                }
            mode = bpy.context.scene.blendermcp_hyper3d_mode
            # Distinguish the shared free-trial key from a user-supplied one.
            message = f"Hyper3D Rodin integration is enabled and ready to use. Mode: {mode}. " + \
                f"Key type: {'private' if bpy.context.scene.blendermcp_hyper3d_api_key != RODIN_FREE_TRIAL_KEY else 'free_trial'}"
            return {
                "enabled": True,
                "message": message
            }
        else:
            return {
                "enabled": False,
                "message": """Hyper3D Rodin integration is currently disabled. To enable it:
                            1. In the 3D Viewport, find the BlenderMCP panel in the sidebar (press N if hidden)
                            2. Check the 'Use Hyper3D Rodin 3D model generation' checkbox
                            3. Restart the connection to Claude"""
            }

    def create_rodin_job(self, *args, **kwargs):
        """Dispatch Rodin job creation to the backend selected in the UI.

        All arguments are forwarded untouched to the mode-specific
        implementation (main site or fal.ai).
        """
        match bpy.context.scene.blendermcp_hyper3d_mode:
            case "MAIN_SITE":
                return self.create_rodin_job_main_site(*args, **kwargs)
            case "FAL_AI":
                return self.create_rodin_job_fal_ai(*args, **kwargs)
            case _:
                return f"Error: Unknown Hyper3D Rodin mode!"
1162 | 1163 | def create_rodin_job_main_site( 1164 | self, 1165 | text_prompt: str=None, 1166 | images: list[tuple[str, str]]=None, 1167 | bbox_condition=None 1168 | ): 1169 | try: 1170 | if images is None: 1171 | images = [] 1172 | """Call Rodin API, get the job uuid and subscription key""" 1173 | files = [ 1174 | *[("images", (f"{i:04d}{img_suffix}", img)) for i, (img_suffix, img) in enumerate(images)], 1175 | ("tier", (None, "Sketch")), 1176 | ("mesh_mode", (None, "Raw")), 1177 | ] 1178 | if text_prompt: 1179 | files.append(("prompt", (None, text_prompt))) 1180 | if bbox_condition: 1181 | files.append(("bbox_condition", (None, json.dumps(bbox_condition)))) 1182 | response = requests.post( 1183 | "https://hyperhuman.deemos.com/api/v2/rodin", 1184 | headers={ 1185 | "Authorization": f"Bearer {bpy.context.scene.blendermcp_hyper3d_api_key}", 1186 | }, 1187 | files=files 1188 | ) 1189 | data = response.json() 1190 | return data 1191 | except Exception as e: 1192 | return {"error": str(e)} 1193 | 1194 | def create_rodin_job_fal_ai( 1195 | self, 1196 | text_prompt: str=None, 1197 | images: list[tuple[str, str]]=None, 1198 | bbox_condition=None 1199 | ): 1200 | try: 1201 | req_data = { 1202 | "tier": "Sketch", 1203 | } 1204 | if images: 1205 | req_data["input_image_urls"] = images 1206 | if text_prompt: 1207 | req_data["prompt"] = text_prompt 1208 | if bbox_condition: 1209 | req_data["bbox_condition"] = bbox_condition 1210 | response = requests.post( 1211 | "https://queue.fal.run/fal-ai/hyper3d/rodin", 1212 | headers={ 1213 | "Authorization": f"Key {bpy.context.scene.blendermcp_hyper3d_api_key}", 1214 | "Content-Type": "application/json", 1215 | }, 1216 | json=req_data 1217 | ) 1218 | data = response.json() 1219 | return data 1220 | except Exception as e: 1221 | return {"error": str(e)} 1222 | 1223 | def poll_rodin_job_status(self, *args, **kwargs): 1224 | match bpy.context.scene.blendermcp_hyper3d_mode: 1225 | case "MAIN_SITE": 1226 | return 
self.poll_rodin_job_status_main_site(*args, **kwargs) 1227 | case "FAL_AI": 1228 | return self.poll_rodin_job_status_fal_ai(*args, **kwargs) 1229 | case _: 1230 | return f"Error: Unknown Hyper3D Rodin mode!" 1231 | 1232 | def poll_rodin_job_status_main_site(self, subscription_key: str): 1233 | """Call the job status API to get the job status""" 1234 | response = requests.post( 1235 | "https://hyperhuman.deemos.com/api/v2/status", 1236 | headers={ 1237 | "Authorization": f"Bearer {bpy.context.scene.blendermcp_hyper3d_api_key}", 1238 | }, 1239 | json={ 1240 | "subscription_key": subscription_key, 1241 | }, 1242 | ) 1243 | data = response.json() 1244 | return { 1245 | "status_list": [i["status"] for i in data["jobs"]] 1246 | } 1247 | 1248 | def poll_rodin_job_status_fal_ai(self, request_id: str): 1249 | """Call the job status API to get the job status""" 1250 | response = requests.get( 1251 | f"https://queue.fal.run/fal-ai/hyper3d/requests/{request_id}/status", 1252 | headers={ 1253 | "Authorization": f"KEY {bpy.context.scene.blendermcp_hyper3d_api_key}", 1254 | }, 1255 | ) 1256 | data = response.json() 1257 | return data 1258 | 1259 | @staticmethod 1260 | def _clean_imported_glb(filepath, mesh_name=None): 1261 | # Get the set of existing objects before import 1262 | existing_objects = set(bpy.data.objects) 1263 | 1264 | # Import the GLB file 1265 | bpy.ops.import_scene.gltf(filepath=filepath) 1266 | 1267 | # Ensure the context is updated 1268 | bpy.context.view_layer.update() 1269 | 1270 | # Get all imported objects 1271 | imported_objects = list(set(bpy.data.objects) - existing_objects) 1272 | # imported_objects = [obj for obj in bpy.context.view_layer.objects if obj.select_get()] 1273 | 1274 | if not imported_objects: 1275 | print("Error: No objects were imported.") 1276 | return 1277 | 1278 | # Identify the mesh object 1279 | mesh_obj = None 1280 | 1281 | if len(imported_objects) == 1 and imported_objects[0].type == 'MESH': 1282 | mesh_obj = imported_objects[0] 
1283 | print("Single mesh imported, no cleanup needed.") 1284 | else: 1285 | if len(imported_objects) == 2: 1286 | empty_objs = [i for i in imported_objects if i.type == "EMPTY"] 1287 | if len(empty_objs) != 1: 1288 | print("Error: Expected an empty node with one mesh child or a single mesh object.") 1289 | return 1290 | parent_obj = empty_objs.pop() 1291 | if len(parent_obj.children) == 1: 1292 | potential_mesh = parent_obj.children[0] 1293 | if potential_mesh.type == 'MESH': 1294 | print("GLB structure confirmed: Empty node with one mesh child.") 1295 | 1296 | # Unparent the mesh from the empty node 1297 | potential_mesh.parent = None 1298 | 1299 | # Remove the empty node 1300 | bpy.data.objects.remove(parent_obj) 1301 | print("Removed empty node, keeping only the mesh.") 1302 | 1303 | mesh_obj = potential_mesh 1304 | else: 1305 | print("Error: Child is not a mesh object.") 1306 | return 1307 | else: 1308 | print("Error: Expected an empty node with one mesh child or a single mesh object.") 1309 | return 1310 | else: 1311 | print("Error: Expected an empty node with one mesh child or a single mesh object.") 1312 | return 1313 | 1314 | # Rename the mesh if needed 1315 | try: 1316 | if mesh_obj and mesh_obj.name is not None and mesh_name: 1317 | mesh_obj.name = mesh_name 1318 | if mesh_obj.data.name is not None: 1319 | mesh_obj.data.name = mesh_name 1320 | print(f"Mesh renamed to: {mesh_name}") 1321 | except Exception as e: 1322 | print("Having issue with renaming, give up renaming.") 1323 | 1324 | return mesh_obj 1325 | 1326 | def import_generated_asset(self, *args, **kwargs): 1327 | match bpy.context.scene.blendermcp_hyper3d_mode: 1328 | case "MAIN_SITE": 1329 | return self.import_generated_asset_main_site(*args, **kwargs) 1330 | case "FAL_AI": 1331 | return self.import_generated_asset_fal_ai(*args, **kwargs) 1332 | case _: 1333 | return f"Error: Unknown Hyper3D Rodin mode!" 
    def import_generated_asset_main_site(self, task_uuid: str, name: str):
        """Fetch the generated asset from the Rodin main site and import it
        into Blender.

        Args:
            task_uuid: UUID of the finished generation task.
            name: Name to assign to the imported mesh.

        Returns:
            dict: ``{"succeed": True, ...object info...}`` on success, or
            ``{"succeed": False, "error": str}`` on failure.
        """
        # NOTE(review): no timeout is set on this request, unlike the
        # Sketchfab calls in this file — confirm whether that is intended.
        response = requests.post(
            "https://hyperhuman.deemos.com/api/v2/download",
            headers={
                "Authorization": f"Bearer {bpy.context.scene.blendermcp_hyper3d_api_key}",
            },
            json={
                'task_uuid': task_uuid
            }
        )
        data_ = response.json()
        temp_file = None
        # Find the first .glb entry in the task's file list and download it.
        for i in data_["list"]:
            if i["name"].endswith(".glb"):
                temp_file = tempfile.NamedTemporaryFile(
                    delete=False,
                    prefix=task_uuid,
                    suffix=".glb",
                )

                try:
                    # Download the content
                    response = requests.get(i["url"], stream=True)
                    response.raise_for_status()  # Raise an exception for HTTP errors

                    # Write the content to the temporary file
                    for chunk in response.iter_content(chunk_size=8192):
                        temp_file.write(chunk)

                    # Close the file
                    temp_file.close()

                except Exception as e:
                    # Clean up the file if there's an error
                    temp_file.close()
                    os.unlink(temp_file.name)
                    return {"succeed": False, "error": str(e)}

                break
        else:
            # for/else: no .glb was found in the list (loop never hit break).
            return {"succeed": False, "error": "Generation failed. Please first make sure that all jobs of the task are done and then try again later."}

        # NOTE(review): the temp .glb is never deleted on the success path —
        # looks like a temp-file leak; confirm and clean up in a finally block.
        try:
            obj = self._clean_imported_glb(
                filepath=temp_file.name,
                mesh_name=name
            )
            result = {
                "name": obj.name,
                "type": obj.type,
                "location": [obj.location.x, obj.location.y, obj.location.z],
                "rotation": [obj.rotation_euler.x, obj.rotation_euler.y, obj.rotation_euler.z],
                "scale": [obj.scale.x, obj.scale.y, obj.scale.z],
            }

            if obj.type == "MESH":
                bounding_box = self._get_aabb(obj)
                result["world_bounding_box"] = bounding_box

            return {
                "succeed": True, **result
            }
        except Exception as e:
            return {"succeed": False, "error": str(e)}

    def import_generated_asset_fal_ai(self, request_id: str, name: str):
        """Fetch the generated asset from the fal.ai queue and import it into
        Blender.

        Args:
            request_id: Queue request id of the finished generation job.
            name: Name to assign to the imported mesh.

        Returns:
            dict: ``{"succeed": True, ...object info...}`` on success, or
            ``{"succeed": False, "error": str}`` on failure.
        """
        # NOTE(review): no timeout on this request, and the HTTP status is not
        # checked before indexing data_["model_mesh"] below — a failed request
        # would raise KeyError outside any try block; confirm intended.
        response = requests.get(
            f"https://queue.fal.run/fal-ai/hyper3d/requests/{request_id}",
            headers={
                "Authorization": f"Key {bpy.context.scene.blendermcp_hyper3d_api_key}",
            }
        )
        data_ = response.json()
        temp_file = None

        temp_file = tempfile.NamedTemporaryFile(
            delete=False,
            prefix=request_id,
            suffix=".glb",
        )

        try:
            # Download the content
            response = requests.get(data_["model_mesh"]["url"], stream=True)
            response.raise_for_status()  # Raise an exception for HTTP errors

            # Write the content to the temporary file
            for chunk in response.iter_content(chunk_size=8192):
                temp_file.write(chunk)

            # Close the file
            temp_file.close()

        except Exception as e:
            # Clean up the file if there's an error
            temp_file.close()
            os.unlink(temp_file.name)
            return {"succeed": False, "error": str(e)}

        try:
            obj = self._clean_imported_glb(
                filepath=temp_file.name,
1439 | mesh_name=name 1440 | ) 1441 | result = { 1442 | "name": obj.name, 1443 | "type": obj.type, 1444 | "location": [obj.location.x, obj.location.y, obj.location.z], 1445 | "rotation": [obj.rotation_euler.x, obj.rotation_euler.y, obj.rotation_euler.z], 1446 | "scale": [obj.scale.x, obj.scale.y, obj.scale.z], 1447 | } 1448 | 1449 | if obj.type == "MESH": 1450 | bounding_box = self._get_aabb(obj) 1451 | result["world_bounding_box"] = bounding_box 1452 | 1453 | return { 1454 | "succeed": True, **result 1455 | } 1456 | except Exception as e: 1457 | return {"succeed": False, "error": str(e)} 1458 | #endregion 1459 | 1460 | #region Sketchfab API 1461 | def get_sketchfab_status(self): 1462 | """Get the current status of Sketchfab integration""" 1463 | enabled = bpy.context.scene.blendermcp_use_sketchfab 1464 | api_key = bpy.context.scene.blendermcp_sketchfab_api_key 1465 | 1466 | # Test the API key if present 1467 | if api_key: 1468 | try: 1469 | headers = { 1470 | "Authorization": f"Token {api_key}" 1471 | } 1472 | 1473 | response = requests.get( 1474 | "https://api.sketchfab.com/v3/me", 1475 | headers=headers, 1476 | timeout=30 # Add timeout of 30 seconds 1477 | ) 1478 | 1479 | if response.status_code == 200: 1480 | user_data = response.json() 1481 | username = user_data.get("username", "Unknown user") 1482 | return { 1483 | "enabled": True, 1484 | "message": f"Sketchfab integration is enabled and ready to use. Logged in as: {username}" 1485 | } 1486 | else: 1487 | return { 1488 | "enabled": False, 1489 | "message": f"Sketchfab API key seems invalid. Status code: {response.status_code}" 1490 | } 1491 | except requests.exceptions.Timeout: 1492 | return { 1493 | "enabled": False, 1494 | "message": "Timeout connecting to Sketchfab API. Check your internet connection." 
1495 | } 1496 | except Exception as e: 1497 | return { 1498 | "enabled": False, 1499 | "message": f"Error testing Sketchfab API key: {str(e)}" 1500 | } 1501 | 1502 | if enabled and api_key: 1503 | return {"enabled": True, "message": "Sketchfab integration is enabled and ready to use."} 1504 | elif enabled and not api_key: 1505 | return { 1506 | "enabled": False, 1507 | "message": """Sketchfab integration is currently enabled, but API key is not given. To enable it: 1508 | 1. In the 3D Viewport, find the BlenderMCP panel in the sidebar (press N if hidden) 1509 | 2. Keep the 'Use Sketchfab' checkbox checked 1510 | 3. Enter your Sketchfab API Key 1511 | 4. Restart the connection to Claude""" 1512 | } 1513 | else: 1514 | return { 1515 | "enabled": False, 1516 | "message": """Sketchfab integration is currently disabled. To enable it: 1517 | 1. In the 3D Viewport, find the BlenderMCP panel in the sidebar (press N if hidden) 1518 | 2. Check the 'Use assets from Sketchfab' checkbox 1519 | 3. Enter your Sketchfab API Key 1520 | 4. 
Restart the connection to Claude""" 1521 | } 1522 | 1523 | def search_sketchfab_models(self, query, categories=None, count=20, downloadable=True): 1524 | """Search for models on Sketchfab based on query and optional filters""" 1525 | try: 1526 | api_key = bpy.context.scene.blendermcp_sketchfab_api_key 1527 | if not api_key: 1528 | return {"error": "Sketchfab API key is not configured"} 1529 | 1530 | # Build search parameters with exact fields from Sketchfab API docs 1531 | params = { 1532 | "type": "models", 1533 | "q": query, 1534 | "count": count, 1535 | "downloadable": downloadable, 1536 | "archives_flavours": False 1537 | } 1538 | 1539 | if categories: 1540 | params["categories"] = categories 1541 | 1542 | # Make API request to Sketchfab search endpoint 1543 | # The proper format according to Sketchfab API docs for API key auth 1544 | headers = { 1545 | "Authorization": f"Token {api_key}" 1546 | } 1547 | 1548 | 1549 | # Use the search endpoint as specified in the API documentation 1550 | response = requests.get( 1551 | "https://api.sketchfab.com/v3/search", 1552 | headers=headers, 1553 | params=params, 1554 | timeout=30 # Add timeout of 30 seconds 1555 | ) 1556 | 1557 | if response.status_code == 401: 1558 | return {"error": "Authentication failed (401). 
Check your API key."} 1559 | 1560 | if response.status_code != 200: 1561 | return {"error": f"API request failed with status code {response.status_code}"} 1562 | 1563 | response_data = response.json() 1564 | 1565 | # Safety check on the response structure 1566 | if response_data is None: 1567 | return {"error": "Received empty response from Sketchfab API"} 1568 | 1569 | # Handle 'results' potentially missing from response 1570 | results = response_data.get("results", []) 1571 | if not isinstance(results, list): 1572 | return {"error": f"Unexpected response format from Sketchfab API: {response_data}"} 1573 | 1574 | return response_data 1575 | 1576 | except requests.exceptions.Timeout: 1577 | return {"error": "Request timed out. Check your internet connection."} 1578 | except json.JSONDecodeError as e: 1579 | return {"error": f"Invalid JSON response from Sketchfab API: {str(e)}"} 1580 | except Exception as e: 1581 | import traceback 1582 | traceback.print_exc() 1583 | return {"error": str(e)} 1584 | 1585 | def download_sketchfab_model(self, uid): 1586 | """Download a model from Sketchfab by its UID""" 1587 | try: 1588 | api_key = bpy.context.scene.blendermcp_sketchfab_api_key 1589 | if not api_key: 1590 | return {"error": "Sketchfab API key is not configured"} 1591 | 1592 | # Use proper authorization header for API key auth 1593 | headers = { 1594 | "Authorization": f"Token {api_key}" 1595 | } 1596 | 1597 | # Request download URL using the exact endpoint from the documentation 1598 | download_endpoint = f"https://api.sketchfab.com/v3/models/{uid}/download" 1599 | 1600 | response = requests.get( 1601 | download_endpoint, 1602 | headers=headers, 1603 | timeout=30 # Add timeout of 30 seconds 1604 | ) 1605 | 1606 | if response.status_code == 401: 1607 | return {"error": "Authentication failed (401). 
Check your API key."} 1608 | 1609 | if response.status_code != 200: 1610 | return {"error": f"Download request failed with status code {response.status_code}"} 1611 | 1612 | data = response.json() 1613 | 1614 | # Safety check for None data 1615 | if data is None: 1616 | return {"error": "Received empty response from Sketchfab API for download request"} 1617 | 1618 | # Extract download URL with safety checks 1619 | gltf_data = data.get("gltf") 1620 | if not gltf_data: 1621 | return {"error": "No gltf download URL available for this model. Response: " + str(data)} 1622 | 1623 | download_url = gltf_data.get("url") 1624 | if not download_url: 1625 | return {"error": "No download URL available for this model. Make sure the model is downloadable and you have access."} 1626 | 1627 | # Download the model (already has timeout) 1628 | model_response = requests.get(download_url, timeout=60) # 60 second timeout 1629 | 1630 | if model_response.status_code != 200: 1631 | return {"error": f"Model download failed with status code {model_response.status_code}"} 1632 | 1633 | # Save to temporary file 1634 | temp_dir = tempfile.mkdtemp() 1635 | zip_file_path = os.path.join(temp_dir, f"{uid}.zip") 1636 | 1637 | with open(zip_file_path, "wb") as f: 1638 | f.write(model_response.content) 1639 | 1640 | # Extract the zip file with enhanced security 1641 | with zipfile.ZipFile(zip_file_path, 'r') as zip_ref: 1642 | # More secure zip slip prevention 1643 | for file_info in zip_ref.infolist(): 1644 | # Get the path of the file 1645 | file_path = file_info.filename 1646 | 1647 | # Convert directory separators to the current OS style 1648 | # This handles both / and \ in zip entries 1649 | target_path = os.path.join(temp_dir, os.path.normpath(file_path)) 1650 | 1651 | # Get absolute paths for comparison 1652 | abs_temp_dir = os.path.abspath(temp_dir) 1653 | abs_target_path = os.path.abspath(target_path) 1654 | 1655 | # Ensure the normalized path doesn't escape the target directory 1656 | if 
not abs_target_path.startswith(abs_temp_dir): 1657 | with suppress(Exception): 1658 | shutil.rmtree(temp_dir) 1659 | return {"error": "Security issue: Zip contains files with path traversal attempt"} 1660 | 1661 | # Additional explicit check for directory traversal 1662 | if ".." in file_path: 1663 | with suppress(Exception): 1664 | shutil.rmtree(temp_dir) 1665 | return {"error": "Security issue: Zip contains files with directory traversal sequence"} 1666 | 1667 | # If all files passed security checks, extract them 1668 | zip_ref.extractall(temp_dir) 1669 | 1670 | # Find the main glTF file 1671 | gltf_files = [f for f in os.listdir(temp_dir) if f.endswith('.gltf') or f.endswith('.glb')] 1672 | 1673 | if not gltf_files: 1674 | with suppress(Exception): 1675 | shutil.rmtree(temp_dir) 1676 | return {"error": "No glTF file found in the downloaded model"} 1677 | 1678 | main_file = os.path.join(temp_dir, gltf_files[0]) 1679 | 1680 | # Import the model 1681 | bpy.ops.import_scene.gltf(filepath=main_file) 1682 | 1683 | # Get the names of imported objects 1684 | imported_objects = [obj.name for obj in bpy.context.selected_objects] 1685 | 1686 | # Clean up temporary files 1687 | with suppress(Exception): 1688 | shutil.rmtree(temp_dir) 1689 | 1690 | return { 1691 | "success": True, 1692 | "message": "Model imported successfully", 1693 | "imported_objects": imported_objects 1694 | } 1695 | 1696 | except requests.exceptions.Timeout: 1697 | return {"error": "Request timed out. 
    def get_hunyuan3d_status(self):
        """Get the current status of Hunyuan3D integration.

        Returns a dict with "enabled" (bool), usually "mode", and a
        human-readable "message" telling the user how to finish setup
        when something is missing.
        """
        # Scene properties are declared in register() further down the file.
        enabled = bpy.context.scene.blendermcp_use_hunyuan3d
        hunyuan3d_mode = bpy.context.scene.blendermcp_hunyuan3d_mode
        if enabled:
            # Validate mode-specific settings. Each failing case returns
            # early; otherwise control falls through the match to the
            # "ready to use" return below it.
            match hunyuan3d_mode:
                case "OFFICIAL_API":
                    # The official Tencent API requires both credentials.
                    if not bpy.context.scene.blendermcp_hunyuan3d_secret_id or not bpy.context.scene.blendermcp_hunyuan3d_secret_key:
                        return {
                            "enabled": False,
                            "mode": hunyuan3d_mode,
                            "message": """Hunyuan3D integration is currently enabled, but SecretId or SecretKey is not given. To enable it:
1. In the 3D Viewport, find the BlenderMCP panel in the sidebar (press N if hidden)
2. Keep the 'Use Tencent Hunyuan 3D model generation' checkbox checked
3. Choose the right platform and fill in the SecretId and SecretKey
4. Restart the connection to Claude"""
                        }
                case "LOCAL_API":
                    # A locally hosted server only needs its base URL.
                    if not bpy.context.scene.blendermcp_hunyuan3d_api_url:
                        return {
                            "enabled": False,
                            "mode": hunyuan3d_mode,
                            "message": """Hunyuan3D integration is currently enabled, but API URL is not given. To enable it:
1. In the 3D Viewport, find the BlenderMCP panel in the sidebar (press N if hidden)
2. Keep the 'Use Tencent Hunyuan 3D model generation' checkbox checked
3. Choose the right platform and fill in the API URL
4. Restart the connection to Claude"""
                        }
                case _:
                    # Unknown enum value: report as not usable.
                    return {
                        "enabled": False,
                        "message": "Hunyuan3D integration is enabled and mode is not supported."
                    }
            # Mode checks passed — integration is fully configured.
            return {
                "enabled": True,
                "mode": hunyuan3d_mode,
                "message": "Hunyuan3D integration is enabled and ready to use."
            }
        return {
            "enabled": False,
            "message": """Hunyuan3D integration is currently disabled. To enable it:
1. In the 3D Viewport, find the BlenderMCP panel in the sidebar (press N if hidden)
2. Check the 'Use Tencent Hunyuan 3D model generation' checkbox
3. Restart the connection to Claude"""
        }
1739 | } 1740 | return { 1741 | "enabled": True, 1742 | "mode": hunyuan3d_mode, 1743 | "message": "Hunyuan3D integration is enabled and ready to use." 1744 | } 1745 | return { 1746 | "enabled": False, 1747 | "message": """Hunyuan3D integration is currently disabled. To enable it: 1748 | 1. In the 3D Viewport, find the BlenderMCP panel in the sidebar (press N if hidden) 1749 | 2. Check the 'Use Tencent Hunyuan 3D model generation' checkbox 1750 | 3. Restart the connection to Claude""" 1751 | } 1752 | 1753 | @staticmethod 1754 | def get_tencent_cloud_sign_headers( 1755 | method: str, 1756 | path: str, 1757 | headParams: dict, 1758 | data: dict, 1759 | service: str, 1760 | region: str, 1761 | secret_id: str, 1762 | secret_key: str, 1763 | host: str = None 1764 | ): 1765 | """Generate the signature header required for Tencent Cloud API requests headers""" 1766 | # Generate timestamp 1767 | timestamp = int(time.time()) 1768 | date = datetime.utcfromtimestamp(timestamp).strftime("%Y-%m-%d") 1769 | 1770 | # If host is not provided, it is generated based on service and region. 
1771 | if not host: 1772 | host = f"{service}.tencentcloudapi.com" 1773 | 1774 | endpoint = f"https://{host}" 1775 | 1776 | # Constructing the request body 1777 | payload_str = json.dumps(data) 1778 | 1779 | # ************* Step 1: Concatenate the canonical request string ************* 1780 | canonical_uri = path 1781 | canonical_querystring = "" 1782 | ct = "application/json; charset=utf-8" 1783 | canonical_headers = f"content-type:{ct}\nhost:{host}\nx-tc-action:{headParams.get('Action', '').lower()}\n" 1784 | signed_headers = "content-type;host;x-tc-action" 1785 | hashed_request_payload = hashlib.sha256(payload_str.encode("utf-8")).hexdigest() 1786 | 1787 | canonical_request = (method + "\n" + 1788 | canonical_uri + "\n" + 1789 | canonical_querystring + "\n" + 1790 | canonical_headers + "\n" + 1791 | signed_headers + "\n" + 1792 | hashed_request_payload) 1793 | 1794 | # ************* Step 2: Construct the reception signature string ************* 1795 | credential_scope = f"{date}/{service}/tc3_request" 1796 | hashed_canonical_request = hashlib.sha256(canonical_request.encode("utf-8")).hexdigest() 1797 | string_to_sign = ("TC3-HMAC-SHA256" + "\n" + 1798 | str(timestamp) + "\n" + 1799 | credential_scope + "\n" + 1800 | hashed_canonical_request) 1801 | 1802 | # ************* Step 3: Calculate the signature ************* 1803 | def sign(key, msg): 1804 | return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest() 1805 | 1806 | secret_date = sign(("TC3" + secret_key).encode("utf-8"), date) 1807 | secret_service = sign(secret_date, service) 1808 | secret_signing = sign(secret_service, "tc3_request") 1809 | signature = hmac.new( 1810 | secret_signing, 1811 | string_to_sign.encode("utf-8"), 1812 | hashlib.sha256 1813 | ).hexdigest() 1814 | 1815 | # ************* Step 4: Connect Authorization ************* 1816 | authorization = ("TC3-HMAC-SHA256" + " " + 1817 | "Credential=" + secret_id + "/" + credential_scope + ", " + 1818 | "SignedHeaders=" + signed_headers + 
", " + 1819 | "Signature=" + signature) 1820 | 1821 | # Constructing request headers 1822 | headers = { 1823 | "Authorization": authorization, 1824 | "Content-Type": "application/json; charset=utf-8", 1825 | "Host": host, 1826 | "X-TC-Action": headParams.get("Action", ""), 1827 | "X-TC-Timestamp": str(timestamp), 1828 | "X-TC-Version": headParams.get("Version", ""), 1829 | "X-TC-Region": region 1830 | } 1831 | 1832 | return headers, endpoint 1833 | 1834 | def create_hunyuan_job(self, *args, **kwargs): 1835 | match bpy.context.scene.blendermcp_hunyuan3d_mode: 1836 | case "OFFICIAL_API": 1837 | return self.create_hunyuan_job_main_site(*args, **kwargs) 1838 | case "LOCAL_API": 1839 | return self.create_hunyuan_job_local_site(*args, **kwargs) 1840 | case _: 1841 | return f"Error: Unknown Hunyuan3D mode!" 1842 | 1843 | def create_hunyuan_job_main_site( 1844 | self, 1845 | text_prompt: str = None, 1846 | image: str = None 1847 | ): 1848 | try: 1849 | secret_id = bpy.context.scene.blendermcp_hunyuan3d_secret_id 1850 | secret_key = bpy.context.scene.blendermcp_hunyuan3d_secret_key 1851 | 1852 | if not secret_id or not secret_key: 1853 | return {"error": "SecretId or SecretKey is not given"} 1854 | 1855 | # Parameter verification 1856 | if not text_prompt and not image: 1857 | return {"error": "Prompt or Image is required"} 1858 | if text_prompt and image: 1859 | return {"error": "Prompt and Image cannot be provided simultaneously"} 1860 | # Fixed parameter configuration 1861 | service = "hunyuan" 1862 | action = "SubmitHunyuanTo3DJob" 1863 | version = "2023-09-01" 1864 | region = "ap-guangzhou" 1865 | 1866 | headParams={ 1867 | "Action": action, 1868 | "Version": version, 1869 | "Region": region, 1870 | } 1871 | 1872 | # Constructing request parameters 1873 | data = { 1874 | "Num": 1 # The current API limit is only 1 1875 | } 1876 | 1877 | # Handling text prompts 1878 | if text_prompt: 1879 | if len(text_prompt) > 200: 1880 | return {"error": "Prompt exceeds 200 characters 
limit"} 1881 | data["Prompt"] = text_prompt 1882 | 1883 | # Handling image 1884 | if image: 1885 | if re.match(r'^https?://', image, re.IGNORECASE) is not None: 1886 | data["ImageUrl"] = image 1887 | else: 1888 | try: 1889 | # Convert to Base64 format 1890 | with open(image, "rb") as f: 1891 | image_base64 = base64.b64encode(f.read()).decode("ascii") 1892 | data["ImageBase64"] = image_base64 1893 | except Exception as e: 1894 | return {"error": f"Image encoding failed: {str(e)}"} 1895 | 1896 | # Get signed headers 1897 | headers, endpoint = self.get_tencent_cloud_sign_headers("POST", "/", headParams, data, service, region, secret_id, secret_key) 1898 | 1899 | response = requests.post( 1900 | endpoint, 1901 | headers = headers, 1902 | data = json.dumps(data) 1903 | ) 1904 | 1905 | if response.status_code == 200: 1906 | return response.json() 1907 | return { 1908 | "error": f"API request failed with status {response.status_code}: {response}" 1909 | } 1910 | except Exception as e: 1911 | return {"error": str(e)} 1912 | 1913 | def create_hunyuan_job_local_site( 1914 | self, 1915 | text_prompt: str = None, 1916 | image: str = None): 1917 | try: 1918 | base_url = bpy.context.scene.blendermcp_hunyuan3d_api_url.rstrip('/') 1919 | octree_resolution = bpy.context.scene.blendermcp_hunyuan3d_octree_resolution 1920 | num_inference_steps = bpy.context.scene.blendermcp_hunyuan3d_num_inference_steps 1921 | guidance_scale = bpy.context.scene.blendermcp_hunyuan3d_guidance_scale 1922 | texture = bpy.context.scene.blendermcp_hunyuan3d_texture 1923 | 1924 | if not base_url: 1925 | return {"error": "API URL is not given"} 1926 | # Parameter verification 1927 | if not text_prompt and not image: 1928 | return {"error": "Prompt or Image is required"} 1929 | 1930 | # Constructing request parameters 1931 | data = { 1932 | "octree_resolution": octree_resolution, 1933 | "num_inference_steps": num_inference_steps, 1934 | "guidance_scale": guidance_scale, 1935 | "texture": texture, 1936 | } 1937 
| 1938 | # Handling text prompts 1939 | if text_prompt: 1940 | data["text"] = text_prompt 1941 | 1942 | # Handling image 1943 | if image: 1944 | if re.match(r'^https?://', image, re.IGNORECASE) is not None: 1945 | try: 1946 | resImg = requests.get(image) 1947 | resImg.raise_for_status() 1948 | image_base64 = base64.b64encode(resImg.content).decode("ascii") 1949 | data["image"] = image_base64 1950 | except Exception as e: 1951 | return {"error": f"Failed to download or encode image: {str(e)}"} 1952 | else: 1953 | try: 1954 | # Convert to Base64 format 1955 | with open(image, "rb") as f: 1956 | image_base64 = base64.b64encode(f.read()).decode("ascii") 1957 | data["image"] = image_base64 1958 | except Exception as e: 1959 | return {"error": f"Image encoding failed: {str(e)}"} 1960 | 1961 | response = requests.post( 1962 | f"{base_url}/generate", 1963 | json = data, 1964 | ) 1965 | 1966 | if response.status_code != 200: 1967 | return { 1968 | "error": f"Generation failed: {response.text}" 1969 | } 1970 | 1971 | # Decode base64 and save to temporary file 1972 | with tempfile.NamedTemporaryFile(delete=False, suffix=".glb") as temp_file: 1973 | temp_file.write(response.content) 1974 | temp_file_name = temp_file.name 1975 | 1976 | # Import the GLB file in the main thread 1977 | def import_handler(): 1978 | bpy.ops.import_scene.gltf(filepath=temp_file_name) 1979 | os.unlink(temp_file.name) 1980 | return None 1981 | 1982 | bpy.app.timers.register(import_handler) 1983 | 1984 | return { 1985 | "status": "DONE", 1986 | "message": "Generation and Import glb succeeded" 1987 | } 1988 | except Exception as e: 1989 | print(f"An error occurred: {e}") 1990 | return {"error": str(e)} 1991 | 1992 | 1993 | def poll_hunyuan_job_status(self, *args, **kwargs): 1994 | return self.poll_hunyuan_job_status_ai(*args, **kwargs) 1995 | 1996 | def poll_hunyuan_job_status_ai(self, job_id: str): 1997 | """Call the job status API to get the job status""" 1998 | print(job_id) 1999 | try: 2000 | 
secret_id = bpy.context.scene.blendermcp_hunyuan3d_secret_id 2001 | secret_key = bpy.context.scene.blendermcp_hunyuan3d_secret_key 2002 | 2003 | if not secret_id or not secret_key: 2004 | return {"error": "SecretId or SecretKey is not given"} 2005 | if not job_id: 2006 | return {"error": "JobId is required"} 2007 | 2008 | service = "hunyuan" 2009 | action = "QueryHunyuanTo3DJob" 2010 | version = "2023-09-01" 2011 | region = "ap-guangzhou" 2012 | 2013 | headParams={ 2014 | "Action": action, 2015 | "Version": version, 2016 | "Region": region, 2017 | } 2018 | 2019 | clean_job_id = job_id.removeprefix("job_") 2020 | data = { 2021 | "JobId": clean_job_id 2022 | } 2023 | 2024 | headers, endpoint = self.get_tencent_cloud_sign_headers("POST", "/", headParams, data, service, region, secret_id, secret_key) 2025 | 2026 | response = requests.post( 2027 | endpoint, 2028 | headers=headers, 2029 | data=json.dumps(data) 2030 | ) 2031 | 2032 | if response.status_code == 200: 2033 | return response.json() 2034 | return { 2035 | "error": f"API request failed with status {response.status_code}: {response}" 2036 | } 2037 | except Exception as e: 2038 | return {"error": str(e)} 2039 | 2040 | def import_generated_asset_hunyuan(self, *args, **kwargs): 2041 | return self.import_generated_asset_hunyuan_ai(*args, **kwargs) 2042 | 2043 | def import_generated_asset_hunyuan_ai(self, name: str , zip_file_url: str): 2044 | if not zip_file_url: 2045 | return {"error": "Zip file not found"} 2046 | 2047 | # Validate URL 2048 | if not re.match(r'^https?://', zip_file_url, re.IGNORECASE): 2049 | return {"error": "Invalid URL format. 
Must start with http:// or https://"} 2050 | 2051 | # Create a temporary directory 2052 | temp_dir = tempfile.mkdtemp(prefix="tencent_obj_") 2053 | zip_file_path = osp.join(temp_dir, "model.zip") 2054 | obj_file_path = osp.join(temp_dir, "model.obj") 2055 | mtl_file_path = osp.join(temp_dir, "model.mtl") 2056 | 2057 | try: 2058 | # Download ZIP file 2059 | zip_response = requests.get(zip_file_url, stream=True) 2060 | zip_response.raise_for_status() 2061 | with open(zip_file_path, "wb") as f: 2062 | for chunk in zip_response.iter_content(chunk_size=8192): 2063 | f.write(chunk) 2064 | 2065 | # Unzip the ZIP 2066 | with zipfile.ZipFile(zip_file_path, "r") as zip_ref: 2067 | zip_ref.extractall(temp_dir) 2068 | 2069 | # Find the .obj file (there may be multiple, assuming the main file is model.obj) 2070 | for file in os.listdir(temp_dir): 2071 | if file.endswith(".obj"): 2072 | obj_file_path = osp.join(temp_dir, file) 2073 | 2074 | if not osp.exists(obj_file_path): 2075 | return {"succeed": False, "error": "OBJ file not found after extraction"} 2076 | 2077 | # Import obj file 2078 | if bpy.app.version>=(4, 0, 0): 2079 | bpy.ops.wm.obj_import(filepath=obj_file_path) 2080 | else: 2081 | bpy.ops.import_scene.obj(filepath=obj_file_path) 2082 | 2083 | imported_objs = [obj for obj in bpy.context.selected_objects if obj.type == 'MESH'] 2084 | if not imported_objs: 2085 | return {"succeed": False, "error": "No mesh objects imported"} 2086 | 2087 | obj = imported_objs[0] 2088 | if name: 2089 | obj.name = name 2090 | 2091 | result = { 2092 | "name": obj.name, 2093 | "type": obj.type, 2094 | "location": [obj.location.x, obj.location.y, obj.location.z], 2095 | "rotation": [obj.rotation_euler.x, obj.rotation_euler.y, obj.rotation_euler.z], 2096 | "scale": [obj.scale.x, obj.scale.y, obj.scale.z], 2097 | } 2098 | 2099 | if obj.type == "MESH": 2100 | bounding_box = self._get_aabb(obj) 2101 | result["world_bounding_box"] = bounding_box 2102 | 2103 | return {"succeed": True, **result} 
# Blender UI Panel
class BLENDERMCP_PT_Panel(bpy.types.Panel):
    """Sidebar panel (N-panel, 'BlenderMCP' tab) with all addon settings."""
    bl_label = "Blender MCP"
    bl_idname = "BLENDERMCP_PT_Panel"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = 'BlenderMCP'

    def draw(self, context):
        """Draw the settings UI; rows appear in the order added below."""
        layout = self.layout
        scene = context.scene

        layout.prop(scene, "blendermcp_port")
        layout.prop(scene, "blendermcp_use_polyhaven", text="Use assets from Poly Haven")

        # Hyper3D Rodin: sub-options only shown once the feature is enabled
        layout.prop(scene, "blendermcp_use_hyper3d", text="Use Hyper3D Rodin 3D model generation")
        if scene.blendermcp_use_hyper3d:
            layout.prop(scene, "blendermcp_hyper3d_mode", text="Rodin Mode")
            layout.prop(scene, "blendermcp_hyper3d_api_key", text="API Key")
            layout.operator("blendermcp.set_hyper3d_free_trial_api_key", text="Set Free Trial API Key")

        # Sketchfab: only the API key is needed
        layout.prop(scene, "blendermcp_use_sketchfab", text="Use assets from Sketchfab")
        if scene.blendermcp_use_sketchfab:
            layout.prop(scene, "blendermcp_sketchfab_api_key", text="API Key")

        # Hunyuan3D: fields depend on the selected backend mode
        layout.prop(scene, "blendermcp_use_hunyuan3d", text="Use Tencent Hunyuan 3D model generation")
        if scene.blendermcp_use_hunyuan3d:
            layout.prop(scene, "blendermcp_hunyuan3d_mode", text="Hunyuan3D Mode")
            if scene.blendermcp_hunyuan3d_mode == 'OFFICIAL_API':
                layout.prop(scene, "blendermcp_hunyuan3d_secret_id", text="SecretId")
                layout.prop(scene, "blendermcp_hunyuan3d_secret_key", text="SecretKey")
            if scene.blendermcp_hunyuan3d_mode == 'LOCAL_API':
                layout.prop(scene, "blendermcp_hunyuan3d_api_url", text="API URL")
                layout.prop(scene, "blendermcp_hunyuan3d_octree_resolution", text="Octree Resolution")
                layout.prop(scene, "blendermcp_hunyuan3d_num_inference_steps", text="Number of Inference Steps")
                layout.prop(scene, "blendermcp_hunyuan3d_guidance_scale", text="Guidance Scale")
                layout.prop(scene, "blendermcp_hunyuan3d_texture", text="Generate Texture")

        # Connect/disconnect toggle depending on server state
        if not scene.blendermcp_server_running:
            layout.operator("blendermcp.start_server", text="Connect to MCP server")
        else:
            layout.operator("blendermcp.stop_server", text="Disconnect from MCP server")
            layout.label(text=f"Running on port {scene.blendermcp_port}")
# Operator to set Hyper3D API Key
class BLENDERMCP_OT_SetFreeTrialHyper3DAPIKey(bpy.types.Operator):
    """Fill in the bundled free-trial Hyper3D key and switch to main-site mode."""
    bl_idname = "blendermcp.set_hyper3d_free_trial_api_key"
    bl_label = "Set Free Trial API Key"

    def execute(self, context):
        scene = context.scene
        scene.blendermcp_hyper3d_api_key = RODIN_FREE_TRIAL_KEY
        scene.blendermcp_hyper3d_mode = 'MAIN_SITE'
        self.report({'INFO'}, "API Key set successfully!")
        return {'FINISHED'}

# Operator to start the server
class BLENDERMCP_OT_StartServer(bpy.types.Operator):
    """Create (if needed) and start the BlenderMCP socket server."""
    bl_idname = "blendermcp.start_server"
    bl_label = "Connect to Claude"
    bl_description = "Start the BlenderMCP server to connect with Claude"

    def execute(self, context):
        scene = context.scene

        # Reuse an existing server instance when one is already attached
        # to bpy.types; otherwise create one on the configured port.
        server = getattr(bpy.types, "blendermcp_server", None)
        if not server:
            server = BlenderMCPServer(port=scene.blendermcp_port)
            bpy.types.blendermcp_server = server

        server.start()
        scene.blendermcp_server_running = True

        return {'FINISHED'}
# Operator to stop the server
class BLENDERMCP_OT_StopServer(bpy.types.Operator):
    """Stop the BlenderMCP socket server and drop the shared instance."""
    bl_idname = "blendermcp.stop_server"
    bl_label = "Stop the connection to Claude"
    bl_description = "Stop the connection to Claude"

    def execute(self, context):
        scene = context.scene

        # Stop the server if it exists
        if hasattr(bpy.types, "blendermcp_server") and bpy.types.blendermcp_server:
            bpy.types.blendermcp_server.stop()
            del bpy.types.blendermcp_server

        scene.blendermcp_server_running = False

        return {'FINISHED'}

# Registration functions
def register():
    """Register all scene properties and UI classes for the addon.

    Every property added here must be removed again in unregister().
    """
    # Use the fully qualified bpy.props.* form throughout for consistency.
    bpy.types.Scene.blendermcp_port = bpy.props.IntProperty(
        name="Port",
        description="Port for the BlenderMCP server",
        default=9876,
        min=1024,
        max=65535
    )

    bpy.types.Scene.blendermcp_server_running = bpy.props.BoolProperty(
        name="Server Running",
        default=False
    )

    bpy.types.Scene.blendermcp_use_polyhaven = bpy.props.BoolProperty(
        name="Use Poly Haven",
        description="Enable Poly Haven asset integration",
        default=False
    )

    bpy.types.Scene.blendermcp_use_hyper3d = bpy.props.BoolProperty(
        name="Use Hyper3D Rodin",
        # typo fix: "generatino" -> "generation" (user-visible tooltip)
        description="Enable Hyper3D Rodin generation integration",
        default=False
    )

    bpy.types.Scene.blendermcp_hyper3d_mode = bpy.props.EnumProperty(
        name="Rodin Mode",
        description="Choose the platform used to call Rodin APIs",
        items=[
            ("MAIN_SITE", "hyper3d.ai", "hyper3d.ai"),
            ("FAL_AI", "fal.ai", "fal.ai"),
        ],
        default="MAIN_SITE"
    )

    bpy.types.Scene.blendermcp_hyper3d_api_key = bpy.props.StringProperty(
        name="Hyper3D API Key",
        subtype="PASSWORD",
        description="API Key provided by Hyper3D",
        default=""
    )

    bpy.types.Scene.blendermcp_use_hunyuan3d = bpy.props.BoolProperty(
        name="Use Hunyuan 3D",
        description="Enable Hunyuan asset integration",
        default=False
    )

    bpy.types.Scene.blendermcp_hunyuan3d_mode = bpy.props.EnumProperty(
        name="Hunyuan3D Mode",
        description="Choose the local or official API",
        items=[
            ("LOCAL_API", "local api", "local api"),
            ("OFFICIAL_API", "official api", "official api"),
        ],
        default="LOCAL_API"
    )

    bpy.types.Scene.blendermcp_hunyuan3d_secret_id = bpy.props.StringProperty(
        name="Hunyuan 3D SecretId",
        description="SecretId provided by Hunyuan 3D",
        default=""
    )

    bpy.types.Scene.blendermcp_hunyuan3d_secret_key = bpy.props.StringProperty(
        name="Hunyuan 3D SecretKey",
        subtype="PASSWORD",
        description="SecretKey provided by Hunyuan 3D",
        default=""
    )

    bpy.types.Scene.blendermcp_hunyuan3d_api_url = bpy.props.StringProperty(
        name="API URL",
        description="URL of the Hunyuan 3D API service",
        default="http://localhost:8081"
    )

    bpy.types.Scene.blendermcp_hunyuan3d_octree_resolution = bpy.props.IntProperty(
        name="Octree Resolution",
        description="Octree resolution for the 3D generation",
        default=256,
        min=128,
        max=512,
    )

    bpy.types.Scene.blendermcp_hunyuan3d_num_inference_steps = bpy.props.IntProperty(
        name="Number of Inference Steps",
        description="Number of inference steps for the 3D generation",
        default=20,
        min=20,
        max=50,
    )

    bpy.types.Scene.blendermcp_hunyuan3d_guidance_scale = bpy.props.FloatProperty(
        name="Guidance Scale",
        description="Guidance scale for the 3D generation",
        default=5.5,
        min=1.0,
        max=10.0,
    )

    bpy.types.Scene.blendermcp_hunyuan3d_texture = bpy.props.BoolProperty(
        name="Generate Texture",
        description="Whether to generate texture for the 3D model",
        default=False,
    )

    bpy.types.Scene.blendermcp_use_sketchfab = bpy.props.BoolProperty(
        name="Use Sketchfab",
        description="Enable Sketchfab asset integration",
        default=False
    )

    bpy.types.Scene.blendermcp_sketchfab_api_key = bpy.props.StringProperty(
        name="Sketchfab API Key",
        subtype="PASSWORD",
        description="API Key provided by Sketchfab",
        default=""
    )

    bpy.utils.register_class(BLENDERMCP_PT_Panel)
    bpy.utils.register_class(BLENDERMCP_OT_SetFreeTrialHyper3DAPIKey)
    bpy.utils.register_class(BLENDERMCP_OT_StartServer)
    bpy.utils.register_class(BLENDERMCP_OT_StopServer)

    print("BlenderMCP addon registered")
def unregister():
    """Tear down the addon: stop the server, then remove classes and props."""
    # Stop the server if it's running, then drop the shared instance
    server = getattr(bpy.types, "blendermcp_server", None)
    if server:
        server.stop()
        del bpy.types.blendermcp_server

    # Unregister UI classes in the same order register() added them
    for cls in (
        BLENDERMCP_PT_Panel,
        BLENDERMCP_OT_SetFreeTrialHyper3DAPIKey,
        BLENDERMCP_OT_StartServer,
        BLENDERMCP_OT_StopServer,
    ):
        bpy.utils.unregister_class(cls)

    # Remove every scene property the addon registered
    for prop_name in (
        "blendermcp_port",
        "blendermcp_server_running",
        "blendermcp_use_polyhaven",
        "blendermcp_use_hyper3d",
        "blendermcp_hyper3d_mode",
        "blendermcp_hyper3d_api_key",
        "blendermcp_use_sketchfab",
        "blendermcp_sketchfab_api_key",
        "blendermcp_use_hunyuan3d",
        "blendermcp_hunyuan3d_mode",
        "blendermcp_hunyuan3d_secret_id",
        "blendermcp_hunyuan3d_secret_key",
        "blendermcp_hunyuan3d_api_url",
        "blendermcp_hunyuan3d_octree_resolution",
        "blendermcp_hunyuan3d_num_inference_steps",
        "blendermcp_hunyuan3d_guidance_scale",
        "blendermcp_hunyuan3d_texture",
    ):
        delattr(bpy.types.Scene, prop_name)

    print("BlenderMCP addon unregistered")

if __name__ == "__main__":
    register()