├── README.md ├── RH_AudioUploader.py ├── RH_ExecuteNode.py ├── RH_ImageUploaderNode.py ├── RH_NodeInfoListNode.py ├── RH_SettingsNode.py ├── RH_Utils.py ├── RH_VideoUploader.py ├── __init__.py ├── examples ├── rh_audio_uploader_tts.json ├── rh_execute_others_allinone.json ├── rh_image_to_image.json ├── rh_image_to_image_webapp.json ├── rh_image_to_video_FramePack.json ├── rh_save_video.json ├── rh_text_to_image.json ├── rh_text_to_image_webapp.json └── rh_video_uploader.json ├── requirements.txt └── web └── js ├── audioUploaderNode.js └── videoUploaderNode.js /README.md: -------------------------------------------------------------------------------- 1 | # 更新说明 - [2025-05-05] 2 | 3 | 本次重大升级,增强了功能并大幅提升了稳定性。 4 | 5 | ## 主要更新亮点 6 | * **支持调用AI WebAPP**: 7 | * **全面的输入支持**: 8 | * **新增**: 支持 video、audio的文件上传。 9 | * **全面的输出支持**: 10 | * **新增**: 支持 images、video frames、Latent、Text、Audio 文件输出。 11 | 12 | * **实时进度与监控**: 13 | * 增加了 ComfyUI **进度条** 显示,并提供任务状态反馈,更完善的控制台log输出 14 | * **增强的可靠性、并发控制**: 15 | # ComfyUI 插件:调用和执行 RunningHub 工作流 16 | 17 | 该插件用于在本地 ComfyUI 环境中便捷地调用和执行 RunningHub 上的工作流。它是对 [RunningHub API 文档](https://gold-spleen-bf1.notion.site/RunningHub-API-1432ece0cf5f8026aaa8e4b9190f6f8e) 的一个 ComfyUI 实现。在使用本插件之前建议花2分钟阅读。如果你希望扩展该插件,或在使用过程中遇到问题,请参考上述文档。 18 | ### 可以通过本插件,将RunningHub方便的接入[Photoshop](https://github.com/NimaNzrii/comfyui-photoshop),[变现宝](https://github.com/zhulu111/ComfyUI_Bxb) 等各种插件 19 | 20 | ## 使用步骤 21 | 22 | ### 1. 安装插件 23 | 在终端中运行以下命令以克隆插件到本地: 24 | ```bash 25 | git clone https://github.com/HM-RunningHub/ComfyUI_RH_APICall 26 | ``` 27 | ### 2. 注册并获取 API Key 28 | 访问 [RunningHub 官网](https://www.runninghub.cn) 注册账户并获取你的 API Key。 29 | 30 | ### 3. 在 ComfyUI 中调用 RunningHub 上的个人工作流 31 | 完成安装和配置后,你就可以在本地的 ComfyUI 环境中调用 RunningHub 上的个人工作流了。请把示例中的相关配置参数改成你自己的。 32 | 33 | ### 4. 
import requests
import json
import os
import time

# folder_paths only exists inside a running ComfyUI environment; degrade
# gracefully so this module can still be imported elsewhere (e.g. tooling).
try:
    import folder_paths
    comfyui_env_available = True
except ImportError:
    folder_paths = None  # Set to None if not available
    comfyui_env_available = False
    print("RH_AudioUploader: ComfyUI folder_paths not found. Cannot determine input file path.")


class RH_AudioUploader:
    """ComfyUI node that uploads a local input audio file to RunningHub.

    The ``audio`` widget holds the filename assigned by ComfyUI's upload
    endpoint (populated by the node's JS widget). The node resolves that
    file in ComfyUI's input directory, POSTs it to RunningHub's
    ``/task/openapi/upload`` endpoint and returns the remote filename/ID.
    """

    # Hard cap per HTTP attempt so a stalled upload cannot hang the worker
    # forever (fix: the original requests.post call had no timeout at all).
    REQUEST_TIMEOUT = 60  # seconds

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "apiConfig": ("STRUCT",),
                # This input receives the filename assigned by ComfyUI's
                # upload endpoint via JS.
                "audio": ("STRING", {"default": "", "multiline": False}),
            }
        }

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("filename",)
    FUNCTION = "upload_and_get_filename"
    CATEGORY = "RunningHub"
    OUTPUT_NODE = False

    def upload_and_get_filename(self, apiConfig, audio):
        """Upload the selected audio file and return RunningHub's filename/ID.

        Args:
            apiConfig: dict with at least ``apiKey`` and ``base_url``.
            audio: filename previously stored in ComfyUI's input directory.

        Returns:
            1-tuple containing the filename/ID reported by the RunningHub API.

        Raises:
            ImportError: outside a ComfyUI environment (no ``folder_paths``).
            ValueError: bad config, empty filename, or malformed API response.
            FileNotFoundError: the audio file cannot be located on disk.
            ConnectionError: upload failed after all retries.
        """
        # 1. Validate inputs.
        if not comfyui_env_available or not folder_paths:
            raise ImportError("folder_paths module is required for RH_AudioUploader to find input files.")

        if not isinstance(apiConfig, dict) or not apiConfig.get("apiKey") or not apiConfig.get("base_url"):
            raise ValueError("Invalid or missing apiConfig structure provided to RH_AudioUploader.")

        if not audio or audio.strip() == "":
            raise ValueError("No audio filename provided. Please select and upload an audio file using the node's widget.")

        apiKey = apiConfig["apiKey"]
        baseUrl = apiConfig["base_url"]

        # 2. Resolve the uploaded file inside ComfyUI's input directory:
        #    annotated path first, then the two known fallback locations.
        try:
            audio_path = folder_paths.get_annotated_filepath(audio)
            if not audio_path or not os.path.exists(audio_path):
                candidates = [
                    os.path.join(folder_paths.get_input_directory(), audio),
                    os.path.join(folder_paths.get_input_directory(), 'uploads', audio),
                ]
                audio_path = next((p for p in candidates if os.path.exists(p)), None)
                if audio_path is None:
                    raise FileNotFoundError(f"Audio file not found in input directory: {audio}")

            print(f"RH_AudioUploader: Found audio file at: {audio_path}")

        except Exception as e:
            raise FileNotFoundError(f"Error finding audio file '{audio}': {e}")

        # 3. Prepare the RunningHub upload request (same endpoint as image/video).
        upload_api_url = f"{baseUrl}/task/openapi/upload"
        headers = {
            'User-Agent': 'ComfyUI-RH_AudioUploader/1.0'
        }
        data = {
            'apiKey': apiKey,
            'fileType': 'audio'
        }

        # 4. Upload with exponential-backoff retries.
        print(f"RH_AudioUploader: Uploading {audio_path} to {upload_api_url}...")
        max_retries = 5
        retry_delay = 1
        last_exception = None
        response = None

        for attempt in range(max_retries):
            try:
                with open(audio_path, 'rb') as f:
                    files = {
                        'file': (os.path.basename(audio_path), f)
                    }
                    # timeout added so a dead connection fails fast and retries.
                    response = requests.post(upload_api_url, headers=headers, data=data,
                                             files=files, timeout=self.REQUEST_TIMEOUT)
                print(f"RH_AudioUploader: Upload attempt {attempt + 1}/{max_retries} - Status Code: {response.status_code}")
                response.raise_for_status()
                break  # Success
            except requests.exceptions.RequestException as e:
                last_exception = e
                print(f"RH_AudioUploader: Upload attempt {attempt + 1} failed: {e}")
                if getattr(e, 'response', None) is not None:
                    print(f"RH_AudioUploader: Response content on error: {e.response.text}")
                if attempt < max_retries - 1:
                    print(f"RH_AudioUploader: Retrying in {retry_delay} seconds...")
                    time.sleep(retry_delay)
                    retry_delay *= 2
                else:
                    print(f"RH_AudioUploader: Max retries ({max_retries}) reached.")
                    raise ConnectionError(f"Failed to upload audio to RunningHub API after {max_retries} attempts. Last error: {last_exception}") from last_exception

        if response is None:
            raise ConnectionError(f"Audio upload failed after {max_retries} attempts, no response received. Last error: {last_exception}")

        # 5. Parse the response; the API signals success with code == 0.
        try:
            response_json = response.json()
            print(f"RH_AudioUploader: Upload API Response JSON: {response_json}")
        except json.JSONDecodeError as e:
            print(f"RH_AudioUploader: Failed to decode JSON response: {response.text}")
            raise ValueError(f"Failed to decode API response after successful upload: {e}") from e

        if response_json.get('code') != 0:
            raise ValueError(f"RunningHub API reported an error after upload: {response_json.get('msg', 'Unknown API error')}")

        # 'data' may be either a dict carrying 'fileName' or a bare string.
        rh_data = response_json.get("data", {})
        uploaded_filename = None
        if isinstance(rh_data, dict):
            uploaded_filename = rh_data.get("fileName")
        elif isinstance(rh_data, str):
            uploaded_filename = rh_data

        if not isinstance(uploaded_filename, str) or not uploaded_filename:
            raise ValueError("Upload succeeded but 'fileName' not found in RunningHub API response.data.")

        print(f"RH_AudioUploader: Upload successful. RunningHub filename/ID: {uploaded_filename}")
        return (uploaded_filename,)
RunningHub filename/ID: {uploaded_filename}") 135 | return (uploaded_filename,) -------------------------------------------------------------------------------- /RH_ExecuteNode.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import time 3 | import json 4 | from PIL import Image, ImageDraw, ImageFont 5 | from io import BytesIO 6 | import numpy as np 7 | import torch 8 | import os 9 | import websocket # 需要安装 websocket-client 包 10 | import threading 11 | import comfy.utils # Import comfy utils for ProgressBar 12 | import cv2 # <<< Added import for OpenCV 13 | import safetensors.torch # <<< Added safetensors import 14 | import torchaudio 15 | import torch.nn.functional as F # <<< Add F for padding 16 | 17 | # Try importing folder_paths safely 18 | try: 19 | import folder_paths 20 | comfyui_env_available = True # Use a more generic name 21 | except ImportError: 22 | comfyui_env_available = False 23 | print("ComfyUI folder_paths not found. 
class ExecuteNode:
    """ComfyUI node that runs a RunningHub workflow/webapp task remotely.

    Progress is tracked over a WebSocket (one step per newly-seen node id)
    with periodic HTTP status polling as a fallback. All state shared between
    the WebSocket thread and the main thread is guarded by ``node_lock``.
    """

    ESTIMATED_TOTAL_NODES = 10  # Fallback estimate when real count is unknown

    def __init__(self):
        self.ws = None                  # active websocket.WebSocketApp, if any
        self.task_completed = False     # set exactly once, under node_lock
        self.ws_error = None            # first error observed (WS or polling)
        self.executed_nodes = set()     # node ids already counted for progress
        self.prompt_tips = "{}"
        self.pbar = None                # comfy.utils.ProgressBar instance
        self.node_lock = threading.Lock()
        self.total_nodes = None         # actual or estimated node count
        self.current_steps = 0          # steps already reported to pbar

    def update_progress(self):
        """Increment the progress bar by one step, capped at total_nodes.

        Thread-safe; silently ignored once the task is completed or when the
        bar is missing / already full.
        """
        with self.node_lock:
            # Guard 1: never move the bar after completion.
            if self.task_completed:
                return
            # Guard 2: need a bar, and must not exceed the declared total.
            if not self.pbar or self.current_steps >= self.total_nodes:
                return

            self.current_steps += 1
            self.pbar.update(1)
            # min() is belt-and-braces; the guard above already caps steps.
            display_steps = min(self.current_steps, self.total_nodes)
            print(f"Progress Update: Step {display_steps}/{self.total_nodes} ({(display_steps/self.total_nodes)*100:.1f}%)")

    def complete_progress(self):
        """Mark the task finished and force the bar to 100% (idempotent)."""
        with self.node_lock:
            if self.task_completed:
                return

            print(f"Finalizing progress: Setting task_completed = True")
            # Set the completion flag FIRST so concurrent updates bail out.
            self.task_completed = True

            if self.pbar:
                # Ensure the bar visually reaches 100% regardless of how many
                # intermediate WS steps were actually received.
                print(f"Forcing progress bar to 100% ({self.total_nodes}/{self.total_nodes}). Current steps internally were {self.current_steps}.")
                self.pbar.update_absolute(self.total_nodes, self.total_nodes)
                self.current_steps = self.total_nodes
            else:
                print("Progress bar not available during finalization.")

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "apiConfig": ("STRUCT",),
            },
            "optional": {
                "nodeInfoList": ("ARRAY", {"default": []}),
                "run_timeout": ("INT", {"default": 600, "min": 1, "max": 9999999}),
                "concurrency_limit": ("INT", {"default": 1, "min": 1, "max": 100}),
                "is_webapp_task": ("BOOLEAN", {"default": False}),
            },
        }

    RETURN_TYPES = ("IMAGE", "IMAGE", "LATENT", "STRING", "AUDIO")
    RETURN_NAMES = ("images", "video_frames", "latent", "text", "audio")

    CATEGORY = "RunningHub"
    FUNCTION = "process"
    OUTPUT_NODE = True  # Indicate support for progress display

    # --- WebSocket Handlers ---
    def on_ws_message(self, ws, message):
        """Handle a WS message: log it and advance progress for new node ids.

        Fix: the original JSONDecodeError branch warned twice and re-parsed
        the message twice (duplicated dead code); a non-JSON message is now
        logged once and the handler returns.
        """
        try:
            # Ignore anything arriving after the task is finalized.
            with self.node_lock:
                if self.task_completed:
                    return

            print(f"--- Raw WS Message Received ---")
            try:
                data = json.loads(message)
                print(json.dumps(data, indent=2, ensure_ascii=False))
            except json.JSONDecodeError:
                print(message)
                print("Warning: Received non-JSON WS message.")
                return  # Nothing further can be done with a non-JSON payload.
            print(f"-----------------------------")

            message_type = data.get("type")
            node_data = data.get("data", {})
            node_id = node_data.get("node")

            # 'executing' and 'execution_success' both signal single-node
            # completion; count each node id only once for progress.
            if message_type == "executing" or message_type == "execution_success":
                if node_id is not None:
                    with self.node_lock:
                        is_new_node = node_id not in self.executed_nodes
                        if is_new_node:
                            self.executed_nodes.add(node_id)
                    if is_new_node:
                        self.update_progress()  # internally guarded
                        print(f"WS ({message_type}): Node {node_id} reported.")
                    else:
                        print(f"WS ({message_type}): Node {node_id} reported again (ignored for progress).")
                elif message_type == "executing":
                    print("WS (executing): Received null node signal, potentially end of execution phase.")
                else:
                    # execution_success without a node id: meaning unconfirmed,
                    # so do NOT finalize progress here.
                    print(f"WS (execution_success): Received signal without node_id. Data: {node_data}")
            else:
                print(f"WS: Received unhandled message type '{message_type}': {data}")

        except Exception as e:
            print(f"Error processing WebSocket message: {e}")
            # Record the error; the polling loop / finally block decides
            # final completion, so do not mark completed here.
            with self.node_lock:
                if not self.task_completed:
                    if self.ws_error is None:
                        self.ws_error = e

    def on_ws_error(self, ws, error):
        """Record the WebSocket error and finalize so the main loop exits."""
        print(f"WebSocket error: {error}")
        self.ws_error = error
        self.complete_progress()

    def on_ws_close(self, ws, close_status_code, close_msg):
        """On close: if the task is still running, treat it as abnormal end."""
        print(f"WebSocket closed: {close_status_code} - {close_msg}")
        with self.node_lock:
            should_complete = not self.task_completed
        if should_complete:
            print("Warning: WebSocket closed unexpectedly. Forcing task completion.")
            self.ws_error = self.ws_error or IOError(f"WebSocket closed unexpectedly ({close_status_code})")
            self.complete_progress()

    def on_ws_open(self, ws):
        """Log the connection; executed_nodes is reset in process()."""
        print("WebSocket connection established")

    def connect_websocket(self, wss_url):
        """Start a daemon thread running the WebSocket client for wss_url."""
        print(f"Connecting to WebSocket: {wss_url}")
        websocket.enableTrace(False)  # only enable when debugging WS protocol
        self.ws = websocket.WebSocketApp(
            wss_url,
            on_message=self.on_ws_message,
            on_error=self.on_ws_error,
            on_close=self.on_ws_close,
            on_open=self.on_ws_open
        )
        ws_thread = threading.Thread(target=self.ws.run_forever, name="RH_ExecuteNode_WSThread")
        ws_thread.daemon = True
        ws_thread.start()
        print("WebSocket thread started.")

    def check_and_complete_task(self):
        """Timer callback: force completion if the task stalls after the
        null-node signal (complete_progress is idempotent and lock-guarded)."""
        print("Task completion timeout after null node signal - attempting forced completion.")
        self.complete_progress()

    def get_workflow_node_count(self, api_key, base_url, workflow_id):
        """Fetch the workflow JSON from the API and return its node count.

        Retries up to 5 times with exponential backoff.

        Raises:
            Exception: when the API keeps failing or returns no workflow data.
        """
        url = f"{base_url}/api/openapi/getJsonApiFormat"
        headers = {
            "Content-Type": "application/json",
            "User-Agent": "ComfyUI-RH-APICall-Node/1.0",
        }
        data = {
            "apiKey": api_key,
            "workflowId": workflow_id
        }

        max_retries = 5
        retry_delay = 1
        last_exception = None

        for attempt in range(max_retries):
            response = None
            try:
                print(f"Attempt {attempt + 1}/{max_retries} to get workflow node count...")
                response = requests.post(url, json=data, headers=headers, timeout=30)
                response.raise_for_status()

                result = response.json()

                if result.get("code") != 0:
                    api_msg = result.get('msg', 'Unknown API error')
                    print(f"API error on attempt {attempt + 1}: {api_msg}")
                    raise Exception(f"API error getting workflow node count: {api_msg}")

                workflow_json = result.get("data", {}).get("prompt")
                if not workflow_json:
                    raise Exception("No workflow data found in response")

                # The workflow prompt is itself a JSON string; one top-level
                # entry per node.
                workflow_data = json.loads(workflow_json)
                node_count = len(workflow_data)
                print(f"Workflow contains {node_count} nodes")
                return node_count

            except (requests.exceptions.RequestException, json.JSONDecodeError, ValueError, Exception) as e:
                print(f"Error on attempt {attempt + 1}/{max_retries}: {e}")
                last_exception = e
                if isinstance(e, json.JSONDecodeError) and response is not None:
                    print(f"Raw response text on JSON decode error: {response.text}")

                if attempt < max_retries - 1:
                    print(f"Retrying in {retry_delay} seconds...")
                    time.sleep(retry_delay)
                    retry_delay *= 2
                else:
                    print("Max retries reached for getting workflow node count.")
                    raise Exception(f"Failed to get workflow node count after {max_retries} attempts. Last error: {last_exception}") from last_exception

        # Unreachable if the loop logic is correct; kept as a safety net.
        raise Exception(f"Failed to get workflow node count after {max_retries} attempts (unexpected loop end). Last error: {last_exception}")
Last error: {last_exception}") 309 | 310 | # --- Main Process Method --- 311 | def process(self, apiConfig, nodeInfoList=None, run_timeout=600, concurrency_limit=1, is_webapp_task=False): 312 | # Reset state 313 | with self.node_lock: # Use lock for resetting shared state 314 | self.executed_nodes.clear() 315 | self.task_completed = False 316 | self.ws_error = None 317 | self.prompt_tips = "{}" 318 | self.current_steps = 0 # Reset step counter 319 | 320 | # Get config values 321 | api_key = apiConfig.get("apiKey") 322 | base_url = apiConfig.get("base_url") 323 | 324 | if not api_key or not base_url: 325 | raise ValueError("Missing required apiConfig fields: apiKey, base_url") 326 | 327 | # Get workflow node count from API (only for non-AI App tasks) 328 | self.total_nodes = self.ESTIMATED_TOTAL_NODES # Default 329 | retrieved_workflow_id = apiConfig.get("workflowId_webappId") # <<< Changed key here 330 | 331 | if not is_webapp_task: 332 | # --- Standard ComfyUI Task --- 333 | print("Standard ComfyUI Task mode enabled.") 334 | try: 335 | # workflow_id = apiConfig.get("workflowId_webappId") # Already retrieved 336 | if not retrieved_workflow_id: 337 | print("Warning: workflowId_webappId missing in apiConfig for standard task. Using default node estimate.") 338 | # Fall through to use default estimate 339 | else: 340 | # Get actual node count from workflow 341 | actual_node_count = self.get_workflow_node_count(api_key, base_url, retrieved_workflow_id) 342 | # Use the actual node count directly 343 | self.total_nodes = actual_node_count 344 | print(f"Using actual total nodes for progress: {self.total_nodes}") 345 | except Exception as e: 346 | print(f"Error getting workflow node count, using default value: {e}") 347 | # self.total_nodes is already set to default 348 | print(f"Using default total nodes for progress: {self.total_nodes}") 349 | else: 350 | # --- AI App Task --- 351 | # Rename print log message to reflect webapp task 352 | print(f"Webapp Task mode enabled. 
Using default estimated nodes for progress: {self.total_nodes}") 353 | # Validate that workflowId (acting as webappId) is provided in config 354 | if not retrieved_workflow_id: 355 | # Update ValueError message 356 | raise ValueError("workflowId_webappId (acting as webappId) must be provided in apiConfig when is_webapp_task is True.") 357 | # Optional: Add validation if webappId must be numeric, though API might handle string conversion 358 | try: 359 | # Attempt conversion to int, but keep it as string for the API call if needed 360 | int(retrieved_workflow_id) 361 | # Update print log message 362 | print(f"Using workflowId_webappId from apiConfig as webappId: {retrieved_workflow_id}") 363 | except ValueError: 364 | # Update print log message 365 | print(f"Warning: workflowId_webappId '{retrieved_workflow_id}' provided for Webapp Task is not purely numeric, but proceeding.") 366 | 367 | 368 | # Initialize ComfyUI progress bar 369 | self.pbar = comfy.utils.ProgressBar(self.total_nodes) 370 | print("Progress bar initialized at 0") 371 | 372 | # --- Concurrency Check --- 373 | # api_key and base_url are already validated 374 | try: 375 | account_status = self.check_account_status(api_key, base_url) 376 | current_tasks = int(account_status["currentTaskCounts"]) 377 | print(f"There are {current_tasks} tasks running") 378 | 379 | if current_tasks >= concurrency_limit: 380 | print(f"Concurrency limit ({concurrency_limit}) reached, waiting...") 381 | start_wait_time = time.time() 382 | # Use a shorter sleep interval while waiting for concurrency 383 | wait_interval = 2 # seconds 384 | while current_tasks >= concurrency_limit: 385 | if time.time() - start_wait_time > run_timeout: 386 | if self.pbar: self.pbar.update_absolute(1.0) # Use absolute directly for setup failure 387 | raise Exception(f"Timeout waiting for concurrent tasks ({current_tasks}/{concurrency_limit}) to finish.") 388 | print(f"Waiting for concurrent tasks... 
({current_tasks}/{concurrency_limit})") 389 | time.sleep(wait_interval) 390 | account_status = self.check_account_status(api_key, base_url) 391 | current_tasks = int(account_status["currentTaskCounts"]) 392 | print("Concurrency slot available.") 393 | except Exception as e: 394 | print(f"Error checking account status or waiting: {e}") 395 | if self.pbar: self.pbar.update_absolute(1.0) # Use absolute directly for setup failure 396 | raise 397 | 398 | # --- Task Creation & WebSocket --- 399 | task_id = None 400 | wss_url = None # <<< Initialize wss_url 401 | try: 402 | print(f"ExecuteNode NodeInfoList: {nodeInfoList}") 403 | 404 | # <<< Decide which creation function to call >>> 405 | if is_webapp_task: 406 | # Call AI App Task creation, passing the retrieved ID as webappId 407 | webappId_to_pass = retrieved_workflow_id # Use the ID from config 408 | # Update print log message 409 | print(f"Creating Webapp task with webappId: {webappId_to_pass}...") 410 | task_creation_result = self.create_ai_app_task(apiConfig, nodeInfoList or [], webappId_to_pass) 411 | else: 412 | # Call standard ComfyUI Task creation 413 | print("Creating standard ComfyUI task...") 414 | # <<< Add base_url back to the create_task call >>> 415 | task_creation_result = self.create_task(apiConfig, nodeInfoList or [], base_url) 416 | 417 | print(f"Task Creation Result: {json.dumps(task_creation_result, indent=2, ensure_ascii=False)}") 418 | 419 | # Validate task creation response structure before accessing data 420 | if not isinstance(task_creation_result.get("data"), dict): 421 | raise ValueError("Invalid task creation response data structure.") 422 | 423 | task_data = task_creation_result["data"] 424 | self.prompt_tips = task_data.get("promptTips", "{}") 425 | task_id = task_data.get("taskId") 426 | initial_status = task_data.get("taskStatus") 427 | wss_url = task_data.get("netWssUrl") # <<< Get initial WSS URL 428 | 429 | if not task_id: 430 | raise ValueError("Missing taskId in task creation 
response.") 431 | 432 | print(f"Task created, taskId: {task_id}, Initial Status: {initial_status}") 433 | 434 | # --- Handle QUEUED state --- 435 | if initial_status == "QUEUED" and not wss_url: 436 | print("Task is QUEUED. Polling for RUNNING status and WebSocket URL...") 437 | queue_start_time = time.time() 438 | poll_interval = 2 # seconds 439 | while True: 440 | # Check timeout while waiting in queue 441 | if time.time() - queue_start_time > run_timeout: 442 | raise TimeoutError(f"Timeout waiting for task {task_id} to leave QUEUED state.") 443 | 444 | # Check task status 445 | status_result = self.check_task_status(task_id, api_key, base_url) 446 | current_status = status_result.get("taskStatus") 447 | print(f" Polling status for queued task {task_id}: {current_status}") 448 | 449 | if current_status == "RUNNING": 450 | # Task is running, try to get WSS URL from status check 451 | wss_url = status_result.get("netWssUrl") 452 | if wss_url: 453 | print(f"Task {task_id} is RUNNING. WebSocket URL obtained: {wss_url}") 454 | break # Exit queue polling loop 455 | else: 456 | # This case might indicate an API design issue or a transient state 457 | print(f"Warning: Task {task_id} is RUNNING but WebSocket URL not yet available from status check. Retrying check...") 458 | # Keep polling, maybe the URL will appear shortly 459 | elif current_status == "error": 460 | error_msg = status_result.get('error', 'Unknown error during queue polling') 461 | raise Exception(f"Task {task_id} failed while in queue: {error_msg}") 462 | elif isinstance(status_result, list): # Task completed while polling queue status 463 | print(f"Task {task_id} completed while polling queue status. 
Skipping WebSocket connection.") 464 | # Set wss_url to a non-None dummy value to skip connection attempt later 465 | wss_url = "skipped_completed_in_queue" 466 | break # Exit queue polling loop 467 | elif current_status != "QUEUED": 468 | # Handle unexpected status if necessary 469 | print(f"Warning: Task {task_id} transitioned to unexpected status '{current_status}' while polling queue.") 470 | # Decide if we should break or continue polling based on the status 471 | 472 | # Wait before next poll 473 | time.sleep(poll_interval) 474 | 475 | # --- Connect WebSocket if URL is available and not skipped --- 476 | if wss_url and wss_url != "skipped_completed_in_queue": 477 | print(f"Attempting to connect WebSocket: {wss_url}") 478 | self.connect_websocket(wss_url) 479 | elif not wss_url: 480 | # If still no WSS URL after potential polling (e.g., finished directly, or RUNNING but no URL provided) 481 | # Raise error or proceed without WS? Let's raise error for now. 482 | raise ValueError(f"Failed to obtain WebSocket URL for task {task_id} after creation/polling.") 483 | else: # wss_url == "skipped_completed_in_queue" 484 | print("WebSocket connection skipped as task already completed.") 485 | 486 | except Exception as e: 487 | print(f"Error during task creation, queue polling, or WS connection: {e}") 488 | if self.pbar: self.pbar.update_absolute(1.0) # Use absolute directly for setup failure 489 | raise 490 | 491 | # --- Task Monitoring Loop --- 492 | task_start_time = time.time() 493 | loop_sleep_interval = 0.1 # Short sleep for responsiveness 494 | poll_status_interval = 5 # Poll HTTP status every 5 seconds 495 | last_poll_time = time.time() # Track last poll time 496 | print("Starting task monitoring loop...") 497 | 498 | timeout_timer = None 499 | final_error = None # <<< Define final_error outside try/finally 500 | 501 | try: 502 | # Setup global timeout timer 503 | def force_timeout(): 504 | # Use lock to safely check task_completed 505 | with self.node_lock: 506 
| is_completed = self.task_completed 507 | if not is_completed: 508 | print("Global timeout reached - forcing task completion.") 509 | # Use lock to set error safely 510 | with self.node_lock: 511 | # Check if ws_error is already set to avoid overwriting a more specific WS error 512 | if self.ws_error is None: 513 | self.ws_error = Exception("Global timeout reached") 514 | # Just set the flags here to break loop 515 | self.task_completed = True # Set flag directly here to break loop 516 | 517 | timeout_timer = threading.Timer(run_timeout, force_timeout) 518 | timeout_timer.daemon = True 519 | timeout_timer.start() 520 | 521 | # Main wait loop 522 | while True: # <<< Modified loop structure 523 | # 1. Check completion flags (set by WS handlers or polling) 524 | with self.node_lock: 525 | is_completed = self.task_completed 526 | current_error = self.ws_error 527 | if is_completed or current_error: 528 | print(f"Loop Exit: Task Completed={is_completed}, Error Present={current_error is not None}") 529 | break # Exit loop if completed or error occurred via WS or polling 530 | 531 | # 2. Check for global timeout explicitly 532 | if time.time() - task_start_time > run_timeout: 533 | print("Task monitoring loop timeout check triggered.") 534 | with self.node_lock: 535 | if not self.task_completed: # Avoid overwriting specific error 536 | if self.ws_error is None: 537 | self.ws_error = Exception(f"Timeout: Task {task_id} did not complete within {run_timeout} seconds.") 538 | self.task_completed = True # Ensure loop exit 539 | break # Exit loop 540 | 541 | # 3. 
Periodic HTTP Status Polling (Robustness check) 542 | current_time = time.time() 543 | if current_time - last_poll_time >= poll_status_interval: 544 | print(f"Polling HTTP status for task {task_id}...") 545 | last_poll_time = current_time # Update last poll time 546 | try: 547 | # Call check_task_status (requires api_key, base_url) 548 | status_result = self.check_task_status(task_id, api_key, base_url) 549 | 550 | # Analyze polling result 551 | if isinstance(status_result, list): # Task completed successfully 552 | print(f"Polling detected task {task_id} completed successfully.") 553 | # Use lock to set flags safely 554 | with self.node_lock: 555 | if not self.task_completed: # Avoid redundant completion if WS already handled it 556 | self.task_completed = True 557 | # No need to set ws_error if successful 558 | # Loop will break on next iteration due to task_completed flag 559 | elif isinstance(status_result, dict): 560 | polled_status = status_result.get("taskStatus") 561 | if polled_status == "error": 562 | error_msg = status_result.get('error', 'Unknown error reported by polling') 563 | print(f"Polling detected task {task_id} failed: {error_msg}") 564 | # Use lock to set flags safely 565 | with self.node_lock: 566 | if not self.task_completed: # Check completion first 567 | # Set error only if no other error is already present 568 | if self.ws_error is None: 569 | self.ws_error = Exception(f"Task failed (polled): {error_msg}") 570 | self.task_completed = True # Mark as complete to exit loop 571 | # Loop will break on next iteration 572 | elif polled_status in ["RUNNING", "QUEUED"]: 573 | print(f"Polling: Task {task_id} is still {polled_status}.") 574 | # Optionally check for netWssUrl again if needed, but primary goal is status check 575 | else: 576 | print(f"Polling: Received unexpected status '{polled_status}' for task {task_id}.") 577 | else: 578 | print(f"Polling: Received unexpected result type for task {task_id}: {type(status_result)}") 579 | 580 | 
except Exception as poll_e: 581 | # Don't necessarily stop the whole process on a single polling error, 582 | # maybe it's transient. Log it. WS might still be active. 583 | print(f"Warning: Error during periodic status polling for task {task_id}: {poll_e}") 584 | # Consider adding a counter to stop polling after too many errors? 585 | 586 | # 4. Yield CPU 587 | time.sleep(loop_sleep_interval) 588 | 589 | # Handle exit conditions after loop 590 | with self.node_lock: # Read error flag safely 591 | final_error = self.ws_error # Assign to outer scope variable 592 | 593 | if final_error: 594 | print(f"Task ended with error: {final_error}") 595 | # complete_progress handles internal checks and ensures final state 596 | self.complete_progress() 597 | else: # Task completed normally (either via WS or polling success) 598 | print("Task monitoring completed successfully.") 599 | # Ensure completion is marked, even if WS didn't send success or polling found success 600 | self.complete_progress() 601 | 602 | finally: # <<< Existing finally clause remains 603 | # Cleanup 604 | if timeout_timer: 605 | timeout_timer.cancel() 606 | if self.ws: 607 | try: 608 | self.ws.close() 609 | except Exception as e: 610 | print(f"Error closing WebSocket: {e}") 611 | self.ws = None 612 | 613 | # Final safety net: Ensure progress is marked complete. 
    def process_task_output(self, task_id, api_key, base_url):
        """Poll the task result and convert every output file into node outputs.

        Repeatedly calls check_task_status (defined elsewhere in this class).
        Per the usage below, that helper returns either a list of output
        descriptors (success) or a dict containing "taskStatus" (still
        running / queued / error).

        Output files are dispatched by their "fileType" extension into:
          - images  -> batched IMAGE tensor [B, H, W, C]
          - videos  -> all frames extracted into a batched IMAGE tensor
          - latent  -> first successfully loaded latent dict {"samples": ...}
          - txt     -> first successfully read text string (else "null")
          - audio   -> first successfully loaded audio dict

        Missing categories are filled with placeholders so the returned
        tuple always matches the node's RETURN_TYPES.

        Returns:
            tuple: (image_batch, frame_batch, latent_dict, text, audio_dict)
        """
        max_retries = 30
        retry_interval = 1
        max_retry_interval = 5
        image_data_list = []   # regular image tensors, each [1, H, W, C]
        frame_data_list = []   # video frame tensors, each [1, H, W, C]
        latent_data = None
        text_data = None
        audio_data = None

        for attempt in range(max_retries):
            task_status_result = None
            try:
                task_status_result = self.check_task_status(task_id, api_key, base_url)
                print(f"Check output attempt {attempt + 1}/{max_retries}")

                # Still in progress: back off (exponential, capped) and retry.
                if isinstance(task_status_result, dict) and task_status_result.get("taskStatus") in ["RUNNING", "QUEUED"]:
                    wait_time = min(retry_interval * (1.5 ** attempt), max_retry_interval)
                    print(f"Task still running ({task_status_result.get('taskStatus')}), waiting {wait_time:.1f} seconds...")
                    time.sleep(wait_time)
                    continue

                # A non-empty list means the task finished with outputs.
                if isinstance(task_status_result, list) and len(task_status_result) > 0:
                    print("Got valid output result, processing files...")
                    image_urls = []
                    video_urls = []
                    latent_urls = []
                    text_urls = []
                    audio_urls = []

                    # Bucket each output URL by file extension.
                    for output in task_status_result:
                        if isinstance(output, dict):
                            file_url = output.get("fileUrl")
                            file_type = output.get("fileType")
                            if file_url and file_type:
                                file_type_lower = file_type.lower()
                                if file_type_lower in ["png", "jpg", "jpeg", "webp", "bmp", "gif"]:
                                    image_urls.append(file_url)
                                elif file_type_lower in ["mp4", "avi", "mov", "webm"]:
                                    video_urls.append(file_url)
                                elif file_type_lower == "latent":
                                    latent_urls.append(file_url)
                                elif file_type_lower == "txt":
                                    text_urls.append(file_url)
                                elif file_type_lower in ["wav", "mp3", "flac", "ogg"]:
                                    audio_urls.append(file_url)

                    # --- Images: download all, then pad to a common size so
                    # they can be concatenated into one batch tensor. ---
                    if image_urls:
                        print(f"Processing {len(image_urls)} images...")
                        downloaded_images = []
                        for url in image_urls:
                            try:
                                img_tensor = self.download_image(url)
                                if img_tensor is not None:
                                    downloaded_images.append(img_tensor)
                                    print(f"Successfully downloaded image from {url} (Shape: {img_tensor.shape})")
                            except Exception as img_e:
                                print(f"Error downloading image {url}: {img_e}")

                        if downloaded_images:
                            if len(downloaded_images) > 1:
                                print("Multiple images found. Checking dimensions and padding if necessary...")
                                max_h = 0
                                max_w = 0
                                for img in downloaded_images:
                                    # Tensor shape is [1, H, W, C]
                                    max_h = max(max_h, img.shape[1])
                                    max_w = max(max_w, img.shape[2])
                                print(f"Max dimensions found: Height={max_h}, Width={max_w}")

                                padded_images = []
                                for i, img_tensor in enumerate(downloaded_images):
                                    _, h, w, _ = img_tensor.shape
                                    if h < max_h or w < max_w:
                                        # Center the smaller image with zero padding.
                                        pad_h_total = max_h - h
                                        pad_w_total = max_w - w
                                        pad_top = pad_h_total // 2
                                        pad_bottom = pad_h_total - pad_top
                                        pad_left = pad_w_total // 2
                                        pad_right = pad_w_total - pad_left

                                        # F.pad wants channels-first: [1, H, W, C] -> [1, C, H, W]
                                        img_permuted = img_tensor.permute(0, 3, 1, 2)
                                        # Pad order is last-dim-first: (W_left, W_right, H_top, H_bottom)
                                        padded_permuted = F.pad(img_permuted, (pad_left, pad_right, pad_top, pad_bottom), "constant", 0)
                                        # Back to channels-last: [1, C, H, W] -> [1, H, W, C]
                                        padded_img = padded_permuted.permute(0, 2, 3, 1)
                                        print(f"  Padded image {i+1} from {h}x{w} to {max_h}x{max_w}")
                                        padded_images.append(padded_img)
                                    else:
                                        print(f"  Image {i+1} already has max dimensions.")
                                        padded_images.append(img_tensor)
                                image_data_list = padded_images
                            else:
                                print("Only one image found, no padding needed.")
                                image_data_list = downloaded_images

                    # --- Videos: extract every frame as an image tensor. ---
                    if video_urls:
                        print(f"Processing {len(video_urls)} videos for frames...")
                        for url in video_urls:
                            try:
                                frame_tensors = self.download_video(url)
                                if frame_tensors:
                                    frame_data_list.extend(frame_tensors)
                                    print(f"Extracted {len(frame_tensors)} frames from video {url}")
                            except Exception as vid_e:
                                print(f"Error processing video {url}: {vid_e}")

                    # --- Latents: only the first successfully loaded one is kept. ---
                    if latent_urls and latent_data is None:
                        print(f"Processing {len(latent_urls)} latent file(s)...")
                        for url in latent_urls:
                            try:
                                loaded_latent = self.download_and_load_latent(url)
                                if loaded_latent is not None:
                                    latent_data = loaded_latent
                                    print(f"Successfully loaded latent from {url}")
                                    break
                            except Exception as lat_e:
                                print(f"Error processing latent {url}: {lat_e}")

                    # --- Text files: only the first successfully read one is kept. ---
                    if text_urls and text_data is None:
                        print(f"Processing {len(text_urls)} text file(s)...")
                        for url in text_urls:
                            try:
                                loaded_text = self.download_and_read_text(url)
                                if loaded_text is not None:
                                    text_data = loaded_text
                                    print(f"Successfully read text from {url}")
                                    break
                            except Exception as txt_e:
                                print(f"Error processing text file {url}: {txt_e}")

                    # --- Audio files: only the first successfully loaded one is kept. ---
                    if audio_urls and audio_data is None:
                        print(f"Processing {len(audio_urls)} audio file(s)...")
                        for url in audio_urls:
                            try:
                                loaded_audio = self.download_and_process_audio(url)
                                if loaded_audio is not None:
                                    audio_data = loaded_audio
                                    print(f"Successfully processed audio from {url}")
                                    break
                            except Exception as aud_e:
                                print(f"Error processing audio file {url}: {aud_e}")

                    # All categories handled; stop retrying.
                    break

                elif isinstance(task_status_result, dict) and task_status_result.get("taskStatus") == "error":
                    # Task failed server-side; placeholders are emitted below.
                    print(f"Task failed with error: {task_status_result.get('error', 'Unknown error')}")
                    break

                else:
                    # Unknown/empty response; back off and retry.
                    print(f"Unexpected task status or empty result, waiting...")
                    time.sleep(min(retry_interval * (1.5 ** attempt), max_retry_interval))

            except Exception as e:
                print(f"Error checking/processing task status (attempt {attempt + 1}): {e}")
                # If the last status we saw was a reported error, retrying is pointless.
                if isinstance(task_status_result, dict) and task_status_result.get("taskStatus") == "error":
                    print("Stopping retries due to reported task error.")
                    break
                # Simple exponential backoff for retries
                time.sleep(min(retry_interval * (1.5 ** attempt), max_retry_interval))

        # --- Final Output Generation ---
        # Every slot in the return tuple must be populated so downstream
        # nodes never receive a bare None where a tensor/dict is expected.

        if not image_data_list:
            print("No regular images generated, creating placeholder.")
            image_data_list.append(self.create_placeholder_image(text="No image output"))

        if not frame_data_list:
            print("No video frames generated, creating placeholder.")
            frame_data_list.append(self.create_placeholder_image(text="No video frame output"))

        if latent_data is None:
            print("No latent generated, creating placeholder.")
            latent_data = self.create_placeholder_latent()

        if text_data is None:
            print("No text file processed, returning 'null' string.")
            text_data = "null"

        if audio_data is None:
            print("No audio generated, creating placeholder.")
            audio_data = self.create_placeholder_audio()

        # Batch images and frames separately along dim 0.
        final_image_batch = torch.cat(image_data_list, dim=0) if image_data_list else None
        final_frame_batch = torch.cat(frame_data_list, dim=0) if frame_data_list else None

        # Tuple order must match the node's RETURN_TYPES.
        return (final_image_batch, final_frame_batch, latent_data, text_data, audio_data)

    def create_placeholder_image(self, text="No image/video output", width=256, height=64):
        """Create a dark placeholder IMAGE tensor [1, H, W, C] with centered text.

        Used when a task produced no image (or no video frame) output, so the
        node can still return a valid IMAGE batch. Text rendering is
        best-effort: if no font is usable, the plain background is returned.
        """
        img = Image.new('RGB', (width, height), color = (50, 50, 50))  # Dark gray background
        d = ImageDraw.Draw(img)
        try:
            fontsize = 15
            # Try common system fonts first; availability varies by OS.
            font_paths = ["arial.ttf", "LiberationSans-Regular.ttf", "DejaVuSans.ttf"]
            font = None
            for fp in font_paths:
                try:
                    font = ImageFont.truetype(fp, fontsize)
                    break
                except IOError:
                    continue
            if font is None:
                font = ImageFont.load_default()  # Fallback to PIL default bitmap font
                print("Warning: Could not load system font, using PIL default.")

            # Center the text using its rendered bounding box.
            text_bbox = d.textbbox((0, 0), text, font=font)
            text_width = text_bbox[2] - text_bbox[0]
            text_height = text_bbox[3] - text_bbox[1]
            text_x = (width - text_width) / 2
            text_y = (height - text_height) / 2
            d.text((text_x, text_y), text, fill=(200, 200, 200), font=font)  # Light gray text
        except Exception as e:
            print(f"Error adding text to placeholder image: {e}. Returning image without text.")

        # Normalize to float32 in [0, 1] and add the batch dimension.
        img_array = np.array(img).astype(np.float32) / 255.0
        img_tensor = torch.from_numpy(img_array)[None,]  # Shape: [1, H, W, C]
        return img_tensor
def create_placeholder_latent(self, batch_size=1, channels=4, height=64, width=64):
    """Create a placeholder latent dictionary.

    Returns:
        dict: {"samples": tensor} with a zero tensor of shape
        [batch_size, channels, height, width] — the same dict layout
        produced by download_and_load_latent.
    """
    latent = torch.zeros([batch_size, channels, height, width])
    return {"samples": latent}

def create_placeholder_audio(self, sample_rate=44100, duration_sec=0.01):
    """Create a placeholder silent AUDIO dictionary.

    Fix: the waveform now includes the leading batch dimension, giving
    shape [batch, channels, samples] = [1, 1, N]. This matches the output
    of download_and_process_audio (which unsqueezes a batch dim onto
    torchaudio's [channels, samples] tensor to match the standard ComfyUI
    AUDIO format). The previous [1, N] shape lacked the batch dimension,
    so downstream audio nodes received an inconsistently shaped tensor
    whenever the placeholder was used instead of real task output.

    Returns:
        dict: {"waveform": tensor [1, 1, num_samples], "sample_rate": int}
    """
    print(f"Creating silent placeholder audio: {duration_sec}s @ {sample_rate}Hz")
    num_samples = int(sample_rate * duration_sec)
    # Batch of 1, mono channel, silent float32 samples.
    waveform = torch.zeros((1, 1, num_samples), dtype=torch.float32)
    return {"waveform": waveform, "sample_rate": sample_rate}
891 | """ 892 | max_retries = 5 893 | retry_delay = 1 894 | last_exception = None 895 | img_tensor = None # Define img_tensor outside try 896 | 897 | for attempt in range(max_retries): 898 | try: 899 | response = requests.get(image_url, timeout=30) 900 | print(f"Download image attempt {attempt + 1} ({image_url}): Status code: {response.status_code}") 901 | response.raise_for_status() 902 | 903 | content_type = response.headers.get('Content-Type', '').lower() 904 | # Consider validating content_type if needed 905 | 906 | img = Image.open(BytesIO(response.content)).convert("RGB") 907 | img_array = np.array(img).astype(np.float32) / 255.0 908 | img_tensor = torch.from_numpy(img_array)[None,] # Shape: [1, H, W, C] 909 | return img_tensor # Return on success 910 | 911 | except (requests.exceptions.RequestException, IOError, Image.UnidentifiedImageError) as e: # <<< Correct except clause 912 | print(f"Download image attempt {attempt + 1} failed: {e}") 913 | last_exception = e 914 | if attempt < max_retries - 1: 915 | print(f"Retrying in {retry_delay} seconds...") 916 | time.sleep(retry_delay) 917 | retry_delay *= 2 918 | # else: # <<< Implicitly handled by loop ending 919 | # print(f"Failed to download image {image_url} after {max_retries} attempts.") 920 | # # Keep img_tensor as None 921 | 922 | # If loop finishes without returning, it means all retries failed 923 | print(f"Failed to download image {image_url} after {max_retries} attempts.") 924 | return None 925 | 926 | 927 | def download_video(self, video_url): 928 | """ 929 | Downloads a video, extracts all frames, converts them to tensors, 930 | deletes the video file, and returns a list of image tensors. 931 | Requires opencv-python (cv2). 932 | Returns list[torch.Tensor] or None on failure. Each tensor shape [1, H, W, C]. 
    def download_video(self, video_url):
        """
        Downloads a video, extracts all frames, converts them to tensors,
        deletes the video file, and returns a list of image tensors.
        Requires opencv-python (cv2).

        The video is first downloaded to a local "temp" directory (relative
        to the current working directory) with retries; frames are then read
        with cv2, converted BGR->RGB and normalized to float32 [0, 1]; the
        temporary file is always removed afterwards.

        Returns list[torch.Tensor] or None on failure. Each tensor shape [1, H, W, C].
        """
        max_retries = 5
        retry_delay = 1
        last_exception = None
        video_path = None
        output_dir = "temp"  # temp directory for downloaded videos

        # --- Ensure temp directory exists ---
        if not os.path.exists(output_dir):
            try:
                os.makedirs(output_dir)
                print(f"Created temporary directory: {output_dir}")
            except OSError as e:
                print(f"Error creating temporary directory {output_dir}: {e}")
                return None  # Cannot proceed without temp dir

        # --- Download the video file (with retries) ---
        for attempt in range(max_retries):
            video_path = None  # Reset path for each attempt
            try:
                # Build a unique, sanitized temp filename from URL + timestamp.
                try:
                    safe_filename = f"temp_video_{os.path.basename(video_url)}_{str(int(time.time()*1000))}.tmp"
                    safe_filename = "".join(c if c.isalnum() or c in ['.', '_', '-'] else '_' for c in safe_filename)[:150]  # Basic sanitization and length limit
                    video_path = os.path.join(output_dir, safe_filename)
                except Exception as path_e:
                    print(f"Error creating temporary video path: {path_e}")
                    # Fallback filename (timestamp only)
                    video_path = os.path.join(output_dir, f"temp_video_{str(int(time.time()*1000))}.tmp")

                print(f"Attempt {attempt + 1}/{max_retries} to download video to temp path: {video_path}")
                response = requests.get(video_url, stream=True, timeout=60)
                response.raise_for_status()

                # Stream to disk in 64 KiB chunks to bound memory use.
                downloaded_size = 0
                with open(video_path, "wb") as f:
                    for chunk in response.iter_content(chunk_size=65536):
                        if chunk:
                            f.write(chunk)
                            downloaded_size += len(chunk)

                if downloaded_size > 0:
                    print(f"Temporary video downloaded successfully: {video_path}")
                    break  # Exit retry loop on successful download
                else:
                    # Zero-byte download: remove it and retry.
                    print(f"Warning: Downloaded video file is empty: {video_path}")
                    if os.path.exists(video_path):
                        try: os.remove(video_path)
                        except OSError: pass
                    last_exception = IOError("Downloaded video file is empty.")

            except (requests.exceptions.RequestException, IOError) as e:
                print(f"Download video attempt {attempt + 1} failed: {e}")
                last_exception = e
                # Remove any partially written file before retrying.
                if video_path and os.path.exists(video_path):
                    try: os.remove(video_path)
                    except OSError: pass

            if attempt < max_retries - 1:
                print(f"Retrying download in {retry_delay} seconds...")
                time.sleep(retry_delay)
                retry_delay *= 2  # exponential backoff

        # Verify the download actually produced a non-empty file.
        if not video_path or not os.path.exists(video_path) or os.path.getsize(video_path) == 0:
            print(f"Failed to download video {video_url} successfully after {max_retries} attempts.")
            if video_path and os.path.exists(video_path):
                try: os.remove(video_path)
                except OSError: pass
            return None

        # --- Extract frames from the downloaded file ---
        frame_tensors = []
        cap = None
        try:
            print(f"Extracting frames from {video_path}...")
            cap = cv2.VideoCapture(video_path)
            if not cap.isOpened():
                raise IOError(f"Cannot open video file: {video_path}")

            frame_count = 0
            while True:
                ret, frame = cap.read()
                if not ret:
                    break  # End of video

                # cv2 yields BGR uint8; convert to RGB float32 in [0, 1].
                frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                img_array = frame_rgb.astype(np.float32) / 255.0
                img_tensor = torch.from_numpy(img_array)[None,]  # add batch dim -> [1, H, W, C]
                frame_tensors.append(img_tensor)
                frame_count += 1

            print(f"Finished extracting {frame_count} frames.")
        except Exception as e:
            print(f"Error extracting frames from video {video_path}: {e}")
            # Return None on any extraction failure (no partial frame lists).
            frame_tensors = None
        finally:
            # --- Cleanup: release capture and delete temp file in all cases ---
            if cap:
                cap.release()
            if video_path and os.path.exists(video_path):
                try:
                    os.remove(video_path)
                    print(f"Deleted temporary video file: {video_path}")
                except OSError as e:
                    print(f"Error deleting temporary video file {video_path}: {e}")

        return frame_tensors
1061 | """ 1062 | max_retries = 5 1063 | retry_delay = 1 1064 | last_exception = None 1065 | latent_path = None 1066 | output_dir = "temp" # Use temp directory 1067 | 1068 | # Ensure temp directory exists 1069 | if not os.path.exists(output_dir): 1070 | try: 1071 | os.makedirs(output_dir) 1072 | except OSError as e: 1073 | print(f"Error creating temporary directory {output_dir}: {e}") 1074 | return None 1075 | 1076 | # --- Download the latent file --- 1077 | for attempt in range(max_retries): 1078 | latent_path = None # Reset path for each attempt 1079 | try: 1080 | # Generate a unique temporary filename 1081 | try: 1082 | safe_filename = f"temp_latent_{os.path.basename(latent_url)}_{str(int(time.time()*1000))}.latent" 1083 | safe_filename = "".join(c if c.isalnum() or c in ['.', '_', '-'] else '_' for c in safe_filename)[:150] 1084 | latent_path = os.path.join(output_dir, safe_filename) 1085 | except Exception as path_e: 1086 | print(f"Error creating temporary latent path: {path_e}") 1087 | latent_path = os.path.join(output_dir, f"temp_latent_{str(int(time.time()*1000))}.latent") 1088 | 1089 | print(f"Attempt {attempt + 1}/{max_retries} to download latent to temp path: {latent_path}") 1090 | response = requests.get(latent_url, stream=True, timeout=30) 1091 | response.raise_for_status() 1092 | 1093 | downloaded_size = 0 1094 | with open(latent_path, "wb") as f: 1095 | for chunk in response.iter_content(chunk_size=65536): 1096 | if chunk: # <<< Correct indent 1097 | f.write(chunk) 1098 | downloaded_size += len(chunk) # <<< Correct indent 1099 | 1100 | if downloaded_size > 0: 1101 | print(f"Temporary latent downloaded successfully: {latent_path}") 1102 | break # Exit retry loop on successful download 1103 | else: 1104 | print(f"Warning: Downloaded latent file is empty: {latent_path}") 1105 | if os.path.exists(latent_path): 1106 | try: os.remove(latent_path) 1107 | except OSError: pass 1108 | last_exception = IOError("Downloaded latent file is empty.") 1109 | # 
Continue retry loop 1110 | 1111 | except (requests.exceptions.RequestException, IOError) as e: 1112 | print(f"Download latent attempt {attempt + 1} failed: {e}") 1113 | last_exception = e 1114 | if latent_path and os.path.exists(latent_path): 1115 | try: os.remove(latent_path) 1116 | except OSError: pass 1117 | # Continue retry loop 1118 | 1119 | if attempt < max_retries - 1: 1120 | print(f"Retrying download in {retry_delay} seconds...") 1121 | time.sleep(retry_delay) 1122 | retry_delay *= 2 1123 | # else: # Implicitly handled by loop ending 1124 | # print(f"Failed to download latent {latent_url} after {max_retries} attempts.") 1125 | 1126 | # Check if download succeeded 1127 | if not latent_path or not os.path.exists(latent_path) or os.path.getsize(latent_path) == 0: 1128 | print(f"Failed to download latent {latent_url} successfully after {max_retries} attempts.") 1129 | if latent_path and os.path.exists(latent_path): 1130 | try: os.remove(latent_path) 1131 | except OSError: pass 1132 | return None 1133 | 1134 | # --- Load the latent file --- 1135 | loaded_latent_dict = None 1136 | try: 1137 | print(f"Loading latent from {latent_path}...") 1138 | # Use safetensors.torch.load_file 1139 | latent_content = safetensors.torch.load_file(latent_path, device="cpu") 1140 | 1141 | if "latent_tensor" not in latent_content: 1142 | raise ValueError("'latent_tensor' key not found in the loaded latent file.") 1143 | 1144 | # Apply multiplier based on LoadLatent logic 1145 | multiplier = 1.0 1146 | if "latent_format_version_0" not in latent_content: 1147 | multiplier = 1.0 / 0.18215 1148 | print(f"Applying multiplier {multiplier:.5f} (old latent format detected)") 1149 | 1150 | samples_tensor = latent_content["latent_tensor"].float() * multiplier 1151 | loaded_latent_dict = {"samples": samples_tensor} 1152 | print("Latent loaded successfully.") 1153 | 1154 | except Exception as e: 1155 | print(f"Error loading latent file {latent_path}: {e}") 1156 | # Ensure loaded_latent_dict 
    def download_and_read_text(self, text_url):
        """
        Downloads a .txt file, reads its content as UTF-8,
        cleans up the temp file, and returns the text content.

        The file is written in binary first, then decoded on read with
        errors="replace" so malformed bytes never raise.

        Returns str or None on failure.
        """
        max_retries = 5
        retry_delay = 1
        last_exception = None
        text_path = None
        output_dir = "temp"

        if not os.path.exists(output_dir):
            try: os.makedirs(output_dir)
            except OSError as e: print(f"Error creating temp dir {output_dir}: {e}"); return None

        # --- Download the text file (with retries) ---
        for attempt in range(max_retries):
            text_path = None
            try:
                # Build a unique, sanitized temp filename.
                try:
                    safe_filename = f"temp_text_{os.path.basename(text_url)}_{str(int(time.time()*1000))}.txt"
                    safe_filename = "".join(c if c.isalnum() or c in ['.', '_', '-'] else '_' for c in safe_filename)[:150]
                    text_path = os.path.join(output_dir, safe_filename)
                except Exception as path_e:
                    print(f"Error creating temporary text path: {path_e}")
                    text_path = os.path.join(output_dir, f"temp_text_{str(int(time.time()*1000))}.txt")

                print(f"Attempt {attempt + 1}/{max_retries} to download text to temp path: {text_path}")
                response = requests.get(text_url, stream=True, timeout=20)  # Shorter timeout for text
                response.raise_for_status()

                downloaded_size = 0
                with open(text_path, "wb") as f:  # Write in binary first
                    for chunk in response.iter_content(chunk_size=4096):
                        if chunk: f.write(chunk); downloaded_size += len(chunk)

                if downloaded_size > 0:
                    print(f"Temporary text file downloaded: {text_path}")
                    break  # Success
                else:
                    # Zero-byte download: remove it and retry.
                    if os.path.exists(text_path):
                        try: os.remove(text_path)
                        except OSError: pass
                    last_exception = IOError("Downloaded text file is empty.")

            except (requests.exceptions.RequestException, IOError) as e:
                print(f"Download text attempt {attempt + 1} failed: {e}")
                last_exception = e
                if text_path and os.path.exists(text_path):
                    try: os.remove(text_path)
                    except OSError: pass

            if attempt < max_retries - 1:
                print(f"Retrying download in {retry_delay} seconds...")
                time.sleep(retry_delay); retry_delay *= 2  # exponential backoff

        # Verify the download actually produced a non-empty file.
        if not text_path or not os.path.exists(text_path) or os.path.getsize(text_path) == 0:
            print(f"Failed to download text {text_url} successfully after {max_retries} attempts.")
            if text_path and os.path.exists(text_path):
                try: os.remove(text_path)
                except OSError: pass
            return None

        # --- Read the text file ---
        read_content = None
        try:
            print(f"Reading text from {text_path}...")
            # UTF-8 with errors="replace": never raises on bad bytes.
            with open(text_path, "r", encoding="utf-8", errors="replace") as f:
                read_content = f.read()
            print("Text read successfully.")
        except Exception as e:
            print(f"Error reading text file {text_path}: {e}")
            read_content = None
        finally:
            # --- Cleanup: always delete the temp file ---
            if text_path and os.path.exists(text_path):
                try:
                    os.remove(text_path)
                    print(f"Deleted temporary text file: {text_path}")
                except OSError as e:
                    print(f"Error deleting temporary text file {text_path}: {e}")

        return read_content
    def download_and_process_audio(self, audio_url):
        """
        Download an audio file to a temp path, decode it with torchaudio, delete
        the temp file, and return the audio in ComfyUI's AUDIO dict format.

        Args:
            audio_url: URL of the audio file to fetch.

        Returns:
            dict: {"waveform": float32 tensor of shape [1, channels, samples],
                   "sample_rate": int} on success (the leading 1 is the batch
            dimension added by unsqueeze(0) below), or None on any failure.
        """
        max_retries = 5
        retry_delay = 1          # seconds; doubled after each failed attempt (exponential backoff)
        last_exception = None
        audio_path = None
        output_dir = "temp"      # relative temp dir; created on demand

        if not os.path.exists(output_dir):
            try: os.makedirs(output_dir)
            except OSError as e: print(f"Error creating temp dir {output_dir}: {e}"); return None

        # --- Download the audio file ---
        for attempt in range(max_retries):
            audio_path = None
            try:
                # Generate temp filename based on URL extension if possible
                try:
                    basename = os.path.basename(audio_url)
                    _, ext = os.path.splitext(basename)
                    if not ext: ext = ".audio" # Default if no extension
                    safe_filename = f"temp_audio_{str(int(time.time()*1000))}{ext}"
                    # Keep only filesystem-safe characters and cap the length.
                    safe_filename = "".join(c if c.isalnum() or c in ['.', '_', '-'] else '_' for c in safe_filename)[:150]
                    audio_path = os.path.join(output_dir, safe_filename)
                except Exception as path_e:
                    print(f"Error creating temporary audio path: {path_e}")
                    audio_path = os.path.join(output_dir, f"temp_audio_{str(int(time.time()*1000))}.tmp")

                print(f"Attempt {attempt + 1}/{max_retries} to download audio to temp path: {audio_path}")
                response = requests.get(audio_url, stream=True, timeout=60) # Longer timeout for audio/video
                response.raise_for_status()

                downloaded_size = 0
                with open(audio_path, "wb") as f:
                    for chunk in response.iter_content(chunk_size=65536):
                        if chunk: f.write(chunk); downloaded_size += len(chunk)

                if downloaded_size > 0:
                    print(f"Temporary audio file downloaded: {audio_path} ({downloaded_size} bytes)")
                    break # Success
                else:
                    # Empty download counts as a failure: remove the file and retry.
                    if os.path.exists(audio_path):
                        try: os.remove(audio_path)
                        except OSError: pass
                    last_exception = IOError("Downloaded audio file is empty.")
                    # Continue retries

            except (requests.exceptions.RequestException, IOError) as e:
                print(f"Download audio attempt {attempt + 1} failed: {e}")
                last_exception = e
                # Remove any partial file before retrying.
                if audio_path and os.path.exists(audio_path):
                    try: os.remove(audio_path)
                    except OSError: pass
                # Continue retries

            if attempt < max_retries - 1:
                print(f"Retrying download in {retry_delay} seconds...")
                time.sleep(retry_delay); retry_delay *= 2

        # Check download success
        if not audio_path or not os.path.exists(audio_path) or os.path.getsize(audio_path) == 0:
            print(f"Failed to download audio {audio_url} successfully after {max_retries} attempts.")
            if audio_path and os.path.exists(audio_path):
                try: os.remove(audio_path)
                except OSError: pass
            return None

        # --- Process the audio file ---
        processed_audio = None
        try:
            print(f"Processing audio from {audio_path} using torchaudio...")
            # torchaudio.load returns (waveform [channels, samples], sample_rate).
            waveform, sample_rate = torchaudio.load(audio_path)

            # Ensure waveform is float32, which is common for ComfyUI audio nodes
            if waveform.dtype != torch.float32:
                print(f"Converting waveform from {waveform.dtype} to float32.")
                waveform = waveform.to(torch.float32)

            # Ensure the tensor is contiguous before handing it downstream.
            if not waveform.is_contiguous():
                print("Audio waveform is not contiguous. Making it contiguous.")
                waveform = waveform.contiguous()

            # Add a batch dimension: [channels, samples] -> [1, channels, samples],
            # matching the standard ComfyUI AUDIO format.
            waveform = waveform.unsqueeze(0)

            print(f"Audio loaded successfully: Shape={waveform.shape}, Sample Rate={sample_rate} Hz, dtype={waveform.dtype}, Contiguous={waveform.is_contiguous()}")
            processed_audio = {"waveform": waveform, "sample_rate": sample_rate}

        except Exception as e:
            print(f"Error processing audio file {audio_path} with torchaudio: {e}")
            processed_audio = None # Ensure it's None on error
        finally:
            # --- Cleanup: always delete the temp file, success or not ---
            if audio_path and os.path.exists(audio_path):
                try:
                    os.remove(audio_path)
                    print(f"Deleted temporary audio file: {audio_path}")
                except OSError as e:
                    print(f"Error deleting temporary audio file {audio_path}: {e}")

        return processed_audio
Making it contiguous.") 1349 | waveform = waveform.contiguous() 1350 | 1351 | # <<< ADD BATCH DIMENSION TO MATCH STANDARD COMFYUI AUDIO FORMAT <<< 1352 | waveform = waveform.unsqueeze(0) 1353 | 1354 | # Most nodes seem to work with [channels, samples] or just [samples] if mono. 1355 | # torchaudio.load returns [channels, samples]. Let's stick with that. 1356 | print(f"Audio loaded successfully: Shape={waveform.shape}, Sample Rate={sample_rate} Hz, dtype={waveform.dtype}, Contiguous={waveform.is_contiguous()}") # <<< Added contiguous log 1357 | processed_audio = {"waveform": waveform, "sample_rate": sample_rate} 1358 | 1359 | except Exception as e: 1360 | print(f"Error processing audio file {audio_path} with torchaudio: {e}") 1361 | processed_audio = None # Ensure it's None on error 1362 | finally: 1363 | # --- Cleanup --- 1364 | if audio_path and os.path.exists(audio_path): 1365 | try: 1366 | os.remove(audio_path) 1367 | print(f"Deleted temporary audio file: {audio_path}") 1368 | except OSError as e: 1369 | print(f"Error deleting temporary audio file {audio_path}: {e}") 1370 | 1371 | return processed_audio 1372 | 1373 | 1374 | def check_account_status(self, api_key, base_url): 1375 | """ 1376 | 查询账户状态,检查是否可以提交新任务。包含重试机制。 1377 | """ 1378 | if not api_key or not base_url: 1379 | raise ValueError("API Key and Base URL are required for checking account status.") 1380 | 1381 | url = f"{base_url}/uc/openapi/accountStatus" 1382 | headers = { 1383 | "User-Agent": "ComfyUI-RH-APICall-Node/1.0", 1384 | "Content-Type": "application/json", 1385 | } 1386 | data = {"apikey": api_key} 1387 | 1388 | max_retries = 5 1389 | retry_delay = 1 1390 | last_exception = None 1391 | 1392 | for attempt in range(max_retries): 1393 | response = None 1394 | try: # <<< Added try block 1395 | print(f"Attempt {attempt + 1}/{max_retries} to check account status...") 1396 | response = requests.post(url, json=data, headers=headers, timeout=15) 1397 | response.raise_for_status() 1398 | 1399 | result = 
response.json() 1400 | 1401 | if result.get("code") != 0: # <<< Correct indent 1402 | api_msg = result.get('msg', 'Unknown API error') 1403 | print(f"API error on attempt {attempt + 1}: {api_msg}") 1404 | raise Exception(f"API error getting account status: {api_msg}") 1405 | 1406 | account_data = result.get("data") 1407 | if not account_data or "currentTaskCounts" not in account_data: 1408 | raise ValueError("Invalid response structure for account status.") 1409 | 1410 | try: # <<< Correct indent (inner try for int conversion) 1411 | current_task_counts = int(account_data["currentTaskCounts"]) 1412 | account_data["currentTaskCounts"] = current_task_counts 1413 | print("Account status check successful.") 1414 | return account_data # Success 1415 | except (ValueError, TypeError) as e: # <<< Correct indent 1416 | raise ValueError(f"Invalid value for currentTaskCounts: {account_data.get('currentTaskCounts')}. Error: {e}") 1417 | 1418 | except (requests.exceptions.RequestException, json.JSONDecodeError, ValueError, Exception) as e: # <<< Correct indent 1419 | print(f"Error on attempt {attempt + 1}/{max_retries}: {e}") 1420 | last_exception = e 1421 | if isinstance(e, json.JSONDecodeError) and response is not None: 1422 | print(f"Raw response text on JSON decode error: {response.text}") 1423 | 1424 | if attempt < max_retries - 1: 1425 | print(f"Retrying in {retry_delay} seconds...") 1426 | time.sleep(retry_delay) 1427 | retry_delay *= 2 1428 | else: 1429 | print("Max retries reached for checking account status.") 1430 | raise Exception(f"Failed to check account status after {max_retries} attempts. Last error: {last_exception}") from last_exception 1431 | 1432 | # This should ideally not be reached if the loop logic is correct 1433 | raise Exception(f"Failed to check account status after {max_retries} attempts (unexpected loop end). 
    def create_task(self, apiConfig, nodeInfoList, base_url):
        """
        Create a RunningHub workflow task via /task/openapi/create.
        Retries up to 5 times with exponential backoff.

        Args:
            apiConfig: dict with 'base_url', 'workflowId_webappId' and 'apiKey'.
            nodeInfoList: list of {"nodeId", "fieldName", "fieldValue"} overrides
                sent as the task's nodeInfoList.
            base_url: NOTE(review): this parameter is unused — the URL is built
                from apiConfig['base_url'] below; confirm callers before removal.

        Returns:
            dict: the full API response (code 0, data contains taskId and
            netWssUrl).

        Raises:
            ValueError: if required apiConfig fields are missing.
            Exception: if all retry attempts fail (chained to the last error).
        """
        safe_base_url = apiConfig.get('base_url')
        # Use the updated key name here
        safe_workflow_id = apiConfig.get("workflowId_webappId")
        safe_api_key = apiConfig.get("apiKey")

        if not safe_base_url or not safe_workflow_id or not safe_api_key:
            # Update the error message to reflect the new key
            raise ValueError("Missing required apiConfig fields: 'base_url', 'workflowId_webappId', 'apiKey'")

        url = f"{safe_base_url}/task/openapi/create"
        headers = {
            "Content-Type": "application/json",
            "User-Agent": "ComfyUI-RH-APICall-Node/1.0",
        }
        # The API payload uses "workflowId" regardless of the config key name.
        data = {
            "workflowId": safe_workflow_id,
            "apiKey": safe_api_key,
            "nodeInfoList": nodeInfoList,
        }

        max_retries = 5
        retry_delay = 1  # seconds; doubled after each failed attempt
        last_exception = None

        for attempt in range(max_retries):
            response = None
            current_last_exception = None
            success = False # Flag to indicate success within try block
            try:
                print(f"Create task attempt {attempt + 1}/{max_retries}...")
                response = requests.post(url, json=data, headers=headers, timeout=30)
                print(f"Create task attempt {attempt + 1}: Status code {response.status_code}")
                response.raise_for_status()

                result = response.json()

                if result.get("code") == 0:
                    # Success requires both taskId and netWssUrl in the payload.
                    if "data" in result and "taskId" in result["data"] and "netWssUrl" in result["data"]:
                        print("Task created successfully.")
                        success = True # Mark as success
                        return result # Return successful result
                    else:
                        print(f"API success code 0, but response structure invalid: {result}")
                        current_last_exception = ValueError(f"API success code 0, but response structure invalid.")
                else:
                    api_msg = result.get('msg', 'Unknown API error')
                    print(f"API error creating task (code {result.get('code')}): {api_msg}")
                    current_last_exception = Exception(f"API error (code {result.get('code')}): {api_msg}")

            except requests.exceptions.Timeout as e:
                print(f"Create task attempt {attempt + 1} timed out.")
                current_last_exception = e
            except requests.exceptions.RequestException as e:
                print(f"Create task attempt {attempt + 1} network error: {e}")
                current_last_exception = e
            except json.JSONDecodeError as e:
                print(f"Create task attempt {attempt + 1} failed to decode JSON response.")
                if response is not None: print(f"Raw response text: {response.text}")
                current_last_exception = e
            except Exception as e:
                print(f"Create task attempt {attempt + 1} unexpected error: {e}")
                current_last_exception = e

            # If successful, we already returned. If not successful, process the error.
            if not success:
                last_exception = current_last_exception # Store the most recent error
                if attempt < max_retries - 1:
                    print(f"Retrying task creation in {retry_delay} seconds...")
                    time.sleep(retry_delay)
                    retry_delay *= 2
                else: # Max retries reached
                    error_message = f"Failed to create task after {max_retries} attempts."
                    if last_exception:
                        error_message += f" Last error: {last_exception}"
                    print(error_message)
                    raise Exception(error_message) from last_exception

        # Defensive: the loop above always returns or raises.
        raise Exception("Task creation failed unexpectedly after retry loop.")
    def create_ai_app_task(self, apiConfig, nodeInfoList, webappId):
        """
        Create an AI App (WebAPP) task via /task/openapi/ai-app/run.
        Retries up to 5 times with exponential backoff.

        Args:
            apiConfig: dict with 'base_url' and 'apiKey'.
            nodeInfoList: list of {"nodeId", "fieldName", "fieldValue"} overrides.
            webappId: AI App id; must be convertible to int (the API expects a
                numeric id).

        Returns:
            dict: the full API response (code 0, data contains taskId;
            netWssUrl is not required for AI App tasks).

        Raises:
            ValueError: if required config fields are missing or webappId is
                not an integer.
            Exception: if all retry attempts fail (chained to the last error).
        """
        safe_base_url = apiConfig.get('base_url')
        safe_api_key = apiConfig.get("apiKey")

        if not safe_base_url or not safe_api_key:
            raise ValueError("Missing required apiConfig fields: 'base_url', 'apiKey'")

        # <<< Use the AI App endpoint >>>
        url = f"{safe_base_url}/task/openapi/ai-app/run"
        headers = {
            "Content-Type": "application/json",
            "User-Agent": "ComfyUI-RH-APICall-Node/1.0",
            # Host header is typically handled by requests library
        }
        # <<< Construct payload for AI App task, converting webappId to int >>>
        try:
            webappId_int = int(webappId)
        except ValueError:
            # Handle error if the ID from config cannot be converted to int
            raise ValueError(f"Invalid webappId provided: '{webappId}'. It must be convertible to an integer.")

        data = {
            "webappId": webappId_int,
            "apiKey": safe_api_key,
            "nodeInfoList": nodeInfoList,
        }

        max_retries = 5
        retry_delay = 1  # seconds; doubled after each failed attempt
        last_exception = None

        for attempt in range(max_retries):
            response = None
            current_last_exception = None
            success = False # Flag to indicate success within try block
            try:
                print(f"Create AI App task attempt {attempt + 1}/{max_retries}...")
                response = requests.post(url, json=data, headers=headers, timeout=30)
                print(f"Create AI App task attempt {attempt + 1}: Status code {response.status_code}")
                response.raise_for_status()

                result = response.json()

                # Response structure seems identical to standard task, check code and data fields
                if result.get("code") == 0:
                    if "data" in result and "taskId" in result["data"]: # Don't strictly require netWssUrl here
                        print("AI App Task created/queued successfully.")
                        success = True # Mark as success
                        return result # Return successful result
                    else:
                        print(f"AI App Task API success code 0, but response structure invalid: {result}")
                        current_last_exception = ValueError(f"API success code 0, but response structure invalid.")
                else:
                    api_msg = result.get('msg', 'Unknown API error')
                    print(f"API error creating AI App task (code {result.get('code')}): {api_msg}")
                    current_last_exception = Exception(f"API error (code {result.get('code')}): {api_msg}")

            except requests.exceptions.Timeout as e:
                print(f"Create AI App task attempt {attempt + 1} timed out.")
                current_last_exception = e
            except requests.exceptions.RequestException as e:
                print(f"Create AI App task attempt {attempt + 1} network error: {e}")
                current_last_exception = e
            except json.JSONDecodeError as e:
                print(f"Create AI App task attempt {attempt + 1} failed to decode JSON response.")
                if response is not None: print(f"Raw response text: {response.text}")
                current_last_exception = e
            except Exception as e:
                print(f"Create AI App task attempt {attempt + 1} unexpected error: {e}")
                current_last_exception = e

            # If successful, we already returned. If not successful, process the error.
            if not success:
                last_exception = current_last_exception # Store the most recent error
                if attempt < max_retries - 1:
                    print(f"Retrying AI App task creation in {retry_delay} seconds...")
                    time.sleep(retry_delay)
                    retry_delay *= 2
                else: # Max retries reached
                    error_message = f"Failed to create AI App task after {max_retries} attempts."
                    if last_exception:
                        error_message += f" Last error: {last_exception}"
                    print(error_message)
                    raise Exception(error_message) from last_exception

        # Defensive: the loop above always returns or raises.
        raise Exception("AI App Task creation failed unexpectedly after retry loop.")
    def check_task_status(self, task_id, api_key, base_url):
        """
        Poll the task outputs endpoint (/task/openapi/outputs) once, with
        network-level retries.

        Returns:
            list: the output list on successful completion (code 0 with a
                non-empty data list), OR
            dict: a status marker such as {"taskStatus": "QUEUED"},
                {"taskStatus": "RUNNING"[, "netWssUrl": ...]},
                {"taskStatus": "error", "error": ...}, or
                {"taskStatus": "unknown", "details": ...}.

        Raises:
            ValueError: if task_id, api_key or base_url is missing.
        """
        if not task_id or not api_key or not base_url:
            raise ValueError("Task ID, API Key, and Base URL are required for checking task status.")
        url = f"{base_url}/task/openapi/outputs"
        headers = {
            "User-Agent": "ComfyUI-RH-APICall-Node/1.0",
            "Content-Type": "application/json",
        }
        data = { "taskId": task_id, "apiKey": api_key }

        response = None # Define response outside try
        result = None # Define result outside try

        # Retry loop covers the network request itself; API-level outcomes
        # (bad JSON with HTTP 200, 4xx codes, API error codes) return
        # immediately without retrying.
        max_retries = 5
        retry_delay = 1
        last_exception = None

        for attempt in range(max_retries):
            try: # Outer try block for requests/JSON processing
                print(f"Check status attempt {attempt + 1}/{max_retries} (TaskID: {task_id})...")
                response = requests.post(url, json=data, headers=headers, timeout=20)
                print(f"Check status ({task_id}): Response Status Code: {response.status_code}")

                # Process the response (JSON decoding, status checks) only if request succeeded
                try: # Inner try block for JSON decoding
                    result = response.json()
                    print(f"Check status ({task_id}): Response JSON: {json.dumps(result, indent=2, ensure_ascii=False)}")
                except json.JSONDecodeError:
                    print(f"Check status ({task_id}): Failed to decode JSON. Response Text: {response.text}")
                    error_msg = f"HTTP Error {response.status_code} and Invalid JSON" if response.status_code != 200 else "Invalid JSON response"
                    # Non-200 + bad JSON is treated as a retryable network problem;
                    # 200 + bad JSON is terminal for this check.
                    if response.status_code != 200:
                        raise requests.exceptions.RequestException(f"HTTP Error {response.status_code} with Invalid JSON")
                    else: # If status 200 but bad JSON, treat as terminal error for this check
                        return {"taskStatus": "error", "error": error_msg}

                # Process the decoded JSON result
                api_code = result.get("code")
                api_msg = result.get("msg", "")
                api_data = result.get("data")

                # Handle Non-200 status codes AFTER potential JSON decoding
                if response.status_code != 200:
                    error_detail = api_msg if api_msg else f"HTTP Error {response.status_code}"
                    print(f"Warning: Non-200 status code ({response.status_code}). API Message: {api_msg}")
                    # Raise exception to trigger retry for server-side issues (e.g. 5xx)
                    if 500 <= response.status_code < 600:
                        raise requests.exceptions.RequestException(f"Server Error {response.status_code}: {error_detail}")
                    else: # Treat other non-200 codes (like 4xx) as terminal for this check
                        return {"taskStatus": "error", "error": error_detail}

                # --- If we got here, the request was successful (status 200, valid JSON) ---
                # 1. Successful completion: code 0 with a non-empty output list.
                if api_code == 0 and isinstance(api_data, list) and api_data:
                    return api_data # SUCCESS, return output data

                # 2. Explicit QUEUED message.
                elif api_msg == "APIKEY_TASK_IS_QUEUED":
                    print(f"Check status ({task_id}): Task QUEUED.")
                    return {"taskStatus": "QUEUED"}

                # 3. Explicit RUNNING message; surface netWssUrl if present.
                elif api_msg == "APIKEY_TASK_IS_RUNNING":
                    possible_wss_url = None
                    if isinstance(api_data, dict): # Check if api_data is a dict
                        possible_wss_url = api_data.get("netWssUrl")

                    if possible_wss_url:
                        print(f"Check status ({task_id}): Task RUNNING, found netWssUrl.")
                        return {"taskStatus": "RUNNING", "netWssUrl": possible_wss_url}
                    else:
                        print(f"Check status ({task_id}): Task RUNNING, but netWssUrl not found in response data.")
                        return {"taskStatus": "RUNNING"} # Return RUNNING status without URL

                # 4. API-reported errors (non-zero code, excluding handled messages).
                elif api_code != 0:
                    print(f"API Error checking status (code {api_code}): {api_msg}")
                    return {"taskStatus": "error", "error": api_msg}

                # 5. Code 0 but no data/output list (likely still running/initializing).
                elif api_code == 0 and (api_data is None or (isinstance(api_data, list) and not api_data)):
                    print(f"Check status ({task_id}): Task RUNNING (code 0, no output data yet).")
                    # NOTE(review): in this branch api_data is None or an empty
                    # list, so the isinstance(dict) check below can never be
                    # true — the netWssUrl path here is unreachable dead code.
                    possible_wss_url = None
                    if isinstance(api_data, dict):
                        possible_wss_url = api_data.get("netWssUrl")

                    if possible_wss_url:
                        print(f"Check status ({task_id}): Task RUNNING (code 0, no output), found netWssUrl.")
                        return {"taskStatus": "RUNNING", "netWssUrl": possible_wss_url}
                    else:
                        print(f"Check status ({task_id}): Task RUNNING (code 0, no output), netWssUrl not found.")
                        return {"taskStatus": "RUNNING"}

                # 6. Fallback for unknown successful response structure.
                else:
                    print(f"Unknown task status response structure: {result}")
                    return {"taskStatus": "unknown", "details": result}

            except requests.exceptions.Timeout as e:
                print(f"Network timeout on attempt {attempt + 1}/{max_retries} for task {task_id}")
                last_exception = e
                # Continue to retry loop
            except requests.exceptions.RequestException as e:
                print(f"Network error on attempt {attempt + 1}/{max_retries}: {e}")
                last_exception = e
                # Continue to retry loop
            # Note: json.JSONDecodeError or other processing errors after successful request
            # are handled inside the try block and return specific statuses without retry here.

            # If exception occurred and not the last attempt, wait and retry
            if last_exception is not None and attempt < max_retries - 1:
                print(f"Retrying status check in {retry_delay} seconds...")
                time.sleep(retry_delay)
                retry_delay *= 2
            elif last_exception is not None: # Max retries reached after an exception
                print(f"Max retries ({max_retries}) reached for status check due to network errors. Last error: {last_exception}")
                return {"taskStatus": "error", "error": f"Network Error after retries: {last_exception}"}

        # This point should theoretically not be reached if the loop handles all cases, but as a fallback:
        print(f"Status check loop completed unexpectedly after {max_retries} attempts.")
        return {"taskStatus": "error", "error": f"Status check failed after {max_retries} attempts. Last error: {last_exception}"}
Last error: {last_exception}"} 1742 | 1743 | 1744 | # <<< Add NODE_CLASS_MAPPINGS and NODE_DISPLAY_NAME_MAPPINGS 1745 | NODE_CLASS_MAPPINGS = { 1746 | "RH_ExecuteNode": ExecuteNode 1747 | } 1748 | 1749 | NODE_DISPLAY_NAME_MAPPINGS = { 1750 | "RH_ExecuteNode": "RunningHub Execute Task" 1751 | } 1752 | 1753 | # <<< Standard Python entry point check (optional but good practice) 1754 | if __name__ == "__main__": 1755 | # Example usage or testing code could go here 1756 | pass 1757 | 1758 | -------------------------------------------------------------------------------- /RH_ImageUploaderNode.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from PIL import Image 3 | from io import BytesIO 4 | import torch 5 | import numpy as np 6 | import time # Add this import 7 | 8 | class ImageUploaderNode: 9 | """ 10 | ComfyUI 节点:ImageUploaderNode 11 | 功能:将输入的图像上传到服务器,并返回服务器返回的文件名。 12 | """ 13 | 14 | @classmethod 15 | def INPUT_TYPES(cls): 16 | return { 17 | "required": { 18 | "apiConfig": ("STRUCT",), # API 配置参数,必须包含 apiKey 和 base_url 19 | "image": ("IMAGE",), # 输入图像张量 20 | }, 21 | } 22 | 23 | RETURN_TYPES = ("STRING",) # 输出类型为字符串 24 | RETURN_NAMES = ("filename",) # 输出名称为 filename 25 | CATEGORY = "RunningHub" # 节点类别 26 | FUNCTION = "process" # 指定处理方法 27 | 28 | def process(self, image: torch.Tensor, apiConfig: dict) -> tuple: 29 | """ 30 | 处理方法:将图像上传到服务器并返回文件名。 31 | 32 | 参数: 33 | image (torch.Tensor): 输入的图像张量,形状可能为 [C, H, W]、[H, W, C] 或其他。 34 | apiConfig (dict): API 配置参数,必须包含 'apiKey' 和 'base_url'。 35 | 36 | 返回: 37 | tuple: 包含上传后返回的文件名。 38 | """ 39 | # 检查输入的图像类型 40 | if not isinstance(image, torch.Tensor): 41 | raise TypeError(f"Expected image to be a torch.Tensor, but got {type(image)}.") 42 | 43 | # 将图像张量转换为 NumPy 数组 44 | image_np = image.detach().cpu().numpy() 45 | 46 | # 打印图像形状以进行调试 47 | print(f"Original image shape: {image_np.shape}") 48 | 49 | # 处理图像的形状,确保为 [H, W, C] 50 | if image_np.ndim == 4: 51 | # 处理批量维度,例如 [B, C, 
H, W] 52 | print("Detected 4D tensor. Assuming shape [B, C, H, W]. Taking the first image in the batch.") 53 | image_np = image_np[0] 54 | print(f"Image shape after removing batch dimension: {image_np.shape}") 55 | 56 | if image_np.ndim == 3: 57 | if image_np.shape[0] in [1, 3, 4]: # [C, H, W] 58 | image_np = np.transpose(image_np, (1, 2, 0)) # 转换为 [H, W, C] 59 | print(f"Transposed image shape to [H, W, C]: {image_np.shape}") 60 | elif image_np.shape[2] in [1, 3, 4]: # [H, W, C] 61 | # 已经是 [H, W, C],无需转置 62 | print(f"Image already in [H, W, C] format: {image_np.shape}") 63 | else: 64 | raise ValueError(f"Unsupported number of channels: {image_np.shape[2]}") 65 | elif image_np.ndim == 2: 66 | # 灰度图像 [H, W] 67 | image_np = np.expand_dims(image_np, axis=-1) # 转换为 [H, W, 1] 68 | print(f"Expanded grayscale image to [H, W, 1]: {image_np.shape}") 69 | else: 70 | raise ValueError(f"Unsupported image shape: {image_np.shape}") 71 | 72 | # 确定图像模式 73 | if image_np.shape[2] == 1: 74 | mode = "L" # 灰度图像 75 | image_pil = Image.fromarray((image_np.squeeze(-1) * 255).astype(np.uint8), mode) 76 | print("Converted to PIL Image with mode 'L'") 77 | elif image_np.shape[2] == 3: 78 | mode = "RGB" # RGB 图像 79 | image_pil = Image.fromarray((image_np * 255).astype(np.uint8), mode) 80 | print("Converted to PIL Image with mode 'RGB'") 81 | elif image_np.shape[2] == 4: 82 | mode = "RGBA" # RGBA 图像 83 | image_pil = Image.fromarray((image_np * 255).astype(np.uint8), mode) 84 | print("Converted to PIL Image with mode 'RGBA'") 85 | else: 86 | raise ValueError(f"Unsupported number of channels: {image_np.shape[2]}") 87 | 88 | # 将 PIL 图像保存到 BytesIO 缓冲区 89 | buffer = BytesIO() 90 | image_pil.save(buffer, format='PNG') # 可以根据需要选择 'JPEG' 或其他格式 91 | # 先获取缓冲区大小 92 | buffer_size = buffer.tell() 93 | # 然后重置指针到开头 94 | buffer.seek(0) 95 | print("Saved PIL Image to BytesIO buffer.") 96 | 97 | # 打印图像大小,以 MB 为单位 98 | buffer_size_mb = buffer_size / (1024 * 1024) 99 | print(f"Image size: {buffer_size_mb:.2f} MB") 
100 | 101 | # 检查图像大小是否超过 10MB 102 | max_size_bytes = 10 * 1024 * 1024 # 10MB 103 | if buffer_size > max_size_bytes: 104 | raise Exception(f"Image size {buffer_size_mb:.2f}MB exceeds the 10MB limit.") 105 | 106 | # 准备 multipart/form-data 107 | files = { 108 | 'file': ('image.png', buffer, 'image/png') # 文件名和内容类型 109 | } 110 | data = { 111 | 'apiKey': apiConfig.get('apiKey'), 112 | 'fileType': 'image', 113 | } 114 | 115 | # 获取 base_url,默认为 'https://www.runninghub.cn' 116 | base_url = apiConfig.get('base_url', 'https://www.runninghub.cn') 117 | upload_url = f"{base_url}/task/openapi/upload" 118 | 119 | print(f"Uploading image to {upload_url} with apiKey: {data['apiKey']}") 120 | 121 | # 发送 POST 请求,添加重试机制 122 | max_retries = 5 123 | retry_delay = 1 # 初始延迟1秒 124 | 125 | for attempt in range(max_retries): 126 | try: 127 | response = requests.post(upload_url, data=data, files=files) 128 | print(f"Attempt {attempt + 1}: Received response with status code: {response.status_code}") 129 | 130 | if response.status_code == 200: 131 | break # 成功则跳出重试循环 132 | 133 | except requests.exceptions.RequestException as e: 134 | if attempt == max_retries - 1: # 最后一次尝试 135 | raise Exception(f"Failed to connect to the server after {max_retries} attempts: {e}") 136 | print(f"Attempt {attempt + 1} failed: {e}. 
class NodeInfoListNode:
    """
    Builds one nodeInfo entry ({nodeId, fieldName, fieldValue}) and appends it
    to an optional upstream list, so several of these nodes can be chained and
    merged into a single ARRAY for the RunningHub task payload.
    """

    def __init__(self):
        # Accumulator for the merged nodeInfo entries; rebuilt on every call.
        self.node_info_list = []

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "nodeId": ("INT", {"default": 0}),
                "fieldName": ("STRING", {"default": ""}),
                "fieldValue": ("STRING", {"default": ""}),
            },
            "optional": {
                # Upstream ARRAY output from a previous NodeInfoListNode.
                "previousNodeInfoList": ("ARRAY", {"default": []}),
            }
        }

    RETURN_TYPES = ("ARRAY",)
    CATEGORY = "RunningHub"
    FUNCTION = "process"

    def process(self, nodeId, fieldName, fieldValue, previousNodeInfoList=None):
        """
        Merge the upstream list (if any) with the current entry and return the
        combined list wrapped for the ARRAY output.

        BUGFIX: the default for previousNodeInfoList is None instead of a
        mutable [] (shared-default anti-pattern); the truthiness check below
        handles both None and an empty list identically.
        """
        self.node_info_list = []
        # Debug output: show the inputs and the upstream list.
        print(f"Processing nodeId: {nodeId}, fieldName: {fieldName}, fieldValue: {fieldValue}")
        print(f"previousNodeInfoList: {previousNodeInfoList}")

        node_info = {"nodeId": nodeId, "fieldName": fieldName, "fieldValue": fieldValue}

        # Merge the upstream entries first so chained nodes keep their order.
        if previousNodeInfoList:
            self.node_info_list.extend(previousNodeInfoList)

        self.node_info_list.append(node_info)

        print(f"Updated node_info_list: {self.node_info_list}")

        # Wrap in a list for the ARRAY return convention.
        return [self.node_info_list]
class SettingsNode:
    """Collects the RunningHub connection settings into one STRUCT output."""

    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "base_url": ("STRING", {"default": "https://www.runninghub.cn"}),
                "apiKey": ("STRING", {"default": ""}),
                "workflowId_webappId": ("STRING", {"default": ""}),
            },
        }

    RETURN_TYPES = ("STRUCT",)
    CATEGORY = "RunningHub"
    FUNCTION = "process"

    def process(self, base_url, apiKey, workflowId_webappId):
        """Bundle base_url, apiKey and workflowId_webappId for downstream nodes."""
        settings = {
            "base_url": base_url,
            "apiKey": apiKey,
            "workflowId_webappId": workflowId_webappId,
        }
        return [settings]
class AllTrue(str):
    """
    Wildcard pseudo-type for ComfyUI socket matching.

    A str subclass whose comparisons are deliberately broken: it compares equal
    to every object (__eq__ always True, __ne__ always False), so ComfyUI's
    `type_a != type_b` compatibility check always treats it as a match.
    Instantiated once below as `anytype = AllTrue("*")`.
    """
    def __init__(self, representation=None) -> None:
        # Display text for this pseudo-type (e.g. "*").
        self.repr = representation
        pass
    def __ne__(self, __value: object) -> bool:
        # Never unequal: defeats ComfyUI's type-mismatch rejection.
        return False
    # isinstance, jsonserializable hijack
    # NOTE(review): __instancecheck__/__subclasscheck__ only take effect when
    # defined on a metaclass; as instance methods here they are likely inert.
    def __instancecheck__(self, instance):
        return True
    def __subclasscheck__(self, subclass):
        return True
    def __bool__(self):
        return True
    def __str__(self):
        return self.repr
    # jsonserializable hijack
    def __jsonencode__(self):
        return self.repr
    def __repr__(self) -> str:
        return self.repr
    def __eq__(self, __value: object) -> bool:
        # Always equal — pairs with __ne__ above.
        return True
anytype = AllTrue("*") # when a != b is called, it will always return False

class AnyToStringNode:
    """
    ComfyUI node: converts any input value to a STRING output. String inputs
    that look like integers are normalized through int() first.
    """
    def __init__(self):
        # Initialize any necessary parameters for the node
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                # anytype accepts a connection from any socket type.
                "anything": (anytype, {"default": 0.0}),
            }
        }

    RETURN_TYPES = ("STRING",) # Output type is string
    CATEGORY = "RunningHub" # Category name is RunningHub
    FUNCTION = "process" # The processing function is the 'process' method

    def process(self, anything):
        """
        Converts any input type to a string.
        If the input is a string that can be converted to an integer, it
        performs the conversion (normalizing e.g. "007" to "7").
        Otherwise, it directly converts the input to a string.
        """
        if isinstance(anything, str):
            try:
                # Attempt to convert the string to an integer and then back to string
                return (str(int(anything)),)
            except ValueError:
                # If conversion fails, return the original string
                return (anything,)
        else:
            # For non-string types, directly convert to string
            return (str(anything),)
class RH_Extract_Image_From_List():
    """Pick one image out of an image list/batch and return it as a 1-image batch."""

    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(s):
        required = {
            "images": ("IMAGE", {"tooltip": "The images list"}),
            "image_index": ("INT", {"default": 0}),
        }
        return {"required": required}

    RETURN_TYPES = ("IMAGE",)
    RETURN_NAMES = ("image",)

    FUNCTION = "rh_extract_image"

    OUTPUT_NODE = False

    CATEGORY = "RunningHub"

    def rh_extract_image(self, images, image_index):
        # Select the requested image and restore the leading batch dimension.
        idx = int(image_index)
        selected = images[idx]
        return (selected.unsqueeze(0),)
Leaving it empty means selecting all."}), 105 | }, 106 | } 107 | 108 | RETURN_TYPES = ("IMAGE",) 109 | RETURN_NAMES = ("image",) 110 | 111 | FUNCTION = "rh_batch_images" 112 | 113 | OUTPUT_NODE = False 114 | 115 | CATEGORY = "RunningHub" 116 | 117 | def rh_batch_images(self, images, image_indices): 118 | image_indices = image_indices.replace(" ", "") 119 | out = [] 120 | if image_indices == "": 121 | out = images 122 | image_indices = image_indices.split(',') 123 | for index in image_indices: 124 | if '-' in index: 125 | sindex = index.split('-') 126 | out.extend(images[int(sindex[0]):int(sindex[1])+1]) 127 | else: 128 | out.append(images[int(index)]) 129 | batchsize = len(out) 130 | max_height = max(image.shape[0] for image in out) 131 | max_width = max(image.shape[1] for image in out) 132 | max_channels = max(image.shape[2] for image in out) 133 | batch_images = torch.zeros([batchsize, max_height, max_width, max_channels]) 134 | for (batch_number, image) in enumerate(out): 135 | h, w, c = image.shape 136 | batch_images[batch_number, 0:h, 0:w, 0:c] = image 137 | return (batch_images,) -------------------------------------------------------------------------------- /RH_VideoUploader.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | import os 4 | import time 5 | 6 | # Try importing folder_paths safely for potential future use, though not strictly needed for this node's core logic 7 | try: 8 | import folder_paths 9 | comfyui_env_available = True 10 | except ImportError: 11 | folder_paths = None # Set to None if not available 12 | comfyui_env_available = False 13 | print("RH_VideoUploader: ComfyUI folder_paths not found. Cannot determine input file path.") 14 | 15 | 16 | class RH_VideoUploader: 17 | # This node relies heavily on its JavaScript counterpart for the actual upload. 18 | # The Python part primarily retrieves the filename returned by the JS upload process. 
    @classmethod
    def INPUT_TYPES(cls):
        """Declare the node's inputs for ComfyUI.

        `video` is a plain STRING because its value is written by the node's
        JavaScript widget after the browser-side upload completes.
        """
        # Add a required input for the filename provided by the JS widget after ComfyUI upload
        # This widget type should allow JS to set its value.
        # A standard STRING input where JS sets the value seems appropriate.
        return {
            "required": {
                "apiConfig": ("STRUCT",),
                # This input receives the filename assigned by ComfyUI's upload endpoint
                # It's made visible and editable, but primarily set by the JS interaction.
                "video": ("STRING", {"default": "", "multiline": False}),
            }
        }

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("filename",)
    FUNCTION = "upload_and_get_filename"  # Renamed function to reflect action
    CATEGORY = "RunningHub"  # Or your preferred category
    OUTPUT_NODE = False  # This node outputs data, not a final image/video display

    def upload_and_get_filename(self, apiConfig, video):
        """
        Reads the video file from ComfyUI's input directory based on the 'video' filename,
        uploads it to the RunningHub API, and returns the resulting filename/ID.

        Raises ImportError / ValueError / FileNotFoundError on bad inputs,
        ConnectionError when all upload attempts fail, and ValueError when the
        API response cannot be parsed or reports an error code.
        """
        # 1. Validate inputs
        if not comfyui_env_available or not folder_paths:
            raise ImportError("folder_paths module is required for RH_VideoUploader to find input files.")

        if not isinstance(apiConfig, dict) or not apiConfig.get("apiKey") or not apiConfig.get("base_url"):
            raise ValueError("Invalid or missing apiConfig structure provided to RH_VideoUploader.")

        if not video or video.strip() == "":
            raise ValueError("No video filename provided. Please select and upload a video using the node's widget.")

        apiKey = apiConfig["apiKey"]
        baseUrl = apiConfig["base_url"]

        # 2. Get the full path to the uploaded file in ComfyUI's input directory
        # The 'video' input contains the relative path within the input directory
        try:
            video_path = folder_paths.get_annotated_filepath(video)
            if not video_path or not os.path.exists(video_path):
                # Check subdirectories common for uploads if get_annotated_filepath fails directly
                # (get_annotated_filepath usually handles this, but as a fallback)
                potential_path = os.path.join(folder_paths.get_input_directory(), video)
                if os.path.exists(potential_path):
                    video_path = potential_path
                else:  # Check common subfolders like 'uploads'
                    potential_path = os.path.join(folder_paths.get_input_directory(), 'uploads', video)
                    if os.path.exists(potential_path):
                        video_path = potential_path
                    else:
                        raise FileNotFoundError(f"Video file not found in input directory: {video}")

            print(f"RH_VideoUploader: Found video file at: {video_path}")

        except Exception as e:
            # NOTE: broad catch re-wraps ANY lookup failure as FileNotFoundError,
            # including the FileNotFoundError raised just above (message nests).
            raise FileNotFoundError(f"Error finding video file '{video}': {e}")

        # 3. Prepare for RunningHub API upload
        # *** Use the same endpoint and data structure as ImageUploader ***
        upload_api_url = f"{baseUrl}/task/openapi/upload"  # Corrected endpoint
        # Headers should likely NOT contain the apiKey based on ImageUploader
        headers = {
            # 'X-API-Key': apiKey, # REMOVED based on ImageUploader
            'User-Agent': 'ComfyUI-RH_VideoUploader/1.0'
        }
        # Data payload containing apiKey and fileType
        data = {
            'apiKey': apiKey,
            'fileType': 'video'  # Added fileType, assuming 'video' is correct
        }

        # 4. Read the file and upload
        print(f"RH_VideoUploader: Uploading {video_path} to {upload_api_url}...")

        # Add retry logic: up to 5 attempts with exponential backoff (1s, 2s, 4s, 8s).
        max_retries = 5
        retry_delay = 1  # Initial delay in seconds
        last_exception = None
        response = None

        for attempt in range(max_retries):
            try:
                # File is re-opened per attempt so each retry streams from the start.
                with open(video_path, 'rb') as f:
                    files = {
                        'file': (os.path.basename(video_path), f)
                    }
                    # Send request with apiKey only in data, files attached
                    response = requests.post(upload_api_url, headers=headers, data=data, files=files)
                    print(f"RH_VideoUploader: Upload attempt {attempt + 1}/{max_retries} - Status Code: {response.status_code}")
                    response.raise_for_status()  # Raise HTTPError for bad responses (4xx or 5xx)

                    # If successful, break the loop
                    break

            except requests.exceptions.Timeout as e:
                last_exception = e
                print(f"RH_VideoUploader: Upload attempt {attempt + 1} timed out.")
            except requests.exceptions.ConnectionError as e:
                last_exception = e
                print(f"RH_VideoUploader: Upload attempt {attempt + 1} connection error: {e}")
            except requests.exceptions.RequestException as e:
                last_exception = e
                print(f"RH_VideoUploader: Upload attempt {attempt + 1} failed: {e}")
                # Check if response exists for potential non-2xx status codes handled by raise_for_status
                if e.response is not None:
                    print(f"RH_VideoUploader: Response content on error: {e.response.text}")
                # Potentially break early for client errors (4xx) that won't be fixed by retrying?
                # For now, we retry all RequestExceptions

            # Only reached after an exception (success breaks out above).
            # Wait before retrying, unless it's the last attempt
            if attempt < max_retries - 1:
                print(f"RH_VideoUploader: Retrying in {retry_delay} seconds...")
                time.sleep(retry_delay)
                retry_delay *= 2  # Exponential backoff
            else:
                print(f"RH_VideoUploader: Max retries ({max_retries}) reached.")
                # Raise the last exception encountered after all retries fail
                raise ConnectionError(f"Failed to upload video to RunningHub API after {max_retries} attempts. Last error: {last_exception}") from last_exception

        # If loop completes without breaking (shouldn't happen if success breaks), or response is None
        if response is None:
            raise ConnectionError(f"Upload failed after {max_retries} attempts, no response received. Last error: {last_exception}")

        # 5. Parse successful response (outside the retry loop)
        try:
            response_json = response.json()
            print(f"RH_VideoUploader: Upload API Response JSON: {response_json}")
        except json.JSONDecodeError as e:
            print(f"RH_VideoUploader: Failed to decode JSON response: {response.text}")
            raise ValueError(f"Failed to decode API response after successful upload: {e}") from e

        # Check API-level success code (HTTP 200 can still carry an API error).
        if response_json.get('code') != 0:
            raise ValueError(f"RunningHub API reported an error after upload: {response_json.get('msg', 'Unknown API error')}")

        # Extract filename using the correct key 'fileName'
        rh_data = response_json.get("data", {})
        uploaded_filename = None  # Initialize
        if isinstance(rh_data, dict):
            uploaded_filename = rh_data.get("fileName")  # Corrected key: fileName
            # Add fallbacks if needed, though fileName seems primary based on logs
            # uploaded_filename = uploaded_filename or rh_data.get("fileId") or rh_data.get("url")
        elif isinstance(rh_data, str):
            uploaded_filename = rh_data

        if not isinstance(uploaded_filename, str) or not uploaded_filename:
            raise ValueError("Upload succeeded but 'fileName' (or compatible field) not found in RunningHub API response.data.")

        print(f"RH_VideoUploader: Upload successful. RunningHub filename/ID: {uploaded_filename}")
        return (uploaded_filename,)

    # Removed generic Exception catch block to let specific errors propagate
    # except Exception as e:
    #     print(f"RH_VideoUploader: An unexpected error occurred during upload: {e}")
    #     raise RuntimeError(f"Unexpected error during video upload: {e}") from e

# Mappings for ComfyUI
# Moved to __init__.py typically, but included here for completeness if run standalone initially
# NODE_CLASS_MAPPINGS = {
#     "VideoUploaderNode_RH": VideoUploaderNode
# }
# NODE_DISPLAY_NAME_MAPPINGS = {
#     "VideoUploaderNode_RH": "RunningHub Video Uploader"
# }

# ==================== /__init__.py ====================
from .RH_SettingsNode import SettingsNode
from .RH_NodeInfoListNode import NodeInfoListNode
from .RH_ExecuteNode import ExecuteNode
from .RH_ImageUploaderNode import ImageUploaderNode
from .RH_VideoUploader import RH_VideoUploader
from .RH_AudioUploader import RH_AudioUploader

from .RH_Utils import *


# Maps ComfyUI node-type identifiers to their implementing classes.
NODE_CLASS_MAPPINGS = {
    "RH_SettingsNode": SettingsNode,
    "RH_NodeInfoListNode": NodeInfoListNode,
    "RH_ExecuteNode": ExecuteNode,
    "RH_ImageUploaderNode": ImageUploaderNode,
    "RH_Utils": AnyToStringNode,
    "RH_ExtractImage": RH_Extract_Image_From_List,
    "RH_BatchImages": RH_Batch_Images_From_List,
    "RH_VideoUploader": RH_VideoUploader,
    "RH_AudioUploader": RH_AudioUploader,
}

# Human-readable display names shown in the ComfyUI node menu.
NODE_DISPLAY_NAME_MAPPINGS = {
    "RH_SettingsNode": "RH Settings",
"RH_NodeInfoListNode": "RH Node Info List", 29 | "RH_ExecuteNode": "RH Execute", 30 | "RH_ImageUploaderNode": "RH Image Uploader", 31 | "RH_Utils": "RH Anything to String", 32 | "RH_ExtractImage": "RH Extract Image From ImageList", 33 | "RH_BatchImages": "RH Batch Images From ImageList", 34 | "RH_VideoUploader": "RH Video Uploader", 35 | "RH_AudioUploader": "RH Audio Uploader", 36 | 37 | } 38 | 39 | # Web Directory Setup 40 | # Tells ComfyUI where to find the JavaScript files associated with nodes in this package 41 | WEB_DIRECTORY = "./web/js" 42 | 43 | 44 | __all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS", "WEB_DIRECTORY"] 45 | -------------------------------------------------------------------------------- /examples/rh_audio_uploader_tts.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "31211066-e402-4e8a-8063-5004f7e91b5d", 3 | "revision": 0, 4 | "last_node_id": 20, 5 | "last_link_id": 18, 6 | "nodes": [ 7 | { 8 | "id": 6, 9 | "type": "RH_SettingsNode", 10 | "pos": [ 11 | 1444.8616943359375, 12 | 281.51458740234375 13 | ], 14 | "size": [ 15 | 315, 16 | 106 17 | ], 18 | "flags": {}, 19 | "order": 0, 20 | "mode": 0, 21 | "inputs": [], 22 | "outputs": [ 23 | { 24 | "label": "STRUCT", 25 | "name": "STRUCT", 26 | "type": "STRUCT", 27 | "slot_index": 0, 28 | "links": [ 29 | 8, 30 | 18 31 | ] 32 | } 33 | ], 34 | "properties": { 35 | "cnr_id": "ComfyUI_RH_APICall", 36 | "ver": "f0f16c02a2fd8ee80a84a4233713e1fb2f7c516c", 37 | "Node name for S&R": "RH_SettingsNode" 38 | }, 39 | "widgets_values": [ 40 | "https://www.runninghub.cn", 41 | "ed37fbdd79c34a7ca612aedbe5cea13e", 42 | "1919062047542415362" 43 | ] 44 | }, 45 | { 46 | "id": 11, 47 | "type": "RH_NodeInfoListNode", 48 | "pos": [ 49 | 1672.409912109375, 50 | 604.7130126953125 51 | ], 52 | "size": [ 53 | 330, 54 | 126 55 | ], 56 | "flags": {}, 57 | "order": 2, 58 | "mode": 0, 59 | "inputs": [ 60 | { 61 | "label": "previousNodeInfoList", 62 | "name": 
"previousNodeInfoList", 63 | "shape": 7, 64 | "type": "ARRAY", 65 | "link": null 66 | }, 67 | { 68 | "label": "fieldValue", 69 | "name": "fieldValue", 70 | "type": "STRING", 71 | "widget": { 72 | "name": "fieldValue" 73 | }, 74 | "link": 16 75 | } 76 | ], 77 | "outputs": [ 78 | { 79 | "label": "ARRAY", 80 | "name": "ARRAY", 81 | "type": "ARRAY", 82 | "links": [ 83 | 9 84 | ] 85 | } 86 | ], 87 | "properties": { 88 | "cnr_id": "ComfyUI_RH_APICall", 89 | "ver": "f0f16c02a2fd8ee80a84a4233713e1fb2f7c516c", 90 | "Node name for S&R": "RH_NodeInfoListNode" 91 | }, 92 | "widgets_values": [ 93 | 1, 94 | "audio", 95 | "18" 96 | ] 97 | }, 98 | { 99 | "id": 10, 100 | "type": "RH_ExecuteNode", 101 | "pos": [ 102 | 2043.489501953125, 103 | 430.2236633300781 104 | ], 105 | "size": [ 106 | 315, 107 | 186 108 | ], 109 | "flags": {}, 110 | "order": 3, 111 | "mode": 0, 112 | "inputs": [ 113 | { 114 | "label": "apiConfig", 115 | "name": "apiConfig", 116 | "type": "STRUCT", 117 | "link": 8 118 | }, 119 | { 120 | "label": "nodeInfoList", 121 | "name": "nodeInfoList", 122 | "shape": 7, 123 | "type": "ARRAY", 124 | "link": 9 125 | } 126 | ], 127 | "outputs": [ 128 | { 129 | "label": "images", 130 | "name": "images", 131 | "type": "IMAGE", 132 | "slot_index": 0, 133 | "links": [] 134 | }, 135 | { 136 | "label": "video_frames", 137 | "name": "video_frames", 138 | "type": "IMAGE", 139 | "links": [] 140 | }, 141 | { 142 | "label": "latent", 143 | "name": "latent", 144 | "type": "LATENT", 145 | "links": null 146 | }, 147 | { 148 | "label": "text", 149 | "name": "text", 150 | "type": "STRING", 151 | "links": null 152 | }, 153 | { 154 | "label": "audio", 155 | "name": "audio", 156 | "type": "AUDIO", 157 | "links": [ 158 | 17 159 | ] 160 | } 161 | ], 162 | "properties": { 163 | "cnr_id": "ComfyUI_RH_APICall", 164 | "ver": "f0f16c02a2fd8ee80a84a4233713e1fb2f7c516c", 165 | "Node name for S&R": "RH_ExecuteNode" 166 | }, 167 | "widgets_values": [ 168 | 600, 169 | 3, 170 | false 171 | ] 172 | }, 173 | 
{ 174 | "id": 20, 175 | "type": "SaveAudio", 176 | "pos": [ 177 | 2485.482666015625, 178 | 520.2659301757812 179 | ], 180 | "size": [ 181 | 315, 182 | 112 183 | ], 184 | "flags": {}, 185 | "order": 4, 186 | "mode": 0, 187 | "inputs": [ 188 | { 189 | "label": "audio", 190 | "name": "audio", 191 | "type": "AUDIO", 192 | "link": 17 193 | } 194 | ], 195 | "outputs": [], 196 | "properties": { 197 | "cnr_id": "comfy-core", 198 | "ver": "0.3.30", 199 | "Node name for S&R": "SaveAudio" 200 | }, 201 | "widgets_values": [ 202 | "audio/ComfyUI" 203 | ] 204 | }, 205 | { 206 | "id": 19, 207 | "type": "RH_AudioUploader", 208 | "pos": [ 209 | 1292.1536865234375, 210 | 581.9247436523438 211 | ], 212 | "size": [ 213 | 315, 214 | 100 215 | ], 216 | "flags": {}, 217 | "order": 1, 218 | "mode": 0, 219 | "inputs": [ 220 | { 221 | "label": "apiConfig", 222 | "name": "apiConfig", 223 | "type": "STRUCT", 224 | "link": 18 225 | } 226 | ], 227 | "outputs": [ 228 | { 229 | "label": "filename", 230 | "name": "filename", 231 | "type": "STRING", 232 | "links": [ 233 | 16 234 | ] 235 | } 236 | ], 237 | "properties": { 238 | "cnr_id": "ComfyUI_RH_APICall", 239 | "ver": "a0d3ab30a7b1324e4503f0358cd2a1ba8f27f5ab", 240 | "Node name for S&R": "RH_AudioUploader" 241 | }, 242 | "widgets_values": [ 243 | "hmyun.m4a", 244 | "" 245 | ] 246 | } 247 | ], 248 | "links": [ 249 | [ 250 | 8, 251 | 6, 252 | 0, 253 | 10, 254 | 0, 255 | "STRUCT" 256 | ], 257 | [ 258 | 9, 259 | 11, 260 | 0, 261 | 10, 262 | 1, 263 | "ARRAY" 264 | ], 265 | [ 266 | 16, 267 | 19, 268 | 0, 269 | 11, 270 | 1, 271 | "STRING" 272 | ], 273 | [ 274 | 17, 275 | 10, 276 | 4, 277 | 20, 278 | 0, 279 | "AUDIO" 280 | ], 281 | [ 282 | 18, 283 | 6, 284 | 0, 285 | 19, 286 | 0, 287 | "STRUCT" 288 | ] 289 | ], 290 | "groups": [], 291 | "config": {}, 292 | "extra": { 293 | "ds": { 294 | "scale": 0.8311834575010493, 295 | "offset": [ 296 | -1060.0981378882645, 297 | -77.37312124779665 298 | ] 299 | }, 300 | "frontendVersion": "1.17.11", 301 | "ue_links": 
[], 302 | "VHS_latentpreview": false, 303 | "VHS_latentpreviewrate": 0, 304 | "VHS_MetadataImage": true, 305 | "VHS_KeepIntermediate": true 306 | }, 307 | "version": 0.4 308 | } -------------------------------------------------------------------------------- /examples/rh_execute_others_allinone.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 25, 3 | "last_link_id": 31, 4 | "nodes": [ 5 | { 6 | "id": 19, 7 | "type": "GR85_ShowText", 8 | "pos": [ 9 | 2143.660888671875, 10 | 764.2783203125 11 | ], 12 | "size": [ 13 | 435.0155029296875, 14 | 248.83677673339844 15 | ], 16 | "flags": {}, 17 | "order": 6, 18 | "mode": 0, 19 | "inputs": [ 20 | { 21 | "name": "text", 22 | "label": "text", 23 | "type": "STRING", 24 | "widget": { 25 | "name": "text" 26 | }, 27 | "link": 26 28 | } 29 | ], 30 | "outputs": [ 31 | { 32 | "name": "STRING", 33 | "label": "STRING", 34 | "type": "STRING", 35 | "shape": 6, 36 | "links": null 37 | } 38 | ], 39 | "properties": { 40 | "cnr_id": "comfyui_gr85", 41 | "ver": "ec29a1369630f85935078e7ac599bf51a82f196e", 42 | "Node name for S&R": "GR85_ShowText" 43 | }, 44 | "widgets_values": [ 45 | "", 46 | "i am RunningHub" 47 | ] 48 | }, 49 | { 50 | "id": 17, 51 | "type": "WanVideoVAELoader", 52 | "pos": [ 53 | 2205.31689453125, 54 | 393.16900634765625 55 | ], 56 | "size": [ 57 | 315, 58 | 82 59 | ], 60 | "flags": {}, 61 | "order": 0, 62 | "mode": 0, 63 | "inputs": [], 64 | "outputs": [ 65 | { 66 | "name": "vae", 67 | "label": "vae", 68 | "type": "WANVAE", 69 | "links": [ 70 | 20 71 | ], 72 | "slot_index": 0 73 | } 74 | ], 75 | "properties": { 76 | "cnr_id": "ComfyUI-WanVideoWrapper", 77 | "ver": "19044adc78e523596c7655688d66bc5fa3010837", 78 | "Node name for S&R": "WanVideoVAELoader" 79 | }, 80 | "widgets_values": [ 81 | "Wan2_1_VAE_bf16.safetensors", 82 | "bf16" 83 | ] 84 | }, 85 | { 86 | "id": 16, 87 | "type": "WanVideoDecode", 88 | "pos": [ 89 | 2208.783203125, 90 | 507.4647216796875 91 | 
], 92 | "size": [ 93 | 315, 94 | 174 95 | ], 96 | "flags": {}, 97 | "order": 5, 98 | "mode": 0, 99 | "inputs": [ 100 | { 101 | "name": "vae", 102 | "label": "vae", 103 | "type": "WANVAE", 104 | "link": 20 105 | }, 106 | { 107 | "name": "samples", 108 | "label": "samples", 109 | "type": "LATENT", 110 | "link": 25 111 | } 112 | ], 113 | "outputs": [ 114 | { 115 | "name": "images", 116 | "label": "images", 117 | "type": "IMAGE", 118 | "links": [ 119 | 27 120 | ], 121 | "slot_index": 0 122 | } 123 | ], 124 | "properties": { 125 | "cnr_id": "ComfyUI-WanVideoWrapper", 126 | "ver": "19044adc78e523596c7655688d66bc5fa3010837", 127 | "Node name for S&R": "WanVideoDecode" 128 | }, 129 | "widgets_values": [ 130 | false, 131 | 272, 132 | 272, 133 | 144, 134 | 128 135 | ] 136 | }, 137 | { 138 | "id": 23, 139 | "type": "VHS_VideoCombine", 140 | "pos": [ 141 | 2660.65576171875, 142 | 426.9168701171875 143 | ], 144 | "size": [ 145 | 214.7587890625, 146 | 446.7587890625 147 | ], 148 | "flags": {}, 149 | "order": 8, 150 | "mode": 0, 151 | "inputs": [ 152 | { 153 | "name": "images", 154 | "label": "images", 155 | "type": "IMAGE", 156 | "link": 27 157 | }, 158 | { 159 | "name": "audio", 160 | "label": "audio", 161 | "type": "AUDIO", 162 | "shape": 7, 163 | "link": 29 164 | }, 165 | { 166 | "name": "meta_batch", 167 | "label": "meta_batch", 168 | "type": "VHS_BatchManager", 169 | "shape": 7, 170 | "link": null 171 | }, 172 | { 173 | "name": "vae", 174 | "label": "vae", 175 | "type": "VAE", 176 | "shape": 7, 177 | "link": null 178 | } 179 | ], 180 | "outputs": [ 181 | { 182 | "name": "Filenames", 183 | "label": "Filenames", 184 | "type": "VHS_FILENAMES", 185 | "links": null 186 | } 187 | ], 188 | "properties": { 189 | "cnr_id": "comfyui-videohelpersuite", 190 | "ver": "df55f01d1df2f7bf5cc772294bc2e6d8bab22d66", 191 | "Node name for S&R": "VHS_VideoCombine" 192 | }, 193 | "widgets_values": { 194 | "frame_rate": 8, 195 | "loop_count": 0, 196 | "filename_prefix": "AnimateDiff", 197 | 
"format": "image/gif", 198 | "pingpong": false, 199 | "save_output": true, 200 | "videopreview": { 201 | "hidden": false, 202 | "paused": false, 203 | "params": { 204 | "filename": "AnimateDiff_00016.gif", 205 | "subfolder": "", 206 | "type": "output", 207 | "format": "image/gif", 208 | "frame_rate": 8, 209 | "workflow": "AnimateDiff_00016.png", 210 | "fullpath": "D:\\ComfyUI_windows_portable\\ComfyUI\\output\\AnimateDiff_00016.gif" 211 | } 212 | } 213 | } 214 | }, 215 | { 216 | "id": 25, 217 | "type": "PreviewAudio", 218 | "pos": [ 219 | 2202.7685546875, 220 | 211.095458984375 221 | ], 222 | "size": [ 223 | 315, 224 | 88 225 | ], 226 | "flags": {}, 227 | "order": 7, 228 | "mode": 0, 229 | "inputs": [ 230 | { 231 | "name": "audio", 232 | "label": "audio", 233 | "type": "AUDIO", 234 | "link": 30 235 | } 236 | ], 237 | "outputs": [], 238 | "properties": { 239 | "cnr_id": "comfy-core", 240 | "ver": "0.3.28", 241 | "Node name for S&R": "PreviewAudio" 242 | }, 243 | "widgets_values": [ 244 | "" 245 | ] 246 | }, 247 | { 248 | "id": 24, 249 | "type": "VHS_VideoCombine", 250 | "pos": [ 251 | 2662.107421875, 252 | -97.97989654541016 253 | ], 254 | "size": [ 255 | 214.7587890625, 256 | 446.7587890625 257 | ], 258 | "flags": {}, 259 | "order": 4, 260 | "mode": 0, 261 | "inputs": [ 262 | { 263 | "name": "images", 264 | "label": "images", 265 | "type": "IMAGE", 266 | "link": 31 267 | }, 268 | { 269 | "name": "audio", 270 | "label": "audio", 271 | "type": "AUDIO", 272 | "shape": 7, 273 | "link": null 274 | }, 275 | { 276 | "name": "meta_batch", 277 | "label": "meta_batch", 278 | "type": "VHS_BatchManager", 279 | "shape": 7, 280 | "link": null 281 | }, 282 | { 283 | "name": "vae", 284 | "label": "vae", 285 | "type": "VAE", 286 | "shape": 7, 287 | "link": null 288 | } 289 | ], 290 | "outputs": [ 291 | { 292 | "name": "Filenames", 293 | "label": "Filenames", 294 | "type": "VHS_FILENAMES", 295 | "links": null 296 | } 297 | ], 298 | "properties": { 299 | "cnr_id": 
"comfyui-videohelpersuite", 300 | "ver": "df55f01d1df2f7bf5cc772294bc2e6d8bab22d66", 301 | "Node name for S&R": "VHS_VideoCombine" 302 | }, 303 | "widgets_values": { 304 | "frame_rate": 8, 305 | "loop_count": 0, 306 | "filename_prefix": "AnimateDiff", 307 | "format": "image/gif", 308 | "pingpong": false, 309 | "save_output": true, 310 | "videopreview": { 311 | "hidden": false, 312 | "paused": false, 313 | "params": { 314 | "filename": "AnimateDiff_00015.gif", 315 | "subfolder": "", 316 | "type": "output", 317 | "format": "image/gif", 318 | "frame_rate": 8, 319 | "workflow": "AnimateDiff_00015.png", 320 | "fullpath": "D:\\ComfyUI_windows_portable\\ComfyUI\\output\\AnimateDiff_00015.gif" 321 | } 322 | } 323 | } 324 | }, 325 | { 326 | "id": 18, 327 | "type": "PreviewImage", 328 | "pos": [ 329 | 2212.341796875, 330 | -233.77410888671875 331 | ], 332 | "size": [ 333 | 213.61570739746094, 334 | 246 335 | ], 336 | "flags": {}, 337 | "order": 3, 338 | "mode": 0, 339 | "inputs": [ 340 | { 341 | "name": "images", 342 | "label": "images", 343 | "type": "IMAGE", 344 | "link": 22 345 | } 346 | ], 347 | "outputs": [], 348 | "properties": { 349 | "cnr_id": "comfy-core", 350 | "ver": "0.3.28", 351 | "Node name for S&R": "PreviewImage" 352 | }, 353 | "widgets_values": [] 354 | }, 355 | { 356 | "id": 14, 357 | "type": "RH_ExecuteNode", 358 | "pos": [ 359 | 1421.802001953125, 360 | 126.14463806152344 361 | ], 362 | "size": [ 363 | 411.875, 364 | 279.5 365 | ], 366 | "flags": {}, 367 | "order": 2, 368 | "mode": 0, 369 | "inputs": [ 370 | { 371 | "name": "apiConfig", 372 | "label": "apiConfig", 373 | "type": "STRUCT", 374 | "link": 15 375 | }, 376 | { 377 | "name": "nodeInfoList", 378 | "label": "nodeInfoList", 379 | "type": "ARRAY", 380 | "shape": 7, 381 | "link": null 382 | } 383 | ], 384 | "outputs": [ 385 | { 386 | "name": "images", 387 | "label": "images", 388 | "type": "IMAGE", 389 | "links": [ 390 | 22 391 | ], 392 | "slot_index": 0 393 | }, 394 | { 395 | "name": "video_frames", 
396 | "label": "video_frames", 397 | "type": "IMAGE", 398 | "links": [ 399 | 31 400 | ], 401 | "slot_index": 1 402 | }, 403 | { 404 | "name": "latent", 405 | "label": "latent", 406 | "type": "LATENT", 407 | "links": [ 408 | 25 409 | ], 410 | "slot_index": 2 411 | }, 412 | { 413 | "name": "text", 414 | "label": "text", 415 | "type": "STRING", 416 | "links": [ 417 | 26 418 | ], 419 | "slot_index": 3 420 | }, 421 | { 422 | "name": "audio", 423 | "label": "audio", 424 | "type": "AUDIO", 425 | "links": [ 426 | 29, 427 | 30 428 | ], 429 | "slot_index": 4 430 | } 431 | ], 432 | "properties": { 433 | "cnr_id": "ComfyUI_RH_APICall", 434 | "ver": "30aafeab51d6c09a3c866a000476a676cad47ed7", 435 | "Node name for S&R": "RH_ExecuteNode" 436 | }, 437 | "widgets_values": [ 438 | 600, 439 | 4 440 | ] 441 | }, 442 | { 443 | "id": 6, 444 | "type": "RH_SettingsNode", 445 | "pos": [ 446 | 956.8349609375, 447 | 192.3112335205078 448 | ], 449 | "size": [ 450 | 315, 451 | 106 452 | ], 453 | "flags": {}, 454 | "order": 1, 455 | "mode": 0, 456 | "inputs": [], 457 | "outputs": [ 458 | { 459 | "name": "STRUCT", 460 | "label": "STRUCT", 461 | "type": "STRUCT", 462 | "shape": 3, 463 | "links": [ 464 | 15 465 | ], 466 | "slot_index": 0 467 | } 468 | ], 469 | "properties": { 470 | "cnr_id": "ComfyUI_RH_APICall", 471 | "ver": "539cc9c3df5316c36a660529d290d042a46e55b3", 472 | "Node name for S&R": "RH_SettingsNode" 473 | }, 474 | "widgets_values": [ 475 | "https://www.runninghub.cn", 476 | "ed37fbdd79c34a7ca612aedbe5cea13e", 477 | "1915077908329594882" 478 | ] 479 | } 480 | ], 481 | "links": [ 482 | [ 483 | 15, 484 | 6, 485 | 0, 486 | 14, 487 | 0, 488 | "STRUCT" 489 | ], 490 | [ 491 | 20, 492 | 17, 493 | 0, 494 | 16, 495 | 0, 496 | "WANVAE" 497 | ], 498 | [ 499 | 22, 500 | 14, 501 | 0, 502 | 18, 503 | 0, 504 | "IMAGE" 505 | ], 506 | [ 507 | 25, 508 | 14, 509 | 2, 510 | 16, 511 | 1, 512 | "LATENT" 513 | ], 514 | [ 515 | 26, 516 | 14, 517 | 3, 518 | 19, 519 | 0, 520 | "STRING" 521 | ], 522 | [ 523 | 
27, 524 | 16, 525 | 0, 526 | 23, 527 | 0, 528 | "IMAGE" 529 | ], 530 | [ 531 | 29, 532 | 14, 533 | 4, 534 | 23, 535 | 1, 536 | "AUDIO" 537 | ], 538 | [ 539 | 30, 540 | 14, 541 | 4, 542 | 25, 543 | 0, 544 | "AUDIO" 545 | ], 546 | [ 547 | 31, 548 | 14, 549 | 1, 550 | 24, 551 | 0, 552 | "IMAGE" 553 | ] 554 | ], 555 | "groups": [], 556 | "config": {}, 557 | "extra": { 558 | "ds": { 559 | "scale": 0.6830134553650705, 560 | "offset": [ 561 | -138.72136016474388, 562 | 531.7418684260365 563 | ] 564 | }, 565 | "ue_links": [], 566 | "VHS_latentpreview": false, 567 | "VHS_latentpreviewrate": 0, 568 | "VHS_MetadataImage": true, 569 | "VHS_KeepIntermediate": true 570 | }, 571 | "version": 0.4 572 | } -------------------------------------------------------------------------------- /examples/rh_image_to_image.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 12, 3 | "last_link_id": 12, 4 | "nodes": [ 5 | { 6 | "id": 5, 7 | "type": "LoadImage", 8 | "pos": [ 9 | 1347, 10 | 263 11 | ], 12 | "size": [ 13 | 315, 14 | 314 15 | ], 16 | "flags": {}, 17 | "order": 0, 18 | "mode": 0, 19 | "inputs": [], 20 | "outputs": [ 21 | { 22 | "name": "IMAGE", 23 | "label": "IMAGE", 24 | "type": "IMAGE", 25 | "shape": 3, 26 | "links": [ 27 | 7 28 | ], 29 | "slot_index": 0 30 | }, 31 | { 32 | "name": "MASK", 33 | "label": "MASK", 34 | "type": "MASK", 35 | "shape": 3, 36 | "links": null 37 | } 38 | ], 39 | "properties": { 40 | "cnr_id": "comfy-core", 41 | "ver": "0.3.28", 42 | "Node name for S&R": "LoadImage" 43 | }, 44 | "widgets_values": [ 45 | "comfyui_temp_clpqa_00001__erplu.png", 46 | "image" 47 | ] 48 | }, 49 | { 50 | "id": 9, 51 | "type": "RH_ImageUploaderNode", 52 | "pos": [ 53 | 1743.840087890625, 54 | 466.5376281738281 55 | ], 56 | "size": [ 57 | 229.20001220703125, 58 | 46 59 | ], 60 | "flags": {}, 61 | "order": 2, 62 | "mode": 0, 63 | "inputs": [ 64 | { 65 | "name": "apiConfig", 66 | "label": "apiConfig", 67 | "type": "STRUCT", 
68 | "link": 6 69 | }, 70 | { 71 | "name": "image", 72 | "label": "image", 73 | "type": "IMAGE", 74 | "link": 7 75 | } 76 | ], 77 | "outputs": [ 78 | { 79 | "name": "filename", 80 | "label": "filename", 81 | "type": "STRING", 82 | "shape": 3, 83 | "links": [ 84 | 11 85 | ], 86 | "slot_index": 0 87 | } 88 | ], 89 | "properties": { 90 | "cnr_id": "ComfyUI_RH_APICall", 91 | "ver": "f0f16c02a2fd8ee80a84a4233713e1fb2f7c516c", 92 | "Node name for S&R": "RH_ImageUploaderNode" 93 | }, 94 | "widgets_values": [] 95 | }, 96 | { 97 | "id": 11, 98 | "type": "RH_NodeInfoListNode", 99 | "pos": [ 100 | 1702.48779296875, 101 | 576.8912963867188 102 | ], 103 | "size": [ 104 | 330, 105 | 126 106 | ], 107 | "flags": {}, 108 | "order": 3, 109 | "mode": 0, 110 | "inputs": [ 111 | { 112 | "name": "previousNodeInfoList", 113 | "label": "previousNodeInfoList", 114 | "type": "ARRAY", 115 | "shape": 7, 116 | "link": null 117 | }, 118 | { 119 | "name": "fieldValue", 120 | "label": "fieldValue", 121 | "type": "STRING", 122 | "widget": { 123 | "name": "fieldValue" 124 | }, 125 | "link": 11 126 | } 127 | ], 128 | "outputs": [ 129 | { 130 | "name": "ARRAY", 131 | "label": "ARRAY", 132 | "type": "ARRAY", 133 | "shape": 3, 134 | "links": [ 135 | 9 136 | ] 137 | } 138 | ], 139 | "properties": { 140 | "cnr_id": "ComfyUI_RH_APICall", 141 | "ver": "f0f16c02a2fd8ee80a84a4233713e1fb2f7c516c", 142 | "Node name for S&R": "RH_NodeInfoListNode" 143 | }, 144 | "widgets_values": [ 145 | 19, 146 | "image", 147 | "18" 148 | ] 149 | }, 150 | { 151 | "id": 10, 152 | "type": "RH_ExecuteNode", 153 | "pos": [ 154 | 2035.218017578125, 155 | 315.1768798828125 156 | ], 157 | "size": [ 158 | 315, 159 | 162 160 | ], 161 | "flags": {}, 162 | "order": 4, 163 | "mode": 0, 164 | "inputs": [ 165 | { 166 | "name": "apiConfig", 167 | "label": "apiConfig", 168 | "type": "STRUCT", 169 | "link": 8 170 | }, 171 | { 172 | "name": "nodeInfoList", 173 | "label": "nodeInfoList", 174 | "type": "ARRAY", 175 | "shape": 7, 176 | "link": 9, 
177 | "slot_index": 1 178 | } 179 | ], 180 | "outputs": [ 181 | { 182 | "name": "images", 183 | "label": "images", 184 | "type": "IMAGE", 185 | "shape": 3, 186 | "links": [ 187 | 10 188 | ], 189 | "slot_index": 0 190 | }, 191 | { 192 | "name": "video_frames", 193 | "label": "video_frames", 194 | "type": "IMAGE", 195 | "links": null 196 | }, 197 | { 198 | "name": "latent", 199 | "label": "latent", 200 | "type": "LATENT", 201 | "links": null 202 | }, 203 | { 204 | "name": "text", 205 | "label": "text", 206 | "type": "STRING", 207 | "links": null 208 | }, 209 | { 210 | "name": "audio", 211 | "label": "audio", 212 | "type": "AUDIO", 213 | "links": null 214 | } 215 | ], 216 | "properties": { 217 | "cnr_id": "ComfyUI_RH_APICall", 218 | "ver": "f0f16c02a2fd8ee80a84a4233713e1fb2f7c516c", 219 | "Node name for S&R": "RH_ExecuteNode" 220 | }, 221 | "widgets_values": [ 222 | 600, 223 | 3 224 | ] 225 | }, 226 | { 227 | "id": 12, 228 | "type": "PreviewImage", 229 | "pos": [ 230 | 2398.80810546875, 231 | 223.72695922851562 232 | ], 233 | "size": [ 234 | 314.3929748535156, 235 | 547.883544921875 236 | ], 237 | "flags": {}, 238 | "order": 5, 239 | "mode": 0, 240 | "inputs": [ 241 | { 242 | "name": "images", 243 | "label": "images", 244 | "type": "IMAGE", 245 | "link": 10 246 | } 247 | ], 248 | "outputs": [], 249 | "properties": { 250 | "cnr_id": "comfy-core", 251 | "ver": "0.3.28", 252 | "Node name for S&R": "PreviewImage" 253 | }, 254 | "widgets_values": [] 255 | }, 256 | { 257 | "id": 6, 258 | "type": "RH_SettingsNode", 259 | "pos": [ 260 | 1687.73828125, 261 | 261.212158203125 262 | ], 263 | "size": [ 264 | 315, 265 | 106 266 | ], 267 | "flags": {}, 268 | "order": 1, 269 | "mode": 0, 270 | "inputs": [], 271 | "outputs": [ 272 | { 273 | "name": "STRUCT", 274 | "label": "STRUCT", 275 | "type": "STRUCT", 276 | "shape": 3, 277 | "links": [ 278 | 6, 279 | 8 280 | ], 281 | "slot_index": 0 282 | } 283 | ], 284 | "properties": { 285 | "cnr_id": "ComfyUI_RH_APICall", 286 | "ver": 
"f0f16c02a2fd8ee80a84a4233713e1fb2f7c516c", 287 | "Node name for S&R": "RH_SettingsNode" 288 | }, 289 | "widgets_values": [ 290 | "https://www.runninghub.cn", 291 | "ed37fbdd79c34a7ca612aedbe5cea13e", 292 | "1867632304280715265" 293 | ] 294 | } 295 | ], 296 | "links": [ 297 | [ 298 | 6, 299 | 6, 300 | 0, 301 | 9, 302 | 0, 303 | "STRUCT" 304 | ], 305 | [ 306 | 7, 307 | 5, 308 | 0, 309 | 9, 310 | 1, 311 | "IMAGE" 312 | ], 313 | [ 314 | 8, 315 | 6, 316 | 0, 317 | 10, 318 | 0, 319 | "STRUCT" 320 | ], 321 | [ 322 | 9, 323 | 11, 324 | 0, 325 | 10, 326 | 1, 327 | "ARRAY" 328 | ], 329 | [ 330 | 10, 331 | 10, 332 | 0, 333 | 12, 334 | 0, 335 | "IMAGE" 336 | ], 337 | [ 338 | 11, 339 | 9, 340 | 0, 341 | 11, 342 | 1, 343 | "STRING" 344 | ] 345 | ], 346 | "groups": [], 347 | "config": {}, 348 | "extra": { 349 | "ds": { 350 | "scale": 1.3190278677309182, 351 | "offset": [ 352 | -1193.309900431111, 353 | -6.93960225853351 354 | ] 355 | }, 356 | "ue_links": [], 357 | "VHS_latentpreview": false, 358 | "VHS_latentpreviewrate": 0, 359 | "VHS_MetadataImage": true, 360 | "VHS_KeepIntermediate": true 361 | }, 362 | "version": 0.4 363 | } -------------------------------------------------------------------------------- /examples/rh_image_to_image_webapp.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 7, 3 | "last_link_id": 7, 4 | "nodes": [ 5 | { 6 | "id": 4, 7 | "type": "SaveImage", 8 | "pos": [ 9 | 2598.51708984375, 10 | 613.9205932617188 11 | ], 12 | "size": [ 13 | 315, 14 | 270 15 | ], 16 | "flags": {}, 17 | "order": 6, 18 | "mode": 0, 19 | "inputs": [ 20 | { 21 | "name": "images", 22 | "label": "images", 23 | "type": "IMAGE", 24 | "link": 3 25 | } 26 | ], 27 | "outputs": [], 28 | "properties": { 29 | "cnr_id": "comfy-core", 30 | "ver": "0.3.28", 31 | "Node name for S&R": "SaveImage" 32 | }, 33 | "widgets_values": [ 34 | "ComfyUI" 35 | ] 36 | }, 37 | { 38 | "id": 1, 39 | "type": "RH_NodeInfoListNode", 40 | "pos": [ 41 | 
1771.202392578125, 42 | 696.84130859375 43 | ], 44 | "size": [ 45 | 343.2673034667969, 46 | 115.00286865234375 47 | ], 48 | "flags": {}, 49 | "order": 4, 50 | "mode": 0, 51 | "inputs": [ 52 | { 53 | "name": "previousNodeInfoList", 54 | "label": "previousNodeInfoList", 55 | "type": "ARRAY", 56 | "shape": 7, 57 | "link": 4 58 | } 59 | ], 60 | "outputs": [ 61 | { 62 | "name": "ARRAY", 63 | "label": "ARRAY", 64 | "type": "ARRAY", 65 | "links": [ 66 | 2 67 | ] 68 | } 69 | ], 70 | "properties": { 71 | "cnr_id": "ComfyUI_RH_APICall", 72 | "ver": "d26b20496040d4b4dbea590ac3690d587ae103b3", 73 | "Node name for S&R": "RH_NodeInfoListNode" 74 | }, 75 | "widgets_values": [ 76 | 55, 77 | "text", 78 | "皮克斯风格,3D,卡通,精致的面部特征,细腻的皮肤" 79 | ] 80 | }, 81 | { 82 | "id": 2, 83 | "type": "RH_SettingsNode", 84 | "pos": [ 85 | 1772.150146484375, 86 | 515.3632202148438 87 | ], 88 | "size": [ 89 | 315, 90 | 106 91 | ], 92 | "flags": {}, 93 | "order": 0, 94 | "mode": 0, 95 | "inputs": [], 96 | "outputs": [ 97 | { 98 | "name": "STRUCT", 99 | "label": "STRUCT", 100 | "type": "STRUCT", 101 | "shape": 3, 102 | "links": [ 103 | 1, 104 | 6 105 | ], 106 | "slot_index": 0 107 | } 108 | ], 109 | "properties": { 110 | "cnr_id": "ComfyUI_RH_APICall", 111 | "ver": "f0f16c02a2fd8ee80a84a4233713e1fb2f7c516c", 112 | "Node name for S&R": "RH_SettingsNode" 113 | }, 114 | "widgets_values": [ 115 | "https://www.runninghub.cn", 116 | "ed37fbdd79c34a7ca612aedbe5cea13e", 117 | " 1873567075221295105" 118 | ] 119 | }, 120 | { 121 | "id": 3, 122 | "type": "RH_ExecuteNode", 123 | "pos": [ 124 | 2181.069580078125, 125 | 652.3009033203125 126 | ], 127 | "size": [ 128 | 317.4000244140625, 129 | 186 130 | ], 131 | "flags": {}, 132 | "order": 5, 133 | "mode": 0, 134 | "inputs": [ 135 | { 136 | "name": "apiConfig", 137 | "label": "apiConfig", 138 | "type": "STRUCT", 139 | "link": 1 140 | }, 141 | { 142 | "name": "nodeInfoList", 143 | "label": "nodeInfoList", 144 | "type": "ARRAY", 145 | "shape": 7, 146 | "link": 2 147 | } 148 
| ], 149 | "outputs": [ 150 | { 151 | "name": "images", 152 | "label": "images", 153 | "type": "IMAGE", 154 | "links": [ 155 | 3 156 | ], 157 | "slot_index": 0 158 | }, 159 | { 160 | "name": "video_frames", 161 | "label": "video_frames", 162 | "type": "IMAGE", 163 | "links": null 164 | }, 165 | { 166 | "name": "latent", 167 | "label": "latent", 168 | "type": "LATENT", 169 | "links": null 170 | }, 171 | { 172 | "name": "text", 173 | "label": "text", 174 | "type": "STRING", 175 | "links": null 176 | }, 177 | { 178 | "name": "audio", 179 | "label": "audio", 180 | "type": "AUDIO", 181 | "links": null 182 | } 183 | ], 184 | "properties": { 185 | "cnr_id": "ComfyUI_RH_APICall", 186 | "ver": "d26b20496040d4b4dbea590ac3690d587ae103b3", 187 | "Node name for S&R": "RH_ExecuteNode" 188 | }, 189 | "widgets_values": [ 190 | 600, 191 | 1, 192 | true 193 | ] 194 | }, 195 | { 196 | "id": 6, 197 | "type": "LoadImage", 198 | "pos": [ 199 | 1428.1463623046875, 200 | 530.5256958007812 201 | ], 202 | "size": [ 203 | 315, 204 | 314 205 | ], 206 | "flags": {}, 207 | "order": 1, 208 | "mode": 0, 209 | "inputs": [], 210 | "outputs": [ 211 | { 212 | "name": "IMAGE", 213 | "label": "IMAGE", 214 | "type": "IMAGE", 215 | "links": [ 216 | 5 217 | ], 218 | "slot_index": 0 219 | }, 220 | { 221 | "name": "MASK", 222 | "label": "MASK", 223 | "type": "MASK", 224 | "links": null 225 | } 226 | ], 227 | "properties": { 228 | "cnr_id": "comfy-core", 229 | "ver": "0.3.28", 230 | "Node name for S&R": "LoadImage" 231 | }, 232 | "widgets_values": [ 233 | "00002-3919489202.png", 234 | "image" 235 | ] 236 | }, 237 | { 238 | "id": 7, 239 | "type": "RH_ImageUploaderNode", 240 | "pos": [ 241 | 1521.4920654296875, 242 | 962.1885986328125 243 | ], 244 | "size": [ 245 | 229.20001220703125, 246 | 46 247 | ], 248 | "flags": {}, 249 | "order": 2, 250 | "mode": 0, 251 | "inputs": [ 252 | { 253 | "name": "apiConfig", 254 | "label": "apiConfig", 255 | "type": "STRUCT", 256 | "link": 6 257 | }, 258 | { 259 | "name": 
"image", 260 | "label": "image", 261 | "type": "IMAGE", 262 | "link": 5 263 | } 264 | ], 265 | "outputs": [ 266 | { 267 | "name": "filename", 268 | "label": "filename", 269 | "type": "STRING", 270 | "links": [ 271 | 7 272 | ], 273 | "slot_index": 0 274 | } 275 | ], 276 | "properties": { 277 | "cnr_id": "ComfyUI_RH_APICall", 278 | "ver": "5f18caa0ad47bc741770a1bc9d0a0682b40a0b81", 279 | "Node name for S&R": "RH_ImageUploaderNode" 280 | } 281 | }, 282 | { 283 | "id": 5, 284 | "type": "RH_NodeInfoListNode", 285 | "pos": [ 286 | 1789.681640625, 287 | 888.2706298828125 288 | ], 289 | "size": [ 290 | 330, 291 | 126 292 | ], 293 | "flags": {}, 294 | "order": 3, 295 | "mode": 0, 296 | "inputs": [ 297 | { 298 | "name": "previousNodeInfoList", 299 | "label": "previousNodeInfoList", 300 | "type": "ARRAY", 301 | "shape": 7, 302 | "link": null 303 | }, 304 | { 305 | "name": "fieldValue", 306 | "type": "STRING", 307 | "widget": { 308 | "name": "fieldValue" 309 | }, 310 | "link": 7 311 | } 312 | ], 313 | "outputs": [ 314 | { 315 | "name": "ARRAY", 316 | "label": "ARRAY", 317 | "type": "ARRAY", 318 | "links": [ 319 | 4 320 | ] 321 | } 322 | ], 323 | "properties": { 324 | "cnr_id": "ComfyUI_RH_APICall", 325 | "ver": "5f18caa0ad47bc741770a1bc9d0a0682b40a0b81", 326 | "Node name for S&R": "RH_NodeInfoListNode" 327 | }, 328 | "widgets_values": [ 329 | 44, 330 | "image", 331 | "" 332 | ] 333 | } 334 | ], 335 | "links": [ 336 | [ 337 | 1, 338 | 2, 339 | 0, 340 | 3, 341 | 0, 342 | "STRUCT" 343 | ], 344 | [ 345 | 2, 346 | 1, 347 | 0, 348 | 3, 349 | 1, 350 | "ARRAY" 351 | ], 352 | [ 353 | 3, 354 | 3, 355 | 0, 356 | 4, 357 | 0, 358 | "IMAGE" 359 | ], 360 | [ 361 | 4, 362 | 5, 363 | 0, 364 | 1, 365 | 0, 366 | "ARRAY" 367 | ], 368 | [ 369 | 5, 370 | 6, 371 | 0, 372 | 7, 373 | 1, 374 | "IMAGE" 375 | ], 376 | [ 377 | 6, 378 | 2, 379 | 0, 380 | 7, 381 | 0, 382 | "STRUCT" 383 | ], 384 | [ 385 | 7, 386 | 7, 387 | 0, 388 | 5, 389 | 1, 390 | "STRING" 391 | ] 392 | ], 393 | "groups": [], 394 | 
"config": {}, 395 | "extra": { 396 | "ds": { 397 | "scale": 1.3190278677309182, 398 | "offset": [ 399 | -1316.5066910186686, 400 | -381.7421459306837 401 | ] 402 | }, 403 | "VHS_latentpreview": false, 404 | "VHS_latentpreviewrate": 0, 405 | "VHS_MetadataImage": true, 406 | "VHS_KeepIntermediate": true 407 | }, 408 | "version": 0.4 409 | } -------------------------------------------------------------------------------- /examples/rh_image_to_video_FramePack.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "e863e8b3-bfbb-4d1e-8e77-36e710a00769", 3 | "revision": 0, 4 | "last_node_id": 16, 5 | "last_link_id": 18, 6 | "nodes": [ 7 | { 8 | "id": 10, 9 | "type": "RH_ExecuteNode", 10 | "pos": [ 11 | 1755.1820068359375, 12 | 391.4641418457031 13 | ], 14 | "size": [ 15 | 315, 16 | 186 17 | ], 18 | "flags": {}, 19 | "order": 4, 20 | "mode": 0, 21 | "inputs": [ 22 | { 23 | "label": "apiConfig", 24 | "name": "apiConfig", 25 | "type": "STRUCT", 26 | "link": 8 27 | }, 28 | { 29 | "label": "nodeInfoList", 30 | "name": "nodeInfoList", 31 | "shape": 7, 32 | "type": "ARRAY", 33 | "link": 15 34 | } 35 | ], 36 | "outputs": [ 37 | { 38 | "label": "images", 39 | "name": "images", 40 | "type": "IMAGE", 41 | "slot_index": 0, 42 | "links": [] 43 | }, 44 | { 45 | "label": "video_frames", 46 | "name": "video_frames", 47 | "type": "IMAGE", 48 | "slot_index": 1, 49 | "links": [ 50 | 14 51 | ] 52 | }, 53 | { 54 | "label": "latent", 55 | "name": "latent", 56 | "type": "LATENT", 57 | "links": null 58 | }, 59 | { 60 | "label": "text", 61 | "name": "text", 62 | "type": "STRING", 63 | "links": null 64 | }, 65 | { 66 | "label": "audio", 67 | "name": "audio", 68 | "type": "AUDIO", 69 | "links": null 70 | } 71 | ], 72 | "properties": { 73 | "cnr_id": "ComfyUI_RH_APICall", 74 | "ver": "f0f16c02a2fd8ee80a84a4233713e1fb2f7c516c", 75 | "Node name for S&R": "RH_ExecuteNode" 76 | }, 77 | "widgets_values": [ 78 | 600, 79 | 3, 80 | false 81 | ] 82 | }, 83 | { 
84 | "id": 16, 85 | "type": "RH_ImageUploaderNode", 86 | "pos": [ 87 | 1766.6341552734375, 88 | 818.8133544921875 89 | ], 90 | "size": [ 91 | 229.20001220703125, 92 | 46 93 | ], 94 | "flags": {}, 95 | "order": 2, 96 | "mode": 0, 97 | "inputs": [ 98 | { 99 | "label": "apiConfig", 100 | "name": "apiConfig", 101 | "type": "STRUCT", 102 | "link": 17 103 | }, 104 | { 105 | "label": "image", 106 | "name": "image", 107 | "type": "IMAGE", 108 | "link": 16 109 | } 110 | ], 111 | "outputs": [ 112 | { 113 | "label": "filename", 114 | "name": "filename", 115 | "type": "STRING", 116 | "links": [ 117 | 18 118 | ] 119 | } 120 | ], 121 | "properties": { 122 | "cnr_id": "ComfyUI_RH_APICall", 123 | "ver": "6ee2bcd4d1f5ae8bc54178bc9f3f92304a8989c4", 124 | "Node name for S&R": "RH_ImageUploaderNode" 125 | } 126 | }, 127 | { 128 | "id": 6, 129 | "type": "RH_SettingsNode", 130 | "pos": [ 131 | 1335.6798095703125, 132 | 401.46697998046875 133 | ], 134 | "size": [ 135 | 315, 136 | 106 137 | ], 138 | "flags": {}, 139 | "order": 0, 140 | "mode": 0, 141 | "inputs": [], 142 | "outputs": [ 143 | { 144 | "label": "STRUCT", 145 | "name": "STRUCT", 146 | "type": "STRUCT", 147 | "slot_index": 0, 148 | "links": [ 149 | 8, 150 | 17 151 | ] 152 | } 153 | ], 154 | "properties": { 155 | "cnr_id": "ComfyUI_RH_APICall", 156 | "ver": "f0f16c02a2fd8ee80a84a4233713e1fb2f7c516c", 157 | "Node name for S&R": "RH_SettingsNode" 158 | }, 159 | "widgets_values": [ 160 | "https://www.runninghub.cn", 161 | "ed37fbdd79c34a7ca612aedbe5cea13e", 162 | "1912930457355517954" 163 | ] 164 | }, 165 | { 166 | "id": 15, 167 | "type": "RH_NodeInfoListNode", 168 | "pos": [ 169 | 1740.8934326171875, 170 | 624.1896362304688 171 | ], 172 | "size": [ 173 | 330, 174 | 106 175 | ], 176 | "flags": {}, 177 | "order": 3, 178 | "mode": 0, 179 | "inputs": [ 180 | { 181 | "label": "previousNodeInfoList", 182 | "name": "previousNodeInfoList", 183 | "shape": 7, 184 | "type": "ARRAY", 185 | "link": null 186 | }, 187 | { 188 | "label": 
"fieldValue", 189 | "name": "fieldValue", 190 | "type": "STRING", 191 | "widget": { 192 | "name": "fieldValue" 193 | }, 194 | "link": 18 195 | } 196 | ], 197 | "outputs": [ 198 | { 199 | "label": "ARRAY", 200 | "name": "ARRAY", 201 | "type": "ARRAY", 202 | "links": [ 203 | 15 204 | ] 205 | } 206 | ], 207 | "properties": { 208 | "cnr_id": "ComfyUI_RH_APICall", 209 | "ver": "6ee2bcd4d1f5ae8bc54178bc9f3f92304a8989c4", 210 | "Node name for S&R": "RH_NodeInfoListNode" 211 | }, 212 | "widgets_values": [ 213 | 2, 214 | "image", 215 | "" 216 | ] 217 | }, 218 | { 219 | "id": 13, 220 | "type": "VHS_VideoCombine", 221 | "pos": [ 222 | 2233.8193359375, 223 | 372.83282470703125 224 | ], 225 | "size": [ 226 | 214.7587890625, 227 | 601.1864013671875 228 | ], 229 | "flags": {}, 230 | "order": 5, 231 | "mode": 0, 232 | "inputs": [ 233 | { 234 | "label": "images", 235 | "name": "images", 236 | "type": "IMAGE", 237 | "link": 14 238 | }, 239 | { 240 | "label": "audio", 241 | "name": "audio", 242 | "shape": 7, 243 | "type": "AUDIO", 244 | "link": null 245 | }, 246 | { 247 | "label": "meta_batch", 248 | "name": "meta_batch", 249 | "shape": 7, 250 | "type": "VHS_BatchManager", 251 | "link": null 252 | }, 253 | { 254 | "label": "vae", 255 | "name": "vae", 256 | "shape": 7, 257 | "type": "VAE", 258 | "link": null 259 | } 260 | ], 261 | "outputs": [ 262 | { 263 | "label": "Filenames", 264 | "name": "Filenames", 265 | "type": "VHS_FILENAMES", 266 | "links": null 267 | } 268 | ], 269 | "properties": { 270 | "cnr_id": "comfyui-videohelpersuite", 271 | "ver": "df55f01d1df2f7bf5cc772294bc2e6d8bab22d66", 272 | "Node name for S&R": "VHS_VideoCombine" 273 | }, 274 | "widgets_values": { 275 | "frame_rate": 30, 276 | "loop_count": 0, 277 | "filename_prefix": "AnimateDiff", 278 | "format": "video/h264-mp4", 279 | "pix_fmt": "yuv420p", 280 | "crf": 19, 281 | "save_metadata": true, 282 | "trim_to_audio": false, 283 | "pingpong": false, 284 | "save_output": true, 285 | "videopreview": { 286 | "hidden": 
false, 287 | "paused": false, 288 | "params": { 289 | "filename": "AnimateDiff_00027.mp4", 290 | "subfolder": "", 291 | "type": "output", 292 | "format": "video/h264-mp4", 293 | "frame_rate": 30, 294 | "workflow": "AnimateDiff_00027.png", 295 | "fullpath": "D:\\ComfyUI_windows_portable\\ComfyUI\\output\\AnimateDiff_00027.mp4" 296 | } 297 | } 298 | } 299 | }, 300 | { 301 | "id": 14, 302 | "type": "LoadImage", 303 | "pos": [ 304 | 1310.210205078125, 305 | 609.7498779296875 306 | ], 307 | "size": [ 308 | 342.5999755859375, 309 | 314 310 | ], 311 | "flags": {}, 312 | "order": 1, 313 | "mode": 0, 314 | "inputs": [], 315 | "outputs": [ 316 | { 317 | "label": "IMAGE", 318 | "name": "IMAGE", 319 | "type": "IMAGE", 320 | "links": [ 321 | 16 322 | ] 323 | }, 324 | { 325 | "label": "MASK", 326 | "name": "MASK", 327 | "type": "MASK", 328 | "links": null 329 | } 330 | ], 331 | "properties": { 332 | "cnr_id": "comfy-core", 333 | "ver": "0.3.30", 334 | "Node name for S&R": "LoadImage" 335 | }, 336 | "widgets_values": [ 337 | "4k_00002_jcyxi_1739563519_fssjr_1739563522.png", 338 | "image" 339 | ] 340 | } 341 | ], 342 | "links": [ 343 | [ 344 | 8, 345 | 6, 346 | 0, 347 | 10, 348 | 0, 349 | "STRUCT" 350 | ], 351 | [ 352 | 14, 353 | 10, 354 | 1, 355 | 13, 356 | 0, 357 | "IMAGE" 358 | ], 359 | [ 360 | 15, 361 | 15, 362 | 0, 363 | 10, 364 | 1, 365 | "ARRAY" 366 | ], 367 | [ 368 | 16, 369 | 14, 370 | 0, 371 | 16, 372 | 1, 373 | "IMAGE" 374 | ], 375 | [ 376 | 17, 377 | 6, 378 | 0, 379 | 16, 380 | 0, 381 | "STRUCT" 382 | ], 383 | [ 384 | 18, 385 | 16, 386 | 0, 387 | 15, 388 | 1, 389 | "STRING" 390 | ] 391 | ], 392 | "groups": [], 393 | "config": {}, 394 | "extra": { 395 | "ds": { 396 | "scale": 0.995510824785223, 397 | "offset": [ 398 | -1239.511738864484, 399 | -274.4948176627424 400 | ] 401 | }, 402 | "ue_links": [], 403 | "VHS_latentpreview": false, 404 | "VHS_latentpreviewrate": 0, 405 | "VHS_MetadataImage": true, 406 | "VHS_KeepIntermediate": true, 407 | "frontendVersion": "1.17.11" 
408 | }, 409 | "version": 0.4 410 | } -------------------------------------------------------------------------------- /examples/rh_save_video.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 13, 3 | "last_link_id": 14, 4 | "nodes": [ 5 | { 6 | "id": 6, 7 | "type": "RH_SettingsNode", 8 | "pos": [ 9 | 1335.6798095703125, 10 | 401.46697998046875 11 | ], 12 | "size": [ 13 | 315, 14 | 106 15 | ], 16 | "flags": {}, 17 | "order": 0, 18 | "mode": 0, 19 | "inputs": [], 20 | "outputs": [ 21 | { 22 | "name": "STRUCT", 23 | "label": "STRUCT", 24 | "type": "STRUCT", 25 | "shape": 3, 26 | "links": [ 27 | 8 28 | ], 29 | "slot_index": 0 30 | } 31 | ], 32 | "properties": { 33 | "cnr_id": "ComfyUI_RH_APICall", 34 | "ver": "f0f16c02a2fd8ee80a84a4233713e1fb2f7c516c", 35 | "Node name for S&R": "RH_SettingsNode" 36 | }, 37 | "widgets_values": [ 38 | "https://www.runninghub.cn", 39 | "ed37fbdd79c34a7ca612aedbe5cea13e", 40 | "1915099205092970498" 41 | ] 42 | }, 43 | { 44 | "id": 10, 45 | "type": "RH_ExecuteNode", 46 | "pos": [ 47 | 1755.1820068359375, 48 | 391.4641418457031 49 | ], 50 | "size": [ 51 | 315, 52 | 162 53 | ], 54 | "flags": {}, 55 | "order": 1, 56 | "mode": 0, 57 | "inputs": [ 58 | { 59 | "name": "apiConfig", 60 | "label": "apiConfig", 61 | "type": "STRUCT", 62 | "link": 8 63 | }, 64 | { 65 | "name": "nodeInfoList", 66 | "label": "nodeInfoList", 67 | "type": "ARRAY", 68 | "shape": 7, 69 | "link": null, 70 | "slot_index": 1 71 | } 72 | ], 73 | "outputs": [ 74 | { 75 | "name": "images", 76 | "label": "images", 77 | "type": "IMAGE", 78 | "shape": 3, 79 | "links": [], 80 | "slot_index": 0 81 | }, 82 | { 83 | "name": "video_frames", 84 | "label": "video_frames", 85 | "type": "IMAGE", 86 | "links": [ 87 | 14 88 | ], 89 | "slot_index": 1 90 | }, 91 | { 92 | "name": "latent", 93 | "label": "latent", 94 | "type": "LATENT", 95 | "links": null 96 | }, 97 | { 98 | "name": "text", 99 | "label": "text", 100 | "type": 
"STRING", 101 | "links": null 102 | }, 103 | { 104 | "name": "audio", 105 | "label": "audio", 106 | "type": "AUDIO", 107 | "links": null 108 | } 109 | ], 110 | "properties": { 111 | "cnr_id": "ComfyUI_RH_APICall", 112 | "ver": "f0f16c02a2fd8ee80a84a4233713e1fb2f7c516c", 113 | "Node name for S&R": "RH_ExecuteNode" 114 | }, 115 | "widgets_values": [ 116 | 600, 117 | 3 118 | ] 119 | }, 120 | { 121 | "id": 13, 122 | "type": "VHS_VideoCombine", 123 | "pos": [ 124 | 2258.30419921875, 125 | 312.5622253417969 126 | ], 127 | "size": [ 128 | 214.7587890625, 129 | 542.7587890625 130 | ], 131 | "flags": {}, 132 | "order": 2, 133 | "mode": 0, 134 | "inputs": [ 135 | { 136 | "name": "images", 137 | "label": "images", 138 | "type": "IMAGE", 139 | "link": 14 140 | }, 141 | { 142 | "name": "audio", 143 | "label": "audio", 144 | "type": "AUDIO", 145 | "shape": 7, 146 | "link": null 147 | }, 148 | { 149 | "name": "meta_batch", 150 | "label": "meta_batch", 151 | "type": "VHS_BatchManager", 152 | "shape": 7, 153 | "link": null 154 | }, 155 | { 156 | "name": "vae", 157 | "label": "vae", 158 | "type": "VAE", 159 | "shape": 7, 160 | "link": null 161 | } 162 | ], 163 | "outputs": [ 164 | { 165 | "name": "Filenames", 166 | "label": "Filenames", 167 | "type": "VHS_FILENAMES", 168 | "links": null 169 | } 170 | ], 171 | "properties": { 172 | "cnr_id": "comfyui-videohelpersuite", 173 | "ver": "df55f01d1df2f7bf5cc772294bc2e6d8bab22d66", 174 | "Node name for S&R": "VHS_VideoCombine" 175 | }, 176 | "widgets_values": { 177 | "frame_rate": 30, 178 | "loop_count": 0, 179 | "filename_prefix": "AnimateDiff", 180 | "format": "video/h264-mp4", 181 | "pix_fmt": "yuv420p", 182 | "crf": 19, 183 | "save_metadata": true, 184 | "trim_to_audio": false, 185 | "pingpong": false, 186 | "save_output": true, 187 | "videopreview": { 188 | "hidden": false, 189 | "paused": false, 190 | "params": { 191 | "filename": "AnimateDiff_00018.mp4", 192 | "subfolder": "", 193 | "type": "output", 194 | "format": "video/h264-mp4", 
195 | "frame_rate": 30, 196 | "workflow": "AnimateDiff_00018.png", 197 | "fullpath": "D:\\ComfyUI_windows_portable\\ComfyUI\\output\\AnimateDiff_00018.mp4" 198 | } 199 | } 200 | } 201 | } 202 | ], 203 | "links": [ 204 | [ 205 | 8, 206 | 6, 207 | 0, 208 | 10, 209 | 0, 210 | "STRUCT" 211 | ], 212 | [ 213 | 14, 214 | 10, 215 | 1, 216 | 13, 217 | 0, 218 | "IMAGE" 219 | ] 220 | ], 221 | "groups": [], 222 | "config": {}, 223 | "extra": { 224 | "ds": { 225 | "scale": 1.3190278677309182, 226 | "offset": [ 227 | -1193.309900431111, 228 | -6.93960225853351 229 | ] 230 | }, 231 | "ue_links": [], 232 | "VHS_latentpreview": false, 233 | "VHS_latentpreviewrate": 0, 234 | "VHS_MetadataImage": true, 235 | "VHS_KeepIntermediate": true 236 | }, 237 | "version": 0.4 238 | } -------------------------------------------------------------------------------- /examples/rh_text_to_image.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 33, 3 | "last_link_id": 24, 4 | "nodes": [ 5 | { 6 | "id": 18, 7 | "type": "RH_NodeInfoListNode", 8 | "pos": [ 9 | 66, 10 | 1291 11 | ], 12 | "size": [ 13 | 330, 14 | 126 15 | ], 16 | "flags": {}, 17 | "order": 3, 18 | "mode": 0, 19 | "inputs": [ 20 | { 21 | "name": "previousNodeInfoList", 22 | "label": "previousNodeInfoList", 23 | "type": "ARRAY", 24 | "shape": 7, 25 | "link": null 26 | }, 27 | { 28 | "name": "fieldValue", 29 | "label": "fieldValue", 30 | "type": "STRING", 31 | "widget": { 32 | "name": "fieldValue" 33 | }, 34 | "link": 24, 35 | "slot_index": 1 36 | } 37 | ], 38 | "outputs": [ 39 | { 40 | "name": "ARRAY", 41 | "label": "ARRAY", 42 | "type": "ARRAY", 43 | "shape": 3, 44 | "links": [ 45 | 13 46 | ], 47 | "slot_index": 0 48 | } 49 | ], 50 | "properties": { 51 | "cnr_id": "ComfyUI_RH_APICall", 52 | "ver": "f0f16c02a2fd8ee80a84a4233713e1fb2f7c516c", 53 | "Node name for S&R": "RH_NodeInfoListNode" 54 | }, 55 | "widgets_values": [ 56 | 25, 57 | "noise_seed", 58 | "ice" 59 | ] 60 | }, 61 
| { 62 | "id": 17, 63 | "type": "RH_NodeInfoListNode", 64 | "pos": [ 65 | 68, 66 | 1440 67 | ], 68 | "size": [ 69 | 330, 70 | 106 71 | ], 72 | "flags": {}, 73 | "order": 4, 74 | "mode": 0, 75 | "inputs": [ 76 | { 77 | "name": "previousNodeInfoList", 78 | "label": "previousNodeInfoList", 79 | "type": "ARRAY", 80 | "shape": 7, 81 | "link": 13 82 | } 83 | ], 84 | "outputs": [ 85 | { 86 | "name": "ARRAY", 87 | "label": "ARRAY", 88 | "type": "ARRAY", 89 | "shape": 3, 90 | "links": [ 91 | 18 92 | ], 93 | "slot_index": 0 94 | } 95 | ], 96 | "properties": { 97 | "cnr_id": "ComfyUI_RH_APICall", 98 | "ver": "f0f16c02a2fd8ee80a84a4233713e1fb2f7c516c", 99 | "Node name for S&R": "RH_NodeInfoListNode" 100 | }, 101 | "widgets_values": [ 102 | 45, 103 | "prompt", 104 | "土星环" 105 | ] 106 | }, 107 | { 108 | "id": 28, 109 | "type": "RH_NodeInfoListNode", 110 | "pos": [ 111 | 67, 112 | 1591 113 | ], 114 | "size": [ 115 | 330, 116 | 106 117 | ], 118 | "flags": {}, 119 | "order": 5, 120 | "mode": 0, 121 | "inputs": [ 122 | { 123 | "name": "previousNodeInfoList", 124 | "label": "previousNodeInfoList", 125 | "type": "ARRAY", 126 | "shape": 7, 127 | "link": 18 128 | } 129 | ], 130 | "outputs": [ 131 | { 132 | "name": "ARRAY", 133 | "label": "ARRAY", 134 | "type": "ARRAY", 135 | "shape": 3, 136 | "links": [ 137 | 21 138 | ], 139 | "slot_index": 0 140 | } 141 | ], 142 | "properties": { 143 | "cnr_id": "ComfyUI_RH_APICall", 144 | "ver": "f0f16c02a2fd8ee80a84a4233713e1fb2f7c516c", 145 | "Node name for S&R": "RH_NodeInfoListNode" 146 | }, 147 | "widgets_values": [ 148 | 5, 149 | "batch_size", 150 | "2" 151 | ] 152 | }, 153 | { 154 | "id": 27, 155 | "type": "SaveImage", 156 | "pos": [ 157 | 957, 158 | 1049 159 | ], 160 | "size": [ 161 | 545.910400390625, 162 | 761.0469360351562 163 | ], 164 | "flags": {}, 165 | "order": 7, 166 | "mode": 0, 167 | "inputs": [ 168 | { 169 | "name": "images", 170 | "label": "images", 171 | "type": "IMAGE", 172 | "link": 22 173 | } 174 | ], 175 | "outputs": [], 176 | 
"properties": { 177 | "cnr_id": "comfy-core", 178 | "ver": "0.3.28", 179 | "Node name for S&R": "SaveImage" 180 | }, 181 | "widgets_values": [ 182 | "ComfyUI" 183 | ] 184 | }, 185 | { 186 | "id": 9, 187 | "type": "RH_SettingsNode", 188 | "pos": [ 189 | 72, 190 | 1132 191 | ], 192 | "size": [ 193 | 315, 194 | 106 195 | ], 196 | "flags": {}, 197 | "order": 0, 198 | "mode": 0, 199 | "inputs": [], 200 | "outputs": [ 201 | { 202 | "name": "STRUCT", 203 | "label": "STRUCT", 204 | "type": "STRUCT", 205 | "shape": 3, 206 | "links": [ 207 | 20 208 | ], 209 | "slot_index": 0 210 | } 211 | ], 212 | "properties": { 213 | "cnr_id": "ComfyUI_RH_APICall", 214 | "ver": "f0f16c02a2fd8ee80a84a4233713e1fb2f7c516c", 215 | "Node name for S&R": "RH_SettingsNode" 216 | }, 217 | "widgets_values": [ 218 | "https://www.runninghub.cn", 219 | "ed37fbdd79c34a7ca612aedbe5cea13e", 220 | "1866574464472215553" 221 | ] 222 | }, 223 | { 224 | "id": 19, 225 | "type": "easy seed", 226 | "pos": [ 227 | -325, 228 | 1298 229 | ], 230 | "size": [ 231 | 315, 232 | 106 233 | ], 234 | "flags": {}, 235 | "order": 1, 236 | "mode": 0, 237 | "inputs": [], 238 | "outputs": [ 239 | { 240 | "name": "seed", 241 | "label": "seed", 242 | "type": "INT", 243 | "shape": 3, 244 | "links": [ 245 | 23 246 | ], 247 | "slot_index": 0 248 | } 249 | ], 250 | "properties": { 251 | "cnr_id": "comfyui-easy-use", 252 | "ver": "98273b37f20dafb4b28316b8d6e45bf28e4130fe", 253 | "Node name for S&R": "easy seed" 254 | }, 255 | "widgets_values": [ 256 | 661888457586111, 257 | "randomize", 258 | null 259 | ] 260 | }, 261 | { 262 | "id": 33, 263 | "type": "RH_Utils", 264 | "pos": [ 265 | -201, 266 | 1209 267 | ], 268 | "size": [ 269 | 210, 270 | 26 271 | ], 272 | "flags": {}, 273 | "order": 2, 274 | "mode": 0, 275 | "inputs": [ 276 | { 277 | "name": "anything", 278 | "label": "anything", 279 | "type": "*", 280 | "link": 23 281 | } 282 | ], 283 | "outputs": [ 284 | { 285 | "name": "STRING", 286 | "label": "STRING", 287 | "type": "STRING", 
288 | "shape": 3, 289 | "links": [ 290 | 24 291 | ], 292 | "slot_index": 0 293 | } 294 | ], 295 | "properties": { 296 | "cnr_id": "ComfyUI_RH_APICall", 297 | "ver": "f0f16c02a2fd8ee80a84a4233713e1fb2f7c516c", 298 | "Node name for S&R": "RH_Utils" 299 | }, 300 | "widgets_values": [] 301 | }, 302 | { 303 | "id": 30, 304 | "type": "RH_ExecuteNode", 305 | "pos": [ 306 | 551, 307 | 1315 308 | ], 309 | "size": [ 310 | 315, 311 | 162 312 | ], 313 | "flags": {}, 314 | "order": 6, 315 | "mode": 0, 316 | "inputs": [ 317 | { 318 | "name": "apiConfig", 319 | "label": "apiConfig", 320 | "type": "STRUCT", 321 | "link": 20 322 | }, 323 | { 324 | "name": "nodeInfoList", 325 | "label": "nodeInfoList", 326 | "type": "ARRAY", 327 | "shape": 7, 328 | "link": 21 329 | } 330 | ], 331 | "outputs": [ 332 | { 333 | "name": "images", 334 | "label": "images", 335 | "type": "IMAGE", 336 | "shape": 3, 337 | "links": [ 338 | 22 339 | ], 340 | "slot_index": 0 341 | }, 342 | { 343 | "name": "video_frames", 344 | "label": "video_frames", 345 | "type": "IMAGE", 346 | "links": null 347 | }, 348 | { 349 | "name": "latent", 350 | "label": "latent", 351 | "type": "LATENT", 352 | "links": null 353 | }, 354 | { 355 | "name": "text", 356 | "label": "text", 357 | "type": "STRING", 358 | "links": null 359 | }, 360 | { 361 | "name": "audio", 362 | "label": "audio", 363 | "type": "AUDIO", 364 | "links": null 365 | } 366 | ], 367 | "properties": { 368 | "cnr_id": "ComfyUI_RH_APICall", 369 | "ver": "f0f16c02a2fd8ee80a84a4233713e1fb2f7c516c", 370 | "Node name for S&R": "RH_ExecuteNode" 371 | }, 372 | "widgets_values": [ 373 | 600, 374 | 3 375 | ] 376 | } 377 | ], 378 | "links": [ 379 | [ 380 | 13, 381 | 18, 382 | 0, 383 | 17, 384 | 0, 385 | "ARRAY" 386 | ], 387 | [ 388 | 18, 389 | 17, 390 | 0, 391 | 28, 392 | 0, 393 | "ARRAY" 394 | ], 395 | [ 396 | 20, 397 | 9, 398 | 0, 399 | 30, 400 | 0, 401 | "STRUCT" 402 | ], 403 | [ 404 | 21, 405 | 28, 406 | 0, 407 | 30, 408 | 1, 409 | "ARRAY" 410 | ], 411 | [ 412 | 22, 413 
| 30, 414 | 0, 415 | 27, 416 | 0, 417 | "IMAGE" 418 | ], 419 | [ 420 | 23, 421 | 19, 422 | 0, 423 | 33, 424 | 0, 425 | "*" 426 | ], 427 | [ 428 | 24, 429 | 33, 430 | 0, 431 | 18, 432 | 1, 433 | "STRING" 434 | ] 435 | ], 436 | "groups": [], 437 | "config": {}, 438 | "extra": { 439 | "ds": { 440 | "scale": 0.9481993680645889, 441 | "offset": [ 442 | 566.524353466392, 443 | -872.7070128111055 444 | ] 445 | }, 446 | "ue_links": [], 447 | "VHS_latentpreview": false, 448 | "VHS_latentpreviewrate": 0, 449 | "VHS_MetadataImage": true, 450 | "VHS_KeepIntermediate": true 451 | }, 452 | "version": 0.4 453 | } -------------------------------------------------------------------------------- /examples/rh_text_to_image_webapp.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 4, 3 | "last_link_id": 3, 4 | "nodes": [ 5 | { 6 | "id": 3, 7 | "type": "RH_NodeInfoListNode", 8 | "pos": [ 9 | 1314.8992919921875, 10 | 603.4961547851562 11 | ], 12 | "size": [ 13 | 502.9493103027344, 14 | 153.85723876953125 15 | ], 16 | "flags": {}, 17 | "order": 0, 18 | "mode": 0, 19 | "inputs": [ 20 | { 21 | "name": "previousNodeInfoList", 22 | "label": "previousNodeInfoList", 23 | "type": "ARRAY", 24 | "shape": 7, 25 | "link": null 26 | } 27 | ], 28 | "outputs": [ 29 | { 30 | "name": "ARRAY", 31 | "label": "ARRAY", 32 | "type": "ARRAY", 33 | "links": [ 34 | 2 35 | ] 36 | } 37 | ], 38 | "properties": { 39 | "cnr_id": "ComfyUI_RH_APICall", 40 | "ver": "d26b20496040d4b4dbea590ac3690d587ae103b3", 41 | "Node name for S&R": "RH_NodeInfoListNode" 42 | }, 43 | "widgets_values": [ 44 | 50, 45 | "text", 46 | "可爱的动漫女孩,有着巨大的蓬松耳廓狐耳朵和一条蓬松的大尾巴" 47 | ] 48 | }, 49 | { 50 | "id": 4, 51 | "type": "SaveImage", 52 | "pos": [ 53 | 2253.56494140625, 54 | 431.4947814941406 55 | ], 56 | "size": [ 57 | 315, 58 | 270 59 | ], 60 | "flags": {}, 61 | "order": 3, 62 | "mode": 0, 63 | "inputs": [ 64 | { 65 | "name": "images", 66 | "label": "images", 67 | "type": "IMAGE", 68 
| "link": 3 69 | } 70 | ], 71 | "outputs": [], 72 | "properties": { 73 | "cnr_id": "comfy-core", 74 | "ver": "0.3.28", 75 | "Node name for S&R": "SaveImage" 76 | }, 77 | "widgets_values": [ 78 | "ComfyUI" 79 | ] 80 | }, 81 | { 82 | "id": 2, 83 | "type": "RH_SettingsNode", 84 | "pos": [ 85 | 1389.765380859375, 86 | 393.1142578125 87 | ], 88 | "size": [ 89 | 315, 90 | 106 91 | ], 92 | "flags": {}, 93 | "order": 1, 94 | "mode": 0, 95 | "inputs": [], 96 | "outputs": [ 97 | { 98 | "name": "STRUCT", 99 | "label": "STRUCT", 100 | "type": "STRUCT", 101 | "shape": 3, 102 | "links": [ 103 | 1 104 | ], 105 | "slot_index": 0 106 | } 107 | ], 108 | "properties": { 109 | "cnr_id": "ComfyUI_RH_APICall", 110 | "ver": "f0f16c02a2fd8ee80a84a4233713e1fb2f7c516c", 111 | "Node name for S&R": "RH_SettingsNode" 112 | }, 113 | "widgets_values": [ 114 | "https://www.runninghub.cn", 115 | "ed37fbdd79c34a7ca612aedbe5cea13e", 116 | " 1872916082783936513" 117 | ] 118 | }, 119 | { 120 | "id": 1, 121 | "type": "RH_ExecuteNode", 122 | "pos": [ 123 | 1857.913818359375, 124 | 400.6954650878906 125 | ], 126 | "size": [ 127 | 317.4000244140625, 128 | 186 129 | ], 130 | "flags": {}, 131 | "order": 2, 132 | "mode": 0, 133 | "inputs": [ 134 | { 135 | "name": "apiConfig", 136 | "label": "apiConfig", 137 | "type": "STRUCT", 138 | "link": 1 139 | }, 140 | { 141 | "name": "nodeInfoList", 142 | "label": "nodeInfoList", 143 | "type": "ARRAY", 144 | "shape": 7, 145 | "link": 2 146 | } 147 | ], 148 | "outputs": [ 149 | { 150 | "name": "images", 151 | "label": "images", 152 | "type": "IMAGE", 153 | "links": [ 154 | 3 155 | ], 156 | "slot_index": 0 157 | }, 158 | { 159 | "name": "video_frames", 160 | "label": "video_frames", 161 | "type": "IMAGE", 162 | "links": null 163 | }, 164 | { 165 | "name": "latent", 166 | "label": "latent", 167 | "type": "LATENT", 168 | "links": null 169 | }, 170 | { 171 | "name": "text", 172 | "label": "text", 173 | "type": "STRING", 174 | "links": null 175 | }, 176 | { 177 | "name": 
"audio", 178 | "label": "audio", 179 | "type": "AUDIO", 180 | "links": null 181 | } 182 | ], 183 | "properties": { 184 | "cnr_id": "ComfyUI_RH_APICall", 185 | "ver": "d26b20496040d4b4dbea590ac3690d587ae103b3", 186 | "Node name for S&R": "RH_ExecuteNode" 187 | }, 188 | "widgets_values": [ 189 | 600, 190 | 1, 191 | true 192 | ] 193 | } 194 | ], 195 | "links": [ 196 | [ 197 | 1, 198 | 2, 199 | 0, 200 | 1, 201 | 0, 202 | "STRUCT" 203 | ], 204 | [ 205 | 2, 206 | 3, 207 | 0, 208 | 1, 209 | 1, 210 | "ARRAY" 211 | ], 212 | [ 213 | 3, 214 | 1, 215 | 0, 216 | 4, 217 | 0, 218 | "IMAGE" 219 | ] 220 | ], 221 | "groups": [], 222 | "config": {}, 223 | "extra": { 224 | "ds": { 225 | "scale": 1.3190278677309182, 226 | "offset": [ 227 | -1163.4583704041254, 228 | -50.5323127741312 229 | ] 230 | }, 231 | "VHS_latentpreview": false, 232 | "VHS_latentpreviewrate": 0, 233 | "VHS_MetadataImage": true, 234 | "VHS_KeepIntermediate": true 235 | }, 236 | "version": 0.4 237 | } -------------------------------------------------------------------------------- /examples/rh_video_uploader.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "31211066-e402-4e8a-8063-5004f7e91b5d", 3 | "revision": 0, 4 | "last_node_id": 18, 5 | "last_link_id": 16, 6 | "nodes": [ 7 | { 8 | "id": 13, 9 | "type": "RH_VideoUploader", 10 | "pos": [ 11 | 1306.440673828125, 12 | 610.4983520507812 13 | ], 14 | "size": [ 15 | 354.8528137207031, 16 | 371.41925048828125 17 | ], 18 | "flags": {}, 19 | "order": 1, 20 | "mode": 0, 21 | "inputs": [ 22 | { 23 | "label": "apiConfig", 24 | "name": "apiConfig", 25 | "type": "STRUCT", 26 | "link": 14 27 | } 28 | ], 29 | "outputs": [ 30 | { 31 | "label": "filename", 32 | "name": "filename", 33 | "type": "STRING", 34 | "links": [ 35 | 13 36 | ] 37 | } 38 | ], 39 | "properties": { 40 | "cnr_id": "ComfyUI_RH_APICall", 41 | "ver": "1c3397656e9caf9cf63f02132a5cbc24284b54c6", 42 | "Node name for S&R": "RH_VideoUploader" 43 | }, 44 | 
"widgets_values": [ 45 | "AnimateDiff_00076_bnuok_1745181223.mp4", 46 | "" 47 | ] 48 | }, 49 | { 50 | "id": 11, 51 | "type": "RH_NodeInfoListNode", 52 | "pos": [ 53 | 1678.42529296875, 54 | 519.7435913085938 55 | ], 56 | "size": [ 57 | 330, 58 | 126 59 | ], 60 | "flags": {}, 61 | "order": 2, 62 | "mode": 0, 63 | "inputs": [ 64 | { 65 | "label": "previousNodeInfoList", 66 | "name": "previousNodeInfoList", 67 | "shape": 7, 68 | "type": "ARRAY", 69 | "link": null 70 | }, 71 | { 72 | "label": "fieldValue", 73 | "name": "fieldValue", 74 | "type": "STRING", 75 | "widget": { 76 | "name": "fieldValue" 77 | }, 78 | "link": 13 79 | } 80 | ], 81 | "outputs": [ 82 | { 83 | "label": "ARRAY", 84 | "name": "ARRAY", 85 | "type": "ARRAY", 86 | "links": [ 87 | 9 88 | ] 89 | } 90 | ], 91 | "properties": { 92 | "cnr_id": "ComfyUI_RH_APICall", 93 | "ver": "f0f16c02a2fd8ee80a84a4233713e1fb2f7c516c", 94 | "Node name for S&R": "RH_NodeInfoListNode" 95 | }, 96 | "widgets_values": [ 97 | 1, 98 | "video", 99 | "18" 100 | ] 101 | }, 102 | { 103 | "id": 6, 104 | "type": "RH_SettingsNode", 105 | "pos": [ 106 | 1296.7294921875, 107 | 307.83251953125 108 | ], 109 | "size": [ 110 | 315, 111 | 106 112 | ], 113 | "flags": {}, 114 | "order": 0, 115 | "mode": 0, 116 | "inputs": [], 117 | "outputs": [ 118 | { 119 | "label": "STRUCT", 120 | "name": "STRUCT", 121 | "type": "STRUCT", 122 | "slot_index": 0, 123 | "links": [ 124 | 8, 125 | 14 126 | ] 127 | } 128 | ], 129 | "properties": { 130 | "cnr_id": "ComfyUI_RH_APICall", 131 | "ver": "f0f16c02a2fd8ee80a84a4233713e1fb2f7c516c", 132 | "Node name for S&R": "RH_SettingsNode" 133 | }, 134 | "widgets_values": [ 135 | "https://www.runninghub.cn", 136 | "ed37fbdd79c34a7ca612aedbe5cea13e", 137 | "1918751557083398146" 138 | ] 139 | }, 140 | { 141 | "id": 10, 142 | "type": "RH_ExecuteNode", 143 | "pos": [ 144 | 2035.218017578125, 145 | 315.1768798828125 146 | ], 147 | "size": [ 148 | 315, 149 | 186 150 | ], 151 | "flags": {}, 152 | "order": 3, 153 | "mode": 0, 
154 | "inputs": [ 155 | { 156 | "label": "apiConfig", 157 | "name": "apiConfig", 158 | "type": "STRUCT", 159 | "link": 8 160 | }, 161 | { 162 | "label": "nodeInfoList", 163 | "name": "nodeInfoList", 164 | "shape": 7, 165 | "type": "ARRAY", 166 | "link": 9 167 | } 168 | ], 169 | "outputs": [ 170 | { 171 | "label": "images", 172 | "name": "images", 173 | "type": "IMAGE", 174 | "slot_index": 0, 175 | "links": [] 176 | }, 177 | { 178 | "label": "video_frames", 179 | "name": "video_frames", 180 | "type": "IMAGE", 181 | "links": [ 182 | 16 183 | ] 184 | }, 185 | { 186 | "label": "latent", 187 | "name": "latent", 188 | "type": "LATENT", 189 | "links": null 190 | }, 191 | { 192 | "label": "text", 193 | "name": "text", 194 | "type": "STRING", 195 | "links": null 196 | }, 197 | { 198 | "label": "audio", 199 | "name": "audio", 200 | "type": "AUDIO", 201 | "links": null 202 | } 203 | ], 204 | "properties": { 205 | "cnr_id": "ComfyUI_RH_APICall", 206 | "ver": "f0f16c02a2fd8ee80a84a4233713e1fb2f7c516c", 207 | "Node name for S&R": "RH_ExecuteNode" 208 | }, 209 | "widgets_values": [ 210 | 600, 211 | 3, 212 | false 213 | ] 214 | }, 215 | { 216 | "id": 18, 217 | "type": "VHS_VideoCombine", 218 | "pos": [ 219 | 2393.745849609375, 220 | 334.5364074707031 221 | ], 222 | "size": [ 223 | 317.7745666503906, 224 | 735.10693359375 225 | ], 226 | "flags": {}, 227 | "order": 4, 228 | "mode": 0, 229 | "inputs": [ 230 | { 231 | "label": "images", 232 | "name": "images", 233 | "type": "IMAGE", 234 | "link": 16 235 | }, 236 | { 237 | "label": "audio", 238 | "name": "audio", 239 | "shape": 7, 240 | "type": "AUDIO", 241 | "link": null 242 | }, 243 | { 244 | "label": "meta_batch", 245 | "name": "meta_batch", 246 | "shape": 7, 247 | "type": "VHS_BatchManager", 248 | "link": null 249 | }, 250 | { 251 | "label": "vae", 252 | "name": "vae", 253 | "shape": 7, 254 | "type": "VAE", 255 | "link": null 256 | } 257 | ], 258 | "outputs": [ 259 | { 260 | "label": "Filenames", 261 | "name": "Filenames", 262 | 
"type": "VHS_FILENAMES", 263 | "links": null 264 | } 265 | ], 266 | "properties": { 267 | "cnr_id": "comfyui-videohelpersuite", 268 | "ver": "598e181e97d50174cc18677dfcdaa7b9b6ec0cb2", 269 | "Node name for S&R": "VHS_VideoCombine" 270 | }, 271 | "widgets_values": { 272 | "frame_rate": 24, 273 | "loop_count": 0, 274 | "filename_prefix": "AnimateDiff", 275 | "format": "video/h264-mp4", 276 | "pix_fmt": "yuv420p", 277 | "crf": 19, 278 | "save_metadata": true, 279 | "trim_to_audio": false, 280 | "pingpong": false, 281 | "save_output": true, 282 | "videopreview": { 283 | "hidden": false, 284 | "paused": false, 285 | "params": { 286 | "filename": "AnimateDiff_00029.mp4", 287 | "subfolder": "", 288 | "type": "output", 289 | "format": "video/h264-mp4", 290 | "frame_rate": 24, 291 | "workflow": "AnimateDiff_00029.png", 292 | "fullpath": "D:\\ComfyUI_windows_portable\\ComfyUI\\output\\AnimateDiff_00029.mp4" 293 | } 294 | } 295 | } 296 | } 297 | ], 298 | "links": [ 299 | [ 300 | 8, 301 | 6, 302 | 0, 303 | 10, 304 | 0, 305 | "STRUCT" 306 | ], 307 | [ 308 | 9, 309 | 11, 310 | 0, 311 | 10, 312 | 1, 313 | "ARRAY" 314 | ], 315 | [ 316 | 13, 317 | 13, 318 | 0, 319 | 11, 320 | 1, 321 | "STRING" 322 | ], 323 | [ 324 | 14, 325 | 6, 326 | 0, 327 | 13, 328 | 0, 329 | "STRUCT" 330 | ], 331 | [ 332 | 16, 333 | 10, 334 | 1, 335 | 18, 336 | 0, 337 | "IMAGE" 338 | ] 339 | ], 340 | "groups": [], 341 | "config": {}, 342 | "extra": { 343 | "ds": { 344 | "scale": 0.8311834575010493, 345 | "offset": [ 346 | -1205.2225372148093, 347 | -169.1097881796019 348 | ] 349 | }, 350 | "frontendVersion": "1.17.11", 351 | "ue_links": [], 352 | "VHS_latentpreview": false, 353 | "VHS_latentpreviewrate": 0, 354 | "VHS_MetadataImage": true, 355 | "VHS_KeepIntermediate": true 356 | }, 357 | "version": 0.4 358 | } -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | requests 2 | 
// Build a DOM element of the given tag, copy the supplied properties onto it,
// and append each child in order. Children may be plain strings (wrapped in
// text nodes) or already-constructed DOM nodes.
function createElement(tag, attrs = {}, children = []) {
    const node = document.createElement(tag);
    Object.assign(node, attrs);
    for (const child of children) {
        if (typeof child === "string") {
            node.appendChild(document.createTextNode(child));
        } else {
            node.appendChild(child);
        }
    }
    return node;
}