├── core ├── __init__.py ├── scanner.py ├── linker.py ├── matcher.py ├── workflow_analyzer.py └── workflow_updater.py ├── demo.mp4 ├── model-linker.png ├── pyproject.toml ├── .gitignore ├── README.md ├── __init__.py └── web └── linker.js /core/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Core modules for Model Linker extension. 3 | """ 4 | 5 | -------------------------------------------------------------------------------- /demo.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kianxyzw/comfyui-model-linker/HEAD/demo.mp4 -------------------------------------------------------------------------------- /model-linker.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kianxyzw/comfyui-model-linker/HEAD/model-linker.png -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "comfyui-model-linker" 3 | version = "0.1.0" 4 | description = "A ComfyUI extension that helps users relink missing models in workflows" 5 | readme = "README.md" 6 | requires-python = ">=3.8" 7 | 8 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Python 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | *.so 6 | .Python 7 | 8 | # Virtual environments 9 | venv/ 10 | env/ 11 | ENV/ 12 | 13 | # IDE 14 | .vscode/ 15 | .idea/ 16 | *.swp 17 | *.swo 18 | *~ 19 | 20 | # OS 21 | .DS_Store 22 | Thumbs.db 23 | 24 | # Logs 25 | *.log 26 | 27 | # Temporary files 28 | *.tmp 29 | *.bak 30 | *.cache 31 | 32 | -------------------------------------------------------------------------------- /README.md: 
-------------------------------------------------------------------------------- 1 | # ComfyUI Model Linker Extension 2 | 3 | A ComfyUI extension that helps users relink missing models in workflows using fuzzy matching. 4 | 5 | https://github.com/user-attachments/assets/fedf3645-aa66-49f7-b01d-8c3b5127faf4 6 | 7 |  8 | 9 | 10 | ## Features 11 | 12 | - Scans all nodes in workflows to find missing models 13 | - Uses fuzzy matching to suggest similar model files 14 | - Updates workflow JSON in UI/memory (user saves themselves) 15 | - Supports all node types 16 | - Optional auto-resolve for 100% confidence matches 17 | 18 | ## Installation 19 | 20 | 1. Clone or download this repository 21 | 2. Place it in your ComfyUI custom_nodes/ directory 22 | 3. Restart ComfyUI 23 | 24 | ## Usage 25 | 26 | 1. Open a workflow with missing models 27 | 2. Click the "🔗 — Model Linker" button in ComfyUI's top menu bar 28 | 3. Review missing models and their suggested matches 29 | 4. Click "Resolve" for individual models or "Auto-Resolve 100% Matches" for perfect matches 30 | 5. 
Save your workflow when ready 31 | 32 | ## Features 33 | 34 | - **Subgraph Support**: Automatically detects and handles missing models inside subgraphs 35 | - **Smart Matching**: Shows 100% confidence matches when available, otherwise shows best matches (≥70% confidence) 36 | - **Fuzzy Matching**: Uses intelligent similarity scoring to find model files even with different naming 37 | - **Auto-Resolve**: One-click resolution for all perfect matches 38 | 39 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | @author: Model Linker Team 3 | @title: ComfyUI Model Linker 4 | @nickname: Model Linker 5 | @version: 1.0.0 6 | @description: Extension for relinking missing models in ComfyUI workflows using fuzzy matching 7 | """ 8 | 9 | import logging 10 | 11 | # Web directory for JavaScript interface 12 | WEB_DIRECTORY = "./web" 13 | 14 | # Empty NODE_CLASS_MAPPINGS - we don't provide custom nodes, only web extension 15 | # This prevents ComfyUI from showing "IMPORT FAILED" message 16 | NODE_CLASS_MAPPINGS = {} 17 | 18 | __all__ = ["WEB_DIRECTORY"] 19 | 20 | 21 | class ModelLinkerExtension: 22 | """Main extension class for Model Linker.""" 23 | 24 | def __init__(self): 25 | self.routes_setup = False 26 | self.logger = logging.getLogger(__name__) 27 | 28 | def initialize(self): 29 | """Initialize the extension and set up API routes.""" 30 | try: 31 | self.setup_routes() 32 | self.logger.info("Model Linker: Extension initialized successfully") 33 | except Exception as e: 34 | self.logger.error(f"Model Linker: Extension initialization failed: {e}", exc_info=True) 35 | 36 | def setup_routes(self): 37 | """Register API routes for the Model Linker extension.""" 38 | if self.routes_setup: 39 | return # Already set up 40 | 41 | try: 42 | from aiohttp import web 43 | 44 | # Try to get routes from PromptServer 45 | try: 46 | from server import 
PromptServer 47 | if not hasattr(PromptServer, 'instance') or PromptServer.instance is None: 48 | self.logger.debug("Model Linker: PromptServer not available yet") 49 | return False 50 | 51 | routes = PromptServer.instance.routes 52 | except (ImportError, AttributeError) as e: 53 | self.logger.debug(f"Model Linker: Could not access PromptServer: {e}") 54 | return False 55 | 56 | # Import linker modules - use relative imports which should work for packages 57 | try: 58 | from .core.linker import analyze_and_find_matches, apply_resolution 59 | from .core.scanner import get_model_files 60 | except ImportError as e: 61 | self.logger.error(f"Model Linker: Could not import core modules: {e}") 62 | return False 63 | 64 | @routes.post("/model_linker/analyze") 65 | async def analyze_workflow(request): 66 | """Analyze workflow and return missing models with matches.""" 67 | try: 68 | data = await request.json() 69 | workflow_json = data.get('workflow') 70 | 71 | if not workflow_json: 72 | return web.json_response( 73 | {'error': 'Workflow JSON is required'}, 74 | status=400 75 | ) 76 | 77 | # Analyze and find matches 78 | result = analyze_and_find_matches(workflow_json) 79 | 80 | return web.json_response(result) 81 | except Exception as e: 82 | self.logger.error(f"Model Linker analyze error: {e}", exc_info=True) 83 | return web.json_response( 84 | {'error': str(e)}, 85 | status=500 86 | ) 87 | 88 | @routes.post("/model_linker/resolve") 89 | async def resolve_models(request): 90 | """Apply model resolution and return updated workflow.""" 91 | try: 92 | data = await request.json() 93 | workflow_json = data.get('workflow') 94 | resolutions = data.get('resolutions', []) 95 | 96 | if not workflow_json: 97 | return web.json_response( 98 | {'error': 'Workflow JSON is required'}, 99 | status=400 100 | ) 101 | 102 | if not resolutions: 103 | return web.json_response( 104 | {'error': 'Resolutions array is required'}, 105 | status=400 106 | ) 107 | 108 | # Apply resolutions 109 | 
updated_workflow = apply_resolution(workflow_json, resolutions) 110 | 111 | return web.json_response({ 112 | 'workflow': updated_workflow, 113 | 'success': True 114 | }) 115 | except Exception as e: 116 | self.logger.error(f"Model Linker resolve error: {e}", exc_info=True) 117 | return web.json_response( 118 | {'error': str(e), 'success': False}, 119 | status=500 120 | ) 121 | 122 | @routes.get("/model_linker/models") 123 | async def get_models(request): 124 | """Get list of all available models (for debugging/UI display).""" 125 | try: 126 | models = get_model_files() 127 | return web.json_response(models) 128 | except Exception as e: 129 | self.logger.error(f"Model Linker get_models error: {e}", exc_info=True) 130 | return web.json_response( 131 | {'error': str(e)}, 132 | status=500 133 | ) 134 | 135 | self.routes_setup = True 136 | self.logger.info("Model Linker: API routes registered successfully") 137 | return True 138 | 139 | except ImportError as e: 140 | self.logger.warning(f"Model Linker: Could not register routes (missing dependency): {e}") 141 | return False 142 | except Exception as e: 143 | self.logger.error(f"Model Linker: Error setting up routes: {e}", exc_info=True) 144 | return False 145 | 146 | 147 | # Initialize the extension 148 | try: 149 | extension = ModelLinkerExtension() 150 | extension.initialize() 151 | except Exception as e: 152 | logging.error(f"ComfyUI Model Linker extension initialization failed: {e}", exc_info=True) 153 | -------------------------------------------------------------------------------- /core/scanner.py: -------------------------------------------------------------------------------- 1 | """ 2 | Directory Scanner Module 3 | 4 | Scans configured model directories and finds available model files. 
5 | """ 6 | 7 | import os 8 | import logging 9 | from typing import List, Dict, Tuple 10 | 11 | # Import folder_paths lazily - it may not be available until ComfyUI is initialized 12 | try: 13 | import folder_paths 14 | except ImportError: 15 | folder_paths = None 16 | logging.warning("Model Linker: folder_paths not available yet - will retry later") 17 | 18 | # Model file extensions to look for 19 | # This matches folder_paths.supported_pt_extensions 20 | MODEL_EXTENSIONS = {'.ckpt', '.pt', '.pt2', '.bin', '.pth', '.safetensors', '.pkl', '.sft', '.onnx'} 21 | 22 | 23 | def get_model_directories() -> Dict[str, Tuple[List[str], set]]: 24 | """ 25 | Get all configured model directories from folder_paths. 26 | 27 | Returns: 28 | Dictionary mapping category name to a tuple. ComfyUI may provide either: 29 | - (paths, extensions), or 30 | - (paths, extensions, recursive_flag) 31 | """ 32 | global folder_paths 33 | 34 | if folder_paths is None: 35 | # Try to import again 36 | try: 37 | import folder_paths as fp 38 | folder_paths = fp 39 | except ImportError: 40 | logging.error("Model Linker: folder_paths still not available") 41 | return {} 42 | 43 | return folder_paths.folder_names_and_paths.copy() 44 | 45 | 46 | def scan_directory(directory: str, extensions: set, category: str) -> List[Dict[str, str]]: 47 | """ 48 | Recursively scan a single directory for model files. 
49 | 50 | Args: 51 | directory: Absolute path to directory to scan 52 | extensions: Set of file extensions to look for 53 | category: Model category name (e.g., 'checkpoints', 'loras') 54 | 55 | Returns: 56 | List of dictionaries with model information: 57 | { 58 | 'filename': 'model.safetensors', 59 | 'path': 'absolute/path/to/model.safetensors', 60 | 'relative_path': 'subfolder/model.safetensors' or 'model.safetensors', 61 | 'category': 'checkpoints', 62 | 'base_directory': 'absolute/path/to/base' 63 | } 64 | """ 65 | models = [] 66 | 67 | if not os.path.exists(directory) or not os.path.isdir(directory): 68 | logging.debug(f"Directory does not exist or is not accessible: {directory}") 69 | return models 70 | 71 | try: 72 | # Get absolute path and normalize 73 | base_directory = os.path.abspath(directory) 74 | 75 | # Walk through directory recursively 76 | for root, dirs, files in os.walk(base_directory, followlinks=True): 77 | # Skip hidden directories 78 | dirs[:] = [d for d in dirs if not d.startswith('.')] 79 | 80 | for filename in files: 81 | # Check if file has a model extension 82 | file_ext = os.path.splitext(filename)[1].lower() 83 | 84 | # For categories with empty extension set, accept all files 85 | # Otherwise, check if extension matches 86 | # Accept if: 87 | # - no explicit extensions configured, or 88 | # - matches configured extensions, or 89 | # - matches our known model extensions 90 | if len(extensions or set()) == 0 or file_ext in extensions or file_ext in MODEL_EXTENSIONS: 91 | full_path = os.path.join(root, filename) 92 | 93 | # Calculate relative path from base directory 94 | # IMPORTANT: Use OS-native path separators (backslashes on Windows) 95 | # This matches ComfyUI's recursive_search format for get_filename_list 96 | try: 97 | relative_path = os.path.relpath(full_path, base_directory) 98 | # DO NOT normalize - keep OS-native separators to match ComfyUI 99 | # ComfyUI's get_filename_list uses os.path.relpath which returns 100 | # 
backslashes on Windows, forward slashes on Unix 101 | except ValueError: 102 | # If paths are on different drives (Windows), use filename only 103 | relative_path = filename 104 | 105 | models.append({ 106 | 'filename': filename, 107 | 'path': full_path, 108 | 'relative_path': relative_path, 109 | 'category': category, 110 | 'base_directory': base_directory 111 | }) 112 | except (OSError, PermissionError) as e: 113 | logging.warning(f"Error scanning directory {directory}: {e}") 114 | 115 | return models 116 | 117 | 118 | def scan_all_directories() -> List[Dict[str, str]]: 119 | """ 120 | Scan all configured model directories and return list of available models. 121 | 122 | Returns: 123 | List of dictionaries with model information (same format as scan_directory) 124 | """ 125 | all_models = [] 126 | directories = get_model_directories() 127 | 128 | for category, value in directories.items(): 129 | # Skip categories that aren't typically model directories 130 | if category in ['custom_nodes', 'configs']: 131 | continue 132 | 133 | # Unpack folder_paths value flexibly: (paths, extensions) or (paths, extensions, recursive) 134 | paths = [] 135 | extensions = set() 136 | try: 137 | if isinstance(value, (list, tuple)): 138 | if len(value) >= 2: 139 | paths = value[0] or [] 140 | raw_exts = value[1] 141 | else: 142 | # Unexpected format: treat value as paths 143 | paths = list(value) 144 | raw_exts = [] 145 | elif isinstance(value, dict): 146 | paths = value.get('paths') or value.get('path') or [] 147 | raw_exts = value.get('extensions') or [] 148 | else: 149 | # Unknown format; skip category 150 | logging.debug(f"Unexpected folder_paths format for category {category}: {type(value)}") 151 | continue 152 | 153 | # Normalize extensions to a set[str] 154 | if isinstance(raw_exts, (list, tuple, set)): 155 | extensions = {str(e).lower() for e in raw_exts} 156 | elif raw_exts: 157 | extensions = {str(raw_exts).lower()} 158 | except Exception as e: 159 | logging.warning(f"Error 
interpreting folder_paths entry for {category}: {e}") 160 | continue 161 | 162 | for directory_path in paths: 163 | try: 164 | models = scan_directory(directory_path, extensions, category) 165 | all_models.extend(models) 166 | logging.debug(f"Found {len(models)} models in {category}/{directory_path}") 167 | except Exception as e: 168 | logging.warning(f"Error scanning {category} directory {directory_path}: {e}") 169 | 170 | return all_models 171 | 172 | 173 | def get_model_files() -> List[Dict[str, str]]: 174 | """ 175 | Get list of all available model files with metadata. 176 | 177 | This is the main entry point for getting model files. 178 | 179 | Returns: 180 | List of model dictionaries (same format as scan_directory) 181 | """ 182 | return scan_all_directories() 183 | -------------------------------------------------------------------------------- /core/linker.py: -------------------------------------------------------------------------------- 1 | """ 2 | Core Linker Module 3 | 4 | Integrates all components to provide high-level API for model linking. 5 | """ 6 | 7 | import os 8 | import logging 9 | from typing import Dict, Any, List, Optional 10 | 11 | from .scanner import get_model_files 12 | from .workflow_analyzer import analyze_workflow_models, identify_missing_models 13 | from .matcher import find_matches 14 | from .workflow_updater import update_workflow_nodes 15 | 16 | 17 | def analyze_and_find_matches( 18 | workflow_json: Dict[str, Any], 19 | similarity_threshold: float = 0.0, 20 | max_matches_per_model: int = 10 21 | ) -> Dict[str, Any]: 22 | """ 23 | Main entry point: analyze workflow and find matches for missing models. 
"""
Core Linker Module

Integrates all components to provide high-level API for model linking.
"""

import os
import logging
from typing import Dict, Any, List, Optional

from .scanner import get_model_files
from .workflow_analyzer import (
    analyze_workflow_models,
    identify_missing_models,
    NODE_TYPE_TO_CATEGORY_HINTS,
)
from .matcher import find_matches
from .workflow_updater import update_workflow_nodes


def analyze_and_find_matches(
    workflow_json: Dict[str, Any],
    similarity_threshold: float = 0.0,
    max_matches_per_model: int = 10
) -> Dict[str, Any]:
    """
    Main entry point: analyze workflow and find matches for missing models.

    Args:
        workflow_json: Complete workflow JSON dictionary
        similarity_threshold: Minimum similarity score (0.0 to 1.0) for matches
        max_matches_per_model: Maximum number of matches to return per missing model

    Returns:
        Dictionary with analysis results:
        {
            'missing_models': [
                {
                    'node_id': ..., 'node_type': ..., 'widget_index': ...,
                    'original_path': ..., 'category': ...,
                    'matches': [{'model': ..., 'filename': ...,
                                 'similarity': 0.0-1.0, 'confidence': 0-100}, ...]
                }, ...
            ],
            'total_missing': count of missing models,
            'total_models_analyzed': count of all models in workflow
        }
    """
    # Analyze workflow to find all model references
    all_model_refs = analyze_workflow_models(workflow_json)

    # Get available models
    available_models = get_model_files()

    # Identify missing models
    missing_models = identify_missing_models(all_model_refs, available_models)

    # Find matches for each missing model
    missing_with_matches = []
    for missing in missing_models:
        original_path = missing.get('original_path', '')

        # If category is 'unknown', try to infer it from the node type.
        # Fix: NODE_TYPE_TO_CATEGORY_HINTS was imported inside this loop on
        # every iteration; it is now a module-level import.
        category = missing.get('category')
        if not category or category == 'unknown':
            node_type = missing.get('node_type', '')
            category = NODE_TYPE_TO_CATEGORY_HINTS.get(node_type, 'unknown')

        candidates = available_models
        if category and category != 'unknown':
            # Prioritize models from the same category, keep the rest as fallback
            candidates = [m for m in available_models if m.get('category') == category]
            candidates.extend(m for m in available_models if m.get('category') != category)

        # Find matches
        matches = find_matches(
            original_path,
            candidates,
            threshold=similarity_threshold,
            max_results=max_matches_per_model
        )

        # Deduplicate matches by normalized absolute path - the same physical
        # file (seen via several base directories / relative paths) should only
        # appear once, keeping the highest-confidence entry. Insertion order of
        # the dict preserves the original match ordering.
        best_by_path = {}
        for match in matches:
            absolute_path = match['model'].get('path', '')
            if absolute_path:
                absolute_path = os.path.normpath(absolute_path)
            current = best_by_path.get(absolute_path)
            if current is None or match['confidence'] > current['confidence']:
                best_by_path[absolute_path] = match

        missing_with_matches.append({
            **missing,
            'matches': list(best_by_path.values())
        })

    return {
        'missing_models': missing_with_matches,
        'total_missing': len(missing_with_matches),
        'total_models_analyzed': len(all_model_refs)
    }


def apply_resolution(
    workflow_json: Dict[str, Any],
    resolutions: List[Dict[str, Any]]
) -> Dict[str, Any]:
    """
    Apply model resolutions to workflow.

    Args:
        workflow_json: Workflow JSON dictionary (will be modified)
        resolutions: List of resolution dictionaries:
            {
                'node_id': node ID,
                'widget_index': widget index,
                'resolved_path': absolute path to resolved model,
                'category': model category (optional),
                'resolved_model': model dict from scanner (optional)
            }

    Returns:
        Updated workflow JSON dictionary
    """
    # Prepare mappings for workflow_updater
    mappings = []
    for resolution in resolutions:
        mapping = {
            'node_id': resolution.get('node_id'),
            'widget_index': resolution.get('widget_index'),
            'resolved_path': resolution.get('resolved_path'),
            'category': resolution.get('category'),
            'resolved_model': resolution.get('resolved_model'),
            'subgraph_id': resolution.get('subgraph_id'),  # Include subgraph_id for subgraph nodes
            'is_top_level': resolution.get('is_top_level')  # True for top-level nodes, False for nodes in subgraph definitions
        }

        # If resolved_model provided, extract path/base_directory if needed
        resolved_model = resolution.get('resolved_model')
        if resolved_model:
            if 'path' in resolved_model and not mapping.get('resolved_path'):
                mapping['resolved_path'] = resolved_model['path']
            if 'base_directory' in resolved_model:
                mapping['base_directory'] = resolved_model['base_directory']

        mappings.append(mapping)

    # Update workflow
    return update_workflow_nodes(workflow_json, mappings)


def get_resolution_summary(workflow_json: Dict[str, Any]) -> Dict[str, Any]:
    """
    Get summary of missing models and matches without applying resolutions.

    This is a convenience method that calls analyze_and_find_matches with defaults.

    Args:
        workflow_json: Complete workflow JSON dictionary

    Returns:
        Same format as analyze_and_find_matches
    """
    return analyze_and_find_matches(workflow_json)
"""
Fuzzy Matcher Module

Implements fuzzy string matching to find similar model names.
"""

import os
import re
from typing import Any, List, Dict, Tuple
from difflib import SequenceMatcher


def normalize_filename(filename: str) -> str:
    """
    Normalize a filename for comparison.

    Removes file extension, converts to lowercase, and normalizes
    separators (underscores, hyphens, spaces).

    Args:
        filename: Filename to normalize

    Returns:
        Normalized string for comparison
    """
    # Remove file extension, lowercase, collapse separator runs to one space
    base = os.path.splitext(filename)[0].lower()
    base = re.sub(r'[_\-\s]+', ' ', base)
    return base.strip()


def calculate_similarity(str1: str, str2: str) -> float:
    """
    Calculate similarity score between two strings (0.0 to 1.0).

    Uses SequenceMatcher to compute a ratio.

    Args:
        str1: First string
        str2: Second string

    Returns:
        Similarity score from 0.0 (completely different) to 1.0 (identical)
    """
    return SequenceMatcher(None, str1, str2).ratio()


def calculate_similarity_with_normalization(str1: str, str2: str) -> float:
    """
    Calculate similarity score with filename normalization.

    Normalizes both strings before comparing.

    Args:
        str1: First string (typically model filename)
        str2: Second string (typically candidate model filename)

    Returns:
        Similarity score from 0.0 to 1.0
    """
    return calculate_similarity(normalize_filename(str1), normalize_filename(str2))


def find_matches(
    target_model: str,
    candidate_models: List[Dict[str, str]],
    threshold: float = 0.0,
    max_results: int = 10
) -> List[Dict[str, Any]]:
    """
    Find similar models using fuzzy matching.

    Args:
        target_model: The target model filename/path to match
        candidate_models: List of candidate model dictionaries with 'filename' or 'path' key
        threshold: Minimum similarity score (0.0 to 1.0) to include in results
        max_results: Maximum number of results to return

    Returns:
        List of match dictionaries sorted by similarity (highest first):
        {
            'model': original model dict from candidates,
            'filename': model filename,
            'similarity': similarity score (0.0 to 1.0),
            'confidence': confidence percentage (0 to 100)
        }
    """
    matches = []

    # Normalize path separators in target_model based on current OS so paths
    # with \ vs / separators are treated as identical
    target_model_normalized = os.path.normpath(target_model) if target_model else ''

    # Extract just the filename from target_model (remove any subfolder paths)
    target_filename = os.path.basename(target_model_normalized)

    # Normalize target filename once for exact match comparisons
    target_norm = normalize_filename(target_filename)

    for candidate in candidate_models:
        # Get filename from candidate (prefer 'filename' key, fallback to path keys)
        candidate_filename = candidate.get('filename')
        candidate_path = candidate.get('path', '') or candidate.get('relative_path', '')

        if not candidate_filename and candidate_path:
            candidate_filename = os.path.basename(candidate_path)

        if not candidate_filename:
            continue

        # Normalize candidate path separators based on current OS
        candidate_path_normalized = os.path.normpath(candidate_path) if candidate_path else ''
        candidate_relative_path = candidate.get('relative_path', '')
        candidate_relative_path_normalized = os.path.normpath(candidate_relative_path) if candidate_relative_path else ''

        # Check if normalized paths are identical (100% match). Compare both
        # the absolute path and the relative path against the target.
        # Fix: the relative-path comparison previously sat on an elif and was
        # unreachable whenever the candidate had an absolute 'path'.
        path_match = False
        if target_model_normalized:
            if candidate_path_normalized and candidate_path_normalized == target_model_normalized:
                path_match = True
            elif candidate_relative_path_normalized and candidate_relative_path_normalized == target_model_normalized:
                path_match = True

        if path_match:
            # Exact path match after normalization = 100% confidence
            matches.append({
                'model': candidate,
                'filename': candidate_filename,
                'similarity': 1.0,
                'confidence': 100.0
            })
            continue

        # Compare just the filenames (not paths). Only an exact normalized
        # match may yield 100% confidence.
        candidate_norm = normalize_filename(candidate_filename)

        if target_norm == candidate_norm:
            similarity = 1.0
        else:
            # SequenceMatcher ratio on normalized full filenames
            similarity = calculate_similarity_with_normalization(target_filename, candidate_filename)

            # Also compare without extensions and take the better score
            target_base = os.path.splitext(target_filename)[0]
            candidate_base = os.path.splitext(candidate_filename)[0]
            similarity = max(
                similarity,
                calculate_similarity_with_normalization(target_base, candidate_base)
            )

            # Cap at 0.999 for non-exact matches: normalization artifacts can
            # make SequenceMatcher report 1.0 for strings that are not identical
            if similarity >= 0.999:
                similarity = 0.999

        # Only include if above threshold
        if similarity >= threshold:
            matches.append({
                'model': candidate,
                'filename': candidate_filename,
                'similarity': similarity,
                'confidence': round(similarity * 100, 1)  # Convert to percentage
            })

    # Sort by similarity (highest first) and limit to max_results
    matches.sort(key=lambda x: x['similarity'], reverse=True)
    return matches[:max_results]
"""
Workflow Analyzer Module

Extracts model references from workflow JSON and identifies missing models.
"""

import os
import logging
from typing import List, Dict, Any, Optional, Tuple

# Import folder_paths lazily - it may not be available until ComfyUI is initialized
try:
    import folder_paths
except ImportError:
    folder_paths = None
    logging.warning("Model Linker: folder_paths not available yet - will retry later")


# Common model file extensions
MODEL_EXTENSIONS = {'.ckpt', '.pt', '.pt2', '.bin', '.pth', '.safetensors', '.pkl', '.sft', '.onnx'}

# Mapping of common node types to their expected model category
# This is used as hints but we don't rely solely on this
# UNETLoader uses 'diffusion_models' category (folder_paths maps 'unet' to 'diffusion_models')
NODE_TYPE_TO_CATEGORY_HINTS = {
    'CheckpointLoaderSimple': 'checkpoints',
    'CheckpointLoader': 'checkpoints',
    'unCLIPCheckpointLoader': 'checkpoints',
    'VAELoader': 'vae',
    'LoraLoader': 'loras',
    'LoraLoaderModelOnly': 'loras',
    'UNETLoader': 'diffusion_models',  # UNETLoader uses diffusion_models category
    'ControlNetLoader': 'controlnet',
    'ControlNetLoaderAdvanced': 'controlnet',
    'CLIPVisionLoader': 'clip_vision',
    'UpscaleModelLoader': 'upscale_models',
    'HypernetworkLoader': 'hypernetworks',
    'EmbeddingLoader': 'embeddings',
}


def is_model_filename(value: Any) -> bool:
    """
    Check if a value looks like a model filename.

    Args:
        value: The value to check

    Returns:
        True if it looks like a model filename
    """
    if not isinstance(value, str):
        return False

    # Check if it ends with a model extension
    _, ext = os.path.splitext(value.lower())
    return ext in MODEL_EXTENSIONS


def try_resolve_model_path(value: str, categories: Optional[List[str]] = None) -> Optional[Tuple[str, str]]:
    """
    Try to resolve a model path using folder_paths.

    Fix: the return annotation used builtin `tuple[str, str]`, which raises
    TypeError at import time on Python 3.8 (pyproject declares >=3.8);
    `categories` is also properly Optional now.

    Args:
        value: The model filename/path to resolve
        categories: Optional list of categories to try (if None, tries all)

    Returns:
        Tuple of (category, full_path) if found, None otherwise
    """
    if not isinstance(value, str) or not value.strip():
        return None

    # Workflows should store relative paths, but handle both cases
    filename = value.strip()

    # Ensure folder_paths is available
    global folder_paths
    if folder_paths is None:
        try:
            import folder_paths as fp
            folder_paths = fp
        except ImportError:
            logging.error("Model Linker: folder_paths not available")
            return None

    # If categories not provided, try all categories
    if categories is None:
        categories = list(folder_paths.folder_names_and_paths.keys())

    # Skip non-model categories
    skip_categories = {'custom_nodes', 'configs'}
    categories = [c for c in categories if c not in skip_categories]

    for category in categories:
        try:
            full_path = folder_paths.get_full_path(category, filename)
            if full_path and os.path.exists(full_path):
                return (category, full_path)
        except Exception:
            # get_full_path may raise for unknown categories; try the next one
            continue

    return None


def get_node_model_info(node: Dict[str, Any]) -> List[Dict[str, Any]]:
    """
    Extract model references from a single node.

    This scans all widgets_values entries and tries to identify which ones
    are model file references by attempting to resolve them.

    Args:
        node: Node dictionary from workflow JSON

    Returns:
        List of model reference dictionaries:
        {
            'node_id': node id,
            'node_type': node type,
            'widget_index': index in widgets_values,
            'original_path': original path from workflow,
            'category': model category (if found),
            'exists': True if model exists
        }
    """
    model_refs = []
    node_id = node.get('id')
    node_type = node.get('type', '')
    widgets_values = node.get('widgets_values', [])

    if not widgets_values:
        return model_refs

    # Get category hints for this node type
    category_hint = NODE_TYPE_TO_CATEGORY_HINTS.get(node_type)
    categories_to_try = [category_hint] if category_hint else None

    # For each widget value, check if it looks like a model file
    for idx, value in enumerate(widgets_values):
        if not is_model_filename(value):
            continue

        # Try to resolve the model path
        resolved = try_resolve_model_path(value, categories_to_try)

        if resolved:
            category, full_path = resolved
            exists = os.path.exists(full_path)
        else:
            # Could not resolve: might be a missing model or a custom node's model
            category = category_hint or 'unknown'
            full_path = None
            exists = False

        model_refs.append({
            'node_id': node_id,
            'node_type': node_type,
            'widget_index': idx,
            'original_path': value,
            'category': category,
            'full_path': full_path,
            'exists': exists
        })

    return model_refs
174 | 175 | Args: 176 | workflow_json: Complete workflow JSON dictionary 177 | 178 | Returns: 179 | List of model reference dictionaries (same format as get_node_model_info) 180 | Each dict includes 'subgraph_id' if the model is in a subgraph 181 | """ 182 | all_model_refs = [] 183 | 184 | # Get subgraph definitions first to check if node types are subgraph UUIDs 185 | definitions = workflow_json.get('definitions', {}) 186 | subgraphs = definitions.get('subgraphs', []) 187 | subgraph_lookup = {sg.get('id'): sg.get('name', sg.get('id')) for sg in subgraphs} 188 | 189 | # Analyze top-level nodes 190 | nodes = workflow_json.get('nodes', []) 191 | for node in nodes: 192 | try: 193 | model_refs = get_node_model_info(node) 194 | node_type = node.get('type', '') 195 | 196 | # Check if node type is a subgraph UUID 197 | subgraph_name = None 198 | subgraph_id = None 199 | if node_type in subgraph_lookup: 200 | subgraph_name = subgraph_lookup[node_type] 201 | subgraph_id = node_type 202 | 203 | # Mark with subgraph info if it's a subgraph node 204 | # For top-level subgraph instance nodes, subgraph_path is None 205 | # This distinguishes them from nodes within subgraph definitions 206 | for ref in model_refs: 207 | ref['subgraph_id'] = subgraph_id 208 | ref['subgraph_name'] = subgraph_name 209 | ref['subgraph_path'] = None # Top-level, not in definitions.subgraphs 210 | ref['is_top_level'] = True # Flag to indicate this is a top-level node 211 | all_model_refs.extend(model_refs) 212 | except Exception as e: 213 | logging.warning(f"Error analyzing node {node.get('id', 'unknown')}: {e}") 214 | continue 215 | 216 | # Recursively analyze subgraphs (definitions already loaded above) 217 | if not subgraphs: # Re-get if not loaded above 218 | subgraphs = definitions.get('subgraphs', []) 219 | 220 | for subgraph in subgraphs: 221 | subgraph_id = subgraph.get('id') 222 | subgraph_name = subgraph.get('name', subgraph_id) 223 | subgraph_nodes = subgraph.get('nodes', []) 224 | 225 | 
logging.debug(f"Analyzing subgraph: {subgraph_name} (ID: {subgraph_id}) with {len(subgraph_nodes)} nodes") 226 | 227 | for node in subgraph_nodes: 228 | try: 229 | model_refs = get_node_model_info(node) 230 | # Mark as belonging to this subgraph definition 231 | for ref in model_refs: 232 | ref['subgraph_id'] = subgraph_id 233 | ref['subgraph_name'] = subgraph_name 234 | ref['subgraph_path'] = ['definitions', 'subgraphs', subgraph_id, 'nodes'] 235 | ref['is_top_level'] = False # This is inside a subgraph definition 236 | all_model_refs.extend(model_refs) 237 | except Exception as e: 238 | logging.warning(f"Error analyzing subgraph node {node.get('id', 'unknown')}: {e}") 239 | continue 240 | 241 | return all_model_refs 242 | 243 | 244 | def identify_missing_models( 245 | workflow_models: List[Dict[str, Any]], 246 | available_models: List[Dict[str, str]] = None 247 | ) -> List[Dict[str, Any]]: 248 | """ 249 | Identify which models from the workflow are missing. 250 | 251 | Args: 252 | workflow_models: List of model references from analyze_workflow_models 253 | available_models: Optional list of available models (if None, checks via folder_paths) 254 | 255 | Returns: 256 | List of missing model references (filtered to only missing ones) 257 | """ 258 | missing = [] 259 | 260 | for model_ref in workflow_models: 261 | # If exists is False, it's missing 262 | if not model_ref.get('exists', False): 263 | missing.append(model_ref) 264 | 265 | return missing 266 | 267 | -------------------------------------------------------------------------------- /core/workflow_updater.py: -------------------------------------------------------------------------------- 1 | """ 2 | Workflow Updater Module 3 | 4 | Updates workflow JSON by replacing model paths in nodes. 
import os
import logging
from typing import Dict, Any, List, Optional


def convert_to_relative_path(absolute_path: str, category: str, base_directory: str = None) -> str:
    """
    Convert an absolute path to a relative path for workflow storage.

    The result must match the format ComfyUI's get_filename_list() returns
    for the category (OS-native path separators, relative to the category
    base directory), because that exact string is what node validation
    compares against.

    Args:
        absolute_path: Full absolute path to the model file.
        category: Model category (e.g., 'checkpoints', 'loras').
        base_directory: Optional base directory for the category.

    Returns:
        Relative path (filename or subfolder/filename) suitable for
        workflow storage.
    """
    if not absolute_path or not os.path.isabs(absolute_path):
        # Already relative or empty - return unchanged. Do NOT normalize
        # separators: the stored value must match ComfyUI's format exactly.
        return absolute_path

    # Preferred: find the exact entry ComfyUI itself lists for this file.
    # get_filename_list() uses OS-native separators (backslashes on Windows),
    # so returning its entry verbatim guarantees validation succeeds.
    try:
        import folder_paths
        for filename in folder_paths.get_filename_list(category):
            try:
                full_path = folder_paths.get_full_path(category, filename)
                if full_path and os.path.normpath(full_path) == os.path.normpath(absolute_path):
                    return filename
            except Exception:
                continue
    except Exception:
        # folder_paths unavailable (e.g. running outside ComfyUI) or the
        # category is unknown - fall back to manual calculation below.
        pass

    # Fallback: compute relative to the provided base directory, keeping
    # OS-native separators to match what ComfyUI's recursive_search returns.
    if base_directory:
        try:
            return os.path.relpath(absolute_path, base_directory)
        except ValueError:
            # Paths on different drives (Windows) cannot be relativized.
            pass

    # Last resort: bare filename.
    return os.path.basename(absolute_path)


def get_base_directory_for_model(model_dict: Dict[str, str], category: str) -> Optional[str]:
    """
    Get the base directory for a model based on its metadata.

    Args:
        model_dict: Model dictionary with 'base_directory' or 'path' key.
        category: Model category.

    Returns:
        Base directory path if found, None otherwise.
    """
    # An explicit base_directory wins.
    if 'base_directory' in model_dict:
        return model_dict['base_directory']

    # Otherwise derive it from the full path via folder_paths.
    if 'path' in model_dict:
        full_path = model_dict['path']
        try:
            # Imported here to avoid a circular dependency; guarded because
            # folder_paths may be absent outside a running ComfyUI.
            import folder_paths
        except ImportError:
            return None

        if category in folder_paths.folder_names_and_paths:
            # Find which registered base directory contains this path.
            for base_dir in folder_paths.get_folder_paths(category):
                try:
                    # commonpath == base_dir means full_path lives under it.
                    if os.path.commonpath([full_path, base_dir]) == base_dir:
                        return base_dir
                except (ValueError, FileNotFoundError):
                    continue

    return None


def _locate_node(workflow: Dict[str, Any], node_id: int, subgraph_id: str,
                 search_in_subgraph: bool) -> Optional[Dict[str, Any]]:
    """Find a node by id in the top-level nodes or in a subgraph definition."""
    if search_in_subgraph:
        for subgraph in workflow.get('definitions', {}).get('subgraphs', []):
            if subgraph.get('id') == subgraph_id:
                for n in subgraph.get('nodes', []):
                    if n.get('id') == node_id:
                        return n
                return None
        return None

    for n in workflow.get('nodes', []):
        if n.get('id') == node_id:
            return n
    return None


def update_model_path(
    workflow: Dict[str, Any],
    node_id: int,
    widget_index: int,
    resolved_path: str,
    category: str = None,
    base_directory: str = None,
    resolved_model: Dict[str, Any] = None,
    subgraph_id: str = None,
    is_top_level: bool = None
) -> bool:
    """
    Update a single model path in a workflow node, supporting both top-level
    and subgraph nodes.

    Args:
        workflow: Workflow JSON dictionary (modified in place).
        node_id: ID of the node to update.
        widget_index: Index in widgets_values array to update.
        resolved_path: Path to the resolved model (absolute or relative).
        category: Model category (optional, for calculating relative path).
        base_directory: Base directory for the category (optional).
        resolved_model: Model dict from scanner (optional).
        subgraph_id: ID of the subgraph (UUID), or None.
        is_top_level: True for a top-level node (even a subgraph instance),
            False for a node inside a subgraph definition,
            None to auto-detect.

    Returns:
        True if the update was applied, False otherwise.
    """
    # Decide where to search for the node:
    # - an explicit is_top_level flag is authoritative;
    # - otherwise a subgraph_id triggers auto-detection: top-level subgraph
    #   instance nodes carry a subgraph_id but live in workflow['nodes'],
    #   so check there first.
    if is_top_level is False:
        search_in_subgraph = True
    elif is_top_level is True:
        search_in_subgraph = False
    elif subgraph_id:
        search_in_subgraph = not any(
            n.get('id') == node_id for n in workflow.get('nodes', [])
        )
    else:
        search_in_subgraph = False

    node = _locate_node(workflow, node_id, subgraph_id, search_in_subgraph)

    if not node:
        location = f"subgraph {subgraph_id}" if subgraph_id else "top-level"
        logging.warning(f"Node {node_id} not found in {location}")
        return False

    widgets_values = node.get('widgets_values', [])
    if widget_index >= len(widgets_values):
        logging.warning(f"Widget index {widget_index} out of range for node {node_id}")
        return False

    # Prefer the category of the model we resolved TO, not the category the
    # missing reference was originally guessed as - ComfyUI validates against
    # the former.
    if not category and resolved_model:
        category = resolved_model.get('category')

    if os.path.isabs(resolved_path):
        effective_category = category
        if resolved_model:
            effective_category = resolved_model.get('category', category)
        relative_path = convert_to_relative_path(resolved_path, effective_category, base_directory)
    else:
        # Already relative - store as-is.
        relative_path = resolved_path

    widgets_values[widget_index] = relative_path
    logging.debug(f"Updated node {node_id}, widget {widget_index} to: {relative_path}")
    return True


def update_workflow_nodes(
    workflow: Dict[str, Any],
    mappings: List[Dict[str, Any]]
) -> Dict[str, Any]:
    """
    Apply multiple model path changes to a workflow.

    Args:
        workflow: Workflow JSON dictionary (will be modified in place).
        mappings: List of mapping dictionaries with required keys
            'node_id', 'widget_index', 'resolved_path' and optional keys
            'category', 'base_directory', 'resolved_model', 'subgraph_id',
            'is_top_level'.

    Returns:
        Updated workflow dictionary (same reference, modified in place).
    """
    updated_count = 0

    for mapping in mappings:
        node_id = mapping.get('node_id')
        widget_index = mapping.get('widget_index')
        resolved_path = mapping.get('resolved_path')

        # node_id / widget_index may legitimately be 0, so compare to None
        # rather than testing truthiness.
        if node_id is None or widget_index is None or not resolved_path:
            logging.warning(f"Invalid mapping: {mapping}")
            continue

        category = mapping.get('category')
        resolved_model = mapping.get('resolved_model')

        # Derive base_directory from the resolved model when not provided.
        base_directory = mapping.get('base_directory')
        if not base_directory and resolved_model is not None:
            base_directory = get_base_directory_for_model(resolved_model, category or '')

        success = update_model_path(
            workflow,
            node_id,
            widget_index,
            resolved_path,
            category,
            base_directory,
            resolved_model,
            mapping.get('subgraph_id'),
            mapping.get('is_top_level')
        )

        if success:
            updated_count += 1

    logging.info(f"Updated {updated_count} model paths in workflow")
    return workflow
return workflow 288 | 289 | -------------------------------------------------------------------------------- /web/linker.js: -------------------------------------------------------------------------------- 1 | /** 2 | * ComfyUI Model Linker Extension - Frontend 3 | * 4 | * Provides a menu button and dialog interface for relinking missing models in workflows. 5 | */ 6 | 7 | // Import ComfyUI APIs 8 | // These paths are relative to the ComfyUI web directory 9 | import { app } from "../../../scripts/app.js"; 10 | import { api } from "../../../scripts/api.js"; 11 | import { $el, ComfyDialog } from "../../../scripts/ui.js"; 12 | 13 | // Check if ComfyButtonGroup is available (from newer ComfyUI versions) 14 | let ComfyButtonGroup = null; 15 | try { 16 | // Try to import from scripts if available 17 | if (typeof window !== 'undefined') { 18 | try { 19 | // Some ComfyUI versions expose this globally 20 | if (window.ComfyButtonGroup) { 21 | ComfyButtonGroup = window.ComfyButtonGroup; 22 | } 23 | } catch (e) { 24 | // Ignore 25 | } 26 | } 27 | } catch (e) { 28 | // Fallback if ComfyButtonGroup not available 29 | } 30 | 31 | class LinkerManagerDialog extends ComfyDialog { 32 | constructor() { 33 | super(); 34 | this.currentWorkflow = null; 35 | this.missingModels = []; 36 | 37 | // Create dialog element using $el 38 | this.element = $el("div.comfy-modal", { 39 | parent: document.body, 40 | style: { 41 | position: "fixed", 42 | top: "50%", 43 | left: "50%", 44 | transform: "translate(-50%, -50%)", 45 | width: "900px", 46 | height: "700px", 47 | maxWidth: "95vw", 48 | maxHeight: "95vh", 49 | backgroundColor: "var(--comfy-menu-bg, #202020)", 50 | color: "var(--input-text, #ffffff)", 51 | border: "2px solid var(--border-color, #555555)", 52 | borderRadius: "8px", 53 | padding: "0", 54 | zIndex: "99999", 55 | boxShadow: "0 4px 20px rgba(0,0,0,0.8)", 56 | display: "none", 57 | flexDirection: "column" 58 | } 59 | }, [ 60 | this.createHeader(), 61 | this.createContent(), 62 | 
this.createFooter() 63 | ]); 64 | } 65 | 66 | createHeader() { 67 | return $el("div", { 68 | style: { 69 | display: "flex", 70 | justifyContent: "space-between", 71 | alignItems: "center", 72 | padding: "20px 20px 10px 20px", 73 | borderBottom: "1px solid var(--border-color)", 74 | backgroundColor: "var(--comfy-menu-bg, #202020)" 75 | } 76 | }, [ 77 | $el("h2", { 78 | textContent: "🔗 Model Linker", 79 | style: { 80 | margin: "0", 81 | color: "var(--input-text)", 82 | fontSize: "18px", 83 | fontWeight: "600" 84 | } 85 | }), 86 | $el("button", { 87 | textContent: "×", 88 | onclick: () => this.close(), 89 | style: { 90 | background: "none", 91 | border: "none", 92 | fontSize: "24px", 93 | cursor: "pointer", 94 | color: "var(--input-text)", 95 | padding: "0", 96 | width: "30px", 97 | height: "30px", 98 | borderRadius: "4px", 99 | display: "flex", 100 | alignItems: "center", 101 | justifyContent: "center" 102 | } 103 | }) 104 | ]); 105 | } 106 | 107 | createContent() { 108 | this.contentElement = $el("div", { 109 | id: "model-linker-content", 110 | style: { 111 | padding: "16px", 112 | overflowY: "auto", 113 | flex: "1", 114 | minHeight: "0" 115 | } 116 | }); 117 | return this.contentElement; 118 | } 119 | 120 | createFooter() { 121 | return $el("div", { 122 | style: { 123 | padding: "16px", 124 | borderTop: "1px solid var(--border-color)", 125 | display: "flex", 126 | justifyContent: "flex-end", 127 | gap: "8px" 128 | } 129 | }, [ 130 | $el("button", { 131 | textContent: "Auto-Resolve 100% Matches", 132 | onclick: () => this.autoResolve100Percent(), 133 | className: "comfy-button", 134 | style: { 135 | padding: "8px 16px" 136 | } 137 | }) 138 | ]); 139 | } 140 | 141 | async show() { 142 | this.element.style.display = "flex"; 143 | await this.loadWorkflowData(); 144 | } 145 | 146 | close() { 147 | this.element.style.display = "none"; 148 | } 149 | 150 | /** 151 | * Load workflow data and display missing models 152 | */ 153 | async loadWorkflowData(workflow = null) { 154 
| if (!this.contentElement) return; 155 | 156 | // Show loading state 157 | this.contentElement.innerHTML = '
Analyzing workflow...
'; 158 | 159 | try { 160 | // Use provided workflow, or get current workflow from ComfyUI 161 | if (!workflow) { 162 | workflow = this.getCurrentWorkflow(); 163 | } 164 | 165 | if (!workflow) { 166 | this.contentElement.innerHTML = 'No workflow loaded. Please load a workflow first.
'; 167 | return; 168 | } 169 | 170 | // Call analyze endpoint 171 | const response = await api.fetchApi('/model_linker/analyze', { 172 | method: 'POST', 173 | headers: { 'Content-Type': 'application/json' }, 174 | body: JSON.stringify({ workflow }) 175 | }); 176 | 177 | if (!response.ok) { 178 | throw new Error(`API error: ${response.status}`); 179 | } 180 | 181 | const data = await response.json(); 182 | this.displayMissingModels(this.contentElement, data); 183 | 184 | } catch (error) { 185 | console.error('Model Linker: Error loading workflow data:', error); 186 | if (this.contentElement) { 187 | this.contentElement.innerHTML = `Error: ${error.message}
`; 188 | } 189 | } 190 | } 191 | 192 | /** 193 | * Get current workflow from ComfyUI 194 | */ 195 | getCurrentWorkflow() { 196 | // Try to get workflow from app 197 | if (app?.graph) { 198 | try { 199 | // Use ComfyUI's workflow serialization 200 | const workflow = app.graph.serialize(); 201 | return workflow; 202 | } catch (e) { 203 | console.warn('Model Linker: Could not serialize workflow from graph:', e); 204 | } 205 | } 206 | return null; 207 | } 208 | 209 | /** 210 | * Display missing models in the dialog 211 | */ 212 | displayMissingModels(container, data) { 213 | const missingModels = data.missing_models || []; 214 | const totalMissing = data.total_missing || 0; 215 | 216 | if (totalMissing === 0) { 217 | container.innerHTML = '✓ No missing models found. All models are available!
'; 218 | return; 219 | } 220 | 221 | let html = `Found ${totalMissing} missing model(s):
`; 222 | html += '${missing.original_path}${match.model?.relative_path || match.filename} `;
349 | html += `
350 | (${match.confidence}% confidence)
351 | `;
352 | // Show resolve button for all matches (100% or < 100%)
353 | html += ` `;
357 | html += `