├── .gitattributes ├── workflows ├── download.jpg ├── womanwalking.jpg ├── Monet-Camille.jpg ├── Arn-Van-Gogh-Secondary-1.webp ├── fd4a2b62395c7281b28501fdc14421d6.jpg ├── 231030083831-unseen-monet-auction.jpg ├── Starry-Night-canvas-Vincent-van-Gogh-New-1889.webp ├── original_3a930bdd-a01e-42d5-b8a4-8ebaf467384d.webp ├── Claude-Monet-San-Giorgio-Maggiore-at-Dusk_2048x.webp ├── TrainingOnlyWorkflow_Z-Image_demo.json ├── Qwen image Edit with Control images pairs.json ├── Lora Analysis and Block Control Demo - Z-Image.json └── SDXLDemo.json ├── requirements.txt ├── pyproject.toml ├── LICENSE ├── __init__.py ├── sd15_config_template.py ├── sdxl_config_template.py ├── musubi_qwen_image_config_template.py ├── musubi_zimage_config_template.py ├── musubi_wan_config_template.py ├── config_template.py ├── README.md ├── lora_analyzer.py ├── sd15_lora_trainer.py └── sdxl_lora_trainer.py /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /workflows/download.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shootthesound/comfyUI-Realtime-Lora/HEAD/workflows/download.jpg -------------------------------------------------------------------------------- /workflows/womanwalking.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shootthesound/comfyUI-Realtime-Lora/HEAD/workflows/womanwalking.jpg -------------------------------------------------------------------------------- /workflows/Monet-Camille.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shootthesound/comfyUI-Realtime-Lora/HEAD/workflows/Monet-Camille.jpg -------------------------------------------------------------------------------- /workflows/Arn-Van-Gogh-Secondary-1.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shootthesound/comfyUI-Realtime-Lora/HEAD/workflows/Arn-Van-Gogh-Secondary-1.webp -------------------------------------------------------------------------------- /workflows/fd4a2b62395c7281b28501fdc14421d6.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shootthesound/comfyUI-Realtime-Lora/HEAD/workflows/fd4a2b62395c7281b28501fdc14421d6.jpg -------------------------------------------------------------------------------- /workflows/231030083831-unseen-monet-auction.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shootthesound/comfyUI-Realtime-Lora/HEAD/workflows/231030083831-unseen-monet-auction.jpg -------------------------------------------------------------------------------- /workflows/Starry-Night-canvas-Vincent-van-Gogh-New-1889.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shootthesound/comfyUI-Realtime-Lora/HEAD/workflows/Starry-Night-canvas-Vincent-van-Gogh-New-1889.webp -------------------------------------------------------------------------------- /workflows/original_3a930bdd-a01e-42d5-b8a4-8ebaf467384d.webp: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/shootthesound/comfyUI-Realtime-Lora/HEAD/workflows/original_3a930bdd-a01e-42d5-b8a4-8ebaf467384d.webp -------------------------------------------------------------------------------- /workflows/Claude-Monet-San-Giorgio-Maggiore-at-Dusk_2048x.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shootthesound/comfyUI-Realtime-Lora/HEAD/workflows/Claude-Monet-San-Giorgio-Maggiore-at-Dusk_2048x.webp -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # No additional requirements 2 | # This node uses only standard ComfyUI dependencies (torch, PIL, numpy) 3 | # Training is handled by external tools (AI-Toolkit, sd-scripts) in their own venvs 4 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "comfyui-realtime-lora" 3 | description = "Train, analyze, and selectively by block load LoRAs inside ComfyUI. Supports Z-Image, Qwen Image, Qwen Image Edit, SDXL, FLUX, Wan 2.2, and SD 1.5" 4 | version = "1.1.2" 5 | license = "MIT" 6 | authors = [ 7 | { name = "Peter Neill" } 8 | ] 9 | readme = "README.md" 10 | requires-python = ">=3.10" 11 | 12 | [project.urls] 13 | Homepage = "https://github.com/ShootTheSound/comfyUI-Realtime-Lora" 14 | Repository = "https://github.com/ShootTheSound/comfyUI-Realtime-Lora" 15 | Issues = "https://github.com/ShootTheSound/comfyUI-Realtime-Lora/issues" 16 | 17 | [tool.comfy] 18 | node_class_mappings = "NODE_CLASS_MAPPINGS" 19 | node_display_name_mappings = "NODE_DISPLAY_NAME_MAPPINGS" 20 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 shootthesound 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | ComfyUI Realtime LoRA Trainer 3 | 4 | Trains LoRAs on-the-fly from images during generation. 5 | Supports Z-Image, FLUX, Wan models via AI-Toolkit. 6 | Also supports SDXL and SD 1.5 via kohya sd-scripts. 
7 | 8 | Includes LoRA Layer Analyzer and Selective LoRA Loader for analyzing 9 | and loading specific blocks/layers from LoRA files. 10 | """ 11 | 12 | from .realtime_lora_trainer import RealtimeLoraTrainer, ApplyTrainedLora 13 | from .sdxl_lora_trainer import SDXLLoraTrainer 14 | from .sd15_lora_trainer import SD15LoraTrainer 15 | from .musubi_zimage_lora_trainer import MusubiZImageLoraTrainer 16 | from .musubi_qwen_image_lora_trainer import MusubiQwenImageLoraTrainer 17 | from .musubi_qwen_image_edit_lora_trainer import MusubiQwenImageEditLoraTrainer 18 | from .musubi_wan_lora_trainer import MusubiWanLoraTrainer 19 | from .lora_analyzer import LoRALoaderWithAnalysis 20 | from .selective_lora_loader import SDXLSelectiveLoRALoader, ZImageSelectiveLoRALoader, FLUXSelectiveLoRALoader, WanSelectiveLoRALoader, QwenSelectiveLoRALoader 21 | 22 | # Web directory for JavaScript extensions 23 | WEB_DIRECTORY = "./web/js" 24 | 25 | NODE_CLASS_MAPPINGS = { 26 | "RealtimeLoraTrainer": RealtimeLoraTrainer, 27 | "ApplyTrainedLora": ApplyTrainedLora, 28 | "SDXLLoraTrainer": SDXLLoraTrainer, 29 | "SD15LoraTrainer": SD15LoraTrainer, 30 | "MusubiZImageLoraTrainer": MusubiZImageLoraTrainer, 31 | "MusubiQwenImageLoraTrainer": MusubiQwenImageLoraTrainer, 32 | "MusubiQwenImageEditLoraTrainer": MusubiQwenImageEditLoraTrainer, 33 | "MusubiWanLoraTrainer": MusubiWanLoraTrainer, 34 | "LoRALoaderWithAnalysis": LoRALoaderWithAnalysis, 35 | "SDXLSelectiveLoRALoader": SDXLSelectiveLoRALoader, 36 | "ZImageSelectiveLoRALoader": ZImageSelectiveLoRALoader, 37 | "FLUXSelectiveLoRALoader": FLUXSelectiveLoRALoader, 38 | "WanSelectiveLoRALoader": WanSelectiveLoRALoader, 39 | "QwenSelectiveLoRALoader": QwenSelectiveLoRALoader, 40 | } 41 | 42 | NODE_DISPLAY_NAME_MAPPINGS = { 43 | "RealtimeLoraTrainer": "Realtime LoRA Trainer", 44 | "ApplyTrainedLora": "Apply Trained LoRA", 45 | "SDXLLoraTrainer": "Realtime LoRA Trainer (SDXL - sd-scripts)", 46 | "SD15LoraTrainer": "Realtime LoRA Trainer (SD 1.5 - sd-scripts)", 47 | "MusubiZImageLoraTrainer": "Realtime LoRA Trainer (Z-Image - Musubi Tuner)", 48 | "MusubiQwenImageLoraTrainer": "Realtime LoRA Trainer (Qwen Image - Musubi Tuner)", 49 | "MusubiQwenImageEditLoraTrainer": "Realtime LoRA Trainer (Qwen Image Edit - Musubi Tuner)", 50 | "MusubiWanLoraTrainer": "Realtime LoRA Trainer (Wan 2.2 - Musubi Tuner)", 51 | "LoRALoaderWithAnalysis": "LoRA Loader + Analyzer", 52 | "SDXLSelectiveLoRALoader": "Selective LoRA Loader (SDXL)", 53 | "ZImageSelectiveLoRALoader": "Selective LoRA Loader (Z-Image)", 54 | "FLUXSelectiveLoRALoader": "Selective LoRA Loader (FLUX)", 55 | "WanSelectiveLoRALoader": "Selective LoRA Loader (Wan)", 56 | "QwenSelectiveLoRALoader": "Selective LoRA Loader (Qwen)", 57 | } 58 | 59 | __all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS', 'WEB_DIRECTORY'] 60 | -------------------------------------------------------------------------------- /sd15_config_template.py: -------------------------------------------------------------------------------- 1 | """ 2 | SD 1.5 LoRA Training Config Template for Kohya sd-scripts 3 | 4 | Generates TOML configuration files for train_network.py 5 | """ 6 | 7 | import os 8 | 9 | 10 | def generate_sd15_training_config( 11 | name: str, 12 | image_folder: str, 13 | output_folder: str, 14 | model_path: str, 15 | steps: int = 500, 16 | learning_rate: float = 0.0005, 17 | lora_rank: int = 16, 18 | lora_alpha: int = 16, 19 | resolution: int = 512, 20 | batch_size: int = 1, 21 | optimizer: str = "AdamW8bit", 22 | mixed_precision: str = "fp16", 
23 | gradient_checkpointing: bool = True, 24 | cache_latents: bool = True, 25 | ) -> str: 26 | """ 27 | Generate a TOML config file for SD 1.5 LoRA training with sd-scripts. 28 | 29 | Returns the config as a TOML string. 30 | """ 31 | 32 | # Escape backslashes for TOML on Windows 33 | model_path_escaped = model_path.replace('\\', '/') 34 | image_folder_escaped = image_folder.replace('\\', '/') 35 | output_folder_escaped = output_folder.replace('\\', '/') 36 | 37 | config = f'''# SD 1.5 LoRA Training Config 38 | # Generated by ComfyUI SD 1.5 LoRA Trainer 39 | 40 | [general] 41 | enable_bucket = true 42 | bucket_no_upscale = true 43 | 44 | [model] 45 | pretrained_model_name_or_path = "{model_path_escaped}" 46 | 47 | [dataset] 48 | train_data_dir = "{image_folder_escaped}" 49 | resolution = "{resolution},{resolution}" 50 | caption_extension = ".txt" 51 | 52 | [network] 53 | network_module = "networks.lora" 54 | network_dim = {lora_rank} 55 | network_alpha = {lora_alpha} 56 | network_train_unet_only = true 57 | 58 | [optimizer] 59 | optimizer_type = "{optimizer}" 60 | learning_rate = {learning_rate:g} 61 | lr_scheduler = "constant" 62 | 63 | [training] 64 | output_dir = "{output_folder_escaped}" 65 | output_name = "{name}" 66 | save_model_as = "safetensors" 67 | save_precision = "fp16" 68 | max_train_steps = {steps} 69 | train_batch_size = {batch_size} 70 | mixed_precision = "{mixed_precision}" 71 | gradient_checkpointing = {str(gradient_checkpointing).lower()} 72 | cache_latents = {str(cache_latents).lower()} 73 | sdpa = true 74 | max_data_loader_n_workers = 0 75 | seed = 42 76 | ''' 77 | 78 | return config 79 | 80 | 81 | def save_config(config_content: str, config_path: str): 82 | """Save config content to a TOML file.""" 83 | with open(config_path, 'w', encoding='utf-8') as f: 84 | f.write(config_content) 85 | 86 | 87 | # VRAM mode presets for SD 1.5 (rank is user-controlled, not preset) 88 | SD15_VRAM_PRESETS = { 89 | "Min (256px)": { 90 | "optimizer": "AdamW8bit", 91 | "mixed_precision": "fp16", 92 | "batch_size": 1, 93 | "gradient_checkpointing": True, 94 | "cache_latents": True, 95 | "resolution": 256, 96 | }, 97 | "Low (512px)": { 98 | "optimizer": "AdamW8bit", 99 | "mixed_precision": "fp16", 100 | "batch_size": 1, 101 | "gradient_checkpointing": True, 102 | "cache_latents": True, 103 | "resolution": 512, 104 | }, 105 | "Max (768px)": { 106 | "optimizer": "AdamW", 107 | "mixed_precision": "fp16", 108 | "batch_size": 4, 109 | "gradient_checkpointing": False, 110 | "cache_latents": True, 111 | "resolution": 768, 112 | }, 113 | } 114 | -------------------------------------------------------------------------------- /sdxl_config_template.py: -------------------------------------------------------------------------------- 1 | """ 2 | SDXL LoRA Training Config Template for Kohya sd-scripts 3 | 4 | Generates TOML configuration files for sdxl_train_network.py 5 | """ 6 | 7 | import os 8 | 9 | 10 | def generate_sdxl_training_config( 11 | name: str, 12 | image_folder: str, 13 | output_folder: str, 14 | model_path: str, 15 | steps: int = 500, 16 | learning_rate: float = 0.0001, 17 | lora_rank: int = 8, 18 | lora_alpha: int = 4, 19 | resolution: int = 1024, 20 | batch_size: int = 1, 21 | optimizer: str = "AdamW8bit", 22 | mixed_precision: str = "fp16", 23 | gradient_checkpointing: bool = True, 24 | cache_latents: bool = True, 25 | cache_text_encoder_outputs: bool = True, 26 | ) -> str: 27 | """ 28 | Generate a TOML config file for SDXL LoRA training with sd-scripts. 
29 | 30 | Returns the config as a TOML string. 31 | """ 32 | 33 | # Escape backslashes for TOML on Windows 34 | model_path_escaped = model_path.replace('\\', '/') 35 | image_folder_escaped = image_folder.replace('\\', '/') 36 | output_folder_escaped = output_folder.replace('\\', '/') 37 | 38 | config = f'''# SDXL LoRA Training Config 39 | # Generated by ComfyUI SDXL LoRA Trainer 40 | 41 | [general] 42 | enable_bucket = true 43 | bucket_no_upscale = true 44 | 45 | [model] 46 | pretrained_model_name_or_path = "{model_path_escaped}" 47 | 48 | [dataset] 49 | train_data_dir = "{image_folder_escaped}" 50 | resolution = "{resolution},{resolution}" 51 | caption_extension = ".txt" 52 | 53 | [network] 54 | network_module = "networks.lora" 55 | network_dim = {lora_rank} 56 | network_alpha = {lora_alpha} 57 | network_train_unet_only = true 58 | 59 | [optimizer] 60 | optimizer_type = "{optimizer}" 61 | learning_rate = {learning_rate:g} 62 | lr_scheduler = "constant" 63 | 64 | [training] 65 | output_dir = "{output_folder_escaped}" 66 | output_name = "{name}" 67 | save_model_as = "safetensors" 68 | save_precision = "fp16" 69 | max_train_steps = {steps} 70 | train_batch_size = {batch_size} 71 | mixed_precision = "{mixed_precision}" 72 | gradient_checkpointing = {str(gradient_checkpointing).lower()} 73 | cache_latents = {str(cache_latents).lower()} 74 | cache_text_encoder_outputs = {str(cache_text_encoder_outputs).lower()} 75 | sdpa = true 76 | max_data_loader_n_workers = 0 77 | seed = 42 78 | ''' 79 | 80 | return config 81 | 82 | 83 | def save_config(config_content: str, config_path: str): 84 | """Save config content to a TOML file.""" 85 | with open(config_path, 'w', encoding='utf-8') as f: 86 | f.write(config_content) 87 | 88 | 89 | # VRAM mode presets for SDXL (rank is user-controlled, not preset) 90 | SDXL_VRAM_PRESETS = { 91 | "Min (512px)": { 92 | "optimizer": "AdamW8bit", 93 | "mixed_precision": "fp16", 94 | "batch_size": 1, 95 | "gradient_checkpointing": True, 96 | "cache_latents": True, 97 | "cache_text_encoder_outputs": True, 98 | "resolution": 512, 99 | }, 100 | "Low (768px)": { 101 | "optimizer": "AdamW8bit", 102 | "mixed_precision": "fp16", 103 | "batch_size": 1, 104 | "gradient_checkpointing": True, 105 | "cache_latents": True, 106 | "cache_text_encoder_outputs": True, 107 | "resolution": 768, 108 | }, 109 | "Max (1024px)": { 110 | "optimizer": "AdamW", 111 | "mixed_precision": "bf16", 112 | "batch_size": 4, 113 | "gradient_checkpointing": False, 114 | "cache_latents": True, 115 | "cache_text_encoder_outputs": False, 116 | "resolution": 1024, 117 | }, 118 | } 119 | -------------------------------------------------------------------------------- /musubi_qwen_image_config_template.py: -------------------------------------------------------------------------------- 1 | """ 2 | Musubi Tuner Qwen Image LoRA Training Config Template 3 | 4 | Generates TOML dataset configuration files for qwen_image_train_network.py 5 | Supports Qwen-Image (text-to-image) and Qwen-Image-Edit variants (image editing) 6 | """ 7 | 8 | import os 9 | 10 | 11 | def generate_dataset_config( 12 | image_folder: str, 13 | resolution: int = 1024, 14 | batch_size: int = 1, 15 | enable_bucket: bool = True, 16 | num_repeats: int = 10, 17 | control_directory: str = None, 18 | ) -> str: 19 | """ 20 | Generate a TOML dataset config file for Musubi Tuner Qwen Image training. 21 | 22 | Args: 23 | image_folder: Path to target images (the output/result images) 24 | num_repeats: How many times to repeat each image per epoch. 
25 | Higher = fewer epochs for same step count, less overhead. 26 | control_directory: Path to control/source images for Qwen-Image-Edit training. 27 | Control images are matched by basename with target images. 28 | Returns the config as a TOML string. 29 | """ 30 | 31 | # Escape backslashes for TOML on Windows 32 | image_folder_escaped = image_folder.replace('\\', '/') 33 | 34 | config = f'''# Musubi Tuner Qwen Image Dataset Config 35 | # Generated by ComfyUI Musubi Qwen Image LoRA Trainer 36 | 37 | [general] 38 | resolution = [{resolution}, {resolution}] 39 | batch_size = {batch_size} 40 | enable_bucket = {str(enable_bucket).lower()} 41 | caption_extension = ".txt" 42 | 43 | [[datasets]] 44 | image_directory = "{image_folder_escaped}" 45 | num_repeats = {num_repeats} 46 | ''' 47 | 48 | # Add control_directory for Qwen-Image-Edit training 49 | if control_directory: 50 | control_dir_escaped = control_directory.replace('\\', '/') 51 | config += f'control_directory = "{control_dir_escaped}"\n' 52 | 53 | return config 54 | 55 | 56 | def save_config(config_content: str, config_path: str): 57 | """Save config content to a TOML file.""" 58 | with open(config_path, 'w', encoding='utf-8') as f: 59 | f.write(config_content) 60 | 61 | 62 | # VRAM mode presets for Qwen Image (Musubi Tuner) 63 | # Uses fp8_vl for text encoder (not fp8_llm like Z-Image) 64 | # blocks_to_swap is now a separate parameter (0-45) 65 | # Note: Musubi Tuner ALWAYS requires pre-caching latents and text encoder outputs 66 | MUSUBI_QWEN_IMAGE_VRAM_PRESETS = { 67 | "Max (1024px)": { 68 | "optimizer": "adamw8bit", 69 | "mixed_precision": "bf16", 70 | "batch_size": 1, 71 | "gradient_checkpointing": False, 72 | "fp8_scaled": False, 73 | "fp8_vl": False, 74 | "resolution": 1024, 75 | }, 76 | "Max (1024px) fp8": { 77 | "optimizer": "adamw8bit", 78 | "mixed_precision": "bf16", 79 | "batch_size": 1, 80 | "gradient_checkpointing": False, 81 | "fp8_scaled": True, 82 | "fp8_vl": True, 83 | "resolution": 1024, 84 | }, 85 | "Medium (768px)": { 86 | "optimizer": "adamw8bit", 87 | "mixed_precision": "bf16", 88 | "batch_size": 1, 89 | "gradient_checkpointing": True, 90 | "fp8_scaled": False, 91 | "fp8_vl": False, 92 | "resolution": 768, 93 | }, 94 | "Medium (768px) fp8": { 95 | "optimizer": "adamw8bit", 96 | "mixed_precision": "bf16", 97 | "batch_size": 1, 98 | "gradient_checkpointing": True, 99 | "fp8_scaled": True, 100 | "fp8_vl": True, 101 | "resolution": 768, 102 | }, 103 | "Low (512px)": { 104 | "optimizer": "adamw8bit", 105 | "mixed_precision": "bf16", 106 | "batch_size": 1, 107 | "gradient_checkpointing": True, 108 | "fp8_scaled": False, 109 | "fp8_vl": False, 110 | "resolution": 512, 111 | }, 112 | "Low (512px) fp8": { 113 | "optimizer": "adamw8bit", 114 | "mixed_precision": "bf16", 115 | "batch_size": 1, 116 | "gradient_checkpointing": True, 117 | "fp8_scaled": True, 118 | "fp8_vl": True, 119 | "resolution": 512, 120 | }, 121 | } 122 | -------------------------------------------------------------------------------- /musubi_zimage_config_template.py: -------------------------------------------------------------------------------- 1 | """ 2 | Musubi Tuner Z-Image LoRA Training Config Template 3 | 4 | Generates TOML dataset configuration files for zimage_train_network.py 5 | """ 6 | 7 | import os 8 | 9 | 10 | def generate_dataset_config( 11 | image_folder: str, 12 | resolution: int = 960, 13 | batch_size: int = 1, 14 | enable_bucket: bool = True, 15 | num_repeats: int = 10, 16 | ) -> str: 17 | """ 18 | Generate a TOML dataset config file 
for Musubi Tuner Z-Image training. 19 | 20 | Args: 21 | num_repeats: How many times to repeat each image per epoch. 22 | Higher = fewer epochs for same step count, less overhead. 23 | Returns the config as a TOML string. 24 | """ 25 | 26 | # Escape backslashes for TOML on Windows 27 | image_folder_escaped = image_folder.replace('\\', '/') 28 | 29 | config = f'''# Musubi Tuner Z-Image Dataset Config 30 | # Generated by ComfyUI Musubi Z-Image LoRA Trainer 31 | 32 | [general] 33 | resolution = [{resolution}, {resolution}] 34 | batch_size = {batch_size} 35 | enable_bucket = {str(enable_bucket).lower()} 36 | caption_extension = ".txt" 37 | 38 | [[datasets]] 39 | image_directory = "{image_folder_escaped}" 40 | num_repeats = {num_repeats} 41 | ''' 42 | 43 | return config 44 | 45 | 46 | def save_config(config_content: str, config_path: str): 47 | """Save config content to a TOML file.""" 48 | with open(config_path, 'w', encoding='utf-8') as f: 49 | f.write(config_content) 50 | 51 | 52 | # VRAM mode presets for Z-Image (Musubi Tuner) 53 | # Max/Medium have fp8 variants, Low/Min are always fp8 with block offloading 54 | # Note: Musubi Tuner ALWAYS requires pre-caching latents and text encoder outputs 55 | MUSUBI_ZIMAGE_VRAM_PRESETS = { 56 | "Max (1256px)": { 57 | "optimizer": "adamw8bit", 58 | "mixed_precision": "bf16", 59 | "batch_size": 1, 60 | "gradient_checkpointing": False, 61 | "fp8_scaled": False, 62 | "fp8_llm": False, 63 | "blocks_to_swap": 0, 64 | "resolution": 1256, 65 | }, 66 | "Max (1256px) fp8": { 67 | "optimizer": "adamw8bit", 68 | "mixed_precision": "bf16", 69 | "batch_size": 1, 70 | "gradient_checkpointing": False, 71 | "fp8_scaled": True, 72 | "fp8_llm": True, 73 | "blocks_to_swap": 0, 74 | "resolution": 1256, 75 | }, 76 | "Max (1256px) fp8 offload": { 77 | "optimizer": "adamw8bit", 78 | "mixed_precision": "bf16", 79 | "batch_size": 1, 80 | "gradient_checkpointing": True, 81 | "fp8_scaled": True, 82 | "fp8_llm": True, 83 | "blocks_to_swap": 14, 84 | "resolution": 1256, 85 | }, 86 | "Medium (1024px)": { 87 | "optimizer": "adamw8bit", 88 | "mixed_precision": "bf16", 89 | "batch_size": 1, 90 | "gradient_checkpointing": True, 91 | "fp8_scaled": False, 92 | "fp8_llm": False, 93 | "blocks_to_swap": 0, 94 | "resolution": 1024, 95 | }, 96 | "Medium (1024px) fp8": { 97 | "optimizer": "adamw8bit", 98 | "mixed_precision": "bf16", 99 | "batch_size": 1, 100 | "gradient_checkpointing": True, 101 | "fp8_scaled": True, 102 | "fp8_llm": True, 103 | "blocks_to_swap": 0, 104 | "resolution": 1024, 105 | }, 106 | "Medium (1024px) fp8 offload": { 107 | "optimizer": "adamw8bit", 108 | "mixed_precision": "bf16", 109 | "batch_size": 1, 110 | "gradient_checkpointing": True, 111 | "fp8_scaled": True, 112 | "fp8_llm": True, 113 | "blocks_to_swap": 14, 114 | "resolution": 1024, 115 | }, 116 | "Low (768px)": { 117 | "optimizer": "adamw8bit", 118 | "mixed_precision": "bf16", 119 | "batch_size": 1, 120 | "gradient_checkpointing": True, 121 | "fp8_scaled": True, 122 | "fp8_llm": True, 123 | "blocks_to_swap": 14, 124 | "resolution": 768, 125 | }, 126 | "Min (512px)": { 127 | "optimizer": "adamw8bit", 128 | "mixed_precision": "bf16", 129 | "batch_size": 1, 130 | "gradient_checkpointing": True, 131 | "fp8_scaled": True, 132 | "fp8_llm": True, 133 | "blocks_to_swap": 28, 134 | "resolution": 512, 135 | }, 136 | } 137 | -------------------------------------------------------------------------------- /musubi_wan_config_template.py: -------------------------------------------------------------------------------- 1 | """ 2 
| Musubi Tuner Wan 2.2 LoRA Training Config Template 3 | 4 | Generates TOML dataset configuration files for wan_train_network.py 5 | Supports Wan 2.2 T2V single-frame training with High/Low noise modes. 6 | """ 7 | 8 | import os 9 | 10 | 11 | def generate_dataset_config( 12 | image_folder: str, 13 | resolution: int = 480, 14 | batch_size: int = 1, 15 | enable_bucket: bool = True, 16 | num_repeats: int = 10, 17 | ) -> str: 18 | """ 19 | Generate a TOML dataset config file for Musubi Tuner Wan 2.2 training. 20 | 21 | Args: 22 | num_repeats: How many times to repeat each image per epoch. 23 | Higher = fewer epochs for same step count, less overhead. 24 | Returns the config as a TOML string. 25 | """ 26 | 27 | # Escape backslashes for TOML on Windows 28 | image_folder_escaped = image_folder.replace('\\', '/') 29 | 30 | config = f'''# Musubi Tuner Wan 2.2 Dataset Config 31 | # Generated by ComfyUI Musubi Wan LoRA Trainer 32 | 33 | [general] 34 | resolution = [{resolution}, {resolution}] 35 | batch_size = {batch_size} 36 | enable_bucket = {str(enable_bucket).lower()} 37 | caption_extension = ".txt" 38 | 39 | [[datasets]] 40 | image_directory = "{image_folder_escaped}" 41 | num_repeats = {num_repeats} 42 | ''' 43 | 44 | return config 45 | 46 | 47 | def save_config(config_content: str, config_path: str): 48 | """Save config content to a TOML file.""" 49 | with open(config_path, 'w', encoding='utf-8') as f: 50 | f.write(config_content) 51 | 52 | 53 | # Timestep ranges for Wan 2.2 High/Low/Combo noise training 54 | # Timestep boundary for T2V is 0.875 (875 out of 1000) 55 | # High noise: early denoising steps (timesteps 875-1000) 56 | # Low noise: later denoising steps (timesteps 0-875) 57 | # Combo: full range (timesteps 0-1000), use Low model 58 | WAN_TIMESTEP_RANGES = { 59 | "High Noise": { 60 | "min_timestep": 875, 61 | "max_timestep": 1000, 62 | "description": "Trains on early denoising steps (high noise). Use with Wan 2.2 High model.", 63 | }, 64 | "Low Noise": { 65 | "min_timestep": 0, 66 | "max_timestep": 875, 67 | "description": "Trains on later denoising steps (low noise). Use with Wan 2.2 Low model.", 68 | }, 69 | "Combo": { 70 | "min_timestep": 0, 71 | "max_timestep": 1000, 72 | "description": "Trains on full timestep range. 
Use with Wan 2.2 Low model.", 73 | }, 74 | } 75 | 76 | 77 | # VRAM mode presets for Wan 2.2 (Musubi Tuner) 78 | # Wan 2.2 supports up to 1256px resolution 79 | # blocks_to_swap is now a separate parameter (0-39) 80 | # Note: Musubi Tuner ALWAYS requires pre-caching latents and text encoder outputs 81 | MUSUBI_WAN_VRAM_PRESETS = { 82 | "Max (1256px)": { 83 | "optimizer": "adamw8bit", 84 | "mixed_precision": "fp16", 85 | "batch_size": 1, 86 | "gradient_checkpointing": False, 87 | "fp8_base": False, 88 | "resolution": 1256, 89 | }, 90 | "Max (1256px) fp8": { 91 | "optimizer": "adamw8bit", 92 | "mixed_precision": "fp16", 93 | "batch_size": 1, 94 | "gradient_checkpointing": False, 95 | "fp8_base": True, 96 | "resolution": 1256, 97 | }, 98 | "Medium (1024px)": { 99 | "optimizer": "adamw8bit", 100 | "mixed_precision": "fp16", 101 | "batch_size": 1, 102 | "gradient_checkpointing": True, 103 | "fp8_base": False, 104 | "resolution": 1024, 105 | }, 106 | "Medium (1024px) fp8": { 107 | "optimizer": "adamw8bit", 108 | "mixed_precision": "fp16", 109 | "batch_size": 1, 110 | "gradient_checkpointing": True, 111 | "fp8_base": True, 112 | "resolution": 1024, 113 | }, 114 | "Low (768px)": { 115 | "optimizer": "adamw8bit", 116 | "mixed_precision": "fp16", 117 | "batch_size": 1, 118 | "gradient_checkpointing": True, 119 | "fp8_base": False, 120 | "resolution": 768, 121 | }, 122 | "Low (768px) fp8": { 123 | "optimizer": "adamw8bit", 124 | "mixed_precision": "fp16", 125 | "batch_size": 1, 126 | "gradient_checkpointing": True, 127 | "fp8_base": True, 128 | "resolution": 768, 129 | }, 130 | "Min (512px)": { 131 | "optimizer": "adamw8bit", 132 | "mixed_precision": "fp16", 133 | "batch_size": 1, 134 | "gradient_checkpointing": True, 135 | "fp8_base": False, 136 | "resolution": 512, 137 | }, 138 | "Min (512px) fp8": { 139 | "optimizer": "adamw8bit", 140 | "mixed_precision": "fp16", 141 | "batch_size": 1, 142 | "gradient_checkpointing": True, 143 | "fp8_base": True, 144 | "resolution": 512, 145 | }, 146 | } 147 | -------------------------------------------------------------------------------- /config_template.py: -------------------------------------------------------------------------------- 1 | """ 2 | Config template generator for multi-architecture LoRA training. 3 | Supports Z-Image, FLUX, Wan, and Qwen models. 4 | Borrowed from the training_config_gui structure, optimized for fast training. 
5 | """ 6 | 7 | import yaml 8 | import os 9 | from datetime import datetime 10 | 11 | 12 | # Architecture-specific configuration defaults 13 | ARCHITECTURE_CONFIGS = { 14 | "Z-Image Turbo": { 15 | "default_path": "Tongyi-MAI/Z-Image-Turbo", 16 | "arch": "zimage", 17 | "is_flux": False, 18 | "train_text_encoder": False, 19 | "noise_scheduler": "flowmatch", 20 | "assistant_lora_path": "ostris/zimage_turbo_training_adapter/zimage_turbo_training_adapter_v2.safetensors", 21 | "sample_guidance_scale": 1.0, 22 | "sample_steps": 8, 23 | }, 24 | "FLUX.1-dev": { 25 | "default_path": "black-forest-labs/FLUX.1-dev", 26 | "arch": None, 27 | "is_flux": True, 28 | "train_text_encoder": False, 29 | "noise_scheduler": "flowmatch", 30 | "assistant_lora_path": None, 31 | "sample_guidance_scale": 3.5, 32 | "sample_steps": 20, 33 | }, 34 | "Wan 2.2 High": { 35 | "default_path": "ai-toolkit/Wan2.2-T2V-A14B-Diffusers-bf16", 36 | "arch": "wan22_14b", 37 | "is_flux": False, 38 | "train_text_encoder": False, 39 | "noise_scheduler": "flowmatch", 40 | "assistant_lora_path": None, 41 | "sample_guidance_scale": 3.5, 42 | "sample_steps": 25, 43 | "wan_stage": "high", 44 | # Wan requires 4-bit quant with ARA for 24GB cards 45 | "qtype": "uint4|ostris/accuracy_recovery_adapters/wan22_14b_t2i_torchao_uint4.safetensors", 46 | "quantize_te": True, 47 | "qtype_te": "qfloat8", 48 | "cache_text_embeddings": True, 49 | }, 50 | "Wan 2.2 Low": { 51 | "default_path": "ai-toolkit/Wan2.2-T2V-A14B-Diffusers-bf16", 52 | "arch": "wan22_14b", 53 | "is_flux": False, 54 | "train_text_encoder": False, 55 | "noise_scheduler": "flowmatch", 56 | "assistant_lora_path": None, 57 | "sample_guidance_scale": 3.5, 58 | "sample_steps": 25, 59 | "wan_stage": "low", 60 | # Wan requires 4-bit quant with ARA for 24GB cards 61 | "qtype": "uint4|ostris/accuracy_recovery_adapters/wan22_14b_t2i_torchao_uint4.safetensors", 62 | "quantize_te": True, 63 | "qtype_te": "qfloat8", 64 | "cache_text_embeddings": True, 65 | }, 66 | "Wan 2.2 Combo": { 67 | "default_path": "ai-toolkit/Wan2.2-T2V-A14B-Diffusers-bf16", 68 | "arch": "wan22_14b", 69 | "is_flux": False, 70 | "train_text_encoder": False, 71 | "noise_scheduler": "flowmatch", 72 | "assistant_lora_path": None, 73 | "sample_guidance_scale": 3.5, 74 | "sample_steps": 25, 75 | "wan_stage": "combo", # Trains both high and low noise stages 76 | # Wan requires 4-bit quant with ARA for 24GB cards 77 | "qtype": "uint4|ostris/accuracy_recovery_adapters/wan22_14b_t2i_torchao_uint4.safetensors", 78 | "quantize_te": True, 79 | "qtype_te": "qfloat8", 80 | "cache_text_embeddings": True, 81 | }, 82 | } 83 | 84 | 85 | def _build_model_config(arch_config, model_path, quantize, low_vram, layer_offloading): 86 | """Build architecture-specific model config.""" 87 | model_config = { 88 | 'name_or_path': model_path, 89 | 'quantize': quantize, 90 | 'low_vram': low_vram, 91 | 'layer_offloading': layer_offloading, 92 | } 93 | 94 | # Add architecture-specific fields 95 | if arch_config['arch']: 96 | model_config['arch'] = arch_config['arch'] 97 | if arch_config['is_flux']: 98 | model_config['is_flux'] = True 99 | if arch_config['assistant_lora_path']: 100 | model_config['assistant_lora_path'] = arch_config['assistant_lora_path'] 101 | # Wan-specific settings 102 | if arch_config.get('wan_stage'): 103 | stage = arch_config['wan_stage'] 104 | model_config['model_kwargs'] = { 105 | 'train_high_noise': stage in ['high', 'combo'], 106 | 'train_low_noise': stage in ['low', 'combo'], 107 | } 108 | if arch_config.get('qtype'): 109 | 
model_config['qtype'] = arch_config['qtype'] 110 | if arch_config.get('quantize_te'): 111 | model_config['quantize_te'] = arch_config['quantize_te'] 112 | if arch_config.get('qtype_te'): 113 | model_config['qtype_te'] = arch_config['qtype_te'] 114 | 115 | return model_config 116 | 117 | 118 | def generate_training_config( 119 | name: str, 120 | image_folder: str, 121 | output_folder: str, 122 | architecture: str, 123 | model_path: str, 124 | steps: int = 100, 125 | learning_rate: float = 5e-4, 126 | lora_rank: int = 16, 127 | resolution: int = 1024, 128 | low_vram: bool = False, 129 | quantize: bool = False, 130 | layer_offloading: bool = False, 131 | gradient_accumulation_steps: int = 1, 132 | ) -> dict: 133 | """ 134 | Generate a training config dict for multi-architecture LoRA training. 135 | 136 | Supports Z-Image, FLUX, Wan, and Qwen models with architecture-specific settings. 137 | 138 | Args: 139 | name: Unique name for this training run 140 | image_folder: Path to folder containing the training image(s) and caption(s) 141 | output_folder: Where to save the trained LoRA 142 | architecture: Model architecture (Z-Image Turbo, FLUX.1-dev, Wan 2.2 High/Low, Qwen Image/Edit) 143 | model_path: HuggingFace path or local path to model 144 | steps: Number of training steps (default 100 for fast training) 145 | learning_rate: Learning rate (default 5e-4, higher than normal for fast overfitting) 146 | lora_rank: LoRA rank/dimension (default 16) 147 | resolution: Training resolution (default 1024) 148 | low_vram: Enable low VRAM mode (quantizes model on CPU, enables offloading) 149 | quantize: Enable model quantization for reduced VRAM usage 150 | layer_offloading: Enable layer offloading (moves transformer blocks between CPU/GPU) 151 | gradient_accumulation_steps: Number of gradient accumulation steps (trades time for VRAM) 152 | 153 | Returns: 154 | Config dict ready to be saved as YAML 155 | """ 156 | # Get architecture-specific config 157 | arch_config = ARCHITECTURE_CONFIGS[architecture] 158 | 159 | config = { 160 | 'job': 'extension', 161 | 'config': { 162 | 'name': name, 163 | 'process': [ 164 | { 165 | 'type': 'sd_trainer', 166 | 'training_folder': output_folder, 167 | 'device': 'cuda:0', 168 | 169 | # Network config - standard LoRA 170 | 'network': { 171 | 'type': 'lora', 172 | 'linear': lora_rank, 173 | 'linear_alpha': lora_rank, 174 | }, 175 | 176 | # Save config - only save final checkpoint 177 | 'save': { 178 | 'dtype': 'float16', 179 | 'save_every': steps, # Only save at the end 180 | 'max_step_saves_to_keep': 1, 181 | }, 182 | 183 | # Dataset config - single image folder 184 | 'datasets': [ 185 | { 186 | 'folder_path': image_folder, 187 | 'caption_ext': 'txt', 188 | 'caption_dropout_rate': 0.0, # Always use caption for single image 189 | 'shuffle_tokens': False, 190 | 'cache_latents_to_disk': False, # Not worth it for single image 191 | 'resolution': [resolution], 192 | 'bucket_no_upscale': True, 193 | } 194 | ], 195 | 196 | # Training config - optimized for fast overfitting 197 | 'train': { 198 | 'batch_size': 1, 199 | 'steps': steps, 200 | 'gradient_accumulation_steps': gradient_accumulation_steps, 201 | 'train_unet': True, 202 | 'train_text_encoder': arch_config['train_text_encoder'], 203 | 'gradient_checkpointing': True, 204 | 'noise_scheduler': arch_config['noise_scheduler'], 205 | 'optimizer': 'adamw8bit', 206 | 'lr': learning_rate, 207 | 'cache_text_embeddings': arch_config.get('cache_text_embeddings', False), 208 | 'dtype': 'bf16', 209 | }, 210 | 211 | # Model config - 
architecture-specific 212 | 'model': _build_model_config(arch_config, model_path, quantize, low_vram, layer_offloading), 213 | 214 | } 215 | ] 216 | }, 217 | 'meta': { 218 | 'name': '[name]', 219 | 'version': '1.0', 220 | } 221 | } 222 | 223 | return config 224 | 225 | 226 | def save_config(config: dict, config_path: str) -> None: 227 | """Save config dict to YAML file.""" 228 | with open(config_path, 'w', encoding='utf-8') as f: 229 | yaml.dump(config, f, default_flow_style=False, allow_unicode=True, sort_keys=False) 230 | 231 | 232 | def generate_unique_name() -> str: 233 | """Generate a unique name for a training run.""" 234 | timestamp = datetime.now().strftime('%Y%m%d_%H%M%S_%f') 235 | return f"realtime_lora_{timestamp}" 236 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ComfyUI Realtime LoRA Toolkit 2 | 3 | [![Buy Me A Coffee](https://img.shields.io/badge/Buy%20Me%20A%20Coffee-Support-yellow.svg)](https://buymeacoffee.com/lorasandlenses) 4 | [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](LICENSE) 5 | 6 | **Train**, **analyze**, and **selectively load (by block)** LoRAs for **SDXL, SD 1.5, FLUX, Z-Image, Qwen Image, Qwen Image Edit, and Wan 2.2** directly inside ComfyUI. One unified interface across three training backends, plus powerful analysis and block-level loading tools. 7 | 8 | > **New in v1.1:** LoRA Analysis & Selective Block Loading - See which blocks matter and load only the ones you want! 9 | > 10 | > [![▶ Watch Demo](https://img.youtube.com/vi/dkEB5i5yBUI/0.jpg)](https://www.youtube.com/watch?v=dkEB5i5yBUI) 11 | 12 | Capture a face, a style, or a subject from your reference images and apply it to new generations - all within the same workflow. No config files. No command line. Just connect images and go. 13 | 14 | ## At a Glance 15 | 16 | | Backend | Models | Best For | 17 | |---------|--------|----------| 18 | | **sd-scripts** | SDXL, SD 1.5 | Fast training, mature workflows, broad checkpoint compatibility | 19 | | **Musubi Tuner** | Z-Image, Qwen Image, Qwen Image Edit, Wan 2.2 | Cutting-edge models, smaller LoRAs, excellent VRAM efficiency | 20 | | **AI-Toolkit** | FLUX.1-dev, Z-Image, Wan 2.2 | Alternative training pipeline | 21 | 22 | **7 architectures. 3 training backends. 8 trainer nodes. 5 selective loaders. 1 analyzer.** 23 | 24 | ## Use Cases 25 | 26 | - **Subject consistency** - Train on a character or face, use across multiple generations 27 | - **Style transfer** - Capture an art style from a few reference images 28 | - **Rapid prototyping** - Test a LoRA concept in minutes before committing to longer training 29 | - **Video keyframes** - Train on first/last frames for Wan video temporal consistency 30 | - **Image editing behaviors** - Use Qwen Image Edit to teach specific transformations with paired images 31 | 32 | ## What This Does 33 | 34 | This node trains LoRAs on-the-fly from your images without leaving ComfyUI. SDXL and SD 1.5 training is particularly fast - a few minutes on a decent GPU, or under 2 minutes for SD 1.5 on modern hardware. This makes it practical to train a quick LoRA and immediately use it for img2img variations, style transfer, or subject consistency within the same workflow. 35 | 36 | **Personal note:** I think SDXL is due for a revival. It trains fast, runs on reasonable hardware, and the results are solid.
For quick iteration - testing a concept before committing to a longer train, locking down a subject for consistency, or training on frames for Wan video work - SDXL hits a sweet spot that newer models don't always match. Sometimes the "old" tool is still the right one. 37 | 38 | ## Supported Models 39 | 40 | **Via Kohya sd-scripts:** 41 | - SDXL (any checkpoint) - tested with Juggernaut XL Ragnarok, base SDXL will work too 42 | - SD 1.5 (any checkpoint) - blazingly fast, ~2 mins for 500 steps on a 5090 43 | 44 | **Via Musubi Tuner:** 45 | - Z-Image - faster training, smaller LoRA files, no diffusers dependency. Requires the de-distilled model for training, but trained LoRAs work with the regular distilled Z-Image Turbo model. 46 | - Qwen Image - text-to-image generation. Supports Qwen-Image, Qwen-Image-Edit, and Qwen-Image-Edit-2509 models for style/subject LoRAs. 47 | - Qwen Image Edit - for training image editing behaviors with source/target image pairs. Uses folder paths for paired training data. 48 | - Wan 2.2 - single-frame image training with High/Low/Combo noise modes. Separate block offloading control for fine-tuned VRAM management. 49 | 50 | **Via AI-Toolkit:** 51 | - Z-Image Turbo 52 | - FLUX.1-dev 53 | - Wan 2.2 (High/Low/Combo) 54 | 55 | **Note on Wan 2.2 modes:** Wan uses a two-stage noise model - High handles early denoising steps, Low handles later steps. You can train separate LoRAs for each, or use Combo mode which trains a single LoRA across all noise steps that works with both High and Low models. 56 | 57 | **Technical note:** When using High or Low mode, the example workflows still pass the LoRA to both models but at zero strength for the one you didn't train. This prevents ComfyUI from loading the base model into memory before training starts - a workaround to avoid unnecessary VRAM usage. 58 | 59 | ## Requirements 60 | 61 | **Python version:** Both AI-Toolkit and sd-scripts work best with Python 3.10-3.12. Python 3.10 is the safest bet. Avoid 3.13 for now. 62 | 63 | You need to install the training backend(s) separately: 64 | 65 | **For SDXL / SD 1.5 training:** 66 | 1. Install sd-scripts: https://github.com/kohya-ss/sd-scripts 67 | 2. Follow their install instructions 68 | 69 | **For Musubi Tuner models (Z-Image, Qwen Image, Wan 2.2):** 70 | 1. Install Musubi Tuner: https://github.com/kohya-ss/musubi-tuner 71 | 2. Follow their install instructions 72 | 3. Download the required models: 73 | 74 | **Z-Image:** Download the de-distilled model from https://huggingface.co/ostris/Z-Image-De-Turbo/tree/main - save to `models/diffusion_models`. Your trained LoRAs will work with the regular distilled Z-Image Turbo model. 75 | 76 | **Qwen Image:** Download bf16 models (not fp8) from Comfy-Org or from the links in the example workflows: 77 | - DiT: https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI (qwen_image_bf16.safetensors) or https://huggingface.co/Comfy-Org/Qwen-Image-Edit_ComfyUI for Edit models 78 | - VAE: qwen_image_vae.safetensors 79 | - Text Encoder: qwen_2.5_vl_7b.safetensors (from clip folder) 80 | - Note: Pre-quantized fp8 models don't work for training - use bf16 versions. 81 | 82 | **Wan 2.2:** Download fp16 models from Comfy-Org or from the links in the example workflows: 83 | - DiT: wan2.2_t2v_14B_fp16.safetensors (High or Low noise variant) 84 | - VAE: wan_2.2_vae.safetensors 85 | - T5: models_t5_umt5-xxl-enc-bf16.pth 86 | 87 | **For FLUX/Z-Image/Wan training (AI-Toolkit):** 88 | 1. Install AI-Toolkit: https://github.com/ostris/ai-toolkit 89 | 2.
Follow their install instructions 90 | 91 | **RTX 50-series GPUs (AI-Toolkit only):** Blackwell GPUs (RTX 5080/5090) require PyTorch 2.7+ with CUDA 12.8 support. The standard AI-Toolkit installation may not work out of the box. A community installer is available at https://github.com/omgitsgb/ostris-ai-toolkit-50gpu-installer that handles the correct PyTorch/CUDA versions. Note: sd-scripts for SDXL/SD1.5 training & Musubi Training is unaffected - this applies only to AI-Toolkit. 92 | 93 | You don't need to open the training environments after installation. The node just needs the path to where you installed them. 94 | 95 | ## Installation 96 | 97 | Clone this repo into your ComfyUI custom_nodes folder: 98 | 99 | ``` 100 | cd ComfyUI/custom_nodes 101 | git clone https://github.com/ShootTheSound/comfyUI-Realtime-Lora 102 | ``` 103 | 104 | Restart ComfyUI. 105 | 106 | ## Nodes 107 | 108 | Search for these in ComfyUI: 109 | 110 | - **Realtime LoRA Trainer** - Trains using AI-Toolkit (FLUX, Z-Image, Wan) 111 | - **Realtime LoRA Trainer (Z-Image - Musubi Tuner)** - Trains Z-Image using Musubi Tuner (recommended) 112 | - **Realtime LoRA Trainer (Qwen Image - Musubi Tuner)** - Trains Qwen Image/Edit models for style/subject LoRAs 113 | - **Realtime LoRA Trainer (Qwen Image Edit - Musubi Tuner)** - Trains edit behaviors with source/target image pairs 114 | - **Realtime LoRA Trainer (Wan 2.2 - Musubi Tuner)** - Trains Wan 2.2 with High/Low/Combo noise modes 115 | - **Realtime LoRA Trainer (SDXL - sd-scripts)** - Trains using sd-scripts (SDXL) 116 | - **Realtime LoRA Trainer (SD 1.5 - sd-scripts)** - Trains using sd-scripts (SD 1.5) 117 | - **Apply Trained LoRA** - Applies the trained LoRA to your model 118 | 119 | **Analysis & Selective Loading:** 120 | - **LoRA Loader + Analyzer** - Loads a LoRA and analyzes block-level impact (outputs analysis JSON for selective loaders) 121 | - **Selective LoRA Loader (SDXL)** - Load SDXL LoRAs with per-block toggles and strength sliders 122 | - **Selective LoRA Loader (Z-Image)** - Load Z-Image LoRAs with per-layer toggles (30 layers) 123 | - **Selective LoRA Loader (FLUX)** - Load FLUX LoRAs with per-block toggles (57 blocks: 19 double + 38 single) 124 | - **Selective LoRA Loader (Wan)** - Load Wan LoRAs with per-block toggles (40 blocks) 125 | - **Selective LoRA Loader (Qwen)** - Load Qwen LoRAs with per-block toggles (60 blocks) 126 | 127 | ## Getting Started 128 | 129 | There are critical example workflows with useful info included in the custom_nodes/comfyUI-Realtime-Lora folder. Open one in ComfyUI and: 130 | 131 | 1. Paste the path to your training backend installation (sd-scripts, Musubi Tuner, or AI-Toolkit) 132 | 2. For SDXL/SD1.5: select your checkpoint from the dropdown 133 | 3. For Musubi Tuner Z-Image: select your de-distilled model, VAE, and text encoder from the dropdowns 134 | 4. For AI-Toolkit models: the first run will download the model from HuggingFace automatically 135 | 136 | **First run with AI-Toolkit:** The model will download to your HuggingFace cache folder. On Windows this is `C:\Users\%USERNAME%\.cache\huggingface\hub`. You can watch that folder to monitor download progress - these models are large (several GB). 137 | 138 | ## Basic Usage 139 | 140 | 1. Add the trainer node for your model type 141 | 2. Connect your training image(s) 142 | 3. Set the path to your training backend installation 143 | 4. Queue the workflow 144 | 5. 
Connect the lora_path output to the Apply Trained LoRA node 145 | 146 | ## Features 147 | 148 | - Train from 1 to 100+ images 149 | - Per-image captions (optional) 150 | - Folder input for batch training with .txt caption files 151 | - Automatic caching - identical inputs skip training and reuse the LoRA 152 | - VRAM presets for different GPU sizes 153 | - Settings are saved between sessions 154 | 155 | ## LoRA Analysis & Selective Loading 156 | 157 | Beyond training, this toolkit includes tools for understanding and fine-tuning how LoRAs affect your generations. 158 | 159 | **[▶ Watch Demo: LoRA Analysis & Selective Block Loading](https://www.youtube.com/watch?v=dkEB5i5yBUI)** 160 | 161 | [![LoRA Analysis Demo](https://img.youtube.com/vi/dkEB5i5yBUI/0.jpg)](https://www.youtube.com/watch?v=dkEB5i5yBUI) 162 | 163 | ### LoRA Loader + Analyzer 164 | 165 | The analyzer loads any LoRA and shows you which blocks have the most impact. It calculates a "strength" score (0-100%) for each block based on the weight magnitudes in that block. High-impact blocks are where the LoRA learned the most - these are often the blocks responsible for the subject's face, style, or composition. 166 | 167 | **Outputs:** 168 | - `model` / `clip` - The model with LoRA applied 169 | - `analysis` - Human-readable text breakdown 170 | - `analysis_json` - JSON data for selective loaders (enables impact-colored checkboxes) 171 | - `lora_path` - Path to the loaded LoRA (can connect to selective loaders) 172 | 173 | ### Selective LoRA Loaders 174 | 175 | Each architecture has its own selective loader with toggles and strength sliders for every block or layer. This lets you: 176 | 177 | - **Disable low-impact blocks** to reduce LoRA influence on parts of the image 178 | - **Focus on specific blocks** (e.g., face blocks, style blocks, composition blocks) 179 | - **Fine-tune strength per-block** instead of using a single global strength 180 | 181 | **Presets included:** 182 | - Default (all on at 1.0) 183 | - All Off 184 | - Half Strength 185 | - Architecture-specific presets (High Impact, Face Focus, Style Only, etc.) 186 | 187 | ### Impact-Colored Checkboxes 188 | 189 | Connect the `analysis_json` output from the Analyzer to a Selective Loader's `analysis_json` input. The checkboxes will color-code by impact: 190 | 191 | - **Blue** = Low impact (0-30%) 192 | - **Cyan/Green** = Medium impact (30-60%) 193 | - **Yellow/Orange** = High impact (60-90%) 194 | - **Red** = Very high impact (90-100%) 195 | 196 | This makes it easy to see at a glance which blocks matter most for your LoRA. 197 | 198 | ### Usage Notes 199 | 200 | - **Analyzer standalone**: The LoRA Loader + Analyzer works on its own as a drop-in replacement for ComfyUI's standard LoRA loader. The analysis outputs are optional - you can ignore them and just use the model/clip outputs. 201 | 202 | - **Path override**: When you connect a path to a Selective Loader's `lora_path` input, the dropdown selection is ignored. This lets you analyze one LoRA and selectively load it in one step. 203 | 204 | - **Trainer → Selective Loader**: The `lora_path` output from any trainer node is compatible with the Selective Loader's path input. Train a LoRA and immediately load it with per-block control - useful for testing which blocks matter for your freshly trained subject. 
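As a rough illustration of the block-impact scoring described in the analyzer section above, the same kind of 0-100% figure can be approximated outside ComfyUI by summing LoRA weight magnitudes per block and normalizing them. This is a simplified sketch of the concept, not the analyzer's exact implementation - the file path is a placeholder and the block grouping is deliberately naive:

```python
from collections import defaultdict
from safetensors.torch import load_file

state = load_file("my_lora.safetensors")  # placeholder path to any LoRA file

block_norms = defaultdict(float)
for key, tensor in state.items():
    # Naive grouping: first two dot-separated parts of the key, e.g. "double_blocks.7"
    block_id = ".".join(key.split(".")[:2])
    block_norms[block_id] += tensor.float().norm().item()

total = sum(block_norms.values()) or 1.0
for block, norm in sorted(block_norms.items(), key=lambda kv: kv[1], reverse=True):
    print(f"{block:<30} {100 * norm / total:5.1f}%")
```

The real analyzer groups keys per architecture and accounts for the LoRA up/down matrices and patch strengths, but the intuition is the same: blocks with larger learned weights score higher.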
205 | 206 | ## Defaults (Z-Image example) 207 | 208 | - 400 training steps 209 | - Learning rate 0.0002 210 | - LoRA rank 16 211 | - Low VRAM mode (768px) 212 | 213 | These defaults are starting points for experimentation, not ideal values. Every subject and style is different. 214 | 215 | **Learning rate advice:** 216 | - 0.0002 trains fast but can overshoot, causing artifacts or burning in the subject too hard 217 | - Try lowering to 0.0001 or 0.00005 for more stable, gradual training 218 | - If your LoRA looks overcooked or the subject bleeds into everything, lower the learning rate 219 | - If your LoRA is too weak after 400-500 steps, try more steps before raising the learning rate - it's already high in the example workflows. 220 | 221 | 222 | ## Support 223 | 224 | If this tool saves you time or fits into your workflow, consider [buying me a coffee](https://buymeacoffee.com/lorasandlenses). 225 | 226 | I'm currently between contracts due to family circumstances, which has given me time to build and maintain this project. Your support helps me keep developing it. 227 | 228 | No perks, no tiers - just a way to say thanks if you find it useful. 229 | 230 | 231 | ## Credits 232 | 233 | This project makes use of these excellent training tools for its training nodes: 234 | 235 | - **AI-Toolkit** by ostris: https://github.com/ostris/ai-toolkit 236 | - **sd-scripts** by kohya-ss: https://github.com/kohya-ss/sd-scripts 237 | - **Musubi Tuner** by kohya-ss: https://github.com/kohya-ss/musubi-tuner 238 | 239 | The training is done by these projects. This node just makes them accessible from within ComfyUI in a user-centric manner. Essentially, I want to democratize training and make it easier to get into creatively. 240 | 241 | ## Author 242 | 243 | Peter Neill - [ShootTheSound.com](https://shootthesound.com) / [UltrawideWallpapers.net](https://ultrawidewallpapers.net) 244 | 245 | Background in music industry photography and video. Built this node to make LoRA training accessible to creators who just want to get things done without diving into command line tools. 246 | 247 | Feedback is welcome - open an issue or reach out. 248 | 249 | ## License 250 | 251 | MIT 252 | -------------------------------------------------------------------------------- /lora_analyzer.py: -------------------------------------------------------------------------------- 1 | """ 2 | LoRA Loader with Analysis for ComfyUI 3 | Loads a LoRA and measures per-block contributions during inference. 4 | Shows which blocks actually contribute to the generated image. 5 | """ 6 | 7 | import os 8 | import re 9 | import json 10 | from collections import defaultdict 11 | import threading 12 | 13 | import torch 14 | import folder_paths 15 | import comfy.sd 16 | import comfy.model_patcher 17 | from safetensors.torch import load_file 18 | 19 | 20 | def _detect_architecture(keys): 21 | """Identify LoRA architecture from key patterns.""" 22 | keys_lower = [k.lower() for k in keys] 23 | keys_str = ' '.join(keys_lower) 24 | num_keys = len(keys) 25 | 26 | # Check for Qwen-Image (transformer_blocks with img_mlp/txt_mlp/img_mod/txt_mod) 27 | if any('transformer_blocks' in k and any(x in k for x in ['img_mlp', 'txt_mlp', 'img_mod', 'txt_mod']) for k in keys_lower): 28 | return 'QWEN_IMAGE' 29 | 30 | # Check for Z-Image patterns: 31 | # - diffusion_model.layers.N.attention/adaLN_modulation (ComfyUI format) 32 | # - single_transformer_blocks (older format) 33 | if any('diffusion_model.layers.'
in k and ('attention' in k or 'adaln' in k.lower()) for k in keys_lower): 34 | return 'ZIMAGE' 35 | if any('single_transformer_blocks' in k for k in keys_lower): 36 | return 'ZIMAGE' 37 | 38 | # Check for Flux (double_blocks/single_blocks) 39 | if any('double_blocks' in k or 'single_blocks' in k for k in keys_lower): 40 | return 'FLUX' 41 | 42 | # Check for Wan (blocks.N or blocks_N with self_attn/cross_attn/ffn) 43 | if any(('blocks.' in k or 'blocks_' in k) and any(x in k for x in ['self_attn', 'cross_attn', 'ffn']) 44 | for k in keys_lower): 45 | return 'WAN' 46 | 47 | # Check for SDXL - look for dual text encoders 48 | has_te1 = 'lora_te1_' in keys_str or 'text_encoder_1' in keys_str 49 | has_te2 = 'lora_te2_' in keys_str or 'text_encoder_2' in keys_str 50 | if has_te1 and has_te2: 51 | return 'SDXL' 52 | 53 | # SDXL has way more tensors than SD15 (2000+ vs 600-800) 54 | if num_keys > 1500: 55 | return 'SDXL' 56 | 57 | if any('input_blocks_7' in k or 'input_blocks_8' in k or 58 | 'input_blocks.7' in k or 'input_blocks.8' in k for k in keys_lower): 59 | return 'SDXL' 60 | 61 | # Check for SD1.5 patterns 62 | if any('lora_unet_' in k or 'lora_te_' in k for k in keys_lower): 63 | return 'SD15' 64 | 65 | # Fallback based on tensor count 66 | if num_keys > 1000: 67 | return 'SDXL' 68 | 69 | if any('input_blocks' in k for k in keys_lower): 70 | return 'SD15' 71 | 72 | return 'UNKNOWN' 73 | 74 | 75 | def _extract_block_id(key: str, architecture: str) -> str: 76 | """Extract block identifier from a LoRA/model weight key.""" 77 | key_lower = key.lower() 78 | 79 | if architecture == 'QWEN_IMAGE': 80 | match = re.search(r'transformer_blocks[._](\d+)', key) 81 | return f"block_{match.group(1)}" if match else 'other' 82 | 83 | elif architecture == 'ZIMAGE': 84 | # New format: diffusion_model.layers.N.attention/adaLN_modulation 85 | match = re.search(r'diffusion_model\.layers\.(\d+)', key) 86 | if match: 87 | return f"layer_{match.group(1)}" 88 | # Old format: single_transformer_blocks.N 89 | match = re.search(r'single_transformer_blocks\.(\d+)', key) 90 | if match: 91 | return f"block_{match.group(1)}" 92 | return 'other' 93 | 94 | elif architecture == 'WAN': 95 | # Handle both blocks.N and blocks_N patterns 96 | match = re.search(r'blocks[._](\d+)', key) 97 | return f"block_{match.group(1)}" if match else 'other' 98 | 99 | elif architecture == 'FLUX': 100 | double = re.search(r'double_blocks[._]?(\d+)', key_lower) 101 | if double: 102 | return f"double_{double.group(1)}" 103 | single = re.search(r'single_blocks[._]?(\d+)', key_lower) 104 | if single: 105 | return f"single_{single.group(1)}" 106 | return 'other' 107 | 108 | elif architecture in ['SDXL', 'SD15']: 109 | te = re.search(r'lora_te(\d?)_', key_lower) 110 | if te: 111 | return f"text_encoder_{te.group(1) or '1'}" 112 | # Match diffusion_model patterns 113 | down = re.search(r'down_blocks?[._]?(\d+)', key_lower) 114 | if down: 115 | return f"unet_down_{down.group(1)}" 116 | if 'mid_block' in key_lower or 'middle_block' in key_lower: 117 | return "unet_mid" 118 | up = re.search(r'up_blocks?[._]?(\d+)', key_lower) 119 | if up: 120 | return f"unet_up_{up.group(1)}" 121 | # Input/output blocks for SD 122 | inp = re.search(r'input_blocks?[._]?(\d+)', key_lower) 123 | if inp: 124 | return f"input_{inp.group(1)}" 125 | out = re.search(r'output_blocks?[._]?(\d+)', key_lower) 126 | if out: 127 | return f"output_{out.group(1)}" 128 | return 'other' 129 | 130 | return 'other' 131 | 132 | 133 | class LoRAContributionTracker: 134 | """ 135 | Singleton 
that tracks LoRA contributions during inference. 136 | Uses weight_function wrappers to measure actual contributions. 137 | """ 138 | _instance = None 139 | _lock = threading.Lock() 140 | 141 | def __new__(cls): 142 | if cls._instance is None: 143 | with cls._lock: 144 | if cls._instance is None: 145 | cls._instance = super().__new__(cls) 146 | cls._instance.contributions = defaultdict(lambda: {'count': 0, 'total_norm': 0.0, 'total_delta': 0.0}) 147 | cls._instance.architecture = 'UNKNOWN' 148 | cls._instance.enabled = False 149 | cls._instance.lora_name = "" 150 | return cls._instance 151 | 152 | def reset(self): 153 | self.contributions = defaultdict(lambda: {'count': 0, 'total_norm': 0.0, 'total_delta': 0.0}) 154 | 155 | def record(self, block_id: str, delta_norm: float): 156 | if self.enabled: 157 | self.contributions[block_id]['count'] += 1 158 | self.contributions[block_id]['total_delta'] += delta_norm 159 | 160 | def get_report(self) -> str: 161 | if not self.contributions: 162 | return "No LoRA contributions recorded yet.\nGenerate an image first, then check this output." 163 | 164 | total = sum(d['total_delta'] for d in self.contributions.values()) 165 | if total == 0: 166 | return "No significant LoRA contributions detected." 167 | 168 | lines = [ 169 | f"LoRA: {self.lora_name}", 170 | f"Architecture: {self.architecture}", 171 | "=" * 60, 172 | f"{'Block':<25} {'Contribution':>15} {'Calls':>10}", 173 | "-" * 60 174 | ] 175 | 176 | sorted_blocks = sorted( 177 | self.contributions.items(), 178 | key=lambda x: x[1]['total_delta'], 179 | reverse=True 180 | ) 181 | 182 | for block_id, data in sorted_blocks: 183 | pct = (data['total_delta'] / total) * 100 184 | bar_len = int(pct / 5) 185 | bar = '█' * bar_len + '░' * (20 - bar_len) 186 | lines.append(f"{block_id:<25} [{bar}] {pct:5.1f}% ({data['count']:>5})") 187 | 188 | lines.append("-" * 60) 189 | lines.append(f"Total forward passes with LoRA: {sum(d['count'] for d in self.contributions.values())}") 190 | 191 | return '\n'.join(lines) 192 | 193 | 194 | # Global tracker 195 | _tracker = LoRAContributionTracker() 196 | 197 | 198 | def _create_measuring_weight_function(original_weight_func, block_id: str, weight_key: str): 199 | """ 200 | Wrap a weight function to measure LoRA contribution. 201 | The weight function is called with the base weight and returns the patched weight. 202 | """ 203 | def measuring_wrapper(weight, *args, **kwargs): 204 | global _tracker 205 | 206 | # Get the original weight norm before patching 207 | original_norm = weight.norm().item() 208 | 209 | # Apply the original function (which applies LoRA patches) 210 | result = original_weight_func(weight, *args, **kwargs) 211 | 212 | # Measure the delta 213 | if _tracker.enabled and result is not None: 214 | try: 215 | result_norm = result.norm().item() 216 | delta = abs(result_norm - original_norm) 217 | _tracker.record(block_id, delta) 218 | except Exception: 219 | pass 220 | 221 | return result 222 | 223 | return measuring_wrapper 224 | 225 | 226 | def _analyze_patches(model_patcher, architecture: str) -> dict: 227 | """ 228 | Analyze the patches stored in the ModelPatcher. 229 | Returns per-block analysis of patch strengths and norms. 
230 | 231 | Patch structure: (strength, patch_data, strength_model, offset, function) 232 | patch_data can be: 233 | - LoRAAdapter object (comfy.weight_adapter.lora.LoRAAdapter) 234 | - tuple like ("lora", (lora_up, lora_down, alpha, ...)) 235 | - tensor directly 236 | """ 237 | block_analysis = defaultdict(lambda: { 238 | 'patch_count': 0, 239 | 'total_strength': 0.0, 240 | 'total_norm': 0.0, 241 | 'keys': [] 242 | }) 243 | 244 | if not hasattr(model_patcher, 'patches'): 245 | return dict(block_analysis) 246 | 247 | for weight_key, patch_list in model_patcher.patches.items(): 248 | block_id = _extract_block_id(weight_key, architecture) 249 | 250 | for patch_tuple in patch_list: 251 | if len(patch_tuple) < 3: 252 | continue 253 | 254 | strength = patch_tuple[0] 255 | patch_data = patch_tuple[1] 256 | strength_model = patch_tuple[2] 257 | 258 | effective_strength = abs(strength * strength_model) 259 | block_analysis[block_id]['patch_count'] += 1 260 | block_analysis[block_id]['total_strength'] += effective_strength 261 | block_analysis[block_id]['keys'].append(weight_key) 262 | 263 | norm_value = 0.0 264 | 265 | try: 266 | # Handle LoRAAdapter object (ComfyUI format) 267 | # weights tuple: (lora_up, lora_down, alpha, ...) 268 | if hasattr(patch_data, 'weights') and isinstance(patch_data.weights, tuple): 269 | weights = patch_data.weights 270 | if len(weights) >= 2: 271 | lora_up = weights[0] # shape: [out_features, rank] 272 | lora_down = weights[1] # shape: [rank, in_features] 273 | if hasattr(lora_up, 'norm') and hasattr(lora_down, 'norm'): 274 | up_norm = lora_up.float().norm().item() 275 | down_norm = lora_down.float().norm().item() 276 | norm_value = up_norm * down_norm 277 | 278 | # Handle older LoRAAdapter with direct attributes 279 | elif hasattr(patch_data, 'lora_up') and hasattr(patch_data, 'lora_down'): 280 | lora_up = patch_data.lora_up 281 | lora_down = patch_data.lora_down 282 | if lora_up is not None and lora_down is not None: 283 | up_norm = lora_up.float().norm().item() 284 | down_norm = lora_down.float().norm().item() 285 | norm_value = up_norm * down_norm 286 | 287 | # Handle tuple format ("lora", (up, down, alpha, ...)) 288 | elif isinstance(patch_data, tuple) and len(patch_data) >= 2: 289 | patch_type = patch_data[0] 290 | patch_content = patch_data[1] 291 | 292 | if patch_type == "lora" and isinstance(patch_content, tuple) and len(patch_content) >= 2: 293 | lora_up = patch_content[0] 294 | lora_down = patch_content[1] 295 | if hasattr(lora_up, 'norm') and hasattr(lora_down, 'norm'): 296 | up_norm = lora_up.float().norm().item() 297 | down_norm = lora_down.float().norm().item() 298 | norm_value = up_norm * down_norm 299 | 300 | # Direct tensor 301 | elif hasattr(patch_data, 'norm'): 302 | norm_value = patch_data.float().norm().item() 303 | 304 | except Exception as e: 305 | pass 306 | 307 | block_analysis[block_id]['total_norm'] += norm_value * effective_strength 308 | 309 | return dict(block_analysis) 310 | 311 | 312 | def _format_patch_analysis(block_analysis: dict, architecture: str) -> str: 313 | """Format patch analysis as readable text.""" 314 | if not block_analysis: 315 | return "No patches found." 
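    # Illustrative output (hypothetical values, shown here only as a comment): scores are
    # normalized so the most-affected block reads 100, e.g.
    #   unet_down_1    [████████████████████] 100.0 ( 12)    2.000
    #   unet_mid       [█████████░░░░░░░░░░░]  46.3 (  4)    0.667
    # i.e. the figures are relative to max_norm computed below, not absolute tensor magnitudes.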
316 | 317 | # Normalize scores to 0-100 318 | max_norm = max((d['total_norm'] for d in block_analysis.values()), default=1.0) 319 | if max_norm == 0: 320 | max_norm = 1.0 321 | 322 | lines = [ 323 | f"LoRA Patch Analysis ({architecture})", 324 | "=" * 60, 325 | f"{'Block':<25} {'Score':>8} {'Patches':>10} {'Strength':>10}", 326 | "-" * 60 327 | ] 328 | 329 | # Sort by normalized score 330 | sorted_blocks = sorted( 331 | block_analysis.items(), 332 | key=lambda x: x[1]['total_norm'], 333 | reverse=True 334 | ) 335 | 336 | for block_id, data in sorted_blocks: 337 | score = (data['total_norm'] / max_norm) * 100 338 | bar_len = int(score / 5) 339 | bar = '█' * bar_len + '░' * (20 - bar_len) 340 | lines.append(f"{block_id:<25} [{bar}] {score:5.1f} ({data['patch_count']:>3}) {data['total_strength']:>8.3f}") 341 | 342 | lines.append("-" * 60) 343 | lines.append(f"Total patched layers: {sum(d['patch_count'] for d in block_analysis.values())}") 344 | 345 | return '\n'.join(lines) 346 | 347 | 348 | def _create_analysis_json(block_analysis: dict, architecture: str, lora_name: str) -> str: 349 | """Create JSON analysis output for use by selective loaders.""" 350 | if not block_analysis: 351 | return json.dumps({"architecture": architecture, "lora_name": lora_name, "blocks": {}}) 352 | 353 | # Normalize scores to 0-100 354 | max_norm = max((d['total_norm'] for d in block_analysis.values()), default=1.0) 355 | if max_norm == 0: 356 | max_norm = 1.0 357 | 358 | blocks = {} 359 | for block_id, data in block_analysis.items(): 360 | score = (data['total_norm'] / max_norm) * 100 361 | blocks[block_id] = { 362 | "score": round(score, 1), 363 | "patch_count": data['patch_count'], 364 | "strength": round(data['total_strength'], 4) 365 | } 366 | 367 | return json.dumps({ 368 | "architecture": architecture, 369 | "lora_name": lora_name, 370 | "blocks": blocks 371 | }) 372 | 373 | 374 | class LoRALoaderWithAnalysis: 375 | """ 376 | Loads a LoRA and provides per-block contribution analysis. 377 | 378 | Analysis output shows: 379 | 1. Static analysis: Which blocks have patches and their relative strength 380 | 2. Runtime analysis: After generation, shows actual contributions (use GetLoRAAnalysis node) 381 | """ 382 | 383 | @classmethod 384 | def INPUT_TYPES(cls): 385 | return { 386 | "required": { 387 | "model": ("MODEL",), 388 | "clip": ("CLIP",), 389 | "lora_name": (folder_paths.get_filename_list("loras"), { 390 | "tooltip": "LoRA file to load and analyze" 391 | }), 392 | "strength_model": ("FLOAT", { 393 | "default": 1.0, 394 | "min": -10.0, 395 | "max": 10.0, 396 | "step": 0.01, 397 | "tooltip": "LoRA strength for model (UNet/DiT)" 398 | }), 399 | "strength_clip": ("FLOAT", { 400 | "default": 1.0, 401 | "min": -10.0, 402 | "max": 10.0, 403 | "step": 0.01, 404 | "tooltip": "LoRA strength for CLIP text encoder" 405 | }), 406 | }, 407 | } 408 | 409 | RETURN_TYPES = ("MODEL", "CLIP", "STRING", "STRING", "STRING") 410 | RETURN_NAMES = ("model", "clip", "analysis", "analysis_json", "lora_path") 411 | OUTPUT_TOOLTIPS = ( 412 | "Model with LoRA applied.", 413 | "CLIP with LoRA applied.", 414 | "Per-block patch analysis. Shows which blocks are affected by this LoRA.", 415 | "JSON analysis data. Connect to Selective LoRA Loader for impact-colored UI.", 416 | "Full path to the loaded LoRA file. Connect to Selective LoRA Loader." 417 | ) 418 | FUNCTION = "load_lora_with_analysis" 419 | CATEGORY = "loaders/lora" 420 | OUTPUT_NODE = True 421 | DESCRIPTION = "Loads a LoRA and analyzes which blocks it affects. 
Connect analysis_json to Selective Loaders for impact-colored checkboxes." 422 | 423 | def load_lora_with_analysis(self, model, clip, lora_name, strength_model, strength_clip): 424 | global _tracker 425 | 426 | lora_path = folder_paths.get_full_path("loras", lora_name) 427 | if not lora_path or not os.path.exists(lora_path): 428 | return (model, clip, "Error: LoRA file not found", "{}", "") 429 | 430 | print(f"[LoRA Analyzer] Loading: {lora_name}") 431 | 432 | # Load LoRA state dict to detect architecture 433 | if lora_path.endswith('.safetensors'): 434 | lora_state_dict = load_file(lora_path) 435 | else: 436 | lora_state_dict = torch.load(lora_path, map_location='cpu') 437 | 438 | lora_keys = list(lora_state_dict.keys()) 439 | architecture = _detect_architecture(lora_keys) 440 | print(f"[LoRA Analyzer] Architecture: {architecture}, {len(lora_state_dict)} tensors") 441 | 442 | # Debug: show sample keys if unknown 443 | if architecture == 'UNKNOWN': 444 | print(f"[LoRA Analyzer] Sample keys: {lora_keys[:5]}") 445 | 446 | # Reset tracker 447 | _tracker.reset() 448 | _tracker.architecture = architecture 449 | _tracker.lora_name = lora_name 450 | _tracker.enabled = True 451 | 452 | # Load the LoRA using ComfyUI's standard method 453 | model_lora, clip_lora = comfy.sd.load_lora_for_models( 454 | model, 455 | clip, 456 | lora_state_dict, 457 | strength_model, 458 | strength_clip 459 | ) 460 | 461 | # Analyze the patches that were applied 462 | patch_analysis = _analyze_patches(model_lora, architecture) 463 | analysis_text = _format_patch_analysis(patch_analysis, architecture) 464 | analysis_json = _create_analysis_json(patch_analysis, architecture, lora_name) 465 | 466 | print(f"[LoRA Analyzer] Found {len(patch_analysis)} blocks with patches") 467 | print(analysis_text) 468 | 469 | return (model_lora, clip_lora, analysis_text, analysis_json, lora_path) 470 | 471 | 472 | NODE_CLASS_MAPPINGS = { 473 | "LoRALoaderWithAnalysis": LoRALoaderWithAnalysis, 474 | } 475 | 476 | NODE_DISPLAY_NAME_MAPPINGS = { 477 | "LoRALoaderWithAnalysis": "LoRA Loader + Analyzer", 478 | } 479 | -------------------------------------------------------------------------------- /workflows/TrainingOnlyWorkflow_Z-Image_demo.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "92112d97-bb64-4b44-86f2-ea5691ef8f6e", 3 | "revision": 0, 4 | "last_node_id": 80, 5 | "last_link_id": 162, 6 | "nodes": [ 7 | { 8 | "id": 29, 9 | "type": "LoadImage", 10 | "pos": [ 11 | -1899.7019401757116, 12 | -39.665001232435756 13 | ], 14 | "size": [ 15 | 395.5046924121225, 16 | 376.092132305915 17 | ], 18 | "flags": {}, 19 | "order": 0, 20 | "mode": 0, 21 | "inputs": [ 22 | { 23 | "localized_name": "image", 24 | "name": "image", 25 | "type": "COMBO", 26 | "widget": { 27 | "name": "image" 28 | }, 29 | "link": null 30 | }, 31 | { 32 | "localized_name": "choose file to upload", 33 | "name": "upload", 34 | "type": "IMAGEUPLOAD", 35 | "widget": { 36 | "name": "upload" 37 | }, 38 | "link": null 39 | } 40 | ], 41 | "outputs": [ 42 | { 43 | "localized_name": "IMAGE", 44 | "name": "IMAGE", 45 | "type": "IMAGE", 46 | "links": [ 47 | 152 48 | ] 49 | }, 50 | { 51 | "localized_name": "MASK", 52 | "name": "MASK", 53 | "type": "MASK", 54 | "links": null 55 | } 56 | ], 57 | "properties": { 58 | "cnr_id": "comfy-core", 59 | "ver": "0.3.75", 60 | "Node name for S&R": "LoadImage", 61 | "ue_properties": { 62 | "widget_ue_connectable": {}, 63 | "input_ue_unconnectable": {}, 64 | "version": "7.5.1" 65 | } 66 | }, 67 | 
"widgets_values": [ 68 | "original_3a930bdd-a01e-42d5-b8a4-8ebaf467384d.webp", 69 | "image" 70 | ] 71 | }, 72 | { 73 | "id": 32, 74 | "type": "LoadImage", 75 | "pos": [ 76 | -1897.3742566951246, 77 | 698.405647918002 78 | ], 79 | "size": [ 80 | 392.7450420874152, 81 | 334.6973774353048 82 | ], 83 | "flags": {}, 84 | "order": 1, 85 | "mode": 0, 86 | "inputs": [ 87 | { 88 | "localized_name": "image", 89 | "name": "image", 90 | "type": "COMBO", 91 | "widget": { 92 | "name": "image" 93 | }, 94 | "link": null 95 | }, 96 | { 97 | "localized_name": "choose file to upload", 98 | "name": "upload", 99 | "type": "IMAGEUPLOAD", 100 | "widget": { 101 | "name": "upload" 102 | }, 103 | "link": null 104 | } 105 | ], 106 | "outputs": [ 107 | { 108 | "localized_name": "IMAGE", 109 | "name": "IMAGE", 110 | "type": "IMAGE", 111 | "links": [ 112 | 154 113 | ] 114 | }, 115 | { 116 | "localized_name": "MASK", 117 | "name": "MASK", 118 | "type": "MASK", 119 | "links": null 120 | } 121 | ], 122 | "properties": { 123 | "cnr_id": "comfy-core", 124 | "ver": "0.3.75", 125 | "Node name for S&R": "LoadImage", 126 | "ue_properties": { 127 | "widget_ue_connectable": {}, 128 | "input_ue_unconnectable": {}, 129 | "version": "7.5.1" 130 | } 131 | }, 132 | "widgets_values": [ 133 | "download (1).jpg", 134 | "image" 135 | ] 136 | }, 137 | { 138 | "id": 31, 139 | "type": "LoadImage", 140 | "pos": [ 141 | -1481.8263702119325, 142 | 699.6559765208757 143 | ], 144 | "size": [ 145 | 373.4274898144638, 146 | 341.59650324707343 147 | ], 148 | "flags": {}, 149 | "order": 2, 150 | "mode": 0, 151 | "inputs": [ 152 | { 153 | "localized_name": "image", 154 | "name": "image", 155 | "type": "COMBO", 156 | "widget": { 157 | "name": "image" 158 | }, 159 | "link": null 160 | }, 161 | { 162 | "localized_name": "choose file to upload", 163 | "name": "upload", 164 | "type": "IMAGEUPLOAD", 165 | "widget": { 166 | "name": "upload" 167 | }, 168 | "link": null 169 | } 170 | ], 171 | "outputs": [ 172 | { 173 | "localized_name": "IMAGE", 174 | "name": "IMAGE", 175 | "type": "IMAGE", 176 | "links": [ 177 | 155 178 | ] 179 | }, 180 | { 181 | "localized_name": "MASK", 182 | "name": "MASK", 183 | "type": "MASK", 184 | "links": null 185 | } 186 | ], 187 | "properties": { 188 | "cnr_id": "comfy-core", 189 | "ver": "0.3.75", 190 | "Node name for S&R": "LoadImage", 191 | "ue_properties": { 192 | "widget_ue_connectable": {}, 193 | "input_ue_unconnectable": {}, 194 | "version": "7.5.1" 195 | } 196 | }, 197 | "widgets_values": [ 198 | "Starry-Night-canvas-Vincent-van-Gogh-New-1889.webp", 199 | "image" 200 | ] 201 | }, 202 | { 203 | "id": 30, 204 | "type": "LoadImage", 205 | "pos": [ 206 | -1479.098549522403, 207 | -29.447645769383254 208 | ], 209 | "size": [ 210 | 407.9231188733056, 211 | 366.43335616943943 212 | ], 213 | "flags": {}, 214 | "order": 3, 215 | "mode": 0, 216 | "inputs": [ 217 | { 218 | "localized_name": "image", 219 | "name": "image", 220 | "type": "COMBO", 221 | "widget": { 222 | "name": "image" 223 | }, 224 | "link": null 225 | }, 226 | { 227 | "localized_name": "choose file to upload", 228 | "name": "upload", 229 | "type": "IMAGEUPLOAD", 230 | "widget": { 231 | "name": "upload" 232 | }, 233 | "link": null 234 | } 235 | ], 236 | "outputs": [ 237 | { 238 | "localized_name": "IMAGE", 239 | "name": "IMAGE", 240 | "type": "IMAGE", 241 | "links": [ 242 | 153 243 | ] 244 | }, 245 | { 246 | "localized_name": "MASK", 247 | "name": "MASK", 248 | "type": "MASK", 249 | "links": null 250 | } 251 | ], 252 | "properties": { 253 | "cnr_id": "comfy-core", 254 | 
"ver": "0.3.75", 255 | "Node name for S&R": "LoadImage", 256 | "ue_properties": { 257 | "widget_ue_connectable": {}, 258 | "input_ue_unconnectable": {}, 259 | "version": "7.5.1" 260 | } 261 | }, 262 | "widgets_values": [ 263 | "Arn-Van-Gogh-Secondary-1.webp", 264 | "image" 265 | ] 266 | }, 267 | { 268 | "id": 59, 269 | "type": "String", 270 | "pos": [ 271 | -1903.96103902309, 272 | -289.2550430271083 273 | ], 274 | "size": [ 275 | 400, 276 | 200 277 | ], 278 | "flags": {}, 279 | "order": 4, 280 | "mode": 0, 281 | "inputs": [ 282 | { 283 | "localized_name": "String", 284 | "name": "String", 285 | "type": "STRING", 286 | "widget": { 287 | "name": "String" 288 | }, 289 | "link": null 290 | } 291 | ], 292 | "outputs": [ 293 | { 294 | "localized_name": "STRING", 295 | "name": "STRING", 296 | "type": "STRING", 297 | "links": [ 298 | 156 299 | ] 300 | } 301 | ], 302 | "properties": { 303 | "cnr_id": "ComfyLiterals", 304 | "ver": "bdddb08ca82d90d75d97b1d437a652e0284a32ac", 305 | "Node name for S&R": "String", 306 | "ue_properties": { 307 | "widget_ue_connectable": {}, 308 | "input_ue_unconnectable": {}, 309 | "version": "7.5.1" 310 | } 311 | }, 312 | "widgets_values": [ 313 | "zwxem painting of corn field" 314 | ] 315 | }, 316 | { 317 | "id": 61, 318 | "type": "String", 319 | "pos": [ 320 | -1489.0050028571977, 321 | -280.3313861058403 322 | ], 323 | "size": [ 324 | 400, 325 | 200 326 | ], 327 | "flags": {}, 328 | "order": 5, 329 | "mode": 0, 330 | "inputs": [ 331 | { 332 | "localized_name": "String", 333 | "name": "String", 334 | "type": "STRING", 335 | "widget": { 336 | "name": "String" 337 | }, 338 | "link": null 339 | } 340 | ], 341 | "outputs": [ 342 | { 343 | "localized_name": "STRING", 344 | "name": "STRING", 345 | "type": "STRING", 346 | "links": [ 347 | 157 348 | ] 349 | } 350 | ], 351 | "properties": { 352 | "cnr_id": "ComfyLiterals", 353 | "ver": "bdddb08ca82d90d75d97b1d437a652e0284a32ac", 354 | "Node name for S&R": "String", 355 | "ue_properties": { 356 | "widget_ue_connectable": {}, 357 | "input_ue_unconnectable": {}, 358 | "version": "7.5.1" 359 | } 360 | }, 361 | "widgets_values": [ 362 | "zwxem painting of a tree in a field" 363 | ] 364 | }, 365 | { 366 | "id": 60, 367 | "type": "String", 368 | "pos": [ 369 | -1902.842945889929, 370 | 457.6381465505763 371 | ], 372 | "size": [ 373 | 400, 374 | 200 375 | ], 376 | "flags": {}, 377 | "order": 6, 378 | "mode": 0, 379 | "inputs": [ 380 | { 381 | "localized_name": "String", 382 | "name": "String", 383 | "type": "STRING", 384 | "widget": { 385 | "name": "String" 386 | }, 387 | "link": null 388 | } 389 | ], 390 | "outputs": [ 391 | { 392 | "localized_name": "STRING", 393 | "name": "STRING", 394 | "type": "STRING", 395 | "links": [ 396 | 158 397 | ] 398 | } 399 | ], 400 | "properties": { 401 | "cnr_id": "ComfyLiterals", 402 | "ver": "bdddb08ca82d90d75d97b1d437a652e0284a32ac", 403 | "Node name for S&R": "String", 404 | "ue_properties": { 405 | "widget_ue_connectable": {}, 406 | "input_ue_unconnectable": {}, 407 | "version": "7.5.1" 408 | } 409 | }, 410 | "widgets_values": [ 411 | "zwxem portrait painting of a man" 412 | ] 413 | }, 414 | { 415 | "id": 62, 416 | "type": "String", 417 | "pos": [ 418 | -1497.6567994365214, 419 | 461.7230381340444 420 | ], 421 | "size": [ 422 | 400, 423 | 200 424 | ], 425 | "flags": {}, 426 | "order": 7, 427 | "mode": 0, 428 | "inputs": [ 429 | { 430 | "localized_name": "String", 431 | "name": "String", 432 | "type": "STRING", 433 | "widget": { 434 | "name": "String" 435 | }, 436 | "link": null 437 | } 438 | ], 
439 | "outputs": [ 440 | { 441 | "localized_name": "STRING", 442 | "name": "STRING", 443 | "type": "STRING", 444 | "links": [ 445 | 159 446 | ] 447 | } 448 | ], 449 | "properties": { 450 | "cnr_id": "ComfyLiterals", 451 | "ver": "bdddb08ca82d90d75d97b1d437a652e0284a32ac", 452 | "Node name for S&R": "String", 453 | "ue_properties": { 454 | "widget_ue_connectable": {}, 455 | "input_ue_unconnectable": {}, 456 | "version": "7.5.1" 457 | } 458 | }, 459 | "widgets_values": [ 460 | "zwxem painting of a city under a starry night sky" 461 | ] 462 | }, 463 | { 464 | "id": 67, 465 | "type": "PrimitiveString", 466 | "pos": [ 467 | -623.94096059178, 468 | 508.5040753151047 469 | ], 470 | "size": [ 471 | 399.114163815623, 472 | 60.827317455816456 473 | ], 474 | "flags": { 475 | "collapsed": false 476 | }, 477 | "order": 8, 478 | "mode": 0, 479 | "inputs": [ 480 | { 481 | "localized_name": "value", 482 | "name": "value", 483 | "type": "STRING", 484 | "widget": { 485 | "name": "value" 486 | }, 487 | "link": null 488 | } 489 | ], 490 | "outputs": [ 491 | { 492 | "localized_name": "STRING", 493 | "name": "STRING", 494 | "type": "STRING", 495 | "links": null 496 | } 497 | ], 498 | "title": "<----- Point me at your AI-Toolkit install", 499 | "properties": { 500 | "cnr_id": "comfy-core", 501 | "ver": "0.3.75", 502 | "Node name for S&R": "PrimitiveString", 503 | "ue_properties": { 504 | "widget_ue_connectable": {}, 505 | "input_ue_unconnectable": {}, 506 | "version": "7.5.1" 507 | } 508 | }, 509 | "widgets_values": [ 510 | "<----- Point me at your AI-Toolkit install" 511 | ], 512 | "color": "#232", 513 | "bgcolor": "#353" 514 | }, 515 | { 516 | "id": 80, 517 | "type": "RealtimeLoraTrainer", 518 | "pos": [ 519 | -1034.6606275351087, 520 | 277.8110443780382 521 | ], 522 | "size": [ 523 | 400, 524 | 492 525 | ], 526 | "flags": {}, 527 | "order": 9, 528 | "mode": 0, 529 | "inputs": [ 530 | { 531 | "localized_name": "image_1", 532 | "name": "image_1", 533 | "shape": 7, 534 | "type": "IMAGE", 535 | "link": 152 536 | }, 537 | { 538 | "localized_name": "caption_1", 539 | "name": "caption_1", 540 | "shape": 7, 541 | "type": "STRING", 542 | "link": 156 543 | }, 544 | { 545 | "localized_name": "image_2", 546 | "name": "image_2", 547 | "shape": 7, 548 | "type": "IMAGE", 549 | "link": 153 550 | }, 551 | { 552 | "localized_name": "caption_2", 553 | "name": "caption_2", 554 | "shape": 7, 555 | "type": "STRING", 556 | "link": 157 557 | }, 558 | { 559 | "localized_name": "image_3", 560 | "name": "image_3", 561 | "shape": 7, 562 | "type": "IMAGE", 563 | "link": 154 564 | }, 565 | { 566 | "localized_name": "caption_3", 567 | "name": "caption_3", 568 | "shape": 7, 569 | "type": "STRING", 570 | "link": 158 571 | }, 572 | { 573 | "localized_name": "image_4", 574 | "name": "image_4", 575 | "shape": 7, 576 | "type": "IMAGE", 577 | "link": 155 578 | }, 579 | { 580 | "localized_name": "caption_4", 581 | "name": "caption_4", 582 | "shape": 7, 583 | "type": "STRING", 584 | "link": 159 585 | }, 586 | { 587 | "localized_name": "inputcount", 588 | "name": "inputcount", 589 | "type": "INT", 590 | "widget": { 591 | "name": "inputcount" 592 | }, 593 | "link": null 594 | }, 595 | { 596 | "localized_name": "images_path", 597 | "name": "images_path", 598 | "type": "STRING", 599 | "widget": { 600 | "name": "images_path" 601 | }, 602 | "link": null 603 | }, 604 | { 605 | "localized_name": "architecture", 606 | "name": "architecture", 607 | "type": "COMBO", 608 | "widget": { 609 | "name": "architecture" 610 | }, 611 | "link": null 612 | }, 613 | { 
614 | "localized_name": "ai_toolkit_path", 615 | "name": "ai_toolkit_path", 616 | "type": "STRING", 617 | "widget": { 618 | "name": "ai_toolkit_path" 619 | }, 620 | "link": null 621 | }, 622 | { 623 | "localized_name": "caption", 624 | "name": "caption", 625 | "type": "STRING", 626 | "widget": { 627 | "name": "caption" 628 | }, 629 | "link": null 630 | }, 631 | { 632 | "localized_name": "training_steps", 633 | "name": "training_steps", 634 | "type": "INT", 635 | "widget": { 636 | "name": "training_steps" 637 | }, 638 | "link": null 639 | }, 640 | { 641 | "localized_name": "learning_rate", 642 | "name": "learning_rate", 643 | "type": "FLOAT", 644 | "widget": { 645 | "name": "learning_rate" 646 | }, 647 | "link": null 648 | }, 649 | { 650 | "localized_name": "lora_rank", 651 | "name": "lora_rank", 652 | "type": "INT", 653 | "widget": { 654 | "name": "lora_rank" 655 | }, 656 | "link": null 657 | }, 658 | { 659 | "localized_name": "vram_mode", 660 | "name": "vram_mode", 661 | "type": "COMBO", 662 | "widget": { 663 | "name": "vram_mode" 664 | }, 665 | "link": null 666 | }, 667 | { 668 | "localized_name": "keep_lora", 669 | "name": "keep_lora", 670 | "type": "BOOLEAN", 671 | "widget": { 672 | "name": "keep_lora" 673 | }, 674 | "link": null 675 | }, 676 | { 677 | "localized_name": "output_name", 678 | "name": "output_name", 679 | "type": "STRING", 680 | "widget": { 681 | "name": "output_name" 682 | }, 683 | "link": null 684 | } 685 | ], 686 | "outputs": [ 687 | { 688 | "localized_name": "lora_path", 689 | "name": "lora_path", 690 | "type": "STRING", 691 | "links": [ 692 | 162 693 | ] 694 | } 695 | ], 696 | "properties": { 697 | "aux_id": "shootthesound/comfyUI-Realtime-Lora", 698 | "ver": "7c5d8e9358c7be1a4995a56fede6d8c3fd534616", 699 | "ue_properties": { 700 | "widget_ue_connectable": {}, 701 | "input_ue_unconnectable": {}, 702 | "version": "7.5.1" 703 | } 704 | }, 705 | "widgets_values": [ 706 | 4, 707 | null, 708 | "", 709 | "Z-Image Turbo", 710 | "S:\\Auto\\Aitoolkit\\ai-toolkit", 711 | "", 712 | 500, 713 | 0.0005, 714 | 16, 715 | "Low (768px)", 716 | true, 717 | "MyLora" 718 | ], 719 | "ndSuperSelectorEnabled": false, 720 | "ndPowerEnabled": false 721 | }, 722 | { 723 | "id": 79, 724 | "type": "ShowText|pysssss", 725 | "pos": [ 726 | -561.6217376724688, 727 | 243.18298495101558 728 | ], 729 | "size": [ 730 | 253.14566331300148, 731 | 96.37108328003751 732 | ], 733 | "flags": {}, 734 | "order": 10, 735 | "mode": 0, 736 | "inputs": [ 737 | { 738 | "localized_name": "text", 739 | "name": "text", 740 | "type": "STRING", 741 | "link": 162 742 | } 743 | ], 744 | "outputs": [ 745 | { 746 | "localized_name": "STRING", 747 | "name": "STRING", 748 | "shape": 6, 749 | "type": "STRING", 750 | "links": null 751 | } 752 | ], 753 | "title": "Where The Lora is Saved", 754 | "properties": { 755 | "cnr_id": "comfyui-custom-scripts", 756 | "ver": "1.2.5", 757 | "Node name for S&R": "ShowText|pysssss", 758 | "ue_properties": { 759 | "widget_ue_connectable": {}, 760 | "input_ue_unconnectable": {}, 761 | "version": "7.5.1" 762 | } 763 | }, 764 | "widgets_values": [ 765 | "MyLora_20251205_170614" 766 | ] 767 | } 768 | ], 769 | "links": [ 770 | [ 771 | 152, 772 | 29, 773 | 0, 774 | 80, 775 | 0, 776 | "IMAGE" 777 | ], 778 | [ 779 | 153, 780 | 30, 781 | 0, 782 | 80, 783 | 2, 784 | "IMAGE" 785 | ], 786 | [ 787 | 154, 788 | 32, 789 | 0, 790 | 80, 791 | 4, 792 | "IMAGE" 793 | ], 794 | [ 795 | 155, 796 | 31, 797 | 0, 798 | 80, 799 | 6, 800 | "IMAGE" 801 | ], 802 | [ 803 | 156, 804 | 59, 805 | 0, 806 | 80, 807 | 1, 808 | 
"STRING" 809 | ], 810 | [ 811 | 157, 812 | 61, 813 | 0, 814 | 80, 815 | 3, 816 | "STRING" 817 | ], 818 | [ 819 | 158, 820 | 60, 821 | 0, 822 | 80, 823 | 5, 824 | "STRING" 825 | ], 826 | [ 827 | 159, 828 | 62, 829 | 0, 830 | 80, 831 | 7, 832 | "STRING" 833 | ], 834 | [ 835 | 162, 836 | 80, 837 | 0, 838 | 79, 839 | 0, 840 | "STRING" 841 | ] 842 | ], 843 | "groups": [], 844 | "config": {}, 845 | "extra": { 846 | "ds": { 847 | "scale": 1.0610764609500019, 848 | "offset": [ 849 | 1953.8491407604374, 850 | 32.90206176810892 851 | ] 852 | }, 853 | "frontendVersion": "1.32.9", 854 | "workflowRendererVersion": "LG", 855 | "ue_links": [], 856 | "links_added_by_ue": [], 857 | "VHS_latentpreview": false, 858 | "VHS_latentpreviewrate": 0, 859 | "VHS_MetadataImage": true, 860 | "VHS_KeepIntermediate": true 861 | }, 862 | "version": 0.4 863 | } -------------------------------------------------------------------------------- /workflows/Qwen image Edit with Control images pairs.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "90b505ef-e012-49e3-b52f-7785d594687d", 3 | "revision": 0, 4 | "last_node_id": 31, 5 | "last_link_id": 38, 6 | "nodes": [ 7 | { 8 | "id": 12, 9 | "type": "LoraLoaderModelOnly", 10 | "pos": [ 11 | 644.0239841196558, 12 | -354.3081410800077 13 | ], 14 | "size": [ 15 | 270, 16 | 82 17 | ], 18 | "flags": {}, 19 | "order": 10, 20 | "mode": 0, 21 | "inputs": [ 22 | { 23 | "name": "model", 24 | "type": "MODEL", 25 | "link": 36 26 | } 27 | ], 28 | "outputs": [ 29 | { 30 | "name": "MODEL", 31 | "type": "MODEL", 32 | "links": [ 33 | 18 34 | ] 35 | } 36 | ], 37 | "properties": { 38 | "cnr_id": "comfy-core", 39 | "ver": "0.3.50", 40 | "Node name for S&R": "LoraLoaderModelOnly", 41 | "ue_properties": { 42 | "version": "7.5.1", 43 | "widget_ue_connectable": {}, 44 | "input_ue_unconnectable": {} 45 | } 46 | }, 47 | "widgets_values": [ 48 | "QWEN\\Qwen-Image-Lightning-8steps-V1.0.safetensors", 49 | 1 50 | ], 51 | "ndSuperSelectorEnabled": false, 52 | "ndPowerEnabled": false 53 | }, 54 | { 55 | "id": 10, 56 | "type": "VAEEncode", 57 | "pos": [ 58 | 953.4717999711048, 59 | -355.6999223078739 60 | ], 61 | "size": [ 62 | 140, 63 | 46 64 | ], 65 | "flags": {}, 66 | "order": 8, 67 | "mode": 0, 68 | "inputs": [ 69 | { 70 | "name": "pixels", 71 | "type": "IMAGE", 72 | "link": 15 73 | }, 74 | { 75 | "name": "vae", 76 | "type": "VAE", 77 | "link": 12 78 | } 79 | ], 80 | "outputs": [ 81 | { 82 | "name": "LATENT", 83 | "type": "LATENT", 84 | "links": [ 85 | 13 86 | ] 87 | } 88 | ], 89 | "properties": { 90 | "cnr_id": "comfy-core", 91 | "ver": "0.3.50", 92 | "Node name for S&R": "VAEEncode", 93 | "ue_properties": { 94 | "version": "7.5.1", 95 | "widget_ue_connectable": {}, 96 | "input_ue_unconnectable": {} 97 | } 98 | }, 99 | "widgets_values": [] 100 | }, 101 | { 102 | "id": 13, 103 | "type": "ConditioningZeroOut", 104 | "pos": [ 105 | 1042.0040135497372, 106 | -207.15373697183819 107 | ], 108 | "size": [ 109 | 197.712890625, 110 | 26 111 | ], 112 | "flags": {}, 113 | "order": 12, 114 | "mode": 0, 115 | "inputs": [ 116 | { 117 | "name": "conditioning", 118 | "type": "CONDITIONING", 119 | "link": 19 120 | } 121 | ], 122 | "outputs": [ 123 | { 124 | "name": "CONDITIONING", 125 | "type": "CONDITIONING", 126 | "links": [ 127 | 20 128 | ] 129 | } 130 | ], 131 | "properties": { 132 | "cnr_id": "comfy-core", 133 | "ver": "0.3.50", 134 | "Node name for S&R": "ConditioningZeroOut", 135 | "ue_properties": { 136 | "version": "7.5.1", 137 | "widget_ue_connectable": {}, 
138 | "input_ue_unconnectable": {} 139 | } 140 | }, 141 | "widgets_values": [] 142 | }, 143 | { 144 | "id": 5, 145 | "type": "KSampler", 146 | "pos": [ 147 | 1285.0049908213587, 148 | -367.0989352755915 149 | ], 150 | "size": [ 151 | 270, 152 | 262 153 | ], 154 | "flags": {}, 155 | "order": 13, 156 | "mode": 0, 157 | "inputs": [ 158 | { 159 | "name": "model", 160 | "type": "MODEL", 161 | "link": 18 162 | }, 163 | { 164 | "name": "positive", 165 | "type": "CONDITIONING", 166 | "link": 4 167 | }, 168 | { 169 | "name": "negative", 170 | "type": "CONDITIONING", 171 | "link": 20 172 | }, 173 | { 174 | "name": "latent_image", 175 | "type": "LATENT", 176 | "link": 13 177 | } 178 | ], 179 | "outputs": [ 180 | { 181 | "name": "LATENT", 182 | "type": "LATENT", 183 | "links": [ 184 | 7 185 | ] 186 | } 187 | ], 188 | "properties": { 189 | "cnr_id": "comfy-core", 190 | "ver": "0.3.50", 191 | "Node name for S&R": "KSampler", 192 | "ue_properties": { 193 | "version": "7.5.1", 194 | "widget_ue_connectable": {}, 195 | "input_ue_unconnectable": {} 196 | } 197 | }, 198 | "widgets_values": [ 199 | 649226397527985, 200 | "randomize", 201 | 8, 202 | 1, 203 | "heun", 204 | "beta57", 205 | 1 206 | ] 207 | }, 208 | { 209 | "id": 7, 210 | "type": "VAEDecode", 211 | "pos": [ 212 | 1595.644407381857, 213 | -354.22357897066667 214 | ], 215 | "size": [ 216 | 140, 217 | 46 218 | ], 219 | "flags": {}, 220 | "order": 14, 221 | "mode": 0, 222 | "inputs": [ 223 | { 224 | "name": "samples", 225 | "type": "LATENT", 226 | "link": 7 227 | }, 228 | { 229 | "name": "vae", 230 | "type": "VAE", 231 | "link": 8 232 | } 233 | ], 234 | "outputs": [ 235 | { 236 | "name": "IMAGE", 237 | "type": "IMAGE", 238 | "links": [ 239 | 21 240 | ] 241 | } 242 | ], 243 | "properties": { 244 | "cnr_id": "comfy-core", 245 | "ver": "0.3.50", 246 | "Node name for S&R": "VAEDecode", 247 | "ue_properties": { 248 | "version": "7.5.1", 249 | "widget_ue_connectable": {}, 250 | "input_ue_unconnectable": {} 251 | } 252 | }, 253 | "widgets_values": [] 254 | }, 255 | { 256 | "id": 14, 257 | "type": "SaveImage", 258 | "pos": [ 259 | 1339.408257031673, 260 | -42.357345335487985 261 | ], 262 | "size": [ 263 | 969.0353921676067, 264 | 530.1054687722498 265 | ], 266 | "flags": {}, 267 | "order": 15, 268 | "mode": 0, 269 | "inputs": [ 270 | { 271 | "name": "images", 272 | "type": "IMAGE", 273 | "link": 21 274 | } 275 | ], 276 | "outputs": [], 277 | "properties": { 278 | "cnr_id": "comfy-core", 279 | "ver": "0.3.75", 280 | "Node name for S&R": "SaveImage", 281 | "ue_properties": { 282 | "widget_ue_connectable": {}, 283 | "input_ue_unconnectable": {}, 284 | "version": "7.5.1" 285 | } 286 | }, 287 | "widgets_values": [ 288 | "ComfyUI" 289 | ] 290 | }, 291 | { 292 | "id": 6, 293 | "type": "UNETLoader", 294 | "pos": [ 295 | 3.9451035142017754, 296 | -393.7557962222563 297 | ], 298 | "size": [ 299 | 270, 300 | 82 301 | ], 302 | "flags": {}, 303 | "order": 0, 304 | "mode": 0, 305 | "inputs": [], 306 | "outputs": [ 307 | { 308 | "name": "MODEL", 309 | "type": "MODEL", 310 | "links": [ 311 | 35 312 | ] 313 | } 314 | ], 315 | "properties": { 316 | "cnr_id": "comfy-core", 317 | "ver": "0.3.50", 318 | "Node name for S&R": "UNETLoader", 319 | "ue_properties": { 320 | "version": "7.5.1", 321 | "widget_ue_connectable": {}, 322 | "input_ue_unconnectable": {} 323 | } 324 | }, 325 | "widgets_values": [ 326 | "qwen_image_edit_2509_fp8_e4m3fn.safetensors", 327 | "default" 328 | ], 329 | "ndSuperSelectorEnabled": false, 330 | "ndPowerEnabled": false 331 | }, 332 | { 333 | "id": 4, 334 | 
"type": "LoadImage", 335 | "pos": [ 336 | 327.4657185202677, 337 | 40.13457054590721 338 | ], 339 | "size": [ 340 | 678.1082540771511, 341 | 453.8758513671887 342 | ], 343 | "flags": {}, 344 | "order": 1, 345 | "mode": 0, 346 | "inputs": [], 347 | "outputs": [ 348 | { 349 | "name": "IMAGE", 350 | "type": "IMAGE", 351 | "links": [ 352 | 14 353 | ] 354 | }, 355 | { 356 | "name": "MASK", 357 | "type": "MASK", 358 | "links": null 359 | } 360 | ], 361 | "properties": { 362 | "cnr_id": "comfy-core", 363 | "ver": "0.3.50", 364 | "Node name for S&R": "LoadImage", 365 | "ue_properties": { 366 | "version": "7.5.1", 367 | "widget_ue_connectable": {}, 368 | "input_ue_unconnectable": {} 369 | } 370 | }, 371 | "widgets_values": [ 372 | "womanwalking.jpg", 373 | "image" 374 | ] 375 | }, 376 | { 377 | "id": 11, 378 | "type": "ImageScaleToTotalPixels", 379 | "pos": [ 380 | 1055.901256671691, 381 | 29.179887216972233 382 | ], 383 | "size": [ 384 | 270, 385 | 82 386 | ], 387 | "flags": {}, 388 | "order": 6, 389 | "mode": 0, 390 | "inputs": [ 391 | { 392 | "name": "image", 393 | "type": "IMAGE", 394 | "link": 14 395 | } 396 | ], 397 | "outputs": [ 398 | { 399 | "name": "IMAGE", 400 | "type": "IMAGE", 401 | "links": [ 402 | 15, 403 | 16 404 | ] 405 | } 406 | ], 407 | "properties": { 408 | "cnr_id": "comfy-core", 409 | "ver": "0.3.50", 410 | "Node name for S&R": "ImageScaleToTotalPixels", 411 | "ue_properties": { 412 | "version": "7.5.1", 413 | "widget_ue_connectable": {}, 414 | "input_ue_unconnectable": {} 415 | } 416 | }, 417 | "widgets_values": [ 418 | "nearest-exact", 419 | 1 420 | ] 421 | }, 422 | { 423 | "id": 2, 424 | "type": "CLIPLoader", 425 | "pos": [ 426 | 12.939562452497324, 427 | -266.96841174320457 428 | ], 429 | "size": [ 430 | 270, 431 | 106 432 | ], 433 | "flags": {}, 434 | "order": 2, 435 | "mode": 0, 436 | "inputs": [], 437 | "outputs": [ 438 | { 439 | "name": "CLIP", 440 | "type": "CLIP", 441 | "links": [ 442 | 1 443 | ] 444 | } 445 | ], 446 | "properties": { 447 | "cnr_id": "comfy-core", 448 | "ver": "0.3.50", 449 | "Node name for S&R": "CLIPLoader", 450 | "ue_properties": { 451 | "version": "7.5.1", 452 | "widget_ue_connectable": {}, 453 | "input_ue_unconnectable": {} 454 | } 455 | }, 456 | "widgets_values": [ 457 | "qwen_2.5_vl_7b_fp8_scaled.safetensors", 458 | "qwen_image", 459 | "default" 460 | ], 461 | "ndSuperSelectorEnabled": false, 462 | "ndPowerEnabled": false 463 | }, 464 | { 465 | "id": 3, 466 | "type": "VAELoader", 467 | "pos": [ 468 | 9.734899243175214, 469 | -111.00086603628145 470 | ], 471 | "size": [ 472 | 270, 473 | 58 474 | ], 475 | "flags": {}, 476 | "order": 3, 477 | "mode": 0, 478 | "inputs": [], 479 | "outputs": [ 480 | { 481 | "name": "VAE", 482 | "type": "VAE", 483 | "links": [ 484 | 2, 485 | 8, 486 | 12 487 | ] 488 | } 489 | ], 490 | "properties": { 491 | "cnr_id": "comfy-core", 492 | "ver": "0.3.50", 493 | "Node name for S&R": "VAELoader", 494 | "ue_properties": { 495 | "version": "7.5.1", 496 | "widget_ue_connectable": {}, 497 | "input_ue_unconnectable": {} 498 | } 499 | }, 500 | "widgets_values": [ 501 | "qwen_image_vae.safetensors" 502 | ], 503 | "ndSuperSelectorEnabled": false, 504 | "ndPowerEnabled": false 505 | }, 506 | { 507 | "id": 28, 508 | "type": "ApplyTrainedLora", 509 | "pos": [ 510 | 301.78272972943705, 511 | -600.4167828704855 512 | ], 513 | "size": [ 514 | 270, 515 | 98 516 | ], 517 | "flags": {}, 518 | "order": 7, 519 | "mode": 0, 520 | "inputs": [ 521 | { 522 | "name": "model", 523 | "type": "MODEL", 524 | "link": 35 525 | }, 526 | { 527 | "name": 
"lora_path", 528 | "type": "STRING", 529 | "link": 38 530 | } 531 | ], 532 | "outputs": [ 533 | { 534 | "name": "model", 535 | "type": "MODEL", 536 | "links": [ 537 | 36 538 | ] 539 | }, 540 | { 541 | "name": "lora_name", 542 | "type": "STRING", 543 | "links": null 544 | }, 545 | { 546 | "name": "lora_path", 547 | "type": "STRING", 548 | "links": [ 549 | 37 550 | ] 551 | } 552 | ], 553 | "properties": { 554 | "aux_id": "ShootTheSound/comfyUI-Realtime-Lora", 555 | "ver": "bf44ba8ffde7ac74b4b270040180622c05c02b89", 556 | "Node name for S&R": "ApplyTrainedLora", 557 | "ue_properties": { 558 | "widget_ue_connectable": {}, 559 | "input_ue_unconnectable": {}, 560 | "version": "7.5.1" 561 | } 562 | }, 563 | "widgets_values": [ 564 | 1 565 | ] 566 | }, 567 | { 568 | "id": 29, 569 | "type": "ShowText|pysssss", 570 | "pos": [ 571 | 588.5406890814451, 572 | -597.5089417077055 573 | ], 574 | "size": [ 575 | 253.14566331300148, 576 | 96.37108328003751 577 | ], 578 | "flags": {}, 579 | "order": 11, 580 | "mode": 0, 581 | "inputs": [ 582 | { 583 | "name": "text", 584 | "type": "STRING", 585 | "link": 37 586 | } 587 | ], 588 | "outputs": [ 589 | { 590 | "name": "STRING", 591 | "shape": 6, 592 | "type": "STRING", 593 | "links": null 594 | } 595 | ], 596 | "title": "Where The Lora is Saved", 597 | "properties": { 598 | "cnr_id": "comfyui-custom-scripts", 599 | "ver": "1.2.5", 600 | "Node name for S&R": "ShowText|pysssss", 601 | "ue_properties": { 602 | "widget_ue_connectable": {}, 603 | "input_ue_unconnectable": {}, 604 | "version": "7.5.1" 605 | } 606 | }, 607 | "widgets_values": [ 608 | "s:\\auto\\musubi-tuner\\output\\MyQwenLora_20251211_222716.safetensors" 609 | ] 610 | }, 611 | { 612 | "id": 1, 613 | "type": "TextEncodeQwenImageEdit", 614 | "pos": [ 615 | 635.4607776543927, 616 | -225.86330686346383 617 | ], 618 | "size": [ 619 | 400, 620 | 200 621 | ], 622 | "flags": {}, 623 | "order": 9, 624 | "mode": 0, 625 | "inputs": [ 626 | { 627 | "name": "clip", 628 | "type": "CLIP", 629 | "link": 1 630 | }, 631 | { 632 | "name": "vae", 633 | "shape": 7, 634 | "type": "VAE", 635 | "link": 2 636 | }, 637 | { 638 | "name": "image", 639 | "shape": 7, 640 | "type": "IMAGE", 641 | "link": 16 642 | } 643 | ], 644 | "outputs": [ 645 | { 646 | "name": "CONDITIONING", 647 | "type": "CONDITIONING", 648 | "links": [ 649 | 4, 650 | 19 651 | ] 652 | } 653 | ], 654 | "properties": { 655 | "cnr_id": "comfy-core", 656 | "ver": "0.3.50", 657 | "Node name for S&R": "TextEncodeQwenImageEdit", 658 | "ue_properties": { 659 | "version": "7.5.1", 660 | "widget_ue_connectable": {}, 661 | "input_ue_unconnectable": {} 662 | } 663 | }, 664 | "widgets_values": [ 665 | "add a red hat on the woman" 666 | ] 667 | }, 668 | { 669 | "id": 30, 670 | "type": "Note", 671 | "pos": [ 672 | 853.5180329067512, 673 | 582.2755531681021 674 | ], 675 | "size": [ 676 | 659.3771760560459, 677 | 417.01525595841304 678 | ], 679 | "flags": {}, 680 | "order": 4, 681 | "mode": 0, 682 | "inputs": [], 683 | "outputs": [], 684 | "properties": { 685 | "ue_properties": { 686 | "widget_ue_connectable": {}, 687 | "version": "7.5.1", 688 | "input_ue_unconnectable": {} 689 | } 690 | }, 691 | "widgets_values": [ 692 | "This workflow for edit loras that use control images too. Paste in your paths. You'll need to provide your own dataset for this workflow, but i've described exactly what you have to do below.\n\nImages Path contains your target/result images (the \"after\" or post-edit images) along with .txt caption files describing the edit instruction. 
\n\nControl Images Path contains the source images (the \"before\" or pre-edit images) that get edited. Images are matched by filename between the two folders. \n\nFor example, if you want to train the model to add hats: place the original photos in Control Images Path, place the edited photos with hats in Images Path, and create matching .txt files in Images \npath with captions like \"add a red hat\". \n\nThe model learns: given this source image + this instruction → produce this result.\n___\n\nPath to you msubi tuner install\n\nSelect edit model mode \n\nQwen Image Edit 2509 model you need for training:\nhttps://huggingface.co/Comfy-Org/Qwen-Image-Edit_ComfyUI/blob/main/split_files/diffusion_models/qwen_image_edit_2509_bf16.safetensors\n\nYour Qwen image vae\n\nQwen image Text encoder, qwen_2.5_vl_7b.safetensors (not fp8 etc)\nhttps://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI/blob/main/split_files/text_encoders/qwen_2.5_vl_7b.safetensors\n\n______\nNote the Agressive block swapping, feel free to lower!!\n" 693 | ], 694 | "color": "#432", 695 | "bgcolor": "#653" 696 | }, 697 | { 698 | "id": 31, 699 | "type": "MusubiQwenImageEditLoraTrainer", 700 | "pos": [ 701 | 325.8507575418658, 702 | 619.8029813120203 703 | ], 704 | "size": [ 705 | 459.88046875, 706 | 370 707 | ], 708 | "flags": {}, 709 | "order": 5, 710 | "mode": 0, 711 | "inputs": [], 712 | "outputs": [ 713 | { 714 | "name": "lora_path", 715 | "type": "STRING", 716 | "links": [ 717 | 38 718 | ] 719 | } 720 | ], 721 | "properties": { 722 | "aux_id": "ShootTheSound/comfyUI-Realtime-Lora", 723 | "ver": "bf44ba8ffde7ac74b4b270040180622c05c02b89", 724 | "Node name for S&R": "MusubiQwenImageEditLoraTrainer", 725 | "ue_properties": { 726 | "widget_ue_connectable": {}, 727 | "input_ue_unconnectable": {}, 728 | "version": "7.5.1" 729 | } 730 | }, 731 | "widgets_values": [ 732 | "c:\\path1", 733 | "c:\\path2", 734 | "S:\\Auto\\musubi-tuner", 735 | "Qwen-Image-Edit-2509", 736 | "qwen_image_edit_2509_bf16.safetensors", 737 | "qwen_image_vae.safetensors", 738 | "qwen_2.5_vl_7b.safetensors", 739 | 500, 740 | 0.0003, 741 | 16, 742 | "Medium (768px) fp8", 743 | "40", 744 | true, 745 | "MyQwenEditLora" 746 | ], 747 | "ndSuperSelectorEnabled": false, 748 | "ndPowerEnabled": false 749 | } 750 | ], 751 | "links": [ 752 | [ 753 | 1, 754 | 2, 755 | 0, 756 | 1, 757 | 0, 758 | "CLIP" 759 | ], 760 | [ 761 | 2, 762 | 3, 763 | 0, 764 | 1, 765 | 1, 766 | "VAE" 767 | ], 768 | [ 769 | 4, 770 | 1, 771 | 0, 772 | 5, 773 | 1, 774 | "CONDITIONING" 775 | ], 776 | [ 777 | 7, 778 | 5, 779 | 0, 780 | 7, 781 | 0, 782 | "LATENT" 783 | ], 784 | [ 785 | 8, 786 | 3, 787 | 0, 788 | 7, 789 | 1, 790 | "VAE" 791 | ], 792 | [ 793 | 12, 794 | 3, 795 | 0, 796 | 10, 797 | 1, 798 | "VAE" 799 | ], 800 | [ 801 | 13, 802 | 10, 803 | 0, 804 | 5, 805 | 3, 806 | "LATENT" 807 | ], 808 | [ 809 | 14, 810 | 4, 811 | 0, 812 | 11, 813 | 0, 814 | "IMAGE" 815 | ], 816 | [ 817 | 15, 818 | 11, 819 | 0, 820 | 10, 821 | 0, 822 | "IMAGE" 823 | ], 824 | [ 825 | 16, 826 | 11, 827 | 0, 828 | 1, 829 | 2, 830 | "IMAGE" 831 | ], 832 | [ 833 | 18, 834 | 12, 835 | 0, 836 | 5, 837 | 0, 838 | "MODEL" 839 | ], 840 | [ 841 | 19, 842 | 1, 843 | 0, 844 | 13, 845 | 0, 846 | "CONDITIONING" 847 | ], 848 | [ 849 | 20, 850 | 13, 851 | 0, 852 | 5, 853 | 2, 854 | "CONDITIONING" 855 | ], 856 | [ 857 | 21, 858 | 7, 859 | 0, 860 | 14, 861 | 0, 862 | "IMAGE" 863 | ], 864 | [ 865 | 35, 866 | 6, 867 | 0, 868 | 28, 869 | 0, 870 | "MODEL" 871 | ], 872 | [ 873 | 36, 874 | 28, 875 | 0, 876 | 12, 877 | 0, 878 | "MODEL" 879 | ], 
880 | [ 881 | 37, 882 | 28, 883 | 2, 884 | 29, 885 | 0, 886 | "STRING" 887 | ], 888 | [ 889 | 38, 890 | 31, 891 | 0, 892 | 28, 893 | 1, 894 | "STRING" 895 | ] 896 | ], 897 | "groups": [], 898 | "config": {}, 899 | "extra": { 900 | "ds": { 901 | "scale": 0.7972024500000015, 902 | "offset": [ 903 | -320.59922468723863, 904 | 160.69452110431973 905 | ] 906 | }, 907 | "frontendVersion": "1.32.9", 908 | "VHS_latentpreview": false, 909 | "VHS_latentpreviewrate": 0, 910 | "VHS_MetadataImage": true, 911 | "VHS_KeepIntermediate": true, 912 | "ue_links": [], 913 | "links_added_by_ue": [], 914 | "workflowRendererVersion": "LG" 915 | }, 916 | "version": 0.4 917 | } -------------------------------------------------------------------------------- /sd15_lora_trainer.py: -------------------------------------------------------------------------------- 1 | """ 2 | SD 1.5 LoRA Trainer Node for ComfyUI 3 | 4 | Trains SD 1.5 LoRAs using kohya-ss/sd-scripts. 5 | Completely independent from the AI-Toolkit based trainer. 6 | """ 7 | 8 | import os 9 | import sys 10 | import json 11 | import hashlib 12 | import tempfile 13 | import shutil 14 | import subprocess 15 | from datetime import datetime 16 | import numpy as np 17 | from PIL import Image 18 | 19 | import folder_paths 20 | 21 | from .sd15_config_template import ( 22 | generate_sd15_training_config, 23 | save_config, 24 | SD15_VRAM_PRESETS, 25 | ) 26 | 27 | 28 | # Global config for SD 1.5 trainer 29 | _sd15_config = {} 30 | _sd15_config_file = os.path.join(os.path.dirname(__file__), ".sd15_config.json") 31 | 32 | # Global cache for trained LoRAs 33 | _sd15_lora_cache = {} 34 | _sd15_cache_file = os.path.join(os.path.dirname(__file__), ".sd15_lora_cache.json") 35 | 36 | 37 | def _load_sd15_config(): 38 | """Load SD 1.5 config from disk.""" 39 | global _sd15_config 40 | if os.path.exists(_sd15_config_file): 41 | try: 42 | with open(_sd15_config_file, 'r', encoding='utf-8') as f: 43 | _sd15_config = json.load(f) 44 | except: 45 | _sd15_config = {} 46 | 47 | 48 | def _save_sd15_config(): 49 | """Save SD 1.5 config to disk.""" 50 | try: 51 | with open(_sd15_config_file, 'w', encoding='utf-8') as f: 52 | json.dump(_sd15_config, f, indent=2) 53 | except: 54 | pass 55 | 56 | 57 | def _load_sd15_cache(): 58 | """Load SD 1.5 LoRA cache from disk.""" 59 | global _sd15_lora_cache 60 | if os.path.exists(_sd15_cache_file): 61 | try: 62 | with open(_sd15_cache_file, 'r', encoding='utf-8') as f: 63 | _sd15_lora_cache = json.load(f) 64 | except: 65 | _sd15_lora_cache = {} 66 | 67 | 68 | def _save_sd15_cache(): 69 | """Save SD 1.5 LoRA cache to disk.""" 70 | try: 71 | with open(_sd15_cache_file, 'w', encoding='utf-8') as f: 72 | json.dump(_sd15_lora_cache, f) 73 | except: 74 | pass 75 | 76 | 77 | def _compute_image_hash(images, captions, training_steps, learning_rate, lora_rank, vram_mode, output_name, use_folder_path=False): 78 | """Compute a hash of all images, captions, and training parameters.""" 79 | hasher = hashlib.sha256() 80 | 81 | if use_folder_path: 82 | # For folder paths, hash the file paths and modification times 83 | for img_path in images: 84 | hasher.update(img_path.encode('utf-8')) 85 | if os.path.exists(img_path): 86 | hasher.update(str(os.path.getmtime(img_path)).encode('utf-8')) 87 | else: 88 | # For tensor inputs, hash the image data 89 | for img_tensor in images: 90 | img_np = (img_tensor[0].cpu().numpy() * 255).astype(np.uint8) 91 | img_bytes = img_np.tobytes() 92 | hasher.update(img_bytes) 93 | 94 | # Include all captions in hash 95 | captions_str = 
"|".join(captions) 96 | params_str = f"sd15|{captions_str}|{training_steps}|{learning_rate}|{lora_rank}|{vram_mode}|{output_name}|{len(images)}" 97 | hasher.update(params_str.encode('utf-8')) 98 | 99 | return hasher.hexdigest()[:16] 100 | 101 | 102 | def _get_venv_python_path(sd_scripts_path): 103 | """Get the Python path for sd-scripts venv based on platform. 104 | Checks both .venv (uv default) and venv (traditional) folders.""" 105 | venv_folders = [".venv", "venv"] 106 | 107 | for venv_folder in venv_folders: 108 | if sys.platform == 'win32': 109 | python_path = os.path.join(sd_scripts_path, venv_folder, "Scripts", "python.exe") 110 | else: 111 | python_path = os.path.join(sd_scripts_path, venv_folder, "bin", "python") 112 | 113 | if os.path.exists(python_path): 114 | return python_path 115 | 116 | # Return traditional path for error messaging 117 | if sys.platform == 'win32': 118 | return os.path.join(sd_scripts_path, "venv", "Scripts", "python.exe") 119 | else: 120 | return os.path.join(sd_scripts_path, "venv", "bin", "python") 121 | 122 | 123 | def _get_accelerate_path(sd_scripts_path): 124 | """Get the accelerate path for sd-scripts venv based on platform. 125 | Checks both .venv (uv default) and venv (traditional) folders.""" 126 | venv_folders = [".venv", "venv"] 127 | 128 | for venv_folder in venv_folders: 129 | if sys.platform == 'win32': 130 | accel_path = os.path.join(sd_scripts_path, venv_folder, "Scripts", "accelerate.exe") 131 | else: 132 | accel_path = os.path.join(sd_scripts_path, venv_folder, "bin", "accelerate") 133 | 134 | if os.path.exists(accel_path): 135 | return accel_path 136 | 137 | # Return traditional path for error messaging 138 | if sys.platform == 'win32': 139 | return os.path.join(sd_scripts_path, "venv", "Scripts", "accelerate.exe") 140 | else: 141 | return os.path.join(sd_scripts_path, "venv", "bin", "accelerate") 142 | 143 | 144 | # Load config and cache on module import 145 | _load_sd15_config() 146 | _load_sd15_cache() 147 | 148 | 149 | class SD15LoraTrainer: 150 | """ 151 | Trains an SD 1.5 LoRA from one or more images using kohya sd-scripts. 152 | """ 153 | 154 | def __init__(self): 155 | pass 156 | 157 | @classmethod 158 | def INPUT_TYPES(s): 159 | # Get saved settings or use defaults 160 | if sys.platform == 'win32': 161 | sd_scripts_fallback = 'S:\\Auto\\sd-scripts' 162 | else: 163 | sd_scripts_fallback = '~/sd-scripts' 164 | 165 | saved = _sd15_config.get('trainer_settings', {}) 166 | 167 | # Get list of checkpoints from ComfyUI 168 | checkpoints = folder_paths.get_filename_list("checkpoints") 169 | 170 | return { 171 | "required": { 172 | "inputcount": ("INT", {"default": 4, "min": 1, "max": 100, "step": 1, 173 | "tooltip": "Number of image inputs. Click 'Update inputs' button after changing."}), 174 | "images_path": ("STRING", { 175 | "default": "", 176 | "tooltip": "Optional: Path to folder containing training images. If provided, images from this folder are used instead of image inputs. Caption .txt files with matching names are used if present." 177 | }), 178 | "sd_scripts_path": ("STRING", { 179 | "default": _sd15_config.get('sd_scripts_path', sd_scripts_fallback), 180 | "tooltip": "Path to kohya sd-scripts installation." 181 | }), 182 | "ckpt_name": (checkpoints, { 183 | "tooltip": "SD 1.5 checkpoint to train LoRA on." 184 | }), 185 | "caption": ("STRING", { 186 | "default": saved.get('caption', "photo of subject"), 187 | "multiline": True, 188 | "tooltip": "Default caption for all images. Per-image caption inputs override this." 
189 | }), 190 | "training_steps": ("INT", { 191 | "default": saved.get('training_steps', 500), 192 | "min": 10, 193 | "max": 5000, 194 | "step": 10, 195 | "tooltip": "Number of training steps. 500 is a good starting point. Increase for more images or complex subjects." 196 | }), 197 | "learning_rate": ("FLOAT", { 198 | "default": saved.get('learning_rate', 0.0005), 199 | "min": 0.00001, 200 | "max": 0.1, 201 | "step": 0.00001, 202 | "tooltip": "Learning rate. 0.0005 trains fast but may overshoot. Experiment with lowering for more stable/slower training." 203 | }), 204 | "lora_rank": ("INT", { 205 | "default": saved.get('lora_rank', 16), 206 | "min": 4, 207 | "max": 128, 208 | "step": 4, 209 | "tooltip": "LoRA rank/dimension. 16-32 typical. Higher = more capacity but larger file and more VRAM." 210 | }), 211 | "vram_mode": (["Min (256px)", "Low (512px)", "Max (768px)"], { 212 | "default": saved.get('vram_mode', "Low (512px)"), 213 | "tooltip": "VRAM optimization preset. Images are automatically resized to the specified resolution." 214 | }), 215 | "keep_lora": ("BOOLEAN", { 216 | "default": saved.get('keep_lora', True), 217 | "tooltip": "If True, keeps the trained LoRA file." 218 | }), 219 | "output_name": ("STRING", { 220 | "default": saved.get('output_name', "MyLora"), 221 | "tooltip": "Custom name for the output LoRA. Timestamp will be appended." 222 | }), 223 | "custom_python_exe": ("STRING", { 224 | "default": saved.get('custom_python_exe', ""), 225 | "tooltip": "Advanced: Optionally enter the full path to a custom python.exe (e.g. C:\\my-venv\\Scripts\\python.exe). If empty, uses the venv inside sd_scripts_path. The sd_scripts_path field is still required for locating training scripts." 226 | }), 227 | }, 228 | "optional": { 229 | "image_1": ("IMAGE", {"tooltip": "Training image (not needed if images_path is set)."}), 230 | "caption_1": ("STRING", {"forceInput": True, "tooltip": "Caption for image_1. Overrides default caption."}), 231 | "image_2": ("IMAGE", {"tooltip": "Training image."}), 232 | "caption_2": ("STRING", {"forceInput": True, "tooltip": "Caption for image_2. Overrides default caption."}), 233 | "image_3": ("IMAGE", {"tooltip": "Training image."}), 234 | "caption_3": ("STRING", {"forceInput": True, "tooltip": "Caption for image_3. Overrides default caption."}), 235 | "image_4": ("IMAGE", {"tooltip": "Training image."}), 236 | "caption_4": ("STRING", {"forceInput": True, "tooltip": "Caption for image_4. Overrides default caption."}), 237 | } 238 | } 239 | 240 | RETURN_TYPES = ("STRING",) 241 | RETURN_NAMES = ("lora_path",) 242 | OUTPUT_TOOLTIPS = ("Path to the trained SD 1.5 LoRA file.",) 243 | FUNCTION = "train_sd15_lora" 244 | CATEGORY = "loaders" 245 | DESCRIPTION = "Trains an SD 1.5 LoRA from images using kohya sd-scripts." 
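    # Sketch for orientation only (the temp path is hypothetical): the training method below
    # arranges the inputs into the kohya sd-scripts "repeats_class" dataset layout, e.g.
    #   <temp_dir>/
    #       1_subject/
    #           image_001.png
    #           image_001.txt   <- per-image caption, falling back to the default caption
    #           image_002.png
    #           image_002.txt
    # and then launches train_network.py through the venv's accelerate using a generated TOML config.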
246 | 247 | def train_sd15_lora( 248 | self, 249 | inputcount, 250 | images_path, 251 | sd_scripts_path, 252 | ckpt_name, 253 | caption, 254 | training_steps, 255 | learning_rate, 256 | lora_rank, 257 | vram_mode, 258 | keep_lora=True, 259 | output_name="MyLora", 260 | custom_python_exe="", 261 | image_1=None, 262 | **kwargs 263 | ): 264 | global _sd15_lora_cache 265 | 266 | # Get full path to checkpoint 267 | model_path = folder_paths.get_full_path("checkpoints", ckpt_name) 268 | 269 | # Check if using folder path for images 270 | use_folder_path = False 271 | folder_images = [] 272 | folder_captions = [] 273 | 274 | if images_path and images_path.strip(): 275 | images_path = os.path.expanduser(images_path.strip()) 276 | if os.path.isdir(images_path): 277 | # Find all image files in the folder 278 | image_extensions = ('.png', '.jpg', '.jpeg', '.webp', '.bmp') 279 | for filename in sorted(os.listdir(images_path)): 280 | if filename.lower().endswith(image_extensions): 281 | img_path = os.path.join(images_path, filename) 282 | folder_images.append(img_path) 283 | 284 | # Look for matching caption file 285 | base_name = os.path.splitext(filename)[0] 286 | caption_file = os.path.join(images_path, f"{base_name}.txt") 287 | if os.path.exists(caption_file): 288 | with open(caption_file, 'r', encoding='utf-8') as f: 289 | folder_captions.append(f.read().strip()) 290 | else: 291 | folder_captions.append(caption) # Use default caption 292 | 293 | if folder_images: 294 | use_folder_path = True 295 | print(f"[SD1.5 LoRA] Using {len(folder_images)} images from folder: {images_path}") 296 | else: 297 | print(f"[SD1.5 LoRA] No images found in folder: {images_path}, falling back to inputs") 298 | else: 299 | print(f"[SD1.5 LoRA] Invalid folder path: {images_path}, falling back to inputs") 300 | 301 | if not use_folder_path: 302 | # Collect all images and captions from inputs 303 | # External caption_N inputs override the default caption widget 304 | all_images = [] 305 | all_captions = [] 306 | 307 | # image_1 is now optional 308 | if image_1 is not None: 309 | all_images.append(image_1) 310 | cap_1 = kwargs.get("caption_1", "") 311 | all_captions.append(cap_1 if cap_1 else caption) 312 | 313 | for i in range(2, inputcount + 1): 314 | img = kwargs.get(f"image_{i}") 315 | if img is not None: 316 | all_images.append(img) 317 | # Get per-image caption, fall back to default if empty/missing 318 | cap = kwargs.get(f"caption_{i}", "") 319 | all_captions.append(cap if cap else caption) 320 | 321 | if not all_images: 322 | raise ValueError("No images provided. 
Either set images_path to a folder containing images, or connect at least one image input.") 323 | 324 | num_images = len(folder_images) if use_folder_path else len(all_images) 325 | print(f"[SD1.5 LoRA] Training with {num_images} image(s)") 326 | print(f"[SD1.5 LoRA] Using model: {ckpt_name}") 327 | 328 | # Get VRAM preset settings (fallback handles old saved settings) 329 | preset = SD15_VRAM_PRESETS.get(vram_mode, SD15_VRAM_PRESETS["Low (512px)"]) 330 | print(f"[SD1.5 LoRA] Using VRAM mode: {vram_mode}") 331 | 332 | # Validate paths 333 | train_script = os.path.join(sd_scripts_path, "train_network.py") 334 | 335 | # Use custom python exe if provided, otherwise detect from sd_scripts_path 336 | if custom_python_exe and custom_python_exe.strip(): 337 | custom_python = custom_python_exe.strip() 338 | if not os.path.exists(custom_python): 339 | raise FileNotFoundError(f"Custom python.exe not found at: {custom_python}") 340 | # Derive accelerate path from same directory as custom python 341 | venv_scripts_dir = os.path.dirname(custom_python) 342 | if sys.platform == 'win32': 343 | accelerate_path = os.path.join(venv_scripts_dir, "accelerate.exe") 344 | else: 345 | accelerate_path = os.path.join(venv_scripts_dir, "accelerate") 346 | if not os.path.exists(accelerate_path): 347 | raise FileNotFoundError(f"accelerate not found at: {accelerate_path} (expected in same directory as custom python)") 348 | else: 349 | accelerate_path = _get_accelerate_path(sd_scripts_path) 350 | if not os.path.exists(accelerate_path): 351 | raise FileNotFoundError(f"sd-scripts accelerate not found at: {accelerate_path}") 352 | 353 | if not os.path.exists(train_script): 354 | raise FileNotFoundError(f"train_network.py not found at: {train_script}") 355 | if not model_path or not os.path.exists(model_path): 356 | raise FileNotFoundError(f"SD 1.5 model not found at: {model_path}") 357 | 358 | # Save settings 359 | global _sd15_config 360 | _sd15_config['sd_scripts_path'] = sd_scripts_path 361 | _sd15_config['trainer_settings'] = { 362 | 'ckpt_name': ckpt_name, 363 | 'caption': caption, 364 | 'training_steps': training_steps, 365 | 'learning_rate': learning_rate, 366 | 'lora_rank': lora_rank, 367 | 'vram_mode': vram_mode, 368 | 'keep_lora': keep_lora, 369 | 'output_name': output_name, 370 | 'custom_python_exe': custom_python_exe, 371 | } 372 | _save_sd15_config() 373 | 374 | # Compute hash for caching 375 | if use_folder_path: 376 | image_hash = _compute_image_hash(folder_images, folder_captions, training_steps, learning_rate, lora_rank, vram_mode, output_name, use_folder_path=True) 377 | else: 378 | image_hash = _compute_image_hash(all_images, all_captions, training_steps, learning_rate, lora_rank, vram_mode, output_name, use_folder_path=False) 379 | 380 | # Check cache 381 | if keep_lora and image_hash in _sd15_lora_cache: 382 | cached_path = _sd15_lora_cache[image_hash] 383 | if os.path.exists(cached_path): 384 | print(f"[SD1.5 LoRA] Cache hit! 
Reusing: {cached_path}") 385 | return (cached_path,) 386 | else: 387 | del _sd15_lora_cache[image_hash] 388 | _save_sd15_cache() 389 | 390 | # Generate run name with timestamp 391 | timestamp = datetime.now().strftime('%Y%m%d_%H%M%S') 392 | run_name = f"{output_name}_{timestamp}" if output_name else f"sd15_lora_{image_hash}" 393 | 394 | # Output folder 395 | output_folder = os.path.join(sd_scripts_path, "output") 396 | os.makedirs(output_folder, exist_ok=True) 397 | lora_output_path = os.path.join(output_folder, f"{run_name}.safetensors") 398 | 399 | # Auto-increment if file somehow still exists (same second) 400 | if os.path.exists(lora_output_path): 401 | counter = 1 402 | while os.path.exists(os.path.join(output_folder, f"{run_name}_{counter}.safetensors")): 403 | counter += 1 404 | run_name = f"{run_name}_{counter}" 405 | lora_output_path = os.path.join(output_folder, f"{run_name}.safetensors") 406 | print(f"[SD1.5 LoRA] Name exists, using: {run_name}") 407 | 408 | # Create temp directory for images 409 | temp_dir = tempfile.mkdtemp(prefix="comfy_sd15_lora_") 410 | image_folder = os.path.join(temp_dir, "1_subject") # sd-scripts format: repeats_class 411 | os.makedirs(image_folder, exist_ok=True) 412 | 413 | try: 414 | # Save images with captions 415 | if use_folder_path: 416 | # Copy images from folder and create caption files 417 | for idx, (src_path, cap) in enumerate(zip(folder_images, folder_captions)): 418 | # Copy image to temp folder 419 | ext = os.path.splitext(src_path)[1] 420 | dest_path = os.path.join(image_folder, f"image_{idx+1:03d}{ext}") 421 | shutil.copy2(src_path, dest_path) 422 | 423 | # Create caption file 424 | caption_path = os.path.join(image_folder, f"image_{idx+1:03d}.txt") 425 | with open(caption_path, 'w', encoding='utf-8') as f: 426 | f.write(cap) 427 | else: 428 | # Save tensor images 429 | for idx, img_tensor in enumerate(all_images): 430 | img_data = img_tensor[0] 431 | img_np = (img_data.cpu().numpy() * 255).astype(np.uint8) 432 | img_pil = Image.fromarray(img_np) 433 | 434 | image_path = os.path.join(image_folder, f"image_{idx+1:03d}.png") 435 | img_pil.save(image_path, "PNG") 436 | 437 | # Use per-image caption 438 | caption_path = os.path.join(image_folder, f"image_{idx+1:03d}.txt") 439 | with open(caption_path, 'w', encoding='utf-8') as f: 440 | f.write(all_captions[idx]) 441 | 442 | print(f"[SD1.5 LoRA] Saved {num_images} images to {image_folder}") 443 | 444 | # Generate config 445 | config_content = generate_sd15_training_config( 446 | name=run_name, 447 | image_folder=temp_dir, # Parent of the class folder 448 | output_folder=output_folder, 449 | model_path=model_path, 450 | steps=training_steps, 451 | learning_rate=learning_rate, 452 | lora_rank=lora_rank, 453 | lora_alpha=lora_rank, # alpha = rank for full strength training 454 | resolution=preset['resolution'], 455 | batch_size=preset['batch_size'], 456 | optimizer=preset['optimizer'], 457 | mixed_precision=preset['mixed_precision'], 458 | gradient_checkpointing=preset['gradient_checkpointing'], 459 | cache_latents=preset['cache_latents'], 460 | ) 461 | 462 | config_path = os.path.join(temp_dir, "training_config.toml") 463 | save_config(config_content, config_path) 464 | print(f"[SD1.5 LoRA] Config saved to {config_path}") 465 | 466 | # Build command 467 | cmd = [ 468 | accelerate_path, 469 | "launch", 470 | "--num_cpu_threads_per_process=2", 471 | train_script, 472 | f"--config_file={config_path}", 473 | ] 474 | 475 | print(f"[SD1.5 LoRA] Starting training: {run_name}") 476 | print(f"[SD1.5 
LoRA] Images: {num_images}, Steps: {training_steps}, LR: {learning_rate}, Rank: {lora_rank}") 477 | 478 | # Run training 479 | startupinfo = None 480 | if sys.platform == 'win32': 481 | startupinfo = subprocess.STARTUPINFO() 482 | startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW 483 | 484 | # Set UTF-8 encoding for subprocess to handle Japanese text in sd-scripts 485 | env = os.environ.copy() 486 | env['PYTHONIOENCODING'] = 'utf-8' 487 | 488 | process = subprocess.Popen( 489 | cmd, 490 | stdout=subprocess.PIPE, 491 | stderr=subprocess.STDOUT, 492 | text=True, 493 | encoding='utf-8', 494 | errors='replace', 495 | cwd=sd_scripts_path, 496 | startupinfo=startupinfo, 497 | env=env, 498 | ) 499 | 500 | # Stream output 501 | for line in process.stdout: 502 | line = line.rstrip() 503 | if line: 504 | print(f"[sd-scripts] {line}") 505 | 506 | process.wait() 507 | 508 | if process.returncode != 0: 509 | raise RuntimeError(f"sd-scripts training failed with code {process.returncode}") 510 | 511 | print(f"[SD1.5 LoRA] Training completed!") 512 | 513 | # Find the trained LoRA 514 | if not os.path.exists(lora_output_path): 515 | # Check for alternative naming 516 | possible_files = [f for f in os.listdir(output_folder) if f.startswith(run_name) and f.endswith('.safetensors')] 517 | if possible_files: 518 | lora_output_path = os.path.join(output_folder, possible_files[-1]) 519 | else: 520 | raise FileNotFoundError(f"No LoRA file found in {output_folder}") 521 | 522 | print(f"[SD1.5 LoRA] Found trained LoRA: {lora_output_path}") 523 | 524 | # Handle caching 525 | if keep_lora: 526 | _sd15_lora_cache[image_hash] = lora_output_path 527 | _save_sd15_cache() 528 | print(f"[SD1.5 LoRA] LoRA saved and cached at: {lora_output_path}") 529 | else: 530 | print(f"[SD1.5 LoRA] LoRA available at: {lora_output_path}") 531 | 532 | return (lora_output_path,) 533 | 534 | finally: 535 | # Cleanup temp directory 536 | try: 537 | shutil.rmtree(temp_dir) 538 | except Exception as e: 539 | print(f"[SD1.5 LoRA] Warning: Could not clean up temp dir: {e}") 540 | -------------------------------------------------------------------------------- /sdxl_lora_trainer.py: -------------------------------------------------------------------------------- 1 | """ 2 | SDXL LoRA Trainer Node for ComfyUI 3 | 4 | Trains SDXL LoRAs using kohya-ss/sd-scripts. 5 | Completely independent from the AI-Toolkit based trainer. 
6 | """ 7 | 8 | import os 9 | import sys 10 | import json 11 | import hashlib 12 | import tempfile 13 | import shutil 14 | import subprocess 15 | from datetime import datetime 16 | import numpy as np 17 | from PIL import Image 18 | 19 | import folder_paths 20 | 21 | from .sdxl_config_template import ( 22 | generate_sdxl_training_config, 23 | save_config, 24 | SDXL_VRAM_PRESETS, 25 | ) 26 | 27 | 28 | # Global config for SDXL trainer 29 | _sdxl_config = {} 30 | _sdxl_config_file = os.path.join(os.path.dirname(__file__), ".sdxl_config.json") 31 | 32 | # Global cache for trained LoRAs 33 | _sdxl_lora_cache = {} 34 | _sdxl_cache_file = os.path.join(os.path.dirname(__file__), ".sdxl_lora_cache.json") 35 | 36 | 37 | def _load_sdxl_config(): 38 | """Load SDXL config from disk.""" 39 | global _sdxl_config 40 | if os.path.exists(_sdxl_config_file): 41 | try: 42 | with open(_sdxl_config_file, 'r', encoding='utf-8') as f: 43 | _sdxl_config = json.load(f) 44 | except: 45 | _sdxl_config = {} 46 | 47 | 48 | def _save_sdxl_config(): 49 | """Save SDXL config to disk.""" 50 | try: 51 | with open(_sdxl_config_file, 'w', encoding='utf-8') as f: 52 | json.dump(_sdxl_config, f, indent=2) 53 | except: 54 | pass 55 | 56 | 57 | def _load_sdxl_cache(): 58 | """Load SDXL LoRA cache from disk.""" 59 | global _sdxl_lora_cache 60 | if os.path.exists(_sdxl_cache_file): 61 | try: 62 | with open(_sdxl_cache_file, 'r', encoding='utf-8') as f: 63 | _sdxl_lora_cache = json.load(f) 64 | except: 65 | _sdxl_lora_cache = {} 66 | 67 | 68 | def _save_sdxl_cache(): 69 | """Save SDXL LoRA cache to disk.""" 70 | try: 71 | with open(_sdxl_cache_file, 'w', encoding='utf-8') as f: 72 | json.dump(_sdxl_lora_cache, f) 73 | except: 74 | pass 75 | 76 | 77 | def _compute_image_hash(images, captions, training_steps, learning_rate, lora_rank, vram_mode, output_name, use_folder_path=False): 78 | """Compute a hash of all images, captions, and training parameters.""" 79 | hasher = hashlib.sha256() 80 | 81 | if use_folder_path: 82 | # For folder paths, hash the file paths and modification times 83 | for img_path in images: 84 | hasher.update(img_path.encode('utf-8')) 85 | if os.path.exists(img_path): 86 | hasher.update(str(os.path.getmtime(img_path)).encode('utf-8')) 87 | else: 88 | # For tensor inputs, hash the image data 89 | for img_tensor in images: 90 | img_np = (img_tensor[0].cpu().numpy() * 255).astype(np.uint8) 91 | img_bytes = img_np.tobytes() 92 | hasher.update(img_bytes) 93 | 94 | # Include all captions in hash 95 | captions_str = "|".join(captions) 96 | params_str = f"sdxl|{captions_str}|{training_steps}|{learning_rate}|{lora_rank}|{vram_mode}|{output_name}|{len(images)}" 97 | hasher.update(params_str.encode('utf-8')) 98 | 99 | return hasher.hexdigest()[:16] 100 | 101 | 102 | def _get_venv_python_path(sd_scripts_path): 103 | """Get the Python path for sd-scripts venv based on platform. 
104 | Checks both .venv (uv default) and venv (traditional) folders.""" 105 | venv_folders = [".venv", "venv"] 106 | 107 | for venv_folder in venv_folders: 108 | if sys.platform == 'win32': 109 | python_path = os.path.join(sd_scripts_path, venv_folder, "Scripts", "python.exe") 110 | else: 111 | python_path = os.path.join(sd_scripts_path, venv_folder, "bin", "python") 112 | 113 | if os.path.exists(python_path): 114 | return python_path 115 | 116 | # Return traditional path for error messaging 117 | if sys.platform == 'win32': 118 | return os.path.join(sd_scripts_path, "venv", "Scripts", "python.exe") 119 | else: 120 | return os.path.join(sd_scripts_path, "venv", "bin", "python") 121 | 122 | 123 | def _get_accelerate_path(sd_scripts_path): 124 | """Get the accelerate path for sd-scripts venv based on platform. 125 | Checks both .venv (uv default) and venv (traditional) folders.""" 126 | venv_folders = [".venv", "venv"] 127 | 128 | for venv_folder in venv_folders: 129 | if sys.platform == 'win32': 130 | accel_path = os.path.join(sd_scripts_path, venv_folder, "Scripts", "accelerate.exe") 131 | else: 132 | accel_path = os.path.join(sd_scripts_path, venv_folder, "bin", "accelerate") 133 | 134 | if os.path.exists(accel_path): 135 | return accel_path 136 | 137 | # Return traditional path for error messaging 138 | if sys.platform == 'win32': 139 | return os.path.join(sd_scripts_path, "venv", "Scripts", "accelerate.exe") 140 | else: 141 | return os.path.join(sd_scripts_path, "venv", "bin", "accelerate") 142 | 143 | 144 | # Load config and cache on module import 145 | _load_sdxl_config() 146 | _load_sdxl_cache() 147 | 148 | 149 | class SDXLLoraTrainer: 150 | """ 151 | Trains an SDXL LoRA from one or more images using kohya sd-scripts. 152 | """ 153 | 154 | def __init__(self): 155 | pass 156 | 157 | @classmethod 158 | def INPUT_TYPES(s): 159 | # Get saved settings or use defaults 160 | if sys.platform == 'win32': 161 | sd_scripts_fallback = 'S:\\Auto\\sd-scripts' 162 | else: 163 | sd_scripts_fallback = '~/sd-scripts' 164 | 165 | saved = _sdxl_config.get('trainer_settings', {}) 166 | 167 | # Get list of checkpoints from ComfyUI 168 | checkpoints = folder_paths.get_filename_list("checkpoints") 169 | 170 | return { 171 | "required": { 172 | "inputcount": ("INT", {"default": 4, "min": 1, "max": 100, "step": 1, 173 | "tooltip": "Number of image inputs. Click 'Update inputs' button after changing."}), 174 | "images_path": ("STRING", { 175 | "default": "", 176 | "tooltip": "Optional: Path to folder containing training images. If provided, images from this folder are used instead of image inputs. Caption .txt files with matching names are used if present." 177 | }), 178 | "sd_scripts_path": ("STRING", { 179 | "default": _sdxl_config.get('sd_scripts_path', sd_scripts_fallback), 180 | "tooltip": "Path to kohya sd-scripts installation." 181 | }), 182 | "ckpt_name": (checkpoints, { 183 | "tooltip": "SDXL checkpoint to train LoRA on." 184 | }), 185 | "caption": ("STRING", { 186 | "default": saved.get('caption', "photo of subject"), 187 | "multiline": True, 188 | "tooltip": "Default caption for all images. Per-image caption inputs override this." 189 | }), 190 | "training_steps": ("INT", { 191 | "default": saved.get('training_steps', 500), 192 | "min": 10, 193 | "max": 5000, 194 | "step": 10, 195 | "tooltip": "Number of training steps. 500 is a good starting point. Increase for more images or complex subjects." 
196 | }), 197 | "learning_rate": ("FLOAT", { 198 | "default": saved.get('learning_rate', 0.0005), 199 | "min": 0.00001, 200 | "max": 0.1, 201 | "step": 0.00001, 202 | "tooltip": "Learning rate. 0.0005 trains fast but may overshoot. Experiment with lowering for more stable/slower training." 203 | }), 204 | "lora_rank": ("INT", { 205 | "default": saved.get('lora_rank', 16), 206 | "min": 4, 207 | "max": 128, 208 | "step": 4, 209 | "tooltip": "LoRA rank/dimension. 16-32 typical. Higher = more capacity but larger file and more VRAM." 210 | }), 211 | "vram_mode": (["Min (512px)", "Low (768px)", "Max (1024px)"], { 212 | "default": saved.get('vram_mode', "Low (768px)"), 213 | "tooltip": "VRAM optimization preset. Images are automatically resized to the specified resolution." 214 | }), 215 | "keep_lora": ("BOOLEAN", { 216 | "default": saved.get('keep_lora', True), 217 | "tooltip": "If True, keeps the trained LoRA file." 218 | }), 219 | "output_name": ("STRING", { 220 | "default": saved.get('output_name', "MyLora"), 221 | "tooltip": "Custom name for the output LoRA. Timestamp will be appended." 222 | }), 223 | "custom_python_exe": ("STRING", { 224 | "default": saved.get('custom_python_exe', ""), 225 | "tooltip": "Advanced: Optionally enter the full path to a custom python.exe (e.g. C:\\my-venv\\Scripts\\python.exe). If empty, uses the venv inside sd_scripts_path. The sd_scripts_path field is still required for locating training scripts." 226 | }), 227 | }, 228 | "optional": { 229 | "image_1": ("IMAGE", {"tooltip": "Training image (not needed if images_path is set)."}), 230 | "caption_1": ("STRING", {"forceInput": True, "tooltip": "Caption for image_1. Overrides default caption."}), 231 | "image_2": ("IMAGE", {"tooltip": "Training image."}), 232 | "caption_2": ("STRING", {"forceInput": True, "tooltip": "Caption for image_2. Overrides default caption."}), 233 | "image_3": ("IMAGE", {"tooltip": "Training image."}), 234 | "caption_3": ("STRING", {"forceInput": True, "tooltip": "Caption for image_3. Overrides default caption."}), 235 | "image_4": ("IMAGE", {"tooltip": "Training image."}), 236 | "caption_4": ("STRING", {"forceInput": True, "tooltip": "Caption for image_4. Overrides default caption."}), 237 | } 238 | } 239 | 240 | RETURN_TYPES = ("STRING",) 241 | RETURN_NAMES = ("lora_path",) 242 | OUTPUT_TOOLTIPS = ("Path to the trained SDXL LoRA file.",) 243 | FUNCTION = "train_sdxl_lora" 244 | CATEGORY = "loaders" 245 | DESCRIPTION = "Trains an SDXL LoRA from images using kohya sd-scripts." 
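    # --- Illustrative example of an images_path folder (file names are hypothetical) ---
    # When images_path is set, train_sdxl_lora() below scans that folder instead of the
    # image_N inputs. A sidecar .txt with the same base name supplies the per-image
    # caption; images without one fall back to the "caption" widget text:
    #
    #   C:\datasets\my_style\
    #       001.png
    #       001.txt    # caption used for 001.png
    #       002.jpg    # no 002.txt here, so the default caption is used
    #
    # Supported extensions: .png, .jpg, .jpeg, .webp, .bmp (scanned in sorted order).
    # -------------------------------------------------------------------------------------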
246 | 247 | def train_sdxl_lora( 248 | self, 249 | inputcount, 250 | images_path, 251 | sd_scripts_path, 252 | ckpt_name, 253 | caption, 254 | training_steps, 255 | learning_rate, 256 | lora_rank, 257 | vram_mode, 258 | keep_lora=True, 259 | output_name="MyLora", 260 | custom_python_exe="", 261 | image_1=None, 262 | **kwargs 263 | ): 264 | global _sdxl_lora_cache 265 | 266 | # Get full path to checkpoint 267 | model_path = folder_paths.get_full_path("checkpoints", ckpt_name) 268 | 269 | # Check if using folder path for images 270 | use_folder_path = False 271 | folder_images = [] 272 | folder_captions = [] 273 | 274 | if images_path and images_path.strip(): 275 | images_path = os.path.expanduser(images_path.strip()) 276 | if os.path.isdir(images_path): 277 | # Find all image files in the folder 278 | image_extensions = ('.png', '.jpg', '.jpeg', '.webp', '.bmp') 279 | for filename in sorted(os.listdir(images_path)): 280 | if filename.lower().endswith(image_extensions): 281 | img_path = os.path.join(images_path, filename) 282 | folder_images.append(img_path) 283 | 284 | # Look for matching caption file 285 | base_name = os.path.splitext(filename)[0] 286 | caption_file = os.path.join(images_path, f"{base_name}.txt") 287 | if os.path.exists(caption_file): 288 | with open(caption_file, 'r', encoding='utf-8') as f: 289 | folder_captions.append(f.read().strip()) 290 | else: 291 | folder_captions.append(caption) # Use default caption 292 | 293 | if folder_images: 294 | use_folder_path = True 295 | print(f"[SDXL LoRA] Using {len(folder_images)} images from folder: {images_path}") 296 | else: 297 | print(f"[SDXL LoRA] No images found in folder: {images_path}, falling back to inputs") 298 | else: 299 | print(f"[SDXL LoRA] Invalid folder path: {images_path}, falling back to inputs") 300 | 301 | if not use_folder_path: 302 | # Collect all images and captions from inputs 303 | # External caption_N inputs override the default caption widget 304 | all_images = [] 305 | all_captions = [] 306 | 307 | # image_1 is now optional 308 | if image_1 is not None: 309 | all_images.append(image_1) 310 | cap_1 = kwargs.get("caption_1", "") 311 | all_captions.append(cap_1 if cap_1 else caption) 312 | 313 | for i in range(2, inputcount + 1): 314 | img = kwargs.get(f"image_{i}") 315 | if img is not None: 316 | all_images.append(img) 317 | # Get per-image caption, fall back to default if empty/missing 318 | cap = kwargs.get(f"caption_{i}", "") 319 | all_captions.append(cap if cap else caption) 320 | 321 | if not all_images: 322 | raise ValueError("No images provided. 
Either set images_path to a folder containing images, or connect at least one image input.") 323 | 324 | num_images = len(folder_images) if use_folder_path else len(all_images) 325 | print(f"[SDXL LoRA] Training with {num_images} image(s)") 326 | print(f"[SDXL LoRA] Using model: {ckpt_name}") 327 | 328 | # Get VRAM preset settings (fallback handles old saved settings) 329 | preset = SDXL_VRAM_PRESETS.get(vram_mode, SDXL_VRAM_PRESETS["Low (768px)"]) 330 | print(f"[SDXL LoRA] Using VRAM mode: {vram_mode}") 331 | 332 | # Validate paths 333 | train_script = os.path.join(sd_scripts_path, "sdxl_train_network.py") 334 | 335 | # Use custom python exe if provided, otherwise detect from sd_scripts_path 336 | if custom_python_exe and custom_python_exe.strip(): 337 | custom_python = custom_python_exe.strip() 338 | if not os.path.exists(custom_python): 339 | raise FileNotFoundError(f"Custom python.exe not found at: {custom_python}") 340 | # Derive accelerate path from same directory as custom python 341 | venv_scripts_dir = os.path.dirname(custom_python) 342 | if sys.platform == 'win32': 343 | accelerate_path = os.path.join(venv_scripts_dir, "accelerate.exe") 344 | else: 345 | accelerate_path = os.path.join(venv_scripts_dir, "accelerate") 346 | if not os.path.exists(accelerate_path): 347 | raise FileNotFoundError(f"accelerate not found at: {accelerate_path} (expected in same directory as custom python)") 348 | else: 349 | accelerate_path = _get_accelerate_path(sd_scripts_path) 350 | if not os.path.exists(accelerate_path): 351 | raise FileNotFoundError(f"sd-scripts accelerate not found at: {accelerate_path}") 352 | 353 | if not os.path.exists(train_script): 354 | raise FileNotFoundError(f"sdxl_train_network.py not found at: {train_script}") 355 | if not model_path or not os.path.exists(model_path): 356 | raise FileNotFoundError(f"SDXL model not found at: {model_path}") 357 | 358 | # Save settings 359 | global _sdxl_config 360 | _sdxl_config['sd_scripts_path'] = sd_scripts_path 361 | _sdxl_config['trainer_settings'] = { 362 | 'ckpt_name': ckpt_name, 363 | 'caption': caption, 364 | 'training_steps': training_steps, 365 | 'learning_rate': learning_rate, 366 | 'lora_rank': lora_rank, 367 | 'vram_mode': vram_mode, 368 | 'keep_lora': keep_lora, 369 | 'output_name': output_name, 370 | 'custom_python_exe': custom_python_exe, 371 | } 372 | _save_sdxl_config() 373 | 374 | # Compute hash for caching 375 | if use_folder_path: 376 | image_hash = _compute_image_hash(folder_images, folder_captions, training_steps, learning_rate, lora_rank, vram_mode, output_name, use_folder_path=True) 377 | else: 378 | image_hash = _compute_image_hash(all_images, all_captions, training_steps, learning_rate, lora_rank, vram_mode, output_name, use_folder_path=False) 379 | 380 | # Check cache 381 | if keep_lora and image_hash in _sdxl_lora_cache: 382 | cached_path = _sdxl_lora_cache[image_hash] 383 | if os.path.exists(cached_path): 384 | print(f"[SDXL LoRA] Cache hit! 
Reusing: {cached_path}") 385 | return (cached_path,) 386 | else: 387 | del _sdxl_lora_cache[image_hash] 388 | _save_sdxl_cache() 389 | 390 | # Generate run name with timestamp 391 | timestamp = datetime.now().strftime('%Y%m%d_%H%M%S') 392 | run_name = f"{output_name}_{timestamp}" if output_name else f"sdxl_lora_{image_hash}" 393 | 394 | # Output folder 395 | output_folder = os.path.join(sd_scripts_path, "output") 396 | os.makedirs(output_folder, exist_ok=True) 397 | lora_output_path = os.path.join(output_folder, f"{run_name}.safetensors") 398 | 399 | # Auto-increment if file somehow still exists (same second) 400 | if os.path.exists(lora_output_path): 401 | counter = 1 402 | while os.path.exists(os.path.join(output_folder, f"{run_name}_{counter}.safetensors")): 403 | counter += 1 404 | run_name = f"{run_name}_{counter}" 405 | lora_output_path = os.path.join(output_folder, f"{run_name}.safetensors") 406 | print(f"[SDXL LoRA] Name exists, using: {run_name}") 407 | 408 | # Create temp directory for images 409 | temp_dir = tempfile.mkdtemp(prefix="comfy_sdxl_lora_") 410 | image_folder = os.path.join(temp_dir, "1_subject") # sd-scripts format: repeats_class 411 | os.makedirs(image_folder, exist_ok=True) 412 | 413 | try: 414 | # Save images with captions 415 | if use_folder_path: 416 | # Copy images from folder and create caption files 417 | for idx, (src_path, cap) in enumerate(zip(folder_images, folder_captions)): 418 | # Copy image to temp folder 419 | ext = os.path.splitext(src_path)[1] 420 | dest_path = os.path.join(image_folder, f"image_{idx+1:03d}{ext}") 421 | shutil.copy2(src_path, dest_path) 422 | 423 | # Create caption file 424 | caption_path = os.path.join(image_folder, f"image_{idx+1:03d}.txt") 425 | with open(caption_path, 'w', encoding='utf-8') as f: 426 | f.write(cap) 427 | else: 428 | # Save tensor images 429 | for idx, img_tensor in enumerate(all_images): 430 | img_data = img_tensor[0] 431 | img_np = (img_data.cpu().numpy() * 255).astype(np.uint8) 432 | img_pil = Image.fromarray(img_np) 433 | 434 | image_path = os.path.join(image_folder, f"image_{idx+1:03d}.png") 435 | img_pil.save(image_path, "PNG") 436 | 437 | # Use per-image caption 438 | caption_path = os.path.join(image_folder, f"image_{idx+1:03d}.txt") 439 | with open(caption_path, 'w', encoding='utf-8') as f: 440 | f.write(all_captions[idx]) 441 | 442 | print(f"[SDXL LoRA] Saved {num_images} images to {image_folder}") 443 | 444 | # Generate config 445 | config_content = generate_sdxl_training_config( 446 | name=run_name, 447 | image_folder=temp_dir, # Parent of the class folder 448 | output_folder=output_folder, 449 | model_path=model_path, 450 | steps=training_steps, 451 | learning_rate=learning_rate, 452 | lora_rank=lora_rank, 453 | lora_alpha=lora_rank, # alpha = rank for full strength training 454 | resolution=preset['resolution'], 455 | batch_size=preset['batch_size'], 456 | optimizer=preset['optimizer'], 457 | mixed_precision=preset['mixed_precision'], 458 | gradient_checkpointing=preset['gradient_checkpointing'], 459 | cache_latents=preset['cache_latents'], 460 | cache_text_encoder_outputs=preset['cache_text_encoder_outputs'], 461 | ) 462 | 463 | config_path = os.path.join(temp_dir, "training_config.toml") 464 | save_config(config_content, config_path) 465 | print(f"[SDXL LoRA] Config saved to {config_path}") 466 | 467 | # Build command 468 | cmd = [ 469 | accelerate_path, 470 | "launch", 471 | "--num_cpu_threads_per_process=2", 472 | train_script, 473 | f"--config_file={config_path}", 474 | ] 475 | 476 | 
print(f"[SDXL LoRA] Starting training: {run_name}") 477 | print(f"[SDXL LoRA] Images: {num_images}, Steps: {training_steps}, LR: {learning_rate}, Rank: {lora_rank}") 478 | 479 | # Run training 480 | startupinfo = None 481 | if sys.platform == 'win32': 482 | startupinfo = subprocess.STARTUPINFO() 483 | startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW 484 | 485 | # Set UTF-8 encoding for subprocess to handle Japanese text in sd-scripts 486 | env = os.environ.copy() 487 | env['PYTHONIOENCODING'] = 'utf-8' 488 | 489 | process = subprocess.Popen( 490 | cmd, 491 | stdout=subprocess.PIPE, 492 | stderr=subprocess.STDOUT, 493 | text=True, 494 | encoding='utf-8', 495 | errors='replace', 496 | cwd=sd_scripts_path, 497 | startupinfo=startupinfo, 498 | env=env, 499 | ) 500 | 501 | # Stream output 502 | for line in process.stdout: 503 | line = line.rstrip() 504 | if line: 505 | print(f"[sd-scripts] {line}") 506 | 507 | process.wait() 508 | 509 | if process.returncode != 0: 510 | raise RuntimeError(f"sd-scripts training failed with code {process.returncode}") 511 | 512 | print(f"[SDXL LoRA] Training completed!") 513 | 514 | # Find the trained LoRA 515 | if not os.path.exists(lora_output_path): 516 | # Check for alternative naming 517 | possible_files = [f for f in os.listdir(output_folder) if f.startswith(run_name) and f.endswith('.safetensors')] 518 | if possible_files: 519 | lora_output_path = os.path.join(output_folder, possible_files[-1]) 520 | else: 521 | raise FileNotFoundError(f"No LoRA file found in {output_folder}") 522 | 523 | print(f"[SDXL LoRA] Found trained LoRA: {lora_output_path}") 524 | 525 | # Handle caching 526 | if keep_lora: 527 | _sdxl_lora_cache[image_hash] = lora_output_path 528 | _save_sdxl_cache() 529 | print(f"[SDXL LoRA] LoRA saved and cached at: {lora_output_path}") 530 | else: 531 | print(f"[SDXL LoRA] LoRA available at: {lora_output_path}") 532 | 533 | return (lora_output_path,) 534 | 535 | finally: 536 | # Cleanup temp directory 537 | try: 538 | shutil.rmtree(temp_dir) 539 | except Exception as e: 540 | print(f"[SDXL LoRA] Warning: Could not clean up temp dir: {e}") 541 | -------------------------------------------------------------------------------- /workflows/Lora Analysis and Block Control Demo - Z-Image.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "92112d97-bb64-4b44-86f2-ea5691ef8f6e", 3 | "revision": 0, 4 | "last_node_id": 100, 5 | "last_link_id": 234, 6 | "nodes": [ 7 | { 8 | "id": 87, 9 | "type": "easy showAnything", 10 | "pos": [ 11 | -3042.0919590190556, 12 | -2005.8400617211933 13 | ], 14 | "size": [ 15 | 435.00890041395473, 16 | 510.35983829793304 17 | ], 18 | "flags": {}, 19 | "order": 9, 20 | "mode": 0, 21 | "inputs": [ 22 | { 23 | "name": "anything", 24 | "shape": 7, 25 | "type": "*", 26 | "link": 228 27 | } 28 | ], 29 | "outputs": [ 30 | { 31 | "name": "output", 32 | "type": "*", 33 | "links": null 34 | } 35 | ], 36 | "properties": { 37 | "cnr_id": "comfyui-easy-use", 38 | "ver": "1.3.4", 39 | "Node name for S&R": "easy showAnything", 40 | "ue_properties": { 41 | "widget_ue_connectable": {}, 42 | "input_ue_unconnectable": {}, 43 | "version": "7.5.1" 44 | } 45 | }, 46 | "widgets_values": [ 47 | "LoRA Patch Analysis (ZIMAGE)\n============================================================\nBlock Score Patches Strength\n------------------------------------------------------------\nother [████████████████████] 100.0 ( 22) 22.000\nlayer_28 [██████████████████░░] 90.5 ( 6) 6.000\nlayer_27 
[█████████████████░░░] 88.5 ( 6) 6.000\nlayer_29 [████████████████░░░░] 82.9 ( 6) 6.000\nlayer_21 [██████████████░░░░░░] 74.2 ( 6) 6.000\nlayer_26 [██████████████░░░░░░] 73.7 ( 6) 6.000\nlayer_22 [██████████████░░░░░░] 71.6 ( 6) 6.000\nlayer_20 [█████████████░░░░░░░] 68.1 ( 6) 6.000\nlayer_19 [█████████████░░░░░░░] 66.1 ( 6) 6.000\nlayer_25 [████████████░░░░░░░░] 64.9 ( 6) 6.000\nlayer_24 [████████████░░░░░░░░] 63.0 ( 6) 6.000\nlayer_18 [████████████░░░░░░░░] 60.6 ( 6) 6.000\nlayer_23 [███████████░░░░░░░░░] 60.0 ( 6) 6.000\nlayer_17 [███████████░░░░░░░░░] 58.5 ( 6) 6.000\nlayer_16 [██████████░░░░░░░░░░] 54.1 ( 6) 6.000\nlayer_15 [██████████░░░░░░░░░░] 50.6 ( 6) 6.000\nlayer_14 [█████████░░░░░░░░░░░] 48.9 ( 6) 6.000\nlayer_13 [████████░░░░░░░░░░░░] 42.2 ( 6) 6.000\nlayer_12 [████████░░░░░░░░░░░░] 42.0 ( 6) 6.000\nlayer_11 [████████░░░░░░░░░░░░] 40.7 ( 6) 6.000\nlayer_10 [███████░░░░░░░░░░░░░] 38.1 ( 6) 6.000\nlayer_9 [███████░░░░░░░░░░░░░] 37.6 ( 6) 6.000\nlayer_7 [███████░░░░░░░░░░░░░] 35.8 ( 6) 6.000\nlayer_8 [██████░░░░░░░░░░░░░░] 34.5 ( 6) 6.000\nlayer_2 [██████░░░░░░░░░░░░░░] 34.4 ( 6) 6.000\nlayer_6 [██████░░░░░░░░░░░░░░] 34.3 ( 6) 6.000\nlayer_5 [██████░░░░░░░░░░░░░░] 33.3 ( 6) 6.000\nlayer_4 [██████░░░░░░░░░░░░░░] 33.1 ( 6) 6.000\nlayer_1 [██████░░░░░░░░░░░░░░] 32.9 ( 6) 6.000\nlayer_3 [██████░░░░░░░░░░░░░░] 32.0 ( 6) 6.000\nlayer_0 [█████░░░░░░░░░░░░░░░] 27.4 ( 6) 6.000\n------------------------------------------------------------\nTotal patched layers: 202" 48 | ] 49 | }, 50 | { 51 | "id": 72, 52 | "type": "EmptySD3LatentImage", 53 | "pos": [ 54 | -3031.8329581722055, 55 | -1316.771878484911 56 | ], 57 | "size": [ 58 | 411.4115252433435, 59 | 110.14673226853097 60 | ], 61 | "flags": {}, 62 | "order": 0, 63 | "mode": 0, 64 | "inputs": [], 65 | "outputs": [ 66 | { 67 | "name": "LATENT", 68 | "type": "LATENT", 69 | "slot_index": 0, 70 | "links": [ 71 | 140 72 | ] 73 | } 74 | ], 75 | "properties": { 76 | "cnr_id": "comfy-core", 77 | "ver": "0.3.75", 78 | "Node name for S&R": "EmptySD3LatentImage", 79 | "ue_properties": { 80 | "widget_ue_connectable": {}, 81 | "version": "7.5.1", 82 | "input_ue_unconnectable": {} 83 | } 84 | }, 85 | "widgets_values": [ 86 | 1024, 87 | 1488, 88 | 1 89 | ] 90 | }, 91 | { 92 | "id": 76, 93 | "type": "CLIPLoader", 94 | "pos": [ 95 | -3029.461722643861, 96 | -1162.995322720111 97 | ], 98 | "size": [ 99 | 404.76879872725476, 100 | 106 101 | ], 102 | "flags": {}, 103 | "order": 1, 104 | "mode": 0, 105 | "inputs": [], 106 | "outputs": [ 107 | { 108 | "name": "CLIP", 109 | "type": "CLIP", 110 | "links": [ 111 | 144, 112 | 225, 113 | 230 114 | ] 115 | } 116 | ], 117 | "properties": { 118 | "cnr_id": "comfy-core", 119 | "ver": "0.3.75", 120 | "Node name for S&R": "CLIPLoader", 121 | "ue_properties": { 122 | "widget_ue_connectable": {}, 123 | "version": "7.5.1", 124 | "input_ue_unconnectable": {} 125 | } 126 | }, 127 | "widgets_values": [ 128 | "qwen_3_4b.safetensors", 129 | "lumina2", 130 | "default" 131 | ], 132 | "ndSuperSelectorEnabled": false, 133 | "ndPowerEnabled": false 134 | }, 135 | { 136 | "id": 75, 137 | "type": "VAELoader", 138 | "pos": [ 139 | -3021.442800836146, 140 | -1014.4239720160352 141 | ], 142 | "size": [ 143 | 392.3286019216621, 144 | 101.5406888195746 145 | ], 146 | "flags": {}, 147 | "order": 2, 148 | "mode": 0, 149 | "inputs": [], 150 | "outputs": [ 151 | { 152 | "name": "VAE", 153 | "type": "VAE", 154 | "links": [ 155 | 147 156 | ] 157 | } 158 | ], 159 | "properties": { 160 | "cnr_id": "comfy-core", 161 | "ver": "0.3.75", 162 | "Node 
name for S&R": "VAELoader", 163 | "ue_properties": { 164 | "widget_ue_connectable": {}, 165 | "version": "7.5.1", 166 | "input_ue_unconnectable": {} 167 | } 168 | }, 169 | "widgets_values": [ 170 | "ae.sft" 171 | ], 172 | "ndSuperSelectorEnabled": false, 173 | "ndPowerEnabled": false 174 | }, 175 | { 176 | "id": 70, 177 | "type": "ModelSamplingAuraFlow", 178 | "pos": [ 179 | -2019.0428971367162, 180 | -1906.0212238100846 181 | ], 182 | "size": [ 183 | 315, 184 | 58 185 | ], 186 | "flags": {}, 187 | "order": 11, 188 | "mode": 4, 189 | "inputs": [ 190 | { 191 | "name": "model", 192 | "type": "MODEL", 193 | "link": 233 194 | } 195 | ], 196 | "outputs": [ 197 | { 198 | "name": "MODEL", 199 | "type": "MODEL", 200 | "slot_index": 0, 201 | "links": [ 202 | 137 203 | ] 204 | } 205 | ], 206 | "properties": { 207 | "cnr_id": "comfy-core", 208 | "ver": "0.3.75", 209 | "Node name for S&R": "ModelSamplingAuraFlow", 210 | "ue_properties": { 211 | "widget_ue_connectable": {}, 212 | "version": "7.5.1", 213 | "input_ue_unconnectable": {} 214 | } 215 | }, 216 | "widgets_values": [ 217 | 3.23 218 | ] 219 | }, 220 | { 221 | "id": 78, 222 | "type": "VAEDecode", 223 | "pos": [ 224 | -2001.1128944917043, 225 | -1498.3122963645676 226 | ], 227 | "size": [ 228 | 306.4115252433435, 229 | 52.22009840279634 230 | ], 231 | "flags": {}, 232 | "order": 14, 233 | "mode": 0, 234 | "inputs": [ 235 | { 236 | "name": "samples", 237 | "type": "LATENT", 238 | "link": 146 239 | }, 240 | { 241 | "name": "vae", 242 | "type": "VAE", 243 | "link": 147 244 | } 245 | ], 246 | "outputs": [ 247 | { 248 | "name": "IMAGE", 249 | "type": "IMAGE", 250 | "slot_index": 0, 251 | "links": [ 252 | 148 253 | ] 254 | } 255 | ], 256 | "properties": { 257 | "cnr_id": "comfy-core", 258 | "ver": "0.3.75", 259 | "Node name for S&R": "VAEDecode", 260 | "ue_properties": { 261 | "widget_ue_connectable": {}, 262 | "version": "7.5.1", 263 | "input_ue_unconnectable": {} 264 | } 265 | }, 266 | "widgets_values": [] 267 | }, 268 | { 269 | "id": 68, 270 | "type": "KSampler", 271 | "pos": [ 272 | -2008.5296805654157, 273 | -1804.4277515201438 274 | ], 275 | "size": [ 276 | 315, 277 | 262 278 | ], 279 | "flags": {}, 280 | "order": 13, 281 | "mode": 0, 282 | "inputs": [ 283 | { 284 | "name": "model", 285 | "type": "MODEL", 286 | "link": 137 287 | }, 288 | { 289 | "name": "positive", 290 | "type": "CONDITIONING", 291 | "link": 138 292 | }, 293 | { 294 | "name": "negative", 295 | "type": "CONDITIONING", 296 | "link": 139 297 | }, 298 | { 299 | "name": "latent_image", 300 | "type": "LATENT", 301 | "link": 140 302 | } 303 | ], 304 | "outputs": [ 305 | { 306 | "name": "LATENT", 307 | "type": "LATENT", 308 | "slot_index": 0, 309 | "links": [ 310 | 146 311 | ] 312 | } 313 | ], 314 | "properties": { 315 | "cnr_id": "comfy-core", 316 | "ver": "0.3.75", 317 | "Node name for S&R": "KSampler", 318 | "ue_properties": { 319 | "widget_ue_connectable": {}, 320 | "version": "7.5.1", 321 | "input_ue_unconnectable": {} 322 | } 323 | }, 324 | "widgets_values": [ 325 | 1037483790035655, 326 | "fixed", 327 | 9, 328 | 1, 329 | "euler", 330 | "sgm_uniform", 331 | 1 332 | ] 333 | }, 334 | { 335 | "id": 9, 336 | "type": "SaveImage", 337 | "pos": [ 338 | -1634.8631786267724, 339 | -2145.6087611633375 340 | ], 341 | "size": [ 342 | 976.0567626953125, 343 | 1060.9766845703125 344 | ], 345 | "flags": {}, 346 | "order": 15, 347 | "mode": 0, 348 | "inputs": [ 349 | { 350 | "name": "images", 351 | "type": "IMAGE", 352 | "link": 148 353 | } 354 | ], 355 | "outputs": [], 356 | "properties": { 357 | 
"cnr_id": "comfy-core", 358 | "ver": "0.3.75", 359 | "Node name for S&R": "SaveImage", 360 | "ue_properties": { 361 | "widget_ue_connectable": {}, 362 | "version": "7.5.1", 363 | "input_ue_unconnectable": {} 364 | } 365 | }, 366 | "widgets_values": [ 367 | "ComfyUI" 368 | ] 369 | }, 370 | { 371 | "id": 77, 372 | "type": "UNETLoader", 373 | "pos": [ 374 | -3040.80694110174, 375 | -2443.803881750064 376 | ], 377 | "size": [ 378 | 417.20899553284744, 379 | 94.44019680559268 380 | ], 381 | "flags": {}, 382 | "order": 3, 383 | "mode": 0, 384 | "inputs": [], 385 | "outputs": [ 386 | { 387 | "name": "MODEL", 388 | "type": "MODEL", 389 | "links": [ 390 | 201, 391 | 224 392 | ] 393 | } 394 | ], 395 | "properties": { 396 | "cnr_id": "comfy-core", 397 | "ver": "0.3.75", 398 | "Node name for S&R": "UNETLoader", 399 | "ue_properties": { 400 | "widget_ue_connectable": {}, 401 | "version": "7.5.1", 402 | "input_ue_unconnectable": {} 403 | } 404 | }, 405 | "widgets_values": [ 406 | "z_image_turbo_bf16.safetensors", 407 | "default" 408 | ], 409 | "ndSuperSelectorEnabled": false, 410 | "ndPowerEnabled": false 411 | }, 412 | { 413 | "id": 74, 414 | "type": "CLIPTextEncode", 415 | "pos": [ 416 | -2574.6139646636193, 417 | -2243.3277059084116 418 | ], 419 | "size": [ 420 | 423.83001708984375, 421 | 177.11770629882812 422 | ], 423 | "flags": {}, 424 | "order": 12, 425 | "mode": 0, 426 | "inputs": [ 427 | { 428 | "name": "clip", 429 | "type": "CLIP", 430 | "link": 234 431 | } 432 | ], 433 | "outputs": [ 434 | { 435 | "name": "CONDITIONING", 436 | "type": "CONDITIONING", 437 | "slot_index": 0, 438 | "links": [ 439 | 138 440 | ] 441 | } 442 | ], 443 | "title": "CLIP Text Encode (Positive Prompt)", 444 | "properties": { 445 | "cnr_id": "comfy-core", 446 | "ver": "0.3.75", 447 | "Node name for S&R": "CLIPTextEncode", 448 | "ue_properties": { 449 | "widget_ue_connectable": {}, 450 | "version": "7.5.1", 451 | "input_ue_unconnectable": {} 452 | } 453 | }, 454 | "widgets_values": [ 455 | "portrait photo of GlaraH woman soldier. She is carrying an axe, held high in a threatening manner in her right hand. The location is a sewer. She is walking towards the camera, her feet spashing in the sewage. In her left hand a torch casting a hazy beam at the viewer. 
Behind her we can see a pile of dead zombies that she has killed moments before.\n" 456 | ], 457 | "color": "#232", 458 | "bgcolor": "#353" 459 | }, 460 | { 461 | "id": 90, 462 | "type": "LoraLoaderModelOnly", 463 | "pos": [ 464 | -3028.1630277763697, 465 | -1449.2089699208204 466 | ], 467 | "size": [ 468 | 405.7018134876739, 469 | 82 470 | ], 471 | "flags": {}, 472 | "order": 7, 473 | "mode": 0, 474 | "inputs": [ 475 | { 476 | "name": "model", 477 | "type": "MODEL", 478 | "link": 201 479 | } 480 | ], 481 | "outputs": [ 482 | { 483 | "name": "MODEL", 484 | "type": "MODEL", 485 | "links": [ 486 | 229 487 | ] 488 | } 489 | ], 490 | "properties": { 491 | "cnr_id": "comfy-core", 492 | "ver": "0.4.0", 493 | "Node name for S&R": "LoraLoaderModelOnly", 494 | "ue_properties": { 495 | "widget_ue_connectable": {}, 496 | "input_ue_unconnectable": {}, 497 | "version": "7.5.1" 498 | } 499 | }, 500 | "widgets_values": [ 501 | "zimage\\GlaraH.safetensors", 502 | 1 503 | ], 504 | "ndSuperSelectorEnabled": false, 505 | "ndPowerEnabled": false 506 | }, 507 | { 508 | "id": 95, 509 | "type": "Note", 510 | "pos": [ 511 | -3518.6809455844236, 512 | -2231.5973666626774 513 | ], 514 | "size": [ 515 | 411.82749093112125, 516 | 88 517 | ], 518 | "flags": {}, 519 | "order": 4, 520 | "mode": 0, 521 | "inputs": [], 522 | "outputs": [], 523 | "title": "TUTORIAL", 524 | "properties": { 525 | "ue_properties": { 526 | "widget_ue_connectable": {}, 527 | "version": "7.5.1", 528 | "input_ue_unconnectable": {} 529 | } 530 | }, 531 | "widgets_values": [ 532 | "WATCH VIDEO ON YOUTUBE - 4 MINS LONG - IF YOU ARE UNSURE!!\n\nhttps://www.youtube.com/watch?v=dkEB5i5yBUI" 533 | ], 534 | "color": "#432", 535 | "bgcolor": "#653" 536 | }, 537 | { 538 | "id": 89, 539 | "type": "Note", 540 | "pos": [ 541 | -3529.6782856422087, 542 | -2025.833701774536 543 | ], 544 | "size": [ 545 | 463.233262855058, 546 | 258.61320970611996 547 | ], 548 | "flags": {}, 549 | "order": 5, 550 | "mode": 0, 551 | "inputs": [], 552 | "outputs": [], 553 | "properties": { 554 | "ue_properties": { 555 | "widget_ue_connectable": {}, 556 | "version": "7.5.1", 557 | "input_ue_unconnectable": {} 558 | } 559 | }, 560 | "widgets_values": [ 561 | "This is the LoRA you are going to analyse and use. Note the lack of use of the model output: using this node as a pure loader is optional, so you can use it just for analysis.\n\n\n\nThe LoRAs I am using for this demo are:\n\nhttps://civitai.com/models/1241174/zitil-original-character-glara-h?modelVersionId=2495412\n\nhttps://civitai.com/models/1371819/1920s-horror-movies-lora?modelVersionId=2490859\n\n---\nIf you want to throw me a pint or a coffee, you can here! 
https://buymeacoffee.com/lorasandlenses" 562 | ], 563 | "color": "#432", 564 | "bgcolor": "#653" 565 | }, 566 | { 567 | "id": 73, 568 | "type": "CLIPTextEncode", 569 | "pos": [ 570 | -2591.429125527737, 571 | -1032.797465222949 572 | ], 573 | "size": [ 574 | 425.27801513671875, 575 | 180.6060791015625 576 | ], 577 | "flags": {}, 578 | "order": 6, 579 | "mode": 0, 580 | "inputs": [ 581 | { 582 | "name": "clip", 583 | "type": "CLIP", 584 | "link": 144 585 | } 586 | ], 587 | "outputs": [ 588 | { 589 | "name": "CONDITIONING", 590 | "type": "CONDITIONING", 591 | "slot_index": 0, 592 | "links": [ 593 | 139 594 | ] 595 | } 596 | ], 597 | "title": "CLIP Text Encode (Negative Prompt)", 598 | "properties": { 599 | "cnr_id": "comfy-core", 600 | "ver": "0.3.75", 601 | "Node name for S&R": "CLIPTextEncode", 602 | "ue_properties": { 603 | "widget_ue_connectable": {}, 604 | "version": "7.5.1", 605 | "input_ue_unconnectable": {} 606 | } 607 | }, 608 | "widgets_values": [ 609 | "blurry ugly bad" 610 | ], 611 | "color": "#322", 612 | "bgcolor": "#533" 613 | }, 614 | { 615 | "id": 99, 616 | "type": "LoRALoaderWithAnalysis", 617 | "pos": [ 618 | -3033.3644391597754, 619 | -2256.6102483250747 620 | ], 621 | "size": [ 622 | 397.512017257327, 623 | 205.69697827552227 624 | ], 625 | "flags": {}, 626 | "order": 8, 627 | "mode": 0, 628 | "inputs": [ 629 | { 630 | "name": "model", 631 | "type": "MODEL", 632 | "link": 224 633 | }, 634 | { 635 | "name": "clip", 636 | "type": "CLIP", 637 | "link": 225 638 | } 639 | ], 640 | "outputs": [ 641 | { 642 | "name": "model", 643 | "type": "MODEL", 644 | "links": null 645 | }, 646 | { 647 | "name": "clip", 648 | "type": "CLIP", 649 | "links": null 650 | }, 651 | { 652 | "name": "analysis", 653 | "type": "STRING", 654 | "links": [ 655 | 228 656 | ] 657 | }, 658 | { 659 | "name": "analysis_json", 660 | "type": "STRING", 661 | "links": [ 662 | 232 663 | ] 664 | }, 665 | { 666 | "name": "lora_path", 667 | "type": "STRING", 668 | "links": [ 669 | 231 670 | ] 671 | } 672 | ], 673 | "properties": { 674 | "aux_id": "ShootTheSound/comfyUI-Realtime-Lora", 675 | "ver": "a4e5b6615b5ded71abdca1558663ddd6ee665a3c", 676 | "Node name for S&R": "LoRALoaderWithAnalysis", 677 | "ue_properties": { 678 | "widget_ue_connectable": {}, 679 | "input_ue_unconnectable": {}, 680 | "version": "7.5.1" 681 | } 682 | }, 683 | "widgets_values": [ 684 | "zimage\\20s_horror_z_100.safetensors", 685 | 1, 686 | 1 687 | ], 688 | "ndSuperSelectorEnabled": false, 689 | "ndPowerEnabled": false 690 | }, 691 | { 692 | "id": 100, 693 | "type": "ZImageSelectiveLoRALoader", 694 | "pos": [ 695 | -2552.4562724957777, 696 | -2018.0461972467117 697 | ], 698 | "size": [ 699 | 500, 700 | 910 701 | ], 702 | "flags": {}, 703 | "order": 10, 704 | "mode": 0, 705 | "inputs": [ 706 | { 707 | "name": "model", 708 | "type": "MODEL", 709 | "link": 229 710 | }, 711 | { 712 | "name": "clip", 713 | "type": "CLIP", 714 | "link": 230 715 | }, 716 | { 717 | "name": "lora_path_opt", 718 | "shape": 7, 719 | "type": "STRING", 720 | "link": 231 721 | }, 722 | { 723 | "name": "analysis_json", 724 | "shape": 7, 725 | "type": "STRING", 726 | "link": 232 727 | } 728 | ], 729 | "outputs": [ 730 | { 731 | "name": "model", 732 | "type": "MODEL", 733 | "links": [ 734 | 233 735 | ] 736 | }, 737 | { 738 | "name": "clip", 739 | "type": "CLIP", 740 | "links": [ 741 | 234 742 | ] 743 | }, 744 | { 745 | "name": "info", 746 | "type": "STRING", 747 | "links": null 748 | } 749 | ], 750 | "properties": { 751 | "aux_id": "ShootTheSound/comfyUI-Realtime-Lora", 752 | 
"ver": "a4e5b6615b5ded71abdca1558663ddd6ee665a3c", 753 | "ue_properties": { 754 | "widget_ue_connectable": {}, 755 | "input_ue_unconnectable": {} 756 | }, 757 | "Node name for S&R": "ZImageSelectiveLoRALoader" 758 | }, 759 | "widgets_values": [ 760 | "FLUX\\1950s Hollywood glamour..safetensors", 761 | 1, 762 | "Default", 763 | true, 764 | 1, 765 | true, 766 | 1, 767 | true, 768 | 1, 769 | true, 770 | 1, 771 | true, 772 | 1, 773 | true, 774 | 1, 775 | true, 776 | 1, 777 | true, 778 | 1, 779 | true, 780 | 1, 781 | true, 782 | 1, 783 | true, 784 | 1, 785 | true, 786 | 1, 787 | true, 788 | 1, 789 | true, 790 | 1, 791 | true, 792 | 1, 793 | true, 794 | 1, 795 | false, 796 | 1, 797 | false, 798 | 1, 799 | false, 800 | 1, 801 | false, 802 | 1, 803 | false, 804 | 1, 805 | false, 806 | 1, 807 | false, 808 | 1, 809 | false, 810 | 1, 811 | false, 812 | 1, 813 | false, 814 | 1, 815 | false, 816 | 1, 817 | false, 818 | 1, 819 | false, 820 | 1, 821 | false, 822 | 1, 823 | false, 824 | 1 825 | ], 826 | "ndSuperSelectorEnabled": false, 827 | "ndPowerEnabled": false 828 | } 829 | ], 830 | "links": [ 831 | [ 832 | 137, 833 | 70, 834 | 0, 835 | 68, 836 | 0, 837 | "MODEL" 838 | ], 839 | [ 840 | 138, 841 | 74, 842 | 0, 843 | 68, 844 | 1, 845 | "CONDITIONING" 846 | ], 847 | [ 848 | 139, 849 | 73, 850 | 0, 851 | 68, 852 | 2, 853 | "CONDITIONING" 854 | ], 855 | [ 856 | 140, 857 | 72, 858 | 0, 859 | 68, 860 | 3, 861 | "LATENT" 862 | ], 863 | [ 864 | 144, 865 | 76, 866 | 0, 867 | 73, 868 | 0, 869 | "CLIP" 870 | ], 871 | [ 872 | 146, 873 | 68, 874 | 0, 875 | 78, 876 | 0, 877 | "LATENT" 878 | ], 879 | [ 880 | 147, 881 | 75, 882 | 0, 883 | 78, 884 | 1, 885 | "VAE" 886 | ], 887 | [ 888 | 148, 889 | 78, 890 | 0, 891 | 9, 892 | 0, 893 | "IMAGE" 894 | ], 895 | [ 896 | 201, 897 | 77, 898 | 0, 899 | 90, 900 | 0, 901 | "MODEL" 902 | ], 903 | [ 904 | 224, 905 | 77, 906 | 0, 907 | 99, 908 | 0, 909 | "MODEL" 910 | ], 911 | [ 912 | 225, 913 | 76, 914 | 0, 915 | 99, 916 | 1, 917 | "CLIP" 918 | ], 919 | [ 920 | 228, 921 | 99, 922 | 2, 923 | 87, 924 | 0, 925 | "STRING" 926 | ], 927 | [ 928 | 229, 929 | 90, 930 | 0, 931 | 100, 932 | 0, 933 | "MODEL" 934 | ], 935 | [ 936 | 230, 937 | 76, 938 | 0, 939 | 100, 940 | 1, 941 | "CLIP" 942 | ], 943 | [ 944 | 231, 945 | 99, 946 | 4, 947 | 100, 948 | 2, 949 | "STRING" 950 | ], 951 | [ 952 | 232, 953 | 99, 954 | 3, 955 | 100, 956 | 3, 957 | "STRING" 958 | ], 959 | [ 960 | 233, 961 | 100, 962 | 0, 963 | 70, 964 | 0, 965 | "MODEL" 966 | ], 967 | [ 968 | 234, 969 | 100, 970 | 1, 971 | 74, 972 | 0, 973 | "CLIP" 974 | ] 975 | ], 976 | "groups": [], 977 | "config": {}, 978 | "extra": { 979 | "ds": { 980 | "scale": 0.7972024500000007, 981 | "offset": [ 982 | 3549.693548924114, 983 | 2389.3446045709725 984 | ] 985 | }, 986 | "frontendVersion": "1.34.8", 987 | "workflowRendererVersion": "LG", 988 | "ue_links": [], 989 | "links_added_by_ue": [], 990 | "VHS_latentpreview": false, 991 | "VHS_latentpreviewrate": 0, 992 | "VHS_MetadataImage": true, 993 | "VHS_KeepIntermediate": true 994 | }, 995 | "version": 0.4 996 | } -------------------------------------------------------------------------------- /workflows/SDXLDemo.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "92112d97-bb64-4b44-86f2-ea5691ef8f6e", 3 | "revision": 0, 4 | "last_node_id": 67, 5 | "last_link_id": 136, 6 | "nodes": [ 7 | { 8 | "id": 7, 9 | "type": "CLIPTextEncode", 10 | "pos": [ 11 | -1026.7304384225074, 12 | 813.6083568917051 13 | ], 14 | "size": [ 15 | 425.27801513671875, 16 | 
180.6060791015625 17 | ], 18 | "flags": {}, 19 | "order": 13, 20 | "mode": 0, 21 | "inputs": [ 22 | { 23 | "name": "clip", 24 | "type": "CLIP", 25 | "link": 67 26 | } 27 | ], 28 | "outputs": [ 29 | { 30 | "name": "CONDITIONING", 31 | "type": "CONDITIONING", 32 | "slot_index": 0, 33 | "links": [ 34 | 6 35 | ] 36 | } 37 | ], 38 | "title": "CLIP Text Encode (Negative Prompt)", 39 | "properties": { 40 | "cnr_id": "comfy-core", 41 | "ver": "0.3.75", 42 | "Node name for S&R": "CLIPTextEncode", 43 | "ue_properties": { 44 | "widget_ue_connectable": {}, 45 | "version": "7.5.1", 46 | "input_ue_unconnectable": {} 47 | } 48 | }, 49 | "widgets_values": [ 50 | "blurry ugly bad" 51 | ], 52 | "color": "#322", 53 | "bgcolor": "#533" 54 | }, 55 | { 56 | "id": 29, 57 | "type": "LoadImage", 58 | "pos": [ 59 | -1899.7019401757116, 60 | -39.665001232435756 61 | ], 62 | "size": [ 63 | 395.5046924121225, 64 | 376.092132305915 65 | ], 66 | "flags": {}, 67 | "order": 0, 68 | "mode": 0, 69 | "inputs": [], 70 | "outputs": [ 71 | { 72 | "name": "IMAGE", 73 | "type": "IMAGE", 74 | "links": [ 75 | 126 76 | ] 77 | }, 78 | { 79 | "name": "MASK", 80 | "type": "MASK", 81 | "links": null 82 | } 83 | ], 84 | "properties": { 85 | "cnr_id": "comfy-core", 86 | "ver": "0.3.75", 87 | "Node name for S&R": "LoadImage", 88 | "ue_properties": { 89 | "widget_ue_connectable": {}, 90 | "input_ue_unconnectable": {}, 91 | "version": "7.5.1" 92 | } 93 | }, 94 | "widgets_values": [ 95 | "original_3a930bdd-a01e-42d5-b8a4-8ebaf467384d.webp", 96 | "image" 97 | ] 98 | }, 99 | { 100 | "id": 59, 101 | "type": "String", 102 | "pos": [ 103 | -1903.96103902309, 104 | -289.2550430271083 105 | ], 106 | "size": [ 107 | 400, 108 | 200 109 | ], 110 | "flags": {}, 111 | "order": 1, 112 | "mode": 0, 113 | "inputs": [], 114 | "outputs": [ 115 | { 116 | "name": "STRING", 117 | "type": "STRING", 118 | "links": [ 119 | 127 120 | ] 121 | } 122 | ], 123 | "properties": { 124 | "cnr_id": "ComfyLiterals", 125 | "ver": "bdddb08ca82d90d75d97b1d437a652e0284a32ac", 126 | "Node name for S&R": "String", 127 | "ue_properties": { 128 | "widget_ue_connectable": {}, 129 | "input_ue_unconnectable": {}, 130 | "version": "7.5.1" 131 | } 132 | }, 133 | "widgets_values": [ 134 | "zwxem painting of corn field" 135 | ] 136 | }, 137 | { 138 | "id": 30, 139 | "type": "LoadImage", 140 | "pos": [ 141 | -1482.2085987238015, 142 | -35.6677441721797 143 | ], 144 | "size": [ 145 | 407.9231188733056, 146 | 366.43335616943943 147 | ], 148 | "flags": {}, 149 | "order": 2, 150 | "mode": 0, 151 | "inputs": [], 152 | "outputs": [ 153 | { 154 | "name": "IMAGE", 155 | "type": "IMAGE", 156 | "links": [ 157 | 128 158 | ] 159 | }, 160 | { 161 | "name": "MASK", 162 | "type": "MASK", 163 | "links": null 164 | } 165 | ], 166 | "properties": { 167 | "cnr_id": "comfy-core", 168 | "ver": "0.3.75", 169 | "Node name for S&R": "LoadImage", 170 | "ue_properties": { 171 | "widget_ue_connectable": {}, 172 | "input_ue_unconnectable": {}, 173 | "version": "7.5.1" 174 | } 175 | }, 176 | "widgets_values": [ 177 | "Arn-Van-Gogh-Secondary-1.webp", 178 | "image" 179 | ] 180 | }, 181 | { 182 | "id": 61, 183 | "type": "String", 184 | "pos": [ 185 | -1489.0050028571977, 186 | -280.3313861058403 187 | ], 188 | "size": [ 189 | 400, 190 | 200 191 | ], 192 | "flags": {}, 193 | "order": 3, 194 | "mode": 0, 195 | "inputs": [], 196 | "outputs": [ 197 | { 198 | "name": "STRING", 199 | "type": "STRING", 200 | "links": [ 201 | 129 202 | ] 203 | } 204 | ], 205 | "properties": { 206 | "cnr_id": "ComfyLiterals", 207 | "ver": 
"bdddb08ca82d90d75d97b1d437a652e0284a32ac", 208 | "Node name for S&R": "String", 209 | "ue_properties": { 210 | "widget_ue_connectable": {}, 211 | "input_ue_unconnectable": {}, 212 | "version": "7.5.1" 213 | } 214 | }, 215 | "widgets_values": [ 216 | "zwxem painting of a tree in a field" 217 | ] 218 | }, 219 | { 220 | "id": 32, 221 | "type": "LoadImage", 222 | "pos": [ 223 | -1897.3742566951246, 224 | 698.405647918002 225 | ], 226 | "size": [ 227 | 392.7450420874152, 228 | 334.6973774353048 229 | ], 230 | "flags": {}, 231 | "order": 4, 232 | "mode": 0, 233 | "inputs": [], 234 | "outputs": [ 235 | { 236 | "name": "IMAGE", 237 | "type": "IMAGE", 238 | "links": [ 239 | 130 240 | ] 241 | }, 242 | { 243 | "name": "MASK", 244 | "type": "MASK", 245 | "links": null 246 | } 247 | ], 248 | "properties": { 249 | "cnr_id": "comfy-core", 250 | "ver": "0.3.75", 251 | "Node name for S&R": "LoadImage", 252 | "ue_properties": { 253 | "widget_ue_connectable": {}, 254 | "input_ue_unconnectable": {}, 255 | "version": "7.5.1" 256 | } 257 | }, 258 | "widgets_values": [ 259 | "download (1).jpg", 260 | "image" 261 | ] 262 | }, 263 | { 264 | "id": 60, 265 | "type": "String", 266 | "pos": [ 267 | -1902.842945889929, 268 | 457.6381465505763 269 | ], 270 | "size": [ 271 | 400, 272 | 200 273 | ], 274 | "flags": {}, 275 | "order": 5, 276 | "mode": 0, 277 | "inputs": [], 278 | "outputs": [ 279 | { 280 | "name": "STRING", 281 | "type": "STRING", 282 | "links": [ 283 | 131 284 | ] 285 | } 286 | ], 287 | "properties": { 288 | "cnr_id": "ComfyLiterals", 289 | "ver": "bdddb08ca82d90d75d97b1d437a652e0284a32ac", 290 | "Node name for S&R": "String", 291 | "ue_properties": { 292 | "widget_ue_connectable": {}, 293 | "input_ue_unconnectable": {}, 294 | "version": "7.5.1" 295 | } 296 | }, 297 | "widgets_values": [ 298 | "zwxem portrait painting of a man" 299 | ] 300 | }, 301 | { 302 | "id": 31, 303 | "type": "LoadImage", 304 | "pos": [ 305 | -1481.8263702119325, 306 | 699.6559765208757 307 | ], 308 | "size": [ 309 | 373.4274898144638, 310 | 341.59650324707343 311 | ], 312 | "flags": {}, 313 | "order": 6, 314 | "mode": 0, 315 | "inputs": [], 316 | "outputs": [ 317 | { 318 | "name": "IMAGE", 319 | "type": "IMAGE", 320 | "links": [ 321 | 132 322 | ] 323 | }, 324 | { 325 | "name": "MASK", 326 | "type": "MASK", 327 | "links": null 328 | } 329 | ], 330 | "properties": { 331 | "cnr_id": "comfy-core", 332 | "ver": "0.3.75", 333 | "Node name for S&R": "LoadImage", 334 | "ue_properties": { 335 | "widget_ue_connectable": {}, 336 | "input_ue_unconnectable": {}, 337 | "version": "7.5.1" 338 | } 339 | }, 340 | "widgets_values": [ 341 | "Starry-Night-canvas-Vincent-van-Gogh-New-1889.webp", 342 | "image" 343 | ] 344 | }, 345 | { 346 | "id": 62, 347 | "type": "String", 348 | "pos": [ 349 | -1497.6567994365214, 350 | 461.7230381340444 351 | ], 352 | "size": [ 353 | 400, 354 | 200 355 | ], 356 | "flags": {}, 357 | "order": 7, 358 | "mode": 0, 359 | "inputs": [], 360 | "outputs": [ 361 | { 362 | "name": "STRING", 363 | "type": "STRING", 364 | "links": [ 365 | 133 366 | ] 367 | } 368 | ], 369 | "properties": { 370 | "cnr_id": "ComfyLiterals", 371 | "ver": "bdddb08ca82d90d75d97b1d437a652e0284a32ac", 372 | "Node name for S&R": "String", 373 | "ue_properties": { 374 | "widget_ue_connectable": {}, 375 | "input_ue_unconnectable": {}, 376 | "version": "7.5.1" 377 | } 378 | }, 379 | "widgets_values": [ 380 | "zwxem painting of a city under a starry night sky" 381 | ] 382 | }, 383 | { 384 | "id": 28, 385 | "type": "ShowText|pysssss", 386 | "pos": [ 387 | 
-451.3484212797091, 388 | -90.40574906711572 389 | ], 390 | "size": [ 391 | 253.14566331300148, 392 | 96.37108328003751 393 | ], 394 | "flags": {}, 395 | "order": 16, 396 | "mode": 0, 397 | "inputs": [ 398 | { 399 | "name": "text", 400 | "type": "STRING", 401 | "link": 136 402 | } 403 | ], 404 | "outputs": [ 405 | { 406 | "name": "STRING", 407 | "shape": 6, 408 | "type": "STRING", 409 | "links": null 410 | } 411 | ], 412 | "title": "Where The Lora is Saved", 413 | "properties": { 414 | "cnr_id": "comfyui-custom-scripts", 415 | "ver": "1.2.5", 416 | "Node name for S&R": "ShowText|pysssss", 417 | "ue_properties": { 418 | "widget_ue_connectable": {}, 419 | "input_ue_unconnectable": {}, 420 | "version": "7.5.1" 421 | } 422 | }, 423 | "widgets_values": [ 424 | "MyLora_20251205_153402.safetensors" 425 | ] 426 | }, 427 | { 428 | "id": 36, 429 | "type": "CheckpointLoaderSimple", 430 | "pos": [ 431 | -450.6798877005887, 432 | -249.9084806273337 433 | ], 434 | "size": [ 435 | 270, 436 | 98 437 | ], 438 | "flags": {}, 439 | "order": 8, 440 | "mode": 0, 441 | "inputs": [], 442 | "outputs": [ 443 | { 444 | "name": "MODEL", 445 | "type": "MODEL", 446 | "links": [ 447 | 65 448 | ] 449 | }, 450 | { 451 | "name": "CLIP", 452 | "type": "CLIP", 453 | "links": [ 454 | 66, 455 | 67 456 | ] 457 | }, 458 | { 459 | "name": "VAE", 460 | "type": "VAE", 461 | "links": [ 462 | 68 463 | ] 464 | } 465 | ], 466 | "properties": { 467 | "cnr_id": "comfy-core", 468 | "ver": "0.3.75", 469 | "Node name for S&R": "CheckpointLoaderSimple", 470 | "ue_properties": { 471 | "widget_ue_connectable": {}, 472 | "input_ue_unconnectable": {}, 473 | "version": "7.5.1" 474 | } 475 | }, 476 | "widgets_values": [ 477 | "juggernautXL_ragnarokBy.safetensors" 478 | ], 479 | "ndSuperSelectorEnabled": false, 480 | "ndPowerEnabled": false 481 | }, 482 | { 483 | "id": 13, 484 | "type": "EmptySD3LatentImage", 485 | "pos": [ 486 | -472.80581516937707, 487 | 198.39794190753517 488 | ], 489 | "size": [ 490 | 315, 491 | 106 492 | ], 493 | "flags": {}, 494 | "order": 9, 495 | "mode": 0, 496 | "inputs": [], 497 | "outputs": [ 498 | { 499 | "name": "LATENT", 500 | "type": "LATENT", 501 | "slot_index": 0, 502 | "links": [ 503 | 17 504 | ] 505 | } 506 | ], 507 | "properties": { 508 | "cnr_id": "comfy-core", 509 | "ver": "0.3.75", 510 | "Node name for S&R": "EmptySD3LatentImage", 511 | "ue_properties": { 512 | "widget_ue_connectable": {}, 513 | "version": "7.5.1", 514 | "input_ue_unconnectable": {} 515 | } 516 | }, 517 | "widgets_values": [ 518 | 1024, 519 | 1024, 520 | 1 521 | ] 522 | }, 523 | { 524 | "id": 8, 525 | "type": "VAEDecode", 526 | "pos": [ 527 | -396.519435729211, 528 | 946.5128816347581 529 | ], 530 | "size": [ 531 | 210, 532 | 46 533 | ], 534 | "flags": {}, 535 | "order": 17, 536 | "mode": 0, 537 | "inputs": [ 538 | { 539 | "name": "samples", 540 | "type": "LATENT", 541 | "link": 51 542 | }, 543 | { 544 | "name": "vae", 545 | "type": "VAE", 546 | "link": 68 547 | } 548 | ], 549 | "outputs": [ 550 | { 551 | "name": "IMAGE", 552 | "type": "IMAGE", 553 | "slot_index": 0, 554 | "links": [ 555 | 16 556 | ] 557 | } 558 | ], 559 | "properties": { 560 | "cnr_id": "comfy-core", 561 | "ver": "0.3.75", 562 | "Node name for S&R": "VAEDecode", 563 | "ue_properties": { 564 | "widget_ue_connectable": {}, 565 | "version": "7.5.1", 566 | "input_ue_unconnectable": {} 567 | } 568 | }, 569 | "widgets_values": [] 570 | }, 571 | { 572 | "id": 3, 573 | "type": "KSampler", 574 | "pos": [ 575 | -460.7331603722988, 576 | 646.5126970232761 577 | ], 578 | "size": [ 579 
| 315, 580 | 262 581 | ], 582 | "flags": {}, 583 | "order": 15, 584 | "mode": 0, 585 | "inputs": [ 586 | { 587 | "name": "model", 588 | "type": "MODEL", 589 | "link": 69 590 | }, 591 | { 592 | "name": "positive", 593 | "type": "CONDITIONING", 594 | "link": 4 595 | }, 596 | { 597 | "name": "negative", 598 | "type": "CONDITIONING", 599 | "link": 6 600 | }, 601 | { 602 | "name": "latent_image", 603 | "type": "LATENT", 604 | "link": 17 605 | } 606 | ], 607 | "outputs": [ 608 | { 609 | "name": "LATENT", 610 | "type": "LATENT", 611 | "slot_index": 0, 612 | "links": [ 613 | 51 614 | ] 615 | } 616 | ], 617 | "properties": { 618 | "cnr_id": "comfy-core", 619 | "ver": "0.3.75", 620 | "Node name for S&R": "KSampler", 621 | "ue_properties": { 622 | "widget_ue_connectable": {}, 623 | "version": "7.5.1", 624 | "input_ue_unconnectable": {} 625 | } 626 | }, 627 | "widgets_values": [ 628 | 981878919128522, 629 | "fixed", 630 | 20, 631 | 2.2, 632 | "euler", 633 | "simple", 634 | 1 635 | ] 636 | }, 637 | { 638 | "id": 67, 639 | "type": "PrimitiveString", 640 | "pos": [ 641 | -610.4447607164396, 642 | 532.8721590032962 643 | ], 644 | "size": [ 645 | 399.114163815623, 646 | 60.827317455816456 647 | ], 648 | "flags": { 649 | "collapsed": false 650 | }, 651 | "order": 10, 652 | "mode": 0, 653 | "inputs": [], 654 | "outputs": [ 655 | { 656 | "name": "STRING", 657 | "type": "STRING", 658 | "links": null 659 | } 660 | ], 661 | "title": "<----- Point me at your SD Scripts install", 662 | "properties": { 663 | "cnr_id": "comfy-core", 664 | "ver": "0.3.75", 665 | "Node name for S&R": "PrimitiveString", 666 | "ue_properties": { 667 | "widget_ue_connectable": {}, 668 | "input_ue_unconnectable": {}, 669 | "version": "7.5.1" 670 | } 671 | }, 672 | "widgets_values": [ 673 | "<----- Point me at your SD Scripts install" 674 | ], 675 | "color": "#232", 676 | "bgcolor": "#353" 677 | }, 678 | { 679 | "id": 9, 680 | "type": "SaveImage", 681 | "pos": [ 682 | -30.600891095319753, 683 | -188.01121401539967 684 | ], 685 | "size": [ 686 | 976.0567626953125, 687 | 1060.9766845703125 688 | ], 689 | "flags": {}, 690 | "order": 18, 691 | "mode": 0, 692 | "inputs": [ 693 | { 694 | "name": "images", 695 | "type": "IMAGE", 696 | "link": 16 697 | } 698 | ], 699 | "outputs": [], 700 | "properties": { 701 | "cnr_id": "comfy-core", 702 | "ver": "0.3.75", 703 | "Node name for S&R": "SaveImage", 704 | "ue_properties": { 705 | "widget_ue_connectable": {}, 706 | "version": "7.5.1", 707 | "input_ue_unconnectable": {} 708 | } 709 | }, 710 | "widgets_values": [ 711 | "ComfyUI" 712 | ] 713 | }, 714 | { 715 | "id": 65, 716 | "type": "SDXLLoraTrainer", 717 | "pos": [ 718 | -1030.241532656554, 719 | 270.06872384472405 720 | ], 721 | "size": [ 722 | 400, 723 | 492 724 | ], 725 | "flags": {}, 726 | "order": 11, 727 | "mode": 0, 728 | "inputs": [ 729 | { 730 | "name": "image_1", 731 | "shape": 7, 732 | "type": "IMAGE", 733 | "link": 126 734 | }, 735 | { 736 | "name": "caption_1", 737 | "shape": 7, 738 | "type": "STRING", 739 | "link": 127 740 | }, 741 | { 742 | "name": "image_2", 743 | "shape": 7, 744 | "type": "IMAGE", 745 | "link": 128 746 | }, 747 | { 748 | "name": "caption_2", 749 | "shape": 7, 750 | "type": "STRING", 751 | "link": 129 752 | }, 753 | { 754 | "name": "image_3", 755 | "shape": 7, 756 | "type": "IMAGE", 757 | "link": 130 758 | }, 759 | { 760 | "name": "caption_3", 761 | "shape": 7, 762 | "type": "STRING", 763 | "link": 131 764 | }, 765 | { 766 | "name": "image_4", 767 | "shape": 7, 768 | "type": "IMAGE", 769 | "link": 132 770 | }, 771 | { 
772 | "name": "caption_4", 773 | "shape": 7, 774 | "type": "STRING", 775 | "link": 133 776 | } 777 | ], 778 | "outputs": [ 779 | { 780 | "name": "lora_path", 781 | "type": "STRING", 782 | "links": [ 783 | 135 784 | ] 785 | } 786 | ], 787 | "properties": { 788 | "aux_id": "shootthesound/comfyUI-Realtime-Lora", 789 | "ver": "7c5d8e9358c7be1a4995a56fede6d8c3fd534616", 790 | "ue_properties": { 791 | "widget_ue_connectable": {}, 792 | "input_ue_unconnectable": {}, 793 | "version": "7.5.1" 794 | } 795 | }, 796 | "widgets_values": [ 797 | 4, 798 | null, 799 | "", 800 | "S:\\Auto\\sd-scripts", 801 | "juggernautXL_ragnarokBy.safetensors", 802 | "photo of subject", 803 | 500, 804 | 0.0005, 805 | 16, 806 | "Low (768px)", 807 | true, 808 | "MyLora" 809 | ], 810 | "ndSuperSelectorEnabled": false, 811 | "ndPowerEnabled": false 812 | }, 813 | { 814 | "id": 34, 815 | "type": "ApplyTrainedLora", 816 | "pos": [ 817 | -459.2709927404857, 818 | 48.87339071257331 819 | ], 820 | "size": [ 821 | 270, 822 | 98 823 | ], 824 | "flags": {}, 825 | "order": 14, 826 | "mode": 0, 827 | "inputs": [ 828 | { 829 | "name": "model", 830 | "type": "MODEL", 831 | "link": 65 832 | }, 833 | { 834 | "name": "lora_path", 835 | "type": "STRING", 836 | "link": 135 837 | } 838 | ], 839 | "outputs": [ 840 | { 841 | "name": "model", 842 | "type": "MODEL", 843 | "links": [ 844 | 69 845 | ] 846 | }, 847 | { 848 | "name": "lora_name", 849 | "type": "STRING", 850 | "links": [] 851 | }, 852 | { 853 | "name": "lora_path", 854 | "type": "STRING", 855 | "links": [ 856 | 136 857 | ] 858 | } 859 | ], 860 | "properties": { 861 | "aux_id": "shootthesound/comfyUI-Realtime-Lora", 862 | "ver": "7c5d8e9358c7be1a4995a56fede6d8c3fd534616", 863 | "Node name for S&R": "ApplyTrainedLora", 864 | "ue_properties": { 865 | "widget_ue_connectable": {}, 866 | "input_ue_unconnectable": {}, 867 | "version": "7.5.1" 868 | } 869 | }, 870 | "widgets_values": [ 871 | 1 872 | ] 873 | }, 874 | { 875 | "id": 6, 876 | "type": "CLIPTextEncode", 877 | "pos": [ 878 | -1026.9460355339493, 879 | 49.19264981505006 880 | ], 881 | "size": [ 882 | 423.83001708984375, 883 | 177.11770629882812 884 | ], 885 | "flags": {}, 886 | "order": 12, 887 | "mode": 0, 888 | "inputs": [ 889 | { 890 | "name": "clip", 891 | "type": "CLIP", 892 | "link": 66 893 | } 894 | ], 895 | "outputs": [ 896 | { 897 | "name": "CONDITIONING", 898 | "type": "CONDITIONING", 899 | "slot_index": 0, 900 | "links": [ 901 | 4 902 | ] 903 | } 904 | ], 905 | "title": "CLIP Text Encode (Positive Prompt)", 906 | "properties": { 907 | "cnr_id": "comfy-core", 908 | "ver": "0.3.75", 909 | "Node name for S&R": "CLIPTextEncode", 910 | "ue_properties": { 911 | "widget_ue_connectable": {}, 912 | "version": "7.5.1", 913 | "input_ue_unconnectable": {} 914 | } 915 | }, 916 | "widgets_values": [ 917 | "zwxem style painting of a city with amazing towers at night" 918 | ], 919 | "color": "#232", 920 | "bgcolor": "#353" 921 | } 922 | ], 923 | "links": [ 924 | [ 925 | 4, 926 | 6, 927 | 0, 928 | 3, 929 | 1, 930 | "CONDITIONING" 931 | ], 932 | [ 933 | 6, 934 | 7, 935 | 0, 936 | 3, 937 | 2, 938 | "CONDITIONING" 939 | ], 940 | [ 941 | 16, 942 | 8, 943 | 0, 944 | 9, 945 | 0, 946 | "IMAGE" 947 | ], 948 | [ 949 | 17, 950 | 13, 951 | 0, 952 | 3, 953 | 3, 954 | "LATENT" 955 | ], 956 | [ 957 | 51, 958 | 3, 959 | 0, 960 | 8, 961 | 0, 962 | "LATENT" 963 | ], 964 | [ 965 | 65, 966 | 36, 967 | 0, 968 | 34, 969 | 0, 970 | "MODEL" 971 | ], 972 | [ 973 | 66, 974 | 36, 975 | 1, 976 | 6, 977 | 0, 978 | "CLIP" 979 | ], 980 | [ 981 | 67, 982 | 36, 983 | 1, 
984 | 7, 985 | 0, 986 | "CLIP" 987 | ], 988 | [ 989 | 68, 990 | 36, 991 | 2, 992 | 8, 993 | 1, 994 | "VAE" 995 | ], 996 | [ 997 | 69, 998 | 34, 999 | 0, 1000 | 3, 1001 | 0, 1002 | "MODEL" 1003 | ], 1004 | [ 1005 | 126, 1006 | 29, 1007 | 0, 1008 | 65, 1009 | 0, 1010 | "IMAGE" 1011 | ], 1012 | [ 1013 | 127, 1014 | 59, 1015 | 0, 1016 | 65, 1017 | 1, 1018 | "STRING" 1019 | ], 1020 | [ 1021 | 128, 1022 | 30, 1023 | 0, 1024 | 65, 1025 | 2, 1026 | "IMAGE" 1027 | ], 1028 | [ 1029 | 129, 1030 | 61, 1031 | 0, 1032 | 65, 1033 | 3, 1034 | "STRING" 1035 | ], 1036 | [ 1037 | 130, 1038 | 32, 1039 | 0, 1040 | 65, 1041 | 4, 1042 | "IMAGE" 1043 | ], 1044 | [ 1045 | 131, 1046 | 60, 1047 | 0, 1048 | 65, 1049 | 5, 1050 | "STRING" 1051 | ], 1052 | [ 1053 | 132, 1054 | 31, 1055 | 0, 1056 | 65, 1057 | 6, 1058 | "IMAGE" 1059 | ], 1060 | [ 1061 | 133, 1062 | 62, 1063 | 0, 1064 | 65, 1065 | 7, 1066 | "STRING" 1067 | ], 1068 | [ 1069 | 135, 1070 | 65, 1071 | 0, 1072 | 34, 1073 | 1, 1074 | "STRING" 1075 | ], 1076 | [ 1077 | 136, 1078 | 34, 1079 | 2, 1080 | 28, 1081 | 0, 1082 | "STRING" 1083 | ] 1084 | ], 1085 | "groups": [], 1086 | "config": {}, 1087 | "extra": { 1088 | "ds": { 1089 | "scale": 0.797202450000001, 1090 | "offset": [ 1091 | 3101.4030147235076, 1092 | 484.3380049971844 1093 | ] 1094 | }, 1095 | "frontendVersion": "1.32.9", 1096 | "workflowRendererVersion": "LG", 1097 | "ue_links": [], 1098 | "links_added_by_ue": [], 1099 | "VHS_latentpreview": false, 1100 | "VHS_latentpreviewrate": 0, 1101 | "VHS_MetadataImage": true, 1102 | "VHS_KeepIntermediate": true 1103 | }, 1104 | "version": 0.4 1105 | } --------------------------------------------------------------------------------
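
Note on the workflow above: the SDXLLoraTrainer node (id 65) emits the trained LoRA's file path as a STRING output, ApplyTrainedLora (id 34) merges that LoRA into the checkpoint model before the KSampler, and the same path is echoed to the "Where The Lora is Saved" ShowText node so the saved filename (e.g. MyLora_20251205_153402.safetensors) is visible after training. For readers who want to inspect that output file outside ComfyUI, a minimal sketch follows — it assumes the standard `safetensors` Python package, and the directory in the path is hypothetical (substitute wherever your ComfyUI install writes trained LoRAs):

```python
# Minimal sketch: inspect a LoRA file produced by the SDXL demo workflow.
# Assumes the `safetensors` package is installed; the directory below is a
# hypothetical example -- point it at your own trained LoRA output.
from safetensors.torch import load_file

lora_path = "ComfyUI/models/loras/MyLora_20251205_153402.safetensors"
state_dict = load_file(lora_path)

# Report how many LoRA tensors were saved and show a few key names/shapes.
print(f"{len(state_dict)} tensors in {lora_path}")
for name, tensor in list(state_dict.items())[:5]:
    print(f"  {name}: {tuple(tensor.shape)}")
```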