├── README.md ├── requirements.txt ├── __init__.py ├── utils.py └── nodes.py /README.md: -------------------------------------------------------------------------------- 1 | # ComfyUI-DDUF 2 | 3 | Run DDUF in ComfyUI - powered by Diffusers. 4 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # diffusers[torch] 2 | accelerate 3 | transformers 4 | safetensors 5 | omegaconf 6 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | from .nodes import * 2 | 3 | __all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS"] 4 | -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | import io 2 | import torch 3 | import requests 4 | import numpy as np 5 | from PIL import Image 6 | from omegaconf import OmegaConf 7 | from torchvision.transforms import ToTensor 8 | from diffusers import ( 9 | DDIMScheduler, 10 | DDPMScheduler, 11 | DEISMultistepScheduler, 12 | DPMSolverMultistepScheduler, 13 | DPMSolverSinglestepScheduler, 14 | EulerAncestralDiscreteScheduler, 15 | EulerDiscreteScheduler, 16 | HeunDiscreteScheduler, 17 | KDPM2AncestralDiscreteScheduler, 18 | KDPM2DiscreteScheduler, 19 | UniPCMultistepScheduler, 20 | ) 21 | from comfy.model_management import get_torch_device 22 | 23 | SCHEDULERS = { 24 | 'DDIM' : DDIMScheduler, 25 | 'DDPM' : DDPMScheduler, 26 | 'DEISMultistep' : DEISMultistepScheduler, 27 | 'DPMSolverMultistep' : DPMSolverMultistepScheduler, 28 | 'DPMSolverSinglestep' : DPMSolverSinglestepScheduler, 29 | 'EulerAncestralDiscrete' : EulerAncestralDiscreteScheduler, 30 | 'EulerDiscrete' : EulerDiscreteScheduler, 31 | 'HeunDiscrete' : HeunDiscreteScheduler, 32 | 'KDPM2AncestralDiscrete' : KDPM2AncestralDiscreteScheduler, 33 | 'KDPM2Discrete' : KDPM2DiscreteScheduler, 34 | 'UniPCMultistep' : UniPCMultistepScheduler 35 | } 36 | 37 | def token_auto_concat_embeds(pipe, positive, negative): 38 | device = get_torch_device() 39 | max_length = pipe.tokenizer.model_max_length 40 | positive_length = pipe.tokenizer(positive, return_tensors="pt").input_ids.shape[-1] 41 | negative_length = pipe.tokenizer(negative, return_tensors="pt").input_ids.shape[-1] 42 | 43 | print(f'Token length is model maximum: {max_length}, positive length: {positive_length}, negative length: {negative_length}.') 44 | if max_length < positive_length or max_length < negative_length: 45 | print('Concatenated embedding.') 46 | if positive_length > negative_length: 47 | positive_ids = pipe.tokenizer(positive, return_tensors="pt").input_ids.to(device) 48 | negative_ids = pipe.tokenizer(negative, truncation=False, padding="max_length", max_length=positive_ids.shape[-1], return_tensors="pt").input_ids.to(device) 49 | else: 50 | negative_ids = pipe.tokenizer(negative, return_tensors="pt").input_ids.to(device) 51 | positive_ids = pipe.tokenizer(positive, truncation=False, padding="max_length", max_length=negative_ids.shape[-1], return_tensors="pt").input_ids.to(device) 52 | else: 53 | positive_ids = pipe.tokenizer(positive, truncation=False, padding="max_length", max_length=max_length, return_tensors="pt").input_ids.to(device) 54 | negative_ids = pipe.tokenizer(negative, truncation=False, padding="max_length", max_length=max_length, return_tensors="pt").input_ids.to(device) 55 | 56 | positive_concat_embeds = [] 57 | negative_concat_embeds = [] 58 | positive_pooled_embeds = [] 59 | negative_pooled_embeds = [] 60 | 61 | for i in range(0, positive_ids.shape[-1], max_length): 62 | # Get both text embeddings and pooled embeddings 63 | text_embeds, pooled_embeds = pipe.text_encoder(positive_ids[:, i: i + max_length], return_dict=False) 64 | positive_concat_embeds.append(text_embeds) 65 | positive_pooled_embeds.append(pooled_embeds) 66 | 67 | text_embeds, pooled_embeds = pipe.text_encoder(negative_ids[:, i: i + max_length], return_dict=False) 68 | negative_concat_embeds.append(text_embeds) 69 | negative_pooled_embeds.append(pooled_embeds) 70 | 71 | positive_prompt_embeds = torch.cat(positive_concat_embeds, dim=1) 72 | negative_prompt_embeds = torch.cat(negative_concat_embeds, dim=1) 73 | 74 | # For pooled embeddings, we take the mean of all segments 75 | positive_pooled_prompt_embeds = torch.stack(positive_pooled_embeds).mean(dim=0) 76 | negative_pooled_prompt_embeds = torch.stack(negative_pooled_embeds).mean(dim=0) 77 | 78 | return ( 79 | positive_prompt_embeds, 80 | negative_prompt_embeds, 81 | positive_pooled_prompt_embeds, 82 | negative_pooled_prompt_embeds 83 | ) 84 | 85 | 86 | def convert_images_to_tensors(images: list[Image.Image]): 87 | return torch.stack([np.transpose(ToTensor()(image), (1, 2, 0)) for image in images]) 88 | 89 | def convert_tensors_to_images(images: torch.tensor): 90 | return [Image.fromarray(np.clip(255. * image.to("cpu").numpy(), 0, 255).astype(np.uint8)) for image in images] 91 | 92 | def resize_images(images: list[Image.Image], size: tuple[int, int]): 93 | return [image.resize(size) for image in images] 94 | -------------------------------------------------------------------------------- /nodes.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import os 3 | import torch 4 | from safetensors.torch import load_file 5 | from .utils import SCHEDULERS, token_auto_concat_embeds, convert_images_to_tensors 6 | from comfy.model_management import get_torch_device 7 | import folder_paths 8 | from diffusers import StableDiffusionPipeline, AutoencoderKL, AutoencoderTiny 9 | from diffusers import DiffusionPipeline 10 | 11 | 12 | class DDUFLoader: 13 | def __init__(self): 14 | self.tmp_dir = folder_paths.get_temp_directory() 15 | self.dtype = torch.float32 16 | 17 | @classmethod 18 | def INPUT_TYPES(s): 19 | return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ), }} 20 | 21 | RETURN_TYPES = ("PIPELINE", "AUTOENCODER", "SCHEDULER",) 22 | FUNCTION = "create_pipeline" 23 | CATEGORY = "Diffusers" 24 | 25 | def create_pipeline(self, ckpt_name): 26 | dduf_path = folder_paths.get_folder_paths("checkpoints") 27 | 28 | pipe = DiffusionPipeline.from_pretrained( 29 | pretrained_model_name_or_path=dduf_path[0], 30 | dduf_file=ckpt_name, 31 | torch_dtype=self.dtype, 32 | cache_dir=self.tmp_dir, 33 | ) 34 | 35 | return ((pipe, self.tmp_dir), pipe.vae, pipe.scheduler) 36 | 37 | class DiffusersPipelineLoader: 38 | def __init__(self): 39 | self.tmp_dir = folder_paths.get_temp_directory() 40 | self.dtype = torch.float32 41 | 42 | @classmethod 43 | def INPUT_TYPES(s): 44 | return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ), }} 45 | 46 | RETURN_TYPES = ("PIPELINE", "AUTOENCODER", "SCHEDULER",) 47 | 48 | FUNCTION = "create_pipeline" 49 | 50 | CATEGORY = "Diffusers" 51 | 52 | def create_pipeline(self, ckpt_name): 53 | ckpt_cache_path = os.path.join(self.tmp_dir, ckpt_name) 54 | 55 | StableDiffusionPipeline.from_single_file( 56 | pretrained_model_link_or_path=folder_paths.get_full_path("checkpoints", ckpt_name), 57 | torch_dtype=self.dtype, 58 | cache_dir=self.tmp_dir, 59 | ).save_pretrained(ckpt_cache_path, safe_serialization=True) 60 | 61 | pipe = StableDiffusionPipeline.from_pretrained( 62 | pretrained_model_name_or_path=ckpt_cache_path, 63 | torch_dtype=self.dtype, 64 | cache_dir=self.tmp_dir, 65 | ) 66 | return ((pipe, ckpt_cache_path), pipe.vae, pipe.scheduler) 67 | 68 | class DiffusersSchedulerLoader: 69 | def __init__(self): 70 | self.tmp_dir = folder_paths.get_temp_directory() 71 | self.dtype = torch.float32 72 | 73 | @classmethod 74 | def INPUT_TYPES(s): 75 | return { 76 | "required": { 77 | "pipeline": ("PIPELINE", ), 78 | "scheduler_name": (list(SCHEDULERS.keys()), ), 79 | } 80 | } 81 | 82 | RETURN_TYPES = ("SCHEDULER",) 83 | 84 | FUNCTION = "load_scheduler" 85 | 86 | CATEGORY = "Diffusers" 87 | 88 | def load_scheduler(self, pipeline, scheduler_name): 89 | scheduler = SCHEDULERS[scheduler_name].from_pretrained( 90 | pretrained_model_name_or_path=pipeline[1], 91 | torch_dtype=self.dtype, 92 | cache_dir=self.tmp_dir, 93 | subfolder='scheduler' 94 | ) 95 | return (scheduler,) 96 | 97 | class DiffusersModelMakeup: 98 | def __init__(self): 99 | self.torch_device = get_torch_device() 100 | 101 | @classmethod 102 | def INPUT_TYPES(s): 103 | return { 104 | "required": { 105 | "pipeline": ("PIPELINE", ), 106 | "scheduler": ("SCHEDULER", ), 107 | "autoencoder": ("AUTOENCODER", ), 108 | }, 109 | } 110 | 111 | RETURN_TYPES = ("MAKED_PIPELINE",) 112 | 113 | FUNCTION = "makeup_pipeline" 114 | 115 | CATEGORY = "Diffusers" 116 | 117 | def makeup_pipeline(self, pipeline, scheduler, autoencoder): 118 | pipeline = pipeline[0] 119 | pipeline.vae = autoencoder 120 | pipeline.scheduler = scheduler 121 | pipeline = pipeline.to(self.torch_device) 122 | return (pipeline,) 123 | 124 | class DiffusersSimpleSampler: 125 | def __init__(self): 126 | self.torch_device = get_torch_device() 127 | 128 | @classmethod 129 | def INPUT_TYPES(s): 130 | return {"required": { 131 | "maked_pipeline": ("MAKED_PIPELINE", ), 132 | "prompt": ("STRING", {"multiline": True}), 133 | "width": ("INT", {"default": 1360, "min": 1, "max": 8192, "step": 1}), 134 | "height": ("INT", {"default": 768, "min": 1, "max": 8192, "step": 1}), 135 | "steps": ("INT", {"default": 4, "min": 1, "max": 10000}), 136 | "cfg": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}), 137 | "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), 138 | "max_sequence_length": ("INT", {"default": 256, "min": 1, "max": 1024}), 139 | }} 140 | 141 | RETURN_TYPES = ("IMAGE",) 142 | FUNCTION = "sample" 143 | CATEGORY = "Diffusers" 144 | 145 | def sample(self, maked_pipeline, prompt, height, width, steps, cfg, seed, max_sequence_length): 146 | images = maked_pipeline( 147 | prompt=prompt, 148 | height=height, 149 | width=width, 150 | num_inference_steps=steps, 151 | guidance_scale=cfg, 152 | generator=torch.Generator(self.torch_device).manual_seed(seed), 153 | max_sequence_length=max_sequence_length, 154 | ).images 155 | 156 | return (convert_images_to_tensors(images),) 157 | 158 | NODE_CLASS_MAPPINGS = { 159 | "DiffusersPipelineLoader": DiffusersPipelineLoader, 160 | "DiffusersSchedulerLoader": DiffusersSchedulerLoader, 161 | "DiffusersModelMakeup": DiffusersModelMakeup, 162 | "DiffusersSimpleSampler": DiffusersSimpleSampler, 163 | "DDUFLoader": DDUFLoader, 164 | } 165 | 166 | NODE_DISPLAY_NAME_MAPPINGS = { 167 | "DiffusersPipelineLoader": "Diffusers Pipeline Loader", 168 | "DiffusersSchedulerLoader": "Diffusers Scheduler Loader", 169 | "DiffusersModelMakeup": "Diffusers Model Makeup", 170 | "DiffusersSimpleSampler": "Diffusers Simple Sampler", 171 | "DDUFLoader": "DDUF Loader", 172 | } 173 | --------------------------------------------------------------------------------