├── README.md
├── __init__.py
├── color_blend.py
├── control_lora_create.py
├── image_nodes.py
├── requirements.txt
└── standalone_install_requirements.bat

/README.md:
--------------------------------------------------------------------------------
# Custom nodes for [ComfyUI](https://github.com/comfyanonymous/ComfyUI)

`git clone` this repo into your `ComfyUI/custom_nodes` folder so the path looks like: `ComfyUI/custom_nodes/stability-ComfyUI-nodes`

On the standalone build, run `standalone_install_requirements.bat` to install the dependencies. On a manual install, run `pip install -r requirements.txt` instead.

These nodes will appear in the stability section.

### ControlLoraSave

This node creates a Control LoRA from a model and a controlnet: it takes the difference between the model weights and the controlnet weights and stores that difference in LoRA format.
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
import importlib

node_list = [  # Add the names of the .py files containing nodes here
    "control_lora_create",
    "color_blend",
    "image_nodes",
]

NODE_CLASS_MAPPINGS = {}
NODE_DISPLAY_NAME_MAPPINGS = {}

for module_name in node_list:
    imported_module = importlib.import_module(".{}".format(module_name), __name__)

    # Merge each module's mappings into the package-level dicts that ComfyUI reads.
    NODE_CLASS_MAPPINGS = {**NODE_CLASS_MAPPINGS, **imported_module.NODE_CLASS_MAPPINGS}
    NODE_DISPLAY_NAME_MAPPINGS = {**NODE_DISPLAY_NAME_MAPPINGS, **imported_module.NODE_DISPLAY_NAME_MAPPINGS}

__all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS']
--------------------------------------------------------------------------------
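Every module listed in node_list just needs to expose those two mapping dicts. As a minimal sketch of that contract, modeled on image_nodes.py below (the file name example_nodes.py and the ExampleInvert node are hypothetical, not part of this pack), a new entry in node_list could be backed by:

# example_nodes.py -- hypothetical module; add "example_nodes" to node_list in __init__.py
class ExampleInvert:
    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {"image": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "invert"
    CATEGORY = "stability/image"

    def invert(self, image):
        # ComfyUI IMAGE tensors are float batches in [0, 1], shaped [B, H, W, C].
        return (1.0 - image,)

NODE_CLASS_MAPPINGS = {"ExampleInvert": ExampleInvert}
NODE_DISPLAY_NAME_MAPPINGS = {"ExampleInvert": "Example Invert"}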
/color_blend.py:
--------------------------------------------------------------------------------
# Color blend node by Yam Levi
# Property of Stability AI
import cv2
import numpy as np
import torch

import comfy.utils


def color_blend(bw_layer, color_layer):
    # Convert the color layer to LAB color space
    color_lab = cv2.cvtColor(color_layer, cv2.COLOR_BGR2Lab)
    # Convert the black and white layer to grayscale
    bw_layer_gray = cv2.cvtColor(bw_layer, cv2.COLOR_BGR2GRAY)
    # Replace the luminosity (L) channel in the color image with the black and white luminosity
    _, color_a, color_b = cv2.split(color_lab)
    blended_lab = cv2.merge((bw_layer_gray, color_a, color_b))
    # Convert the blended LAB image back to BGR color space
    blended_result = cv2.cvtColor(blended_lab, cv2.COLOR_Lab2BGR)
    return blended_result


class ColorBlend:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "bw_layer": ("IMAGE",),
                "color_layer": ("IMAGE",),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "color_blending_mode"

    CATEGORY = "stability/image/postprocessing"

    def color_blending_mode(self, bw_layer, color_layer):
        # Repeat the smaller batch so both layers have the same batch size.
        if bw_layer.shape[0] < color_layer.shape[0]:
            bw_layer = bw_layer.repeat(color_layer.shape[0], 1, 1, 1)[:color_layer.shape[0]]
        if bw_layer.shape[0] > color_layer.shape[0]:
            color_layer = color_layer.repeat(bw_layer.shape[0], 1, 1, 1)[:bw_layer.shape[0]]

        batch_size, height, width, _ = bw_layer.shape
        tensor_output = torch.empty_like(bw_layer)

        image1 = bw_layer.cpu()
        image2 = color_layer.cpu()
        if image1.shape != image2.shape:
            # Resize the color layer to the bw layer's resolution before blending.
            image2 = image2.permute(0, 3, 1, 2)
            image2 = comfy.utils.common_upscale(image2, image1.shape[2], image1.shape[1], upscale_method='bicubic', crop='center')
            image2 = image2.permute(0, 2, 3, 1)
        image1 = (image1 * 255).to(torch.uint8).numpy()
        image2 = (image2 * 255).to(torch.uint8).numpy()

        for i in range(batch_size):
            blend = color_blend(image1[i], image2[i])
            blend = np.stack([blend])
            tensor_output[i:i + 1] = (torch.from_numpy(blend.transpose(0, 3, 1, 2)) / 255.0).permute(0, 2, 3, 1)

        return (tensor_output,)


NODE_CLASS_MAPPINGS = {
    "ColorBlend": ColorBlend
}
NODE_DISPLAY_NAME_MAPPINGS = {
    "ColorBlend": "Color Blend"
}
--------------------------------------------------------------------------------
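The LAB luminosity swap that color_blend() performs can also be exercised outside ComfyUI. A minimal standalone sketch, using synthetic uint8 arrays as stand-ins for real images:

# Standalone sketch of the LAB luminosity swap used by color_blend() above.
# The two arrays are synthetic placeholders, not real image data.
import cv2
import numpy as np

bw = np.full((64, 64, 3), 128, dtype=np.uint8)  # flat mid-gray "luminosity" layer
color = np.zeros((64, 64, 3), dtype=np.uint8)
color[..., 2] = 255                             # pure red, BGR channel order

color_lab = cv2.cvtColor(color, cv2.COLOR_BGR2Lab)
_, a, b = cv2.split(color_lab)
lum = cv2.cvtColor(bw, cv2.COLOR_BGR2GRAY)
blended = cv2.cvtColor(cv2.merge((lum, a, b)), cv2.COLOR_Lab2BGR)
print(blended.shape, blended.dtype)  # (64, 64, 3) uint8: red hue at the bw layer's brightness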
/control_lora_create.py:
--------------------------------------------------------------------------------
import os

import torch

import comfy.model_management
import comfy.utils
import folder_paths

CLAMP_QUANTILE = 0.99


def extract_lora(diff, rank):
    conv2d = (len(diff.shape) == 4)
    kernel_size = None if not conv2d else diff.size()[2:4]
    conv2d_3x3 = conv2d and kernel_size != (1, 1)
    out_dim, in_dim = diff.size()[0:2]
    rank = min(rank, in_dim, out_dim)

    # Flatten conv weights to a 2D matrix so SVD can be applied.
    if conv2d:
        if conv2d_3x3:
            diff = diff.flatten(start_dim=1)
        else:
            diff = diff.squeeze()

    U, S, Vh = torch.linalg.svd(diff.float())
    U = U[:, :rank]
    S = S[:rank]
    U = U @ torch.diag(S)
    Vh = Vh[:rank, :]

    # Clamp outliers to the 99th-percentile magnitude across both factors.
    dist = torch.cat([U.flatten(), Vh.flatten()])
    hi_val = torch.quantile(dist, CLAMP_QUANTILE)
    low_val = -hi_val

    U = U.clamp(low_val, hi_val)
    Vh = Vh.clamp(low_val, hi_val)
    if conv2d:
        U = U.reshape(out_dim, rank, 1, 1)
        Vh = Vh.reshape(rank, in_dim, kernel_size[0], kernel_size[1])
    return (U, Vh)


class ControlLoraSave:
    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"model": ("MODEL",),
                             "control_net": ("CONTROL_NET",),
                             "filename_prefix": ("STRING", {"default": "controlnet_loras/ComfyUI_control_lora"}),
                             "rank": ("INT", {"default": 64, "min": 0, "max": 1024, "step": 8}),
                             }}
    RETURN_TYPES = ()
    FUNCTION = "save"
    OUTPUT_NODE = True

    CATEGORY = "stability/controlnet"

    def save(self, model, control_net, filename_prefix, rank):
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)

        output_sd = {}
        prefix_key = "diffusion_model."
        stored = set()

        comfy.model_management.load_models_gpu([model])
        f = model.model_state_dict()
        c = control_net.control_model.state_dict()

        for k in f:
            if k.startswith(prefix_key):
                ck = k[len(prefix_key):]
                if ck not in c:
                    ck = "control_model.{}".format(ck)
                if ck in c:
                    model_weight = f[k]
                    if len(model_weight.shape) >= 2:
                        # Matrix/conv weights: store the difference as low-rank up/down factors.
                        diff = c[ck].float().to(model_weight.device) - model_weight.float()
                        out = extract_lora(diff, rank)
                        name = ck
                        if name.endswith(".weight"):
                            name = name[:-len(".weight")]
                        out1_key = "{}.up".format(name)
                        out2_key = "{}.down".format(name)
                        output_sd[out1_key] = out[0].contiguous().half().cpu()
                        output_sd[out2_key] = out[1].contiguous().half().cpu()
                    else:
                        # 1D tensors (biases, norms): store the controlnet tensor as-is.
                        output_sd[ck] = c[ck]
                    stored.add(ck)

        # Keep any controlnet weights that have no counterpart in the base model.
        for k in c:
            if k not in stored:
                output_sd[k] = c[k].half()
        output_sd["lora_controlnet"] = torch.tensor([])

        output_checkpoint = f"{filename}_{counter:05}_.safetensors"
        output_checkpoint = os.path.join(full_output_folder, output_checkpoint)

        comfy.utils.save_torch_file(output_sd, output_checkpoint, metadata=None)
        return {}


NODE_CLASS_MAPPINGS = {
    "ControlLoraSave": ControlLoraSave
}

NODE_DISPLAY_NAME_MAPPINGS = {
}
--------------------------------------------------------------------------------
/image_nodes.py:
--------------------------------------------------------------------------------
class GetImageSize:
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image": ("IMAGE",),
            }
        }

    RETURN_TYPES = ("INT", "INT")
    RETURN_NAMES = ("width", "height")

    FUNCTION = "get_size"

    CATEGORY = "stability/image"

    def get_size(self, image):
        # IMAGE tensors are [batch, height, width, channels].
        _, height, width, _ = image.shape
        return (width, height)


NODE_CLASS_MAPPINGS = {
    "GetImageSize": GetImageSize
}

NODE_DISPLAY_NAME_MAPPINGS = {
}
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
opencv-python
--------------------------------------------------------------------------------
/standalone_install_requirements.bat:
--------------------------------------------------------------------------------
..\..\..\python_embeded\python.exe -s -m pip install -r requirements.txt
pause
--------------------------------------------------------------------------------
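One closing note on control_lora_create.py: extract_lora() keeps only the top `rank` singular triplets, so the product of the returned up/down factors is a low-rank approximation of the weight difference (the quantile clamp adds a small extra error on top). A self-contained sanity check of that property, using a random matrix of known rank as a stand-in for a real weight diff and skipping the clamp step:

# Sanity check for the SVD extraction in control_lora_create.py:
# U @ Vh should approximate the original difference matrix at low rank.
import torch

torch.manual_seed(0)
rank = 8
# Build a matrix that is exactly rank 8, so a rank-8 approximation is near-exact.
diff = torch.randn(64, 8) @ torch.randn(8, 32)

U, S, Vh = torch.linalg.svd(diff)
U = U[:, :rank] @ torch.diag(S[:rank])  # fold singular values into the "up" factor
Vh = Vh[:rank, :]

err = (U @ Vh - diff).abs().max().item()
print(f"max reconstruction error: {err:.2e}")  # tiny, up to float precision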