├── valid_nodes.py ├── .gitignore ├── __init__.py ├── LICENSE ├── README.md └── api_nodes.py /valid_nodes.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__/ 2 | .idea/ 3 | .vscode/ 4 | .tmp 5 | 6 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import os 3 | 4 | node_list = [ #Add list of .py files containing nodes here 5 | "api_nodes", 6 | 7 | ] 8 | 9 | NODE_CLASS_MAPPINGS = {} 10 | NODE_DISPLAY_NAME_MAPPINGS = {} 11 | 12 | for module_name in node_list: 13 | imported_module = importlib.import_module(".{}".format(module_name), __name__) 14 | 15 | NODE_CLASS_MAPPINGS = {**NODE_CLASS_MAPPINGS, **imported_module.NODE_CLASS_MAPPINGS} 16 | if hasattr(imported_module, "NODE_DISPLAY_NAME_MAPPINGS"): 17 | NODE_DISPLAY_NAME_MAPPINGS = {**NODE_DISPLAY_NAME_MAPPINGS, **imported_module.NODE_DISPLAY_NAME_MAPPINGS} 18 | 19 | __all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS'] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Abdullah Alfaraj 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Comfy-Photoshop-SD 2 | Download this extension via the [ComfyUI Manager](https://github.com/ltdrdata/ComfyUI-Manager) to establish a connection between ComfyUI and the [Auto-Photoshop-SD](https://github.com/AbdullahAlfaraj/Auto-Photoshop-StableDiffusion-Plugin) plugin in Photoshop. 3 | 4 | # How to Install: 5 | 1) Install [Auto-Photoshop-SD](https://github.com/AbdullahAlfaraj/Auto-Photoshop-StableDiffusion-Plugin) `v1.4.0` or a later version -> using either installation Method 1 (.ccx) or Method 2 (.zip) file. 6 | **Important: skip any instruction related to Automatic1111** 7 | 8 | 2. 
Install ComfyUI-Manager inside ComfyUI
9 |    - a) Navigate to the `ComfyUI/custom_nodes` folder
10 |    - b) Run the following command:
11 | ```
12 | git clone https://github.com/ltdrdata/ComfyUI-Manager.git
13 | ```
14 | Follow the steps here:
15 | 
16 | https://github.com/AbdullahAlfaraj/Comfy-Photoshop-SD/assets/7842232/bdab404c-1170-483d-be1b-67b5b4aab67a
17 | 
18 | 3) Install this project (Comfy-Photoshop-SD) from ComfyUI-Manager
19 | 
20 | https://github.com/AbdullahAlfaraj/Comfy-Photoshop-SD/assets/7842232/4aa64420-345d-4fee-8f48-c1e92bd42348
21 | 
22 | 4) Similarly, install [comfyui_controlnet_aux](https://github.com/Fannovel16/comfyui_controlnet_aux) from ComfyUI-Manager to use ControlNet
23 | 
24 | 
25 | 
26 | 
27 | # Load Custom ComfyUI Workflows in Photoshop
28 | 
29 | 1. **Load Workflow**: Open your workflow in ComfyUI. Make sure it's error-free.
30 | 
31 | 2. **Convert to API**: Save the workflow in API (JSON) format. Use 'Save (API Format)', not 'Save'. See the example below.
32 | 
33 | ![image](https://github.com/AbdullahAlfaraj/Comfy-Photoshop-SD/assets/7842232/c768fefe-b151-4fdf-a598-1398b51b13c9)
34 | 
35 | https://github.com/AbdullahAlfaraj/Comfy-Photoshop-SD/assets/7842232/ebe571dc-74e1-4a95-bda9-2763d88eb084
36 | 
37 | 3. **Move API File**: Transfer the API JSON file to a folder of your choice; include only JSON files created using this guide. Here's an image for reference.
38 | ![image](https://github.com/AbdullahAlfaraj/Comfy-Photoshop-SD/assets/7842232/5c7a5c2b-3e9e-4039-abf9-0539000fedc5)
39 | 
40 | 4. **Load Workflow in Photoshop**: Load your custom workflow within Photoshop. Here's an example.
41 | 
42 | https://github.com/AbdullahAlfaraj/Comfy-Photoshop-SD/assets/7842232/64383557-32a2-4836-85e3-1daa9d22ce2e
43 | 
44 | # How to Use:
45 | ## Txt2Img with Hires fix:
46 | 
47 | 
48 | https://github.com/AbdullahAlfaraj/Comfy-Photoshop-SD/assets/7842232/748aefc9-f75f-4581-9c8d-c796c5cddaba
49 | 
50 | ## Img2Img with Hires fix:
51 | 
52 | 
53 | https://github.com/AbdullahAlfaraj/Comfy-Photoshop-SD/assets/7842232/4b000584-672c-44f5-80a0-717eac02af76
54 | 
55 | ## Pure Img2Img:
56 | 
57 | https://github.com/AbdullahAlfaraj/Comfy-Photoshop-SD/assets/7842232/3c934dad-2a13-4d58-80b5-3e5179ec1420
58 | 
59 | ## Pure Inpainting:
60 | 
61 | 
62 | https://github.com/AbdullahAlfaraj/Comfy-Photoshop-SD/assets/7842232/ccdb9b8a-a63f-47da-9502-e108a630e5af
63 | 
64 | ## Outpainting with ControlNet:
65 | 
66 | 
67 | 
68 | https://github.com/AbdullahAlfaraj/Comfy-Photoshop-SD/assets/7842232/e0bcf679-59cd-4a44-8de7-04f0f0dd01fe
69 | 
70 | 
71 | 
--------------------------------------------------------------------------------
/api_nodes.py:
--------------------------------------------------------------------------------
1 | # from ..comfyui_controlnet_aux.node_wrappers.openpose import OpenPose_Preprocessor
2 | 
3 | import folder_paths
4 | import json
5 | import comfy.samplers
6 | import comfy.sample
7 | import nodes
8 | from comfy_extras.nodes_mask import (
9 |     ImageToMask,
10 |     ImageCompositeMasked,
11 |     LatentCompositeMasked,
12 | )
13 | 
14 | import torch
15 | import numpy as np
16 | from PIL import Image, ImageOps
17 | import hashlib
18 | import os
19 | from PIL.PngImagePlugin import PngImageFile, PngInfo
20 | 
21 | 
22 | class LoadImageWithMetaData:
23 |     @classmethod
24 |     def INPUT_TYPES(s):
25 |         # input_dir = folder_paths.get_input_directory()
26 |         # files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
27 |         # print("***files: ",files)
28 |         return {
29 |             "required": {
30 |                 "image_path": (
"STRING", 32 | { 33 | "multiline": False, # True if you want the field to look like the one on the ClipTextEncode node 34 | "default": "Hello World!", 35 | }, 36 | ), 37 | }, 38 | # {"image": (sorted(files), )}, 39 | # "hidden": {"image_path": "PROMPT",} 40 | } 41 | 42 | CATEGORY = "Auto-Photoshop-SD" 43 | 44 | OUTPUT_NODE = True 45 | # RETURN_TYPES = ("IMAGE", "MASK") 46 | RETURN_TYPES = () 47 | FUNCTION = "load_image" 48 | 49 | def load_image(self, image_path): 50 | # image_path = folder_paths.get_annotated_filepath(image) 51 | # image_path = image 52 | print("***image_path: ", image_path) 53 | 54 | # Open the image file 55 | image_temp = Image.open(image_path) 56 | 57 | # Check if the image is a PNG file 58 | if isinstance(image_temp, PngImageFile): 59 | # Get the metadata from the image 60 | metadata = image_temp.info 61 | 62 | print("metadata:", metadata) 63 | 64 | # Print the metadata 65 | for key, value in metadata.items(): 66 | print(f"{key}: {value}") 67 | 68 | i = Image.open(image_path) 69 | i = ImageOps.exif_transpose(i) 70 | image = i.convert("RGB") 71 | image = np.array(image).astype(np.float32) / 255.0 72 | image = torch.from_numpy(image)[None,] 73 | if "A" in i.getbands(): 74 | mask = np.array(i.getchannel("A")).astype(np.float32) / 255.0 75 | mask = 1.0 - torch.from_numpy(mask) 76 | else: 77 | mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") 78 | # return (image, mask) 79 | 80 | print("type of metadata: ", type(metadata)) 81 | print("type of prompt: ", type(metadata["prompt"])) 82 | print("type of workflow: ", type(metadata["workflow"])) 83 | 84 | return {"ui": {"prompt": metadata["prompt"], "workflow": metadata["workflow"]}} 85 | # return { "prompt":metadata['prompt'],"workflow":metadata['workflow'] } 86 | 87 | 88 | class GetConfig: 89 | @classmethod 90 | def INPUT_TYPES(s): 91 | input_dir = folder_paths.get_input_directory() 92 | files = [ 93 | f 94 | for f in os.listdir(input_dir) 95 | if os.path.isfile(os.path.join(input_dir, f)) 96 | ] 97 | return { 98 | "required": { 99 | "embeddings": (folder_paths.get_folder_paths("embeddings"),), 100 | }, 101 | "optional": { 102 | "controlnet_config": (controlnet_config.copy()), 103 | } 104 | # {"image": (sorted(files), )}, 105 | } 106 | 107 | CATEGORY = "Auto-Photoshop-SD" 108 | 109 | OUTPUT_NODE = True 110 | 111 | RETURN_TYPES = () 112 | FUNCTION = "get_config" 113 | 114 | def get_config(self): 115 | checkpoints = folder_paths.get_filename_list("checkpoints") 116 | samplers = comfy.samplers.KSampler.SAMPLERS 117 | schedulers = comfy.samplers.KSampler.SCHEDULERS 118 | loras = folder_paths.get_filename_list("loras") 119 | latent_upscale_methods = [ 120 | "nearest-exact", 121 | "bilinear", 122 | "area", 123 | "bicubic", 124 | "bislerp", 125 | ] 126 | latent_upscale_crop_methods = ["disabled", "center"] 127 | 128 | # print("checkpoints: ", checkpoints) 129 | return { 130 | "ui": { 131 | "checkpoints": checkpoints, 132 | "samplers": samplers, 133 | "schedulers": schedulers, 134 | "latent_upscale_methods": latent_upscale_methods, 135 | "latent_upscale_crop_methods": latent_upscale_crop_methods, 136 | "loras": loras, 137 | } 138 | } 139 | 140 | 141 | import base64 142 | from io import BytesIO 143 | 144 | 145 | class LoadImageBase64: 146 | @classmethod 147 | def INPUT_TYPES(s): 148 | # input_dir = folder_paths.get_input_directory() 149 | # files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))] 150 | return { 151 | "required": { 152 | "image_base64": ( 153 | "STRING", 154 | { 155 | 
"multiline": False, # True if you want the field to look like the one on the ClipTextEncode node 156 | "default": "", 157 | }, 158 | ), 159 | } 160 | } 161 | 162 | CATEGORY = "Auto-Photoshop-SD" 163 | 164 | RETURN_TYPES = ("IMAGE", "MASK") 165 | FUNCTION = "load_image_from_base64" 166 | 167 | def load_image_from_base64(self, image_base64): 168 | # Decode the base64 string 169 | imgdata = base64.b64decode(image_base64) 170 | 171 | # Open the image from memory 172 | i = Image.open(BytesIO(imgdata)) 173 | i = ImageOps.exif_transpose(i) 174 | image = i.convert("RGB") 175 | image = np.array(image).astype(np.float32) / 255.0 176 | image = torch.from_numpy(image)[None,] 177 | 178 | if "A" in i.getbands(): 179 | mask = np.array(i.getchannel("A")).astype(np.float32) / 255.0 180 | mask = 1.0 - torch.from_numpy(mask) 181 | else: 182 | mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") 183 | 184 | return (image, mask) 185 | 186 | 187 | from nodes import LoraLoader # Adjust this import statement to your project structure 188 | import re 189 | 190 | 191 | class LoadLorasFromPrompt: 192 | def __init__(self): 193 | self.lora_loaders = [] 194 | self.lora_list = folder_paths.get_filename_list("loras") 195 | 196 | @classmethod 197 | def INPUT_TYPES(s): 198 | return { 199 | "required": { 200 | "model": ("MODEL",), 201 | "clip": ("CLIP",), 202 | "prompt": ("STRING", {"multiline": True, "default": ""}), 203 | } 204 | } 205 | 206 | CATEGORY = "Auto-Photoshop-SD" 207 | RETURN_TYPES = ("MODEL", "CLIP", "STRING") 208 | FUNCTION = "load_loras_from_prompt" 209 | 210 | def extract_lora_info(self, prompt): 211 | # Extract LoRA info 212 | lora_info_list = re.findall(r"", prompt) 213 | 214 | # Remove LoRA symbols from the prompt 215 | prompt_without_lora = re.sub(r"", "", prompt) 216 | 217 | return prompt_without_lora, lora_info_list 218 | 219 | def load_loras_from_prompt(self, model, clip, prompt): 220 | # Parse the loras_prompt string 221 | prompt_without_lora, lora_info_list = self.extract_lora_info(prompt) 222 | 223 | # print("prompt:", prompt) 224 | # print("prompt_without_lora:", prompt_without_lora) 225 | # print("lora_info_list:", lora_info_list) 226 | 227 | out_model = model 228 | out_clip = clip 229 | 230 | # Create a LoraLoader for each lora and load it 231 | for lora_name, strength in lora_info_list: 232 | lora_name += ( 233 | ".safetensors" # Add the .safetensors extension to the lora_name 234 | ) 235 | strength = float(strength) 236 | # print("lora_name:", lora_name) 237 | # print("type(strength):", type(strength)) 238 | if lora_name in self.lora_list: 239 | lora_loader = LoraLoader() 240 | out_model, out_clip = lora_loader.load_lora( 241 | out_model, out_clip, lora_name, strength, strength 242 | ) 243 | self.lora_loaders.append((out_model, out_clip)) 244 | else: 245 | print( 246 | f"WARNING: The specified LoRa '{lora_name}' does not exist and will be skipped. Please ensure the LoRa name is correct and that the corresponding .safetensors file is available." 
247 | ) 248 | 249 | # return self.lora_loaders[-1] 250 | # return (out_model,out_clip) 251 | return (out_model, out_clip, prompt_without_lora) 252 | 253 | 254 | import numpy as np 255 | 256 | 257 | class GaussianLatentImage: 258 | def __init__(self, device="cpu"): 259 | self.device = device 260 | 261 | @classmethod 262 | def INPUT_TYPES(s): 263 | return { 264 | "required": { 265 | "width": ( 266 | "INT", 267 | {"default": 512, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 8}, 268 | ), 269 | "height": ( 270 | "INT", 271 | {"default": 512, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 8}, 272 | ), 273 | "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), 274 | "seed": ("INT", {"default": 0, "min": 0, "max": 0xFFFFFFFFFFFFFFFF}), 275 | } 276 | } 277 | 278 | RETURN_TYPES = ("LATENT",) 279 | FUNCTION = "generate" 280 | 281 | CATEGORY = "Auto-Photoshop-SD" 282 | 283 | def generate(self, width, height, batch_size=1, seed=0): 284 | # Set the seed for reproducibility 285 | torch.manual_seed(seed) 286 | 287 | # Define the mean and standard deviation 288 | mean = 0 289 | var = 10 290 | sigma = var**0.5 291 | 292 | # Generate Gaussian noise 293 | gaussian = torch.randn((batch_size, 4, height // 8, width // 8)) * sigma + mean 294 | 295 | # Move the tensor to the specified device 296 | latent = gaussian.float().to(self.device) 297 | 298 | return ({"samples": latent},) 299 | 300 | 301 | class APS_LatentBatch: 302 | @classmethod 303 | def INPUT_TYPES(s): 304 | return {"required": {"latent1": ("LATENT",), "latent2": ("LATENT",)}} 305 | 306 | RETURN_TYPES = ("LATENT",) 307 | FUNCTION = "batch" 308 | 309 | CATEGORY = "Auto-Photoshop-SD" 310 | 311 | def batch(self, latent1, latent2): 312 | latent1_samples = latent1["samples"] 313 | latent2_samples = latent2["samples"] 314 | if latent1_samples.shape[1:] != latent2_samples.shape[1:]: 315 | latent2_samples = comfy.utils.common_upscale( 316 | latent2_samples.movedim(-1, 1), 317 | latent1_samples.shape[2], 318 | latent1_samples.shape[1], 319 | "bilinear", 320 | "center", 321 | ).movedim(1, -1) 322 | s = torch.cat((latent1_samples, latent2_samples), dim=0) 323 | return ({"samples": s},) 324 | 325 | 326 | import io 327 | import base64 328 | from PIL import Image, ImageFilter 329 | from torchvision import transforms 330 | 331 | 332 | class MaskExpansion: 333 | @classmethod 334 | def INPUT_TYPES(s): 335 | return { 336 | "required": { 337 | "mask": ("IMAGE",), 338 | "expansion": ("INT", {"default": 0, "min": 0, "max": 256, "step": 1}), 339 | "blur": ("INT", {"default": 0, "min": 0, "max": 64, "step": 1}), 340 | } 341 | } 342 | 343 | RETURN_TYPES = ("IMAGE",) 344 | FUNCTION = "expandAndBlur" 345 | 346 | CATEGORY = "Auto-Photoshop-SD" 347 | 348 | def expandAndBlur(self, **kwarg): 349 | mask = kwarg.get("mask") 350 | expansion = kwarg.get("expansion") 351 | blur = kwarg.get("blur") 352 | # print("type: mask: ", type(mask)) 353 | expanded_mask = self.maskExpansionHandler(mask, expansion, blur) 354 | 355 | # print("expanded_mask:",expanded_mask) 356 | # print("type: expanded_mask: ", type(expanded_mask)) 357 | return (expanded_mask,) 358 | 359 | def b64_2_img(self, base64_image): 360 | image = Image.open(io.BytesIO(base64.b64decode(base64_image.split(",", 1)[0]))) 361 | return image 362 | 363 | def reserveBorderPixels(self, img, dilation_img): 364 | pixels = img.load() 365 | width, height = img.size 366 | dilation_pixels = dilation_img.load() 367 | depth = 1 368 | for x in range(width): 369 | for d in range(depth): 370 | dilation_pixels[x, d] = pixels[x, d] 
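# NOTE: the assignment above restores the top border row of the original mask; the statements that follow restore the bottom row and then the left/right columns, so the one-pixel border (depth = 1) survives the dilation and blur applied in maskExpansion.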
371 | dilation_pixels[x, height - (d + 1)] = pixels[x, height - (d + 1)] 372 | for y in range(height): 373 | for d in range(depth): 374 | dilation_pixels[d, y] = pixels[d, y] 375 | dilation_pixels[width - (d + 1), y] = pixels[width - (d + 1), y] 376 | return dilation_img 377 | 378 | def maskExpansion(self, mask_img, mask_expansion, blur=10): 379 | iteration = mask_expansion 380 | dilated_img = self.applyDilation(mask_img, iteration) 381 | blurred_image = dilated_img.filter(ImageFilter.GaussianBlur(radius=blur)) 382 | mask_with_border = self.reserveBorderPixels(mask_img, blurred_image) 383 | return mask_with_border 384 | 385 | async def base64ToPng(self, base64_image, image_path): 386 | base64_img_bytes = base64_image.encode("utf-8") 387 | with open(image_path, "wb") as file_to_save: 388 | decoded_image_data = base64.decodebytes(base64_img_bytes) 389 | file_to_save.write(decoded_image_data) 390 | 391 | def applyDilation(self, img, iteration=20, max_filter=3): 392 | dilation_img = img.copy() 393 | for i in range(iteration): 394 | dilation_img = dilation_img.filter(ImageFilter.MaxFilter(max_filter)) 395 | return dilation_img 396 | 397 | def maskExpansionHandler(self, input_mask, mask_expansion, blur): 398 | try: 399 | # Check if input is a string or a tensor 400 | if isinstance(input_mask, str): 401 | self.base64ToPng(input_mask, "original_mask.png") 402 | mask_image = self.b64_2_img(input_mask) 403 | elif torch.is_tensor(input_mask): 404 | # Ensure the tensor is 3-dimensional 405 | # print("Shape of tensor: ", input_mask.size()) 406 | # print("Number of dimensions: ", input_mask.dim()) 407 | tensor = input_mask.squeeze(0).permute( 408 | 2, 0, 1 409 | ) # Remove batch dimension and rearrange dimensions 410 | transform = transforms.ToPILImage() 411 | mask_image = transform(tensor) 412 | else: 413 | raise ValueError( 414 | "Input mask must be a base64 string or a PyTorch tensor" 415 | ) 416 | 417 | expanded_mask_img = self.maskExpansion(mask_image, mask_expansion, blur) 418 | 419 | # Convert PIL Image to PyTorch tensor 420 | transform = transforms.ToTensor() 421 | expanded_mask_tensor = transform(expanded_mask_img) 422 | expanded_mask_tensor = expanded_mask_tensor.unsqueeze(0).permute(0, 2, 3, 1) 423 | 424 | return expanded_mask_tensor 425 | except: 426 | raise Exception(f"couldn't perform mask expansion") 427 | 428 | 429 | preprocessor_list = [ 430 | "None", 431 | "CannyEdgePreprocessor", 432 | "OpenposePreprocessor", 433 | "HEDPreprocessor", 434 | "FakeScribblePreprocessor", 435 | "InpaintPreprocessor", 436 | "LeReS-DepthMapPreprocessor", 437 | "AnimeLineArtPreprocessor", 438 | "LineArtPreprocessor", 439 | "Manga2Anime_LineArt_Preprocessor", 440 | "MediaPipe-FaceMeshPreprocessor", 441 | "MiDaS-NormalMapPreprocessor", 442 | "MiDaS-DepthMapPreprocessor", 443 | "M-LSDPreprocessor", 444 | "BAE-NormalMapPreprocessor", 445 | "OneFormer-COCO-SemSegPreprocessor", 446 | "OneFormer-ADE20K-SemSegPreprocessor", 447 | "PiDiNetPreprocessor", 448 | "ScribblePreprocessor", 449 | "Scribble_XDoG_Preprocessor", 450 | "SAMPreprocessor", 451 | "ShufflePreprocessor", 452 | "TilePreprocessor", 453 | "UniFormer-SemSegPreprocessor", 454 | "SemSegPreprocessor", 455 | "Zoe-DepthMapPreprocessor", 456 | ] 457 | 458 | controlnet_config = { 459 | "CannyEdgePreprocessor": { 460 | "low_threshold": 100, 461 | "high_threshold": 200, 462 | "resolution": 512, 463 | "threshold_mapping": { 464 | "threshold_a": "low_threshold", 465 | "threshold_b": "high_threshold", 466 | }, 467 | "param_config": { 468 | "low_threshold": { 469 | 
"type": "INT", 470 | "default": 100, 471 | "min": 0, 472 | "max": 255, 473 | "step": 1, 474 | }, 475 | "high_threshold": { 476 | "type": "INT", 477 | "default": 200, 478 | "min": 0, 479 | "max": 255, 480 | "step": 1, 481 | }, 482 | }, 483 | }, 484 | "OpenposePreprocessor": { 485 | "detect_hand": "enable", 486 | "detect_body": "enable", 487 | "detect_face": "enable", 488 | "resolution": 512, 489 | }, 490 | "HEDPreprocessor": {"safe": "enable"}, 491 | "FakeScribblePreprocessor": {"safe": "enable"}, 492 | "InpaintPreprocessor": {"mask": ""}, 493 | "LeReS-DepthMapPreprocessor": {"boost": "enable"}, 494 | "AnimeLineArtPreprocessor": {"resolution": 512}, 495 | "LineArtPreprocessor": {"resolution": 512, "coarse": "enable"}, 496 | "Manga2Anime_LineArt_Preprocessor": { 497 | "resolution": 512, 498 | }, 499 | "MediaPipe-FaceMeshPreprocessor": { 500 | "max_faces": 10, 501 | "min_confidence": 0.5, 502 | "resolution": 512, 503 | "threshold_mapping": { 504 | "threshold_a": "max_faces", 505 | "threshold_b": "min_confidence", 506 | }, 507 | "param_config": { 508 | "max_faces": { 509 | "type": "INT", 510 | "default": 10, 511 | "min": 1, 512 | "max": 50, 513 | "step": 1, 514 | }, 515 | "min_confidence": { 516 | "type": "FLOAT", 517 | "default": 0.5, 518 | "min": 0.01, 519 | "max": 1.0, 520 | "step": 0.01, 521 | }, 522 | }, 523 | }, 524 | "MiDaS-NormalMapPreprocessor": { 525 | "a": np.pi * 2.0, 526 | "bg_threshold": 0.1, 527 | "resolution": 512, 528 | "threshold_mapping": { 529 | "threshold_a": "a", 530 | "threshold_b": "bg_threshold", 531 | }, 532 | "param_config": { 533 | "a": { 534 | "type": "FLOAT", 535 | "default": np.pi * 2.0, 536 | "min": 0.0, 537 | "max": np.pi * 5.0, 538 | "step": 0.05, 539 | }, 540 | "bg_threshold": { 541 | "type": "FLOAT", 542 | "default": 0.1, 543 | "min": 0, 544 | "max": 1, 545 | "step": 0.05, 546 | }, 547 | }, 548 | }, 549 | "MiDaS-DepthMapPreprocessor": { 550 | "a": np.pi * 2.0, 551 | "bg_threshold": 0.1, 552 | "resolution": 512, 553 | "threshold_mapping": { 554 | "threshold_a": "a", 555 | "threshold_b": "bg_threshold", 556 | }, 557 | "param_config": { 558 | "a": { 559 | "type": "FLOAT", 560 | "default": np.pi * 2.0, 561 | "min": 0.0, 562 | "max": np.pi * 5.0, 563 | "step": 0.05, 564 | }, 565 | "bg_threshold": { 566 | "type": "FLOAT", 567 | "default": 0.1, 568 | "min": 0, 569 | "max": 1, 570 | "step": 0.05, 571 | }, 572 | }, 573 | }, 574 | "M-LSDPreprocessor": { 575 | "score_threshold": 0.1, 576 | "dist_threshold": 0.1, 577 | "resolution": 512, 578 | "threshold_mapping": { 579 | "threshold_a": "score_threshold", 580 | "threshold_b": "dist_threshold", 581 | }, 582 | "param_config": { 583 | "score_threshold": { 584 | "type": "FLOAT", 585 | "default": 0.1, 586 | "min": 0.01, 587 | "max": 2.0, 588 | "step": 0.01, 589 | }, 590 | "dist_threshold": { 591 | "type": "FLOAT", 592 | "default": 0.1, 593 | "min": 0.01, 594 | "max": 20.0, 595 | "step": 0.01, 596 | }, 597 | }, 598 | }, 599 | "BAE-NormalMapPreprocessor": {"resolution": 512}, 600 | "OneFormer-COCO-SemSegPreprocessor": {"resolution": 512}, 601 | "OneFormer-ADE20K-SemSegPreprocessor": {"resolution": 512}, 602 | "PiDiNetPreprocessor": {"safe": "enable", "resolution": 512}, 603 | "ScribblePreprocessor": {"resolution": 512}, 604 | "Scribble_XDoG_Preprocessor": { 605 | "threshold": 32, 606 | "resolution": 512, 607 | "threshold_mapping": { 608 | "threshold_a": "threshold", 609 | }, 610 | "param_config": { 611 | "threshold": { 612 | "type": "INT", 613 | "default": 32, 614 | "min": 1, 615 | "max": 64, 616 | "step": 64, 617 | }, 618 | 
}, 619 | }, 620 | # "SAMPreprocessor": {"resolution": 512}, 621 | "ShufflePreprocessor": {"resolution": 512}, 622 | "TilePreprocessor": { 623 | "pyrUp_iters": 3, 624 | "resolution": 512, 625 | "threshold_mapping": { 626 | "threshold_a": "pyrUp_iters", 627 | }, 628 | "param_config": { 629 | "pyrUp_iters": { 630 | "type": "INT", 631 | "default": 3, 632 | "min": 1, 633 | "max": 10, 634 | "step": 1, 635 | } 636 | }, 637 | }, 638 | "UniFormer-SemSegPreprocessor": {"resolution": 512}, 639 | "SemSegPreprocessor": {"resolution": 512}, 640 | "Zoe-DepthMapPreprocessor": {"resolution": 512}, 641 | } 642 | 643 | 644 | def convert_number(num, num_type): 645 | if num_type == "INT": 646 | return int(num) 647 | elif num_type == "FLOAT": 648 | return float(num) 649 | else: 650 | return "Invalid number type" 651 | 652 | 653 | class ControlnetUnit: 654 | def __init__( 655 | self, 656 | ): 657 | self.map = nodes.NODE_CLASS_MAPPINGS 658 | self.map_param = controlnet_config.copy() 659 | 660 | # @classmethod 661 | # def INPUT_TYPES(s): 662 | # return {"required": { "image": ("IMAGE",), 663 | # "preprocessor_name": (s.preprocessor_list,)}, 664 | # "resolution": ("INT", {"default": 512, "min": 64, "max": 2048, "step": 64}), 665 | # "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), 666 | # } 667 | @classmethod 668 | def INPUT_TYPES(s): 669 | return { 670 | "required": { 671 | "positive": ("CONDITIONING",), 672 | "negative": ("CONDITIONING",), 673 | "preprocessor_name": (preprocessor_list,), 674 | "control_net_name": (folder_paths.get_filename_list("controlnet"),), 675 | "strength": ( 676 | "FLOAT", 677 | {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}, 678 | ), 679 | "start_percent": ( 680 | "FLOAT", 681 | {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}, 682 | ), 683 | "end_percent": ( 684 | "FLOAT", 685 | {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}, 686 | ), 687 | "resolution": ( 688 | "INT", 689 | {"default": 512, "min": 64, "max": 2048, "step": 64}, 690 | ), 691 | }, 692 | "optional": { 693 | "image": ("IMAGE",), 694 | "mask": ("MASK",), 695 | "threshold_a": ( 696 | "FLOAT", 697 | { 698 | "default": 0.0, 699 | }, 700 | ), 701 | "threshold_b": ( 702 | "FLOAT", 703 | { 704 | "default": 0.0, 705 | }, 706 | ), 707 | }, 708 | } 709 | 710 | RETURN_TYPES = ("IMAGE", "CONDITIONING", "CONDITIONING") 711 | RETURN_NAMES = ("preprocessed_image", "positive", "negative") 712 | FUNCTION = "preprocessAndApply" 713 | 714 | CATEGORY = "Auto-Photoshop-SD" 715 | 716 | def preprocessAndApply( 717 | self, 718 | **kwargs, 719 | ): 720 | instance = self.map[kwargs["preprocessor_name"]] 721 | self.preprocessor = instance() 722 | self.method = getattr(self.preprocessor, self.preprocessor.FUNCTION) 723 | self.param = self.map_param.get(kwargs["preprocessor_name"], {}).copy() 724 | if "mask" in self.param: 725 | # print("mask:", kwargs["mask"]) 726 | self.param["mask"] = kwargs["mask"] 727 | if "resolution" in self.param: 728 | # print("resolution:", kwargs["resolution"]) 729 | self.param["resolution"] = kwargs["resolution"] 730 | threshold_mapping = self.param.pop("threshold_mapping", None) 731 | param_config = self.param.pop( 732 | "param_config", None 733 | ) # don't pass param_config to method(), delete param_config 734 | if threshold_mapping: 735 | threshold_a_param_name = threshold_mapping.get("threshold_a") 736 | threshold_b_param_name = threshold_mapping.get("threshold_b") 737 | if threshold_a_param_name and "threshold_a" in kwargs: 738 | value = kwargs["threshold_a"] 739 | var_type = 
param_config[threshold_a_param_name]["type"] 740 | converted_value = convert_number(value, var_type) 741 | self.param.update({threshold_a_param_name: converted_value}) 742 | if threshold_b_param_name and "threshold_b" in kwargs: 743 | value = kwargs["threshold_b"] 744 | var_type = param_config[threshold_b_param_name]["type"] 745 | converted_value = convert_number(value, var_type) 746 | self.param.update({threshold_b_param_name: converted_value}) 747 | 748 | res = self.method(kwargs["image"], **self.param) 749 | preprocessed_image = res 750 | if "result" in res: 751 | # print("res:", res) 752 | (preprocessed_image,) = res["result"] 753 | # print("type(res['result']):", type(res["result"])) 754 | # print("type(preprocessed_image): ", type(preprocessed_image)) 755 | elif isinstance(res, tuple): 756 | (preprocessed_image,) = res 757 | 758 | (controlnet,) = nodes.ControlNetLoader().load_controlnet( 759 | kwargs["control_net_name"] 760 | ) 761 | ( 762 | new_positive, 763 | new_negative, 764 | ) = nodes.ControlNetApplyAdvanced().apply_controlnet( 765 | kwargs["positive"], 766 | kwargs["negative"], 767 | controlnet, 768 | preprocessed_image, 769 | kwargs["strength"], 770 | kwargs["start_percent"], 771 | kwargs["end_percent"], 772 | ) 773 | 774 | return (preprocessed_image, new_positive, new_negative) 775 | 776 | 777 | class ControlNetScript: 778 | @classmethod 779 | def INPUT_TYPES(s): 780 | # model_list = folder_paths.get_filename_list("controlnet") 781 | 782 | model_list = ["None"] + folder_paths.get_filename_list("controlnet") 783 | # print("type model_list: ",type (model_list)) 784 | return { 785 | "required": { 786 | "positive": ("CONDITIONING",), 787 | "negative": ("CONDITIONING",), 788 | "is_enabled_1": (["disable", "enable"], {"default": "disable"}), 789 | "preprocessor_name_1": (preprocessor_list,), 790 | "control_net_name_1": (model_list,), 791 | "strength_1": ( 792 | "FLOAT", 793 | {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}, 794 | ), 795 | "threshold_a_1": ( 796 | "FLOAT", 797 | { 798 | "default": 0.0, 799 | }, 800 | ), 801 | "threshold_b_1": ( 802 | "FLOAT", 803 | { 804 | "default": 0.0, 805 | }, 806 | ), 807 | "start_percent_1": ( 808 | "FLOAT", 809 | {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}, 810 | ), 811 | "end_percent_1": ( 812 | "FLOAT", 813 | {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}, 814 | ), 815 | "resolution_1": ( 816 | "INT", 817 | {"default": 512, "min": 64, "max": 2048, "step": 64}, 818 | ), 819 | "is_enabled_2": (["disable", "enable"], {"default": "disable"}), 820 | "preprocessor_name_2": (preprocessor_list,), 821 | "control_net_name_2": (model_list,), 822 | "strength_2": ( 823 | "FLOAT", 824 | {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}, 825 | ), 826 | "threshold_a_2": ( 827 | "FLOAT", 828 | { 829 | "default": 0.0, 830 | }, 831 | ), 832 | "threshold_b_2": ( 833 | "FLOAT", 834 | { 835 | "default": 0.0, 836 | }, 837 | ), 838 | "start_percent_2": ( 839 | "FLOAT", 840 | {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}, 841 | ), 842 | "end_percent_2": ( 843 | "FLOAT", 844 | {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}, 845 | ), 846 | "resolution_2": ( 847 | "INT", 848 | {"default": 512, "min": 64, "max": 2048, "step": 64}, 849 | ), 850 | "is_enabled_3": (["disable", "enable"], {"default": "disable"}), 851 | "preprocessor_name_3": (preprocessor_list,), 852 | "control_net_name_3": (model_list,), 853 | "strength_3": ( 854 | "FLOAT", 855 | {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}, 856 | ), 857 | 
"threshold_a_3": ( 858 | "FLOAT", 859 | { 860 | "default": 0.0, 861 | }, 862 | ), 863 | "threshold_b_3": ( 864 | "FLOAT", 865 | { 866 | "default": 0.0, 867 | }, 868 | ), 869 | "start_percent_3": ( 870 | "FLOAT", 871 | {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}, 872 | ), 873 | "end_percent_3": ( 874 | "FLOAT", 875 | {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}, 876 | ), 877 | "resolution_3": ( 878 | "INT", 879 | {"default": 512, "min": 64, "max": 2048, "step": 64}, 880 | ), 881 | }, 882 | "optional": { 883 | "image_1": ("IMAGE",), 884 | "mask_1": ("IMAGE",), 885 | "image_2": ("IMAGE",), 886 | "mask_2": ("IMAGE",), 887 | "image_3": ("IMAGE",), 888 | "mask_3": ("IMAGE",), 889 | }, 890 | } 891 | 892 | RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "CONDITIONING", "CONDITIONING") 893 | RETURN_NAMES = ( 894 | "preprocessed_image_1", 895 | "preprocessed_image_2", 896 | "preprocessed_image_3", 897 | "positive", 898 | "negative", 899 | ) 900 | FUNCTION = "preprocessAndApply" 901 | 902 | CATEGORY = "Auto-Photoshop-SD" 903 | 904 | def preprocessAndApply(self, **kwargs): 905 | preprocessed_images = [kwargs.get(f"image_{i+1}", "") for i in range(3)] 906 | last_positive = kwargs["positive"] 907 | last_negative = kwargs["negative"] 908 | 909 | for i in range(3): 910 | args = { 911 | "image": kwargs.get(f"image_{i+1}", ""), 912 | "mask": kwargs.get(f"mask_{i+1}", ""), 913 | "preprocessor_name": kwargs.get(f"preprocessor_name_{i+1}", ""), 914 | "control_net_name": kwargs.get(f"control_net_name_{i+1}", ""), 915 | "strength": kwargs.get(f"strength_{i+1}", ""), 916 | "start_percent": kwargs.get(f"start_percent_{i+1}", ""), 917 | "end_percent": kwargs.get(f"end_percent_{i+1}", ""), 918 | "resolution": kwargs.get(f"resolution_{i+1}", ""), 919 | "threshold_a": kwargs.get(f"threshold_a_{i+1}", 0), 920 | "threshold_b": kwargs.get(f"threshold_b_{i+1}", 0), 921 | "positive": last_positive, 922 | "negative": last_negative, 923 | } 924 | 925 | if ( 926 | kwargs[f"is_enabled_{i+1}"] == "enable" 927 | and args["preprocessor_name"] != "None" 928 | and args["control_net_name"] != "None" 929 | ): 930 | # load image and mask if they are file name 931 | if isinstance(args["image"], str) and args["image"] != "": 932 | ( 933 | args["image"], 934 | _mask, 935 | ) = nodes.LoadImage().load_image(args["image"]) 936 | if ( 937 | isinstance(args["mask"], str) and args["mask"] != "" 938 | ): # mask is string file name 939 | ( 940 | args["mask"], 941 | _mask, 942 | ) = nodes.LoadImage().load_image(args["mask"]) 943 | (args["mask"],) = ImageToMask().image_to_mask(args["mask"], "red") 944 | elif args["mask"] != "": 945 | (args["mask"],) = ImageToMask().image_to_mask(args["mask"], "red") 946 | 947 | ( 948 | preprocessed_images[i], 949 | last_positive, 950 | last_negative, 951 | ) = ControlnetUnit().preprocessAndApply(**args) 952 | 953 | return ( 954 | preprocessed_images[0], 955 | preprocessed_images[1], 956 | preprocessed_images[2], 957 | last_positive, 958 | last_negative, 959 | ) 960 | 961 | 962 | class ContentMaskLatent: 963 | @classmethod 964 | def INPUT_TYPES(s): 965 | return { 966 | "required": { 967 | "content_mask": ( 968 | ["original", "latent_noise", "latent_nothing"], 969 | {"default": "original"}, 970 | ), 971 | "init_image": ("IMAGE",), 972 | "mask": ("IMAGE",), 973 | "width": ( 974 | "INT", 975 | {"default": 512, "min": 0, "max": nodes.MAX_RESOLUTION, "step": 1}, 976 | ), 977 | "height": ( 978 | "INT", 979 | {"default": 512, "min": 0, "max": nodes.MAX_RESOLUTION, "step": 1}, 980 | ), 981 | "vae": 
("VAE",), 982 | "seed": ("INT", {"default": 0, "min": 0, "max": 0xFFFFFFFFFFFFFFFF}), 983 | } 984 | } 985 | 986 | RETURN_TYPES = ("LATENT", "IMAGE", "IMAGE", "IMAGE") 987 | RETURN_NAMES = ( 988 | "latents", 989 | "original_preview", 990 | "latent_noise_preview", 991 | "latent_nothing_preview", 992 | ) 993 | FUNCTION = "generateContentMaskLatent" 994 | 995 | CATEGORY = "Auto-Photoshop-SD" 996 | 997 | def generateContentMaskLatent(self, **kwargs): 998 | content_mask = kwargs.get("content_mask") 999 | init_image = kwargs.get("init_image", "") 1000 | mask = kwargs.get("mask", "") 1001 | width = kwargs.get("width") 1002 | height = kwargs.get("height") 1003 | vae = kwargs.get("vae", "") 1004 | seed = kwargs.get("seed", 0) 1005 | original_preview = None 1006 | latent_noise_preview = None 1007 | latent_nothing_preview = None 1008 | latents = "" 1009 | upscale_method = "nearest-exact" 1010 | crop = "disabled" 1011 | 1012 | # self.map = nodes.NODE_CLASS_MAPPINGS[''] 1013 | (upscaled_init_image,) = nodes.ImageScale().upscale( 1014 | init_image, upscale_method, width, height, crop 1015 | ) 1016 | (upscaled_mask_image,) = nodes.ImageScale().upscale( 1017 | mask, upscale_method, width, height, crop 1018 | ) 1019 | (MASK,) = ImageToMask().image_to_mask(upscaled_mask_image, "red") 1020 | if content_mask == "original": 1021 | (samples,) = nodes.VAEEncode().encode(vae, upscaled_init_image) 1022 | (latents,) = nodes.SetLatentNoiseMask().set_mask(samples, MASK) 1023 | (original_preview,) = nodes.VAEDecode().decode(vae, latents) 1024 | elif content_mask == "latent_noise": 1025 | (latent_noise,) = GaussianLatentImage().generate( 1026 | width, height, batch_size=1, seed=seed 1027 | ) 1028 | (latent_noise_image,) = nodes.VAEDecode().decode(vae, latent_noise) 1029 | (latent_noise_preview,) = ImageCompositeMasked().composite( 1030 | upscaled_init_image, latent_noise_image, 0, 0, True, MASK 1031 | ) 1032 | (latents,) = nodes.VAEEncode().encode(vae, latent_noise_preview) 1033 | (latents,) = nodes.SetLatentNoiseMask().set_mask(latents, MASK) 1034 | elif content_mask == "latent_nothing": 1035 | # (latents,) = nodes.VAEEncodeForInpaint().encode( 1036 | # vae, upscaled_init_image, MASK, 0 1037 | # ) 1038 | # (latent_nothing_preview,) = nodes.VAEDecode().decode(vae, latents) 1039 | 1040 | (destination,) = nodes.VAEEncode().encode(vae, upscaled_init_image) 1041 | (source,) = nodes.EmptyLatentImage().generate(width, height) 1042 | (latents,) = LatentCompositeMasked().composite( 1043 | destination, source, 0, 0, True, MASK 1044 | ) 1045 | (latents,) = nodes.SetLatentNoiseMask().set_mask(latents, MASK) 1046 | (latent_nothing_preview,) = nodes.VAEDecode().decode(vae, latents) 1047 | 1048 | return (latents, original_preview, latent_noise_preview, latent_nothing_preview) 1049 | 1050 | 1051 | class APS_Seed: 1052 | @classmethod 1053 | def INPUT_TYPES(s): 1054 | return { 1055 | "required": { 1056 | "seed": ("INT", {"default": 0, "min": 0, "max": 0xFFFFFFFFFFFFFFFF}), 1057 | } 1058 | } 1059 | 1060 | RETURN_TYPES = ("INT",) 1061 | RETURN_NAMES = "seed" 1062 | FUNCTION = "getSeed" 1063 | 1064 | CATEGORY = "Auto-Photoshop-SD" 1065 | 1066 | def getSeed(self, **kwargs): 1067 | seed = kwargs.get("seed", 0) 1068 | return (seed,) 1069 | 1070 | 1071 | NODE_CLASS_MAPPINGS = { 1072 | "LoadImageWithMetaData": LoadImageWithMetaData, 1073 | "GetConfig": GetConfig, 1074 | "LoadImageBase64": LoadImageBase64, 1075 | "LoadLorasFromPrompt": LoadLorasFromPrompt, 1076 | "GaussianLatentImage": GaussianLatentImage, 1077 | "APS_LatentBatch": 
APS_LatentBatch, 1078 | "ControlnetUnit": ControlnetUnit, 1079 | "ControlNetScript": ControlNetScript, 1080 | "ContentMaskLatent": ContentMaskLatent, 1081 | "APS_Seed": APS_Seed, 1082 | "MaskExpansion": MaskExpansion, 1083 | } 1084 | 1085 | # A dictionary that contains the friendly/humanly readable titles for the nodes 1086 | NODE_DISPLAY_NAME_MAPPINGS = { 1087 | "LoadImageWithMetaData": "load Image with metadata", 1088 | "GetConfig": "get config data", 1089 | "LoadImageBase64": "load image from base64 string", 1090 | "LoadLorasFromPrompt": "Load Loras From Prompt", 1091 | "GaussianLatentImage": "Generate Latent Noise", 1092 | "APS_LatentBatch": "Combine Multiple Latents Into Batch", 1093 | "ControlnetUnit": "General Purpose Controlnet Unit", 1094 | "ControlNetScript": "ControlNet Script", 1095 | "ContentMaskLatent": "Content Mask Latent", 1096 | "APS_Seed": "Auto-Photoshop-SD Seed", 1097 | "MaskExpansion": "Expand and Blur the Mask", 1098 | } 1099 | --------------------------------------------------------------------------------
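
For reference, below is a minimal sketch of how an external client can drive the nodes registered above by posting an API-format workflow (the JSON produced by 'Save (API Format)', as described in the README) to a running ComfyUI server. The server address, the input file name, and the two-node graph are illustrative assumptions, not taken from the plugin.

```python
import base64
import json
import urllib.request

# Encode an image as a plain base64 string, which is what the
# LoadImageBase64 node defined in api_nodes.py expects as input.
with open("input.png", "rb") as f:  # hypothetical file name
    image_b64 = base64.b64encode(f.read()).decode("utf-8")

# Fragment of an API-format workflow: node "1" decodes the base64 image
# and feeds its IMAGE output (slot 0) into a built-in PreviewImage node.
prompt_graph = {
    "1": {"class_type": "LoadImageBase64", "inputs": {"image_base64": image_b64}},
    "2": {"class_type": "PreviewImage", "inputs": {"images": ["1", 0]}},
}

# Queue the graph on the ComfyUI server (default local address assumed).
req = urllib.request.Request(
    "http://127.0.0.1:8188/prompt",
    data=json.dumps({"prompt": prompt_graph}).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)
print(urllib.request.urlopen(req).read().decode("utf-8"))
```

Larger graphs exported with 'Save (API Format)' have the same shape: a dictionary keyed by node id, where each entry names a class_type from NODE_CLASS_MAPPINGS and wires each input either to a literal value or to a [node_id, output_slot] pair.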