├── ComfyI2I.py
├── Guide_Images
│   ├── Blend_Contrast.JPG
│   ├── Blend_Percentage.JPG
│   ├── Blur.JPG
│   ├── Channel.JPG
│   ├── Color_Transfer.JPG
│   ├── Combine_and_Paste.JPG
│   ├── ComfyShop
│   │   ├── Access.jpg
│   │   ├── Brush_Menu.jpg
│   │   ├── RGB-A.jpg
│   │   ├── Save_To_Node.jpg
│   │   ├── Saving_Outputs.jpg
│   │   ├── Zoom_In.jpg
│   │   └── Zoom_Out.jpg
│   ├── Inpaint_Segments.JPG
│   ├── Invert.JPG
│   ├── ShrinkGrow.JPG
│   ├── Use-Mask-1.jpg
│   ├── Use-Mask-2.jpg
│   ├── Use_Text_Sigma 1.JPG
│   ├── Use_Text_Sigma 2.JPG
│   ├── V2
│   │   ├── ColorXferworkflow.JPG
│   │   ├── blur_amount.JPG
│   │   ├── blur_radius.JPG
│   │   ├── brightness.JPG
│   │   ├── contrast.JPG
│   │   ├── empty_mask.JPG
│   │   ├── gamma.JPG
│   │   ├── masked_xfer.JPG
│   │   ├── multi_xfer.JPG
│   │   ├── no_of_colors.JPG
│   │   ├── separate_mask.JPG
│   │   ├── strength.JPG
│   │   └── xfer_across_channels.JPG
│   └── Workflow.jpg
├── LICENSE
├── README.md
├── __init__.py
├── __pycache__
│   ├── ComfyI2I.cpython-310.pyc
│   └── __init__.cpython-310.pyc
├── install.bat
├── js
│   ├── ComfyShop.js
│   └── imageProcessorWorker.js
├── requirements.txt
└── workflows
    ├── Color Xfer Workflow.json
    ├── I2I workflow.json
    └── Multi_XFer_Workflow.json

/ComfyI2I.py:
--------------------------------------------------------------------------------
1 | # By ManglerFTW (Discord: ManglerFTW)
2 | #
3 | # Copyright 2023 Peter Mango (ManglerFTW)
4 | #
5 | # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to
6 | # deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 | # and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
8 | #
9 | # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
10 | #
11 | # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
13 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
14 | # THE SOFTWARE.
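# Overview: this module registers four ComfyUI nodes (see NODE_CLASS_MAPPINGS
# at the bottom of the file): "Color Transfer" (Color_Correction), "Mask Ops"
# (Mask_Ops), "Inpaint Segments" (MaskToRegion) and "Combine and Paste"
# (Combine_And_Paste_Op). As a rough, hypothetical sketch only, the
# color-transfer entry point could be driven directly outside a ComfyUI graph
# roughly like this (images are ComfyUI-style float tensors of shape
# [B, H, W, C] in 0..1; a CUDA device is assumed because several helpers
# hard-code device='cuda'):
#
#     node = Color_Correction()
#     (result,) = node.ColorXfer2(source_image, target_image, no_of_colors=6,
#                                 blur_radius=2, blur_amount=2, strength=1.0,
#                                 gamma=1.0, contrast=1.0, brightness=1.0)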
15 | 16 | import numpy as np 17 | from collections import namedtuple 18 | import cv2 19 | import torch 20 | import sys 21 | import os 22 | import folder_paths as comfy_paths 23 | from torchvision.ops import masks_to_boxes 24 | import torchvision.transforms.functional as TF 25 | import torch.nn.functional as F 26 | from PIL import Image, ImageFilter, ImageOps 27 | import subprocess 28 | import math 29 | 30 | # Check for CUDA availability 31 | device = 'cuda' if torch.cuda.is_available() else 'cpu' 32 | 33 | ARRAY_DATATYPE = torch.int32 # Corresponding to 'l' 34 | 35 | Rgb = namedtuple('Rgb', ('r', 'g', 'b')) 36 | Hsl = namedtuple('Hsl', ('h', 's', 'l')) 37 | 38 | VERY_BIG_SIZE = 1024 * 1024 39 | MAX_RESOLUTION=8192 40 | 41 | MODELS_DIR = comfy_paths.models_dir 42 | 43 | class cstr(str): 44 | class color: 45 | END = '\33[0m' 46 | BOLD = '\33[1m' 47 | ITALIC = '\33[3m' 48 | UNDERLINE = '\33[4m' 49 | BLINK = '\33[5m' 50 | BLINK2 = '\33[6m' 51 | SELECTED = '\33[7m' 52 | 53 | BLACK = '\33[30m' 54 | RED = '\33[31m' 55 | GREEN = '\33[32m' 56 | YELLOW = '\33[33m' 57 | BLUE = '\33[34m' 58 | VIOLET = '\33[35m' 59 | BEIGE = '\33[36m' 60 | WHITE = '\33[37m' 61 | 62 | BLACKBG = '\33[40m' 63 | REDBG = '\33[41m' 64 | GREENBG = '\33[42m' 65 | YELLOWBG = '\33[43m' 66 | BLUEBG = '\33[44m' 67 | VIOLETBG = '\33[45m' 68 | BEIGEBG = '\33[46m' 69 | WHITEBG = '\33[47m' 70 | 71 | GREY = '\33[90m' 72 | LIGHTRED = '\33[91m' 73 | LIGHTGREEN = '\33[92m' 74 | LIGHTYELLOW = '\33[93m' 75 | LIGHTBLUE = '\33[94m' 76 | LIGHTVIOLET = '\33[95m' 77 | LIGHTBEIGE = '\33[96m' 78 | LIGHTWHITE = '\33[97m' 79 | 80 | GREYBG = '\33[100m' 81 | LIGHTREDBG = '\33[101m' 82 | LIGHTGREENBG = '\33[102m' 83 | LIGHTYELLOWBG = '\33[103m' 84 | LIGHTBLUEBG = '\33[104m' 85 | LIGHTVIOLETBG = '\33[105m' 86 | LIGHTBEIGEBG = '\33[106m' 87 | LIGHTWHITEBG = '\33[107m' 88 | 89 | @staticmethod 90 | def add_code(name, code): 91 | if not hasattr(cstr.color, name.upper()): 92 | setattr(cstr.color, name.upper(), code) 93 | else: 94 | raise ValueError(f"'cstr' object already contains a code with the name '{name}'.") 95 | 96 | def __new__(cls, text): 97 | return super().__new__(cls, text) 98 | 99 | def __getattr__(self, attr): 100 | if attr.lower().startswith("_cstr"): 101 | code = getattr(self.color, attr.upper().lstrip("_cstr")) 102 | modified_text = self.replace(f"__{attr[1:]}__", f"{code}") 103 | return cstr(modified_text) 104 | elif attr.upper() in dir(self.color): 105 | code = getattr(self.color, attr.upper()) 106 | modified_text = f"{code}{self}{self.color.END}" 107 | return cstr(modified_text) 108 | elif attr.lower() in dir(cstr): 109 | return getattr(cstr, attr.lower()) 110 | else: 111 | raise AttributeError(f"'cstr' object has no attribute '{attr}'") 112 | 113 | def print(self, **kwargs): 114 | print(self, **kwargs) 115 | 116 | def tensor2rgb(t: torch.Tensor) -> torch.Tensor: 117 | size = t.size() 118 | if (len(size) < 4): 119 | return t.unsqueeze(3).repeat(1, 1, 1, 3) 120 | if size[3] == 1: 121 | return t.repeat(1, 1, 1, 3) 122 | elif size[3] == 4: 123 | return t[:, :, :, :3] 124 | else: 125 | return t 126 | 127 | def tensor2rgba(t: torch.Tensor) -> torch.Tensor: 128 | size = t.size() 129 | if (len(size) < 4): 130 | return t.unsqueeze(3).repeat(1, 1, 1, 4) 131 | elif size[3] == 1: 132 | return t.repeat(1, 1, 1, 4) 133 | elif size[3] == 3: 134 | alpha_tensor = torch.ones((size[0], size[1], size[2], 1)) 135 | return torch.cat((t, alpha_tensor), dim=3) 136 | else: 137 | return t 138 | 139 | def tensor2mask(t: torch.Tensor) -> torch.Tensor: 140 | size = 
t.size() 141 | if (len(size) < 4): 142 | return t 143 | if size[3] == 1: 144 | return t[:,:,:,0] 145 | elif size[3] == 4: 146 | # Not sure what the right thing to do here is. Going to try to be a little smart and use alpha unless all alpha is 1 in case we'll fallback to RGB behavior 147 | if torch.min(t[:, :, :, 3]).item() != 1.: 148 | return t[:,:,:,3] 149 | 150 | return TF.rgb_to_grayscale(tensor2rgb(t).permute(0,3,1,2), num_output_channels=1)[:,0,:,:] 151 | 152 | def tensor2batch(t: torch.Tensor, bs: torch.Size) -> torch.Tensor: 153 | if len(t.size()) < len(bs): 154 | t = t.unsqueeze(3) 155 | if t.size()[0] < bs[0]: 156 | t.repeat(bs[0], 1, 1, 1) 157 | dim = bs[3] 158 | if dim == 1: 159 | return tensor2mask(t) 160 | elif dim == 3: 161 | return tensor2rgb(t) 162 | elif dim == 4: 163 | return tensor2rgba(t) 164 | 165 | def tensors2common(t1: torch.Tensor, t2: torch.Tensor) -> (torch.Tensor, torch.Tensor): 166 | t1s = t1.size() 167 | t2s = t2.size() 168 | if len(t1s) < len(t2s): 169 | t1 = t1.unsqueeze(3) 170 | elif len(t1s) > len(t2s): 171 | t2 = t2.unsqueeze(3) 172 | 173 | if len(t1.size()) == 3: 174 | if t1s[0] < t2s[0]: 175 | t1 = t1.repeat(t2s[0], 1, 1) 176 | elif t1s[0] > t2s[0]: 177 | t2 = t2.repeat(t1s[0], 1, 1) 178 | else: 179 | if t1s[0] < t2s[0]: 180 | t1 = t1.repeat(t2s[0], 1, 1, 1) 181 | elif t1s[0] > t2s[0]: 182 | t2 = t2.repeat(t1s[0], 1, 1, 1) 183 | 184 | t1s = t1.size() 185 | t2s = t2.size() 186 | if len(t1s) > 3 and t1s[3] < t2s[3]: 187 | return tensor2batch(t1, t2s), t2 188 | elif len(t1s) > 3 and t1s[3] > t2s[3]: 189 | return t1, tensor2batch(t2, t1s) 190 | else: 191 | return t1, t2 192 | 193 | # Tensor to PIL 194 | def tensor2pil(image): 195 | return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)) 196 | 197 | # PIL to Tensor 198 | def pil2tensor(image): 199 | return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0) 200 | 201 | # PIL to Tensor 202 | def pil2tensor_stacked(image): 203 | if isinstance(image, Image.Image): 204 | return torch.from_numpy(np.array(image).astype(np.float32) / 255.0) 205 | elif isinstance(image, torch.Tensor): 206 | return image 207 | else: 208 | raise ValueError(f"Unexpected datatype for input to 'pil2tensor_stacked'. 
Expected a PIL Image or tensor, but received type: {type(image)}") 209 | 210 | 211 | 212 | class Color(object): 213 | def __init__(self, r, g, b, proportion): 214 | self.rgb = Rgb(r, g, b) 215 | self.proportion = proportion 216 | 217 | def __repr__(self): 218 | return "".format( 219 | str(self.rgb), str(self.proportion * 100)) 220 | 221 | @property 222 | def hsl(self): 223 | try: 224 | return self._hsl 225 | except AttributeError: 226 | self._hsl = Hsl(*hsl(*self.rgb)) 227 | return self._hsl 228 | 229 | def extract(image_np, number_of_colors, mask_np=None): 230 | # Check and convert the image if needed 231 | if len(image_np.shape) == 2 or image_np.shape[2] != 3: # If grayscale or not RGB 232 | image_np = cv2.cvtColor(image_np, cv2.COLOR_GRAY2RGB) 233 | 234 | samples = sample(image_np, mask_np) 235 | used = pick_used(samples) 236 | used.sort(key=lambda x: x[0], reverse=True) 237 | return get_colors(samples, used, number_of_colors) 238 | 239 | def sample(image, mask=None): 240 | top_two_bits = 0b11000000 241 | 242 | sides = 1 << 2 243 | cubes = sides ** 7 244 | 245 | samples = torch.zeros((cubes,), dtype=torch.float32, device=device) # Make sure samples is of float32 type 246 | 247 | # Handle mask 248 | if mask is not None: 249 | mask_values = (torch.rand_like(mask, dtype=torch.float32) * 255).int() 250 | active_pixels = mask_values > mask 251 | else: 252 | active_pixels = torch.ones_like(image[:, :, 0], dtype=torch.bool) 253 | 254 | # Calculate RGB, HSL, and Y 255 | r, g, b = image[:, :, 0], image[:, :, 1], image[:, :, 2] 256 | h, s, l = hsl(r, g, b) # We need to convert the hsl function to use PyTorch 257 | Y = (r * 0.2126 + g * 0.7152 + b * 0.0722).int() 258 | 259 | # Packing 260 | packed = ((Y & top_two_bits) << 4) | ((h & top_two_bits) << 2) | (l & top_two_bits) 261 | packed *= 4 262 | 263 | # Accumulate samples 264 | packed_active = packed[active_pixels] 265 | r_active, g_active, b_active = r[active_pixels], g[active_pixels], b[active_pixels] 266 | 267 | samples.index_add_(0, packed_active, r_active) 268 | samples.index_add_(0, packed_active + 1, g_active) 269 | samples.index_add_(0, packed_active + 2, b_active) 270 | samples.index_add_(0, packed_active + 3, torch.ones_like(packed_active, dtype=torch.float32)) 271 | 272 | return samples 273 | 274 | def pick_used(samples): 275 | # Find indices where count (every 4th value) is non-zero 276 | non_zero_indices = torch.arange(0, samples.size(0), 4, device=samples.device)[samples[3::4] > 0] 277 | 278 | # Get counts for non-zero indices 279 | counts = samples[non_zero_indices + 3] 280 | 281 | # Combine counts and indices 282 | used = torch.stack((counts, non_zero_indices), dim=-1) 283 | 284 | # Convert torch tensors to list of tuples on CPU 285 | used_tuples = [(int(count.item()), int(idx.item())) for count, idx in zip(used[:, 0], used[:, 1])] 286 | 287 | return used_tuples 288 | 289 | def get_colors(samples, used, number_of_colors): 290 | number_of_colors = min(number_of_colors, len(used)) 291 | used = used[:number_of_colors] 292 | 293 | # Extract counts and indices 294 | counts, indices = zip(*used) 295 | counts = torch.tensor(counts, dtype=torch.long, device=device) 296 | indices = torch.tensor(indices, dtype=torch.long, device=device) 297 | 298 | # Calculate total pixels 299 | total_pixels = torch.sum(counts) 300 | 301 | # Get RGB values 302 | r_vals = samples[indices] // counts 303 | g_vals = samples[indices + 1] // counts 304 | b_vals = samples[indices + 2] // counts 305 | 306 | # Convert Torch tensors to lists 307 | r_vals_list = 
r_vals.tolist() 308 | g_vals_list = g_vals.tolist() 309 | b_vals_list = b_vals.tolist() 310 | counts_list = counts.tolist() 311 | 312 | # Create Color objects 313 | colors = [Color(r, g, b, count) for r, g, b, count in zip(r_vals_list, g_vals_list, b_vals_list, counts_list)] 314 | 315 | # Update proportions 316 | for color in colors: 317 | color.proportion /= total_pixels.item() 318 | 319 | return colors 320 | 321 | def hsl(r, g, b): 322 | r, g, b = r / 255.0, g / 255.0, b / 255.0 323 | 324 | max_val, _ = torch.max(torch.stack([r, g, b]), dim=0) 325 | min_val, _ = torch.min(torch.stack([r, g, b]), dim=0) 326 | diff = max_val - min_val 327 | 328 | # Luminance 329 | l = (max_val + min_val) / 2.0 330 | 331 | # Saturation 332 | s = torch.where( 333 | (max_val == min_val) | (l == 0), 334 | torch.zeros_like(l), 335 | torch.where(l < 0.5, diff / (max_val + min_val), diff / (2.0 - max_val - min_val)) 336 | ) 337 | 338 | # Hue 339 | conditions = [ 340 | max_val == r, 341 | max_val == g, 342 | max_val == b 343 | ] 344 | 345 | values = [ 346 | ((g - b) / diff) % 6, 347 | ((b - r) / diff) + 2, 348 | ((r - g) / diff) + 4 349 | ] 350 | 351 | h = torch.zeros_like(r) 352 | for condition, value in zip(conditions, values): 353 | h = torch.where(condition, value, h) 354 | h /= 6.0 355 | 356 | return (h * 255).int(), (s * 255).int(), (l * 255).int() 357 | 358 | def color_distance(pixel_color, palette_color): 359 | return torch.norm(pixel_color - palette_color) 360 | 361 | def segment_image(image_torch, palette_colors, mask_torch=None, threshold=128): 362 | """ 363 | Segment the image based on the color similarity of each color in the palette using PyTorch. 364 | """ 365 | if mask_torch is None: 366 | mask_torch = torch.ones(image_torch.shape[:2], device='cuda') * 255 367 | 368 | output_image_torch = torch.zeros_like(image_torch) 369 | 370 | # Convert palette colors to PyTorch tensor 371 | palette_torch = torch.tensor([list(color.rgb) for color in palette_colors], device='cuda').float() 372 | 373 | distances = torch.norm(image_torch.unsqueeze(-2) - palette_torch, dim=-1) 374 | closest_color_indices = torch.argmin(distances, dim=-1) 375 | 376 | for idx, palette_color in enumerate(palette_torch): 377 | output_image_torch[closest_color_indices == idx] = palette_color 378 | 379 | output_image_torch[mask_torch < threshold] = image_torch[mask_torch < threshold] 380 | 381 | # Convert the PyTorch tensor back to a numpy array for saving or further operations 382 | output_image_np = output_image_torch.cpu().numpy().astype('uint8') 383 | return output_image_np 384 | 385 | def calculate_luminance_vectorized(colors): 386 | """Calculate the luminance of an array of RGB colors using PyTorch.""" 387 | R, G, B = colors[:, 0], colors[:, 1], colors[:, 2] 388 | return 0.299 * R + 0.587 * G + 0.114 * B 389 | 390 | def luminance_match(palette1, palette2): 391 | # Convert palettes to PyTorch tensors 392 | palette1_rgb = torch.tensor([color.rgb for color in palette1], device='cuda').float() 393 | palette2_rgb = torch.tensor([color.rgb for color in palette2], device='cuda').float() 394 | 395 | luminance1 = calculate_luminance_vectorized(palette1_rgb) 396 | luminance2 = calculate_luminance_vectorized(palette2_rgb) 397 | 398 | # Sort luminances and get the sorted indices 399 | sorted_indices1 = torch.argsort(luminance1) 400 | sorted_indices2 = torch.argsort(luminance2) 401 | 402 | reordered_palette2 = [None] * len(palette2) 403 | 404 | # Match colors based on sorted luminance order 405 | for idx1, idx2 in 
zip(sorted_indices1.cpu().numpy(), sorted_indices2.cpu().numpy()): 406 | print(f"idx1: {idx1}, idx2: {idx2}") # Add this to debug 407 | reordered_palette2[idx1] = palette2[idx2] 408 | 409 | return reordered_palette2 410 | 411 | def apply_blur(image_torch, blur_radius, blur_amount): 412 | image_torch = image_torch.float().div(255.0) 413 | channels = image_torch.shape[2] 414 | 415 | kernel_size = int(6 * blur_radius + 1) 416 | kernel_size += 1 if kernel_size % 2 == 0 else 0 417 | 418 | # Calculate the padding required to keep the output size the same 419 | padding = kernel_size // 2 420 | 421 | # Create a Gaussian kernel 422 | x = torch.linspace(-blur_amount, blur_amount, kernel_size).to(image_torch.device) 423 | x = torch.exp(-x**2 / (2 * blur_radius**2)) 424 | x /= x.sum() 425 | kernel = x[:, None] * x[None, :] 426 | 427 | # Apply the kernel using depthwise convolution 428 | channels = image_torch.shape[-1] 429 | kernel = kernel[None, None, ...].repeat(channels, 1, 1, 1) 430 | blurred = F.conv2d(image_torch.permute(2, 0, 1)[None, ...], kernel, groups=channels, padding=padding) 431 | 432 | # Convert the tensor back to byte and de-normalize 433 | blurred = (blurred * 255.0).byte().squeeze(0).permute(1, 2, 0) 434 | return blurred 435 | 436 | def refined_replace_and_blend_colors(Source_np, img_np, palette1, modified_palette2, blur_radius=0, blur_amount=0, mask_torch=None): 437 | # Convert numpy arrays to torch tensors on GPU 438 | device = 'cuda' if torch.cuda.is_available() else 'cpu' 439 | Source = torch.from_numpy(Source_np).float().to(device) 440 | img_torch = torch.tensor(img_np, device=device).float() 441 | 442 | palette1_rgb = torch.stack([torch.tensor(color.rgb, device=device).float() if hasattr(color, 'rgb') else torch.tensor(color, device=device).float() for color in palette1]) 443 | modified_palette2_rgb = torch.stack([torch.tensor(color.rgb, device=device).float() if hasattr(color, 'rgb') else torch.tensor(color, device=device).float() for color in modified_palette2]) 444 | 445 | # Direct color replacement using broadcasting 446 | distances = torch.norm(img_torch[:, :, None] - palette1_rgb, dim=-1) 447 | closest_indices = torch.argmin(distances, dim=-1) 448 | intermediate_output = modified_palette2_rgb[closest_indices] 449 | 450 | # Convert to uint8 if not already 451 | intermediate_output = torch.clamp(intermediate_output, 0, 255).byte() 452 | 453 | # Apply blur if needed 454 | if blur_radius > 0 and blur_amount > 0: 455 | blurred_output = apply_blur(intermediate_output, blur_radius, blur_amount) 456 | else: 457 | blurred_output = intermediate_output 458 | 459 | # Blend based on the mask's intensity values if provided 460 | if mask_torch is not None: 461 | three_channel_mask = mask_torch[:, :, None].expand_as(Source) 462 | output_torch = Source * (1 - three_channel_mask) + blurred_output.float() * three_channel_mask 463 | else: 464 | output_torch = blurred_output 465 | 466 | output_np = output_torch.cpu().numpy().astype(np.uint8) 467 | 468 | return output_np 469 | 470 | def torch_rgb_to_hsv(rgb): 471 | """ 472 | Convert an RGB image to HSV. 473 | Assumes rgb is a PyTorch tensor with values in [0, 1]. 
474 | """ 475 | 476 | # Get R, G, B values 477 | r = rgb[..., 0] 478 | g = rgb[..., 1] 479 | b = rgb[..., 2] 480 | 481 | max_val, _ = torch.max(rgb, dim=-1) 482 | min_val, _ = torch.min(rgb, dim=-1) 483 | diff = max_val - min_val 484 | 485 | # Calculate Hue 486 | h = torch.zeros_like(r) 487 | h[diff == 0] = 0 488 | mask = (max_val == r) & (diff != 0) 489 | h[mask] = (60 * ((g[mask] - b[mask]) / diff[mask]) + 360) % 360 490 | mask = max_val == g 491 | h[mask] = (60 * ((b[mask] - r[mask]) / diff[mask]) + 120) % 360 492 | mask = max_val == b 493 | h[mask] = (60 * ((r[mask] - g[mask]) / diff[mask]) + 240) % 360 494 | h = h / 360. # Normalize to [0, 1] 495 | 496 | # Calculate Saturation 497 | s = torch.zeros_like(r) 498 | s[max_val != 0] = diff[max_val != 0] / max_val[max_val != 0] 499 | 500 | # Value 501 | v = max_val 502 | 503 | hsv = torch.stack([h, s, v], dim=-1) 504 | return hsv 505 | 506 | def torch_hsv_to_rgb(hsv): 507 | """ 508 | Convert an HSV image to RGB. 509 | Assumes hsv is a PyTorch tensor with values in [0, 1] for hue and [0, 1] for saturation/value. 510 | """ 511 | 512 | h = hsv[..., 0] * 360. 513 | s = hsv[..., 1] 514 | v = hsv[..., 2] 515 | 516 | c = v * s 517 | hh = h / 60. 518 | x = c * (1 - torch.abs(hh % 2 - 1)) 519 | m = v - c 520 | 521 | r, g, b = v, v, v # Initialize with value 522 | 523 | mask = (hh >= 0) & (hh < 1) 524 | r[mask] = c[mask] 525 | g[mask] = x[mask] 526 | 527 | mask = (hh >= 1) & (hh < 2) 528 | r[mask] = x[mask] 529 | g[mask] = c[mask] 530 | 531 | mask = (hh >= 2) & (hh < 3) 532 | g[mask] = c[mask] 533 | b[mask] = x[mask] 534 | 535 | mask = (hh >= 3) & (hh < 4) 536 | g[mask] = x[mask] 537 | b[mask] = c[mask] 538 | 539 | mask = (hh >= 4) & (hh < 5) 540 | r[mask] = x[mask] 541 | b[mask] = c[mask] 542 | 543 | mask = (hh >= 5) & (hh < 6) 544 | r[mask] = c[mask] 545 | b[mask] = x[mask] 546 | 547 | r += m 548 | g += m 549 | b += m 550 | 551 | rgb = torch.stack([r, g, b], dim=-1) 552 | return rgb 553 | 554 | def retain_luminance_hsv_swap(img1_np, img2_np, strength): 555 | """ 556 | Blend two images while retaining the luminance of the first. 557 | The blending is controlled by the strength parameter. 558 | Assumes img1_np and img2_np are numpy arrays in BGR format. 
559 | """ 560 | 561 | # Convert BGR to RGB 562 | img1_rgb_np = cv2.cvtColor(img1_np, cv2.COLOR_BGR2RGB).astype(float) / 255.0 563 | img2_rgb_np = cv2.cvtColor(img2_np, cv2.COLOR_BGR2RGB).astype(float) / 255.0 564 | 565 | # Blend the two RGB images linearly based on the strength 566 | blended_rgb_np = (1 - strength) * img1_rgb_np + strength * img2_rgb_np 567 | 568 | # Convert the blended RGB image and the original RGB image to YUV 569 | blended_yuv_np = cv2.cvtColor((blended_rgb_np * 255).astype(np.uint8), cv2.COLOR_RGB2YUV) 570 | img1_yuv_np = cv2.cvtColor(img1_np, cv2.COLOR_BGR2YUV) 571 | 572 | # Replace the Y channel (luminance) of the blended image with the original image's luminance 573 | blended_yuv_np[:,:,0] = img1_yuv_np[:,:,0] 574 | 575 | # Convert back to BGR 576 | result_bgr_np = cv2.cvtColor(blended_yuv_np, cv2.COLOR_YUV2BGR) 577 | 578 | return result_bgr_np 579 | 580 | def adjust_gamma_contrast(image_np, gamma, contrast, brightness, mask_np=None): 581 | # Ensure CUDA is available 582 | device = 'cuda' if torch.cuda.is_available() else 'cpu' 583 | 584 | # Transfer data to PyTorch tensors and move to the appropriate device 585 | image_torch = torch.tensor(image_np, dtype=torch.float32).to(device) 586 | 587 | # Gamma correction using a lookup table 588 | inv_gamma = 1.0 / gamma 589 | table = torch.tensor([(i / 255.0) ** inv_gamma * 255 for i in range(256)], device=device).float() 590 | gamma_corrected = torch.index_select(table, 0, image_torch.long().flatten()).reshape_as(image_torch) 591 | 592 | # Contrast and brightness adjustment 593 | contrast_adjusted = contrast * gamma_corrected + brightness 594 | contrast_adjusted = torch.clamp(contrast_adjusted, 0, 255).byte() 595 | 596 | # If mask is provided, blend the original and adjusted images 597 | if mask_np is not None: 598 | mask_torch = torch.tensor(mask_np, device=device).float() / 255.0 599 | three_channel_mask = mask_torch.unsqueeze(-1).expand_as(image_torch) 600 | contrast_adjusted = image_torch * (1 - three_channel_mask) + contrast_adjusted.float() * three_channel_mask 601 | 602 | # Transfer data back to numpy array 603 | result_np = contrast_adjusted.cpu().numpy() 604 | 605 | return result_np 606 | 607 | 608 | 609 | 610 | def CutByMask(image, mask, force_resize_width, force_resize_height, mask_mapping_optional): 611 | 612 | if len(image.shape) < 4: 613 | C = 1 614 | else: 615 | C = image.shape[3] 616 | 617 | # We operate on RGBA to keep the code clean and then convert back after 618 | image = tensor2rgba(image) 619 | mask = tensor2mask(mask) 620 | 621 | if mask_mapping_optional is not None: 622 | mask_mapping_optional = mask_mapping_optional.long() 623 | image = image[mask_mapping_optional] 624 | 625 | # Scale the mask to match the image size if it isn't 626 | B, H, W, _ = image.shape 627 | mask = F.interpolate(mask.unsqueeze(1), size=(H, W), mode='nearest')[:,0,:,:] 628 | 629 | MB, _, _ = mask.shape 630 | 631 | if MB < B: 632 | assert(B % MB == 0) 633 | mask = mask.repeat(B // MB, 1, 1) 634 | 635 | # Masks to boxes 636 | is_empty = ~torch.gt(torch.max(torch.reshape(mask, [B, H * W]), dim=1).values, 0.) 637 | mask[is_empty,0,0] = 1. 638 | boxes = masks_to_boxes(mask) 639 | mask[is_empty,0,0] = 0. 
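# Note: masks_to_boxes errors out if a mask is all zeros, so a single pixel is
# set to 1 above before the call and zeroed out again afterwards (the same
# workaround is spelled out in PasteByMask further down).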
640 | 641 | min_x = boxes[:,0] 642 | min_y = boxes[:,1] 643 | max_x = boxes[:,2] 644 | max_y = boxes[:,3] 645 | 646 | width = max_x - min_x + 1 647 | height = max_y - min_y + 1 648 | 649 | use_width = int(torch.max(width).item()) 650 | use_height = int(torch.max(height).item()) 651 | 652 | if force_resize_width > 0: 653 | use_width = force_resize_width 654 | 655 | if force_resize_height > 0: 656 | use_height = force_resize_height 657 | 658 | print("use_width: ", use_width) 659 | print("use_height: ", use_height) 660 | 661 | alpha_mask = torch.ones((B, H, W, 4)) 662 | alpha_mask[:,:,:,3] = mask 663 | 664 | image = image * alpha_mask 665 | 666 | result = torch.zeros((B, use_height, use_width, 4)) 667 | for i in range(0, B): 668 | if not is_empty[i]: 669 | ymin = int(min_y[i].item()) 670 | ymax = int(max_y[i].item()) 671 | xmin = int(min_x[i].item()) 672 | xmax = int(max_x[i].item()) 673 | single = (image[i, ymin:ymax+1, xmin:xmax+1,:]).unsqueeze(0) 674 | resized = F.interpolate(single.permute(0, 3, 1, 2), size=(use_height, use_width), mode='bicubic').permute(0, 2, 3, 1) 675 | result[i] = resized[0] 676 | 677 | # Preserve our type unless we were previously RGB and added non-opaque alpha due to the mask size 678 | if C == 1: 679 | print("C == 1 output image shape: ", tensor2mask(result).shape) 680 | return tensor2mask(result) 681 | elif C == 3 and torch.min(result[:,:,:,3]) == 1: 682 | print("C == 3 output image shape: ", tensor2rgb(result).shape) 683 | return tensor2rgb(result) 684 | else: 685 | print("else result shape: ", result.shape) 686 | return result 687 | 688 | def combine(image1, image2, op, clamp_result, round_result): 689 | image1, image2 = tensors2common(image1, image2) 690 | 691 | if op == "union (max)": 692 | result = torch.max(image1, image2) 693 | elif op == "intersection (min)": 694 | result = torch.min(image1, image2) 695 | elif op == "difference": 696 | result = image1 - image2 697 | elif op == "multiply": 698 | result = image1 * image2 699 | elif op == "multiply_alpha": 700 | image1 = tensor2rgba(image1) 701 | image2 = tensor2mask(image2) 702 | result = torch.cat((image1[:, :, :, :3], (image1[:, :, :, 3] * image2).unsqueeze(3)), dim=3) 703 | elif op == "add": 704 | result = image1 + image2 705 | elif op == "greater_or_equal": 706 | result = torch.where(image1 >= image2, 1., 0.) 707 | elif op == "greater": 708 | result = torch.where(image1 > image2, 1., 0.) 709 | 710 | if clamp_result == "yes": 711 | result = torch.min(torch.max(result, torch.tensor(0.)), torch.tensor(1.)) 712 | if round_result == "yes": 713 | result = torch.round(result) 714 | 715 | return result 716 | 717 | def apply_color_correction(target_image, source_image, factor=1): 718 | 719 | if not isinstance(source_image, (torch.Tensor, Image.Image)): 720 | raise ValueError(f"Unexpected datatype for 'source_image' at method start. Expected a tensor or PIL Image, but received type: {type(source_image)}") 721 | 722 | # Ensure source_image is a tensor 723 | if isinstance(source_image, Image.Image): # Check if it's a PIL Image 724 | source_image = pil2tensor_stacked(source_image) # Convert it to tensor 725 | 726 | if not isinstance(source_image, (torch.Tensor, Image.Image)): 727 | raise ValueError(f"Unexpected datatype for 'source_image'. 
Expected a tensor or PIL Image, but received type: {type(source_image)}") 728 | 729 | # Get the batch size 730 | batch_size = source_image.shape[0] 731 | output_images = [] 732 | 733 | for i in range(batch_size): 734 | # Convert the source and target images to NumPy arrays for the i-th image in the batch 735 | source_numpy = source_image[i, ...].numpy() 736 | target_numpy = target_image[i, ...].numpy() 737 | 738 | # Convert to float32 739 | source_numpy = source_numpy.astype(np.float32) 740 | target_numpy = target_numpy.astype(np.float32) 741 | 742 | # If the images have an alpha channel, remove it for the color transformations 743 | if source_numpy.shape[-1] == 4: 744 | source_numpy = source_numpy[..., :3] 745 | if target_numpy.shape[-1] == 4: 746 | target_numpy = target_numpy[..., :3] 747 | 748 | # Compute the mean and standard deviation of the color channels for both images 749 | target_mean, target_std = np.mean(source_numpy, axis=(0, 1)), np.std(source_numpy, axis=(0, 1)) 750 | source_mean, source_std = np.mean(target_numpy, axis=(0, 1)), np.std(target_numpy, axis=(0, 1)) 751 | 752 | adjusted_source_mean = target_mean + factor * (target_mean - source_mean) 753 | adjusted_source_std = target_std + factor * (target_std - source_std) 754 | 755 | # Normalize the target image (zero mean and unit variance) 756 | target_norm = (target_numpy - target_mean) / target_std 757 | 758 | # Scale and shift the normalized target image to match the exaggerated source image statistics 759 | matched_rgb = target_norm * adjusted_source_std + adjusted_source_mean 760 | 761 | # Clip values to [0, 1] and convert to PIL Image 762 | img = Image.fromarray(np.clip(matched_rgb * 255, 0, 255).astype('uint8'), 'RGB') 763 | 764 | # Convert the PIL Image to a tensor and append to the list 765 | img_tensor = pil2tensor_stacked(img) 766 | output_images.append(img_tensor) 767 | 768 | # Stack the list of tensors to get the batch of corrected images 769 | stacked_images = torch.stack(output_images) 770 | 771 | return stacked_images 772 | 773 | def PasteByMask(image_base, image_to_paste, mask, resize_behavior, mask_mapping_optional): 774 | image_base = tensor2rgba(image_base) 775 | image_to_paste = tensor2rgba(image_to_paste) 776 | mask = tensor2mask(mask) 777 | 778 | # Scale the mask to be a matching size if it isn't 779 | B, H, W, C = image_base.shape 780 | MB = mask.shape[0] 781 | PB = image_to_paste.shape[0] 782 | if mask_mapping_optional is None: 783 | if B < PB: 784 | assert(PB % B == 0) 785 | image_base = image_base.repeat(PB // B, 1, 1, 1) 786 | B, H, W, C = image_base.shape 787 | if MB < B: 788 | assert(B % MB == 0) 789 | mask = mask.repeat(B // MB, 1, 1) 790 | elif B < MB: 791 | assert(MB % B == 0) 792 | image_base = image_base.repeat(MB // B, 1, 1, 1) 793 | if PB < B: 794 | assert(B % PB == 0) 795 | image_to_paste = image_to_paste.repeat(B // PB, 1, 1, 1) 796 | mask = F.interpolate(mask.unsqueeze(1), size=(H, W), mode='nearest')[:,0,:,:] 797 | MB, MH, MW = mask.shape 798 | 799 | # masks_to_boxes errors if the tensor is all zeros, so we'll add a single pixel and zero it out at the end 800 | is_empty = ~torch.gt(torch.max(torch.reshape(mask,[MB, MH * MW]), dim=1).values, 0.) 801 | mask[is_empty,0,0] = 1. 802 | boxes = masks_to_boxes(mask) 803 | mask[is_empty,0,0] = 0. 
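# Each row of `boxes` is a per-mask bounding box (min_x, min_y, max_x, max_y);
# the centers and target sizes derived below decide where each pasted image
# lands and how it is scaled according to resize_behavior.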
804 | 805 | min_x = boxes[:,0] 806 | min_y = boxes[:,1] 807 | max_x = boxes[:,2] 808 | max_y = boxes[:,3] 809 | mid_x = (min_x + max_x) / 2 810 | mid_y = (min_y + max_y) / 2 811 | 812 | target_width = max_x - min_x + 1 813 | target_height = max_y - min_y + 1 814 | 815 | result = image_base.detach().clone() 816 | 817 | for i in range(0, MB): 818 | if i >= len(image_to_paste): 819 | raise ValueError(f"image_to_paste does not have an entry for mask index {i}") 820 | if is_empty[i]: 821 | continue 822 | else: 823 | image_index = i 824 | if mask_mapping_optional is not None: 825 | image_index = mask_mapping_optional[i].item() 826 | source_size = image_to_paste.size() 827 | SB, SH, SW, _ = image_to_paste.shape 828 | 829 | # Figure out the desired size 830 | width = int(target_width[i].item()) 831 | height = int(target_height[i].item()) 832 | if resize_behavior == "keep_ratio_fill": 833 | target_ratio = width / height 834 | actual_ratio = SW / SH 835 | if actual_ratio > target_ratio: 836 | width = int(height * actual_ratio) 837 | elif actual_ratio < target_ratio: 838 | height = int(width / actual_ratio) 839 | elif resize_behavior == "keep_ratio_fit": 840 | target_ratio = width / height 841 | actual_ratio = SW / SH 842 | if actual_ratio > target_ratio: 843 | height = int(width / actual_ratio) 844 | elif actual_ratio < target_ratio: 845 | width = int(height * actual_ratio) 846 | elif resize_behavior == "source_size" or resize_behavior == "source_size_unmasked": 847 | width = SW 848 | height = SH 849 | 850 | # Resize the image we're pasting if needed 851 | resized_image = image_to_paste[i].unsqueeze(0) 852 | if SH != height or SW != width: 853 | resized_image = F.interpolate(resized_image.permute(0, 3, 1, 2), size=(height,width), mode='bicubic').permute(0, 2, 3, 1) 854 | 855 | pasting = torch.ones([H, W, C]) 856 | ymid = float(mid_y[i].item()) 857 | ymin = int(math.floor(ymid - height / 2)) + 1 858 | ymax = int(math.floor(ymid + height / 2)) + 1 859 | xmid = float(mid_x[i].item()) 860 | xmin = int(math.floor(xmid - width / 2)) + 1 861 | xmax = int(math.floor(xmid + width / 2)) + 1 862 | 863 | _, source_ymax, source_xmax, _ = resized_image.shape 864 | source_ymin, source_xmin = 0, 0 865 | 866 | if xmin < 0: 867 | source_xmin = abs(xmin) 868 | xmin = 0 869 | if ymin < 0: 870 | source_ymin = abs(ymin) 871 | ymin = 0 872 | if xmax > W: 873 | source_xmax -= (xmax - W) 874 | xmax = W 875 | if ymax > H: 876 | source_ymax -= (ymax - H) 877 | ymax = H 878 | 879 | pasting[ymin:ymax, xmin:xmax, :] = resized_image[0, source_ymin:source_ymax, source_xmin:source_xmax, :] 880 | pasting[:, :, 3] = 1. 881 | 882 | pasting_alpha = torch.zeros([H, W]) 883 | pasting_alpha[ymin:ymax, xmin:xmax] = resized_image[0, source_ymin:source_ymax, source_xmin:source_xmax, 3] 884 | 885 | if resize_behavior == "keep_ratio_fill" or resize_behavior == "source_size_unmasked": 886 | # If we explicitly want to fill the area, we are ok with extending outside 887 | paste_mask = pasting_alpha.unsqueeze(2).repeat(1, 1, 4) 888 | else: 889 | paste_mask = torch.min(pasting_alpha, mask[i]).unsqueeze(2).repeat(1, 1, 4) 890 | result[image_index] = pasting * paste_mask + result[image_index] * (1. 
- paste_mask) 891 | return result 892 | 893 | class Mask_Ops: 894 | def __init__(self): 895 | pass 896 | 897 | @classmethod 898 | def INPUT_TYPES(cls): 899 | return { 900 | "required": { 901 | "image": ("IMAGE",), 902 | "text": ("STRING", {"default":"", "multiline": False}), 903 | "separate_mask": ("INT", {"default":0, "min":0, "max":1, "step":1}), 904 | "text_sigma": ("INT", {"default":30, "min":0, "max":150, "step":1}), 905 | "use_text": ("INT", {"default":0, "min":0, "max":1, "step":1}), 906 | "blend_percentage": ("FLOAT", {"default": 0, "min": 0.0, "max": 1.0, "step": 0.01}), 907 | "black_level": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 255.0, "step": 0.1}), 908 | "mid_level": ("FLOAT", {"default": 127.5, "min": 0.0, "max": 255.0, "step": 0.1}), 909 | "white_level": ("FLOAT", {"default": 255, "min": 0.0, "max": 255.0, "step": 0.1}), 910 | "channel": (["red", "green", "blue"],), 911 | "shrink_grow": ("INT", {"default": 0, "min": -128, "max": 128, "step": 1}), 912 | "invert": ("INT", {"default":0, "min":0, "max":1, "step":1}), 913 | "blur_radius": ("FLOAT", {"default": 5.0, "min": 0.0, "max": 1024, "step": 0.1}), 914 | }, 915 | "optional": { 916 | "mask": ("MASK",), 917 | }, 918 | } 919 | 920 | CATEGORY = "I2I" 921 | 922 | RETURN_TYPES = ("IMAGE", "MASK", "MASK_MAPPING",) 923 | RETURN_NAMES = ("mask_image", "mask", "mask mapping") 924 | FUNCTION = "Mask_Ops" 925 | 926 | def Mask_Ops(self, image, text, separate_mask, text_sigma, use_text, blend_percentage, black_level, mid_level, white_level, channel, shrink_grow, invert=0, blur_radius=5.0, mask=None): 927 | channels = ["red", "green", "blue"] 928 | 929 | # Freeze PIP modules 930 | def packages(versions=False): 931 | import sys 932 | import subprocess 933 | return [( r.decode().split('==')[0] if not versions else r.decode() ) for r in subprocess.check_output([sys.executable, '-s', '-m', 'pip', 'freeze']).split()] 934 | 935 | # PIL to Mask 936 | def pil2mask(image): 937 | image_np = np.array(image.convert("L")).astype(np.float32) / 255.0 938 | mask = torch.from_numpy(image_np) 939 | return 1.0 - mask 940 | 941 | def gaussian_region(image, radius=5.0): 942 | image = ImageOps.invert(image.convert("L")) 943 | image = image.filter(ImageFilter.GaussianBlur(radius=int(radius))) 944 | return image.convert("RGB") 945 | 946 | # scipy handling 947 | if 'scipy' not in packages(): 948 | cstr("Installing `scipy` ...").msg.print() 949 | subprocess.check_call([sys.executable, '-s', '-m', 'pip', 'install', 'scipy']) 950 | try: 951 | import scipy 952 | except ImportError as e: 953 | cstr("Unable to import tools for certain masking procedures.").msg.print() 954 | print(e) 955 | 956 | def smooth_region(image, tolerance): 957 | from scipy.ndimage import gaussian_filter 958 | image = image.convert("L") 959 | mask_array = np.array(image) 960 | smoothed_array = gaussian_filter(mask_array, sigma=tolerance) 961 | threshold = np.max(smoothed_array) / 2 962 | smoothed_mask = np.where(smoothed_array >= threshold, 255, 0).astype(np.uint8) 963 | smoothed_image = Image.fromarray(smoothed_mask, mode="L") 964 | return ImageOps.invert(smoothed_image.convert("RGB")) 965 | 966 | def erode_region(image, iterations): 967 | from scipy.ndimage import binary_erosion 968 | image = image.convert("L") 969 | binary_mask = np.array(image) > 0 970 | eroded_mask = binary_erosion(binary_mask, iterations=iterations) 971 | eroded_image = Image.fromarray(eroded_mask.astype(np.uint8) * 255, mode="L") 972 | return ImageOps.invert(eroded_image.convert("RGB")) 973 | 974 | def 
dilate_region(image, iterations): 975 | from scipy.ndimage import binary_dilation 976 | image = image.convert("L") 977 | binary_mask = np.array(image) > 0 978 | dilated_mask = binary_dilation(binary_mask, iterations=iterations) 979 | dilated_image = Image.fromarray(dilated_mask.astype(np.uint8) * 255, mode="L") 980 | return ImageOps.invert(dilated_image.convert("RGB")) 981 | 982 | def erode(masks, iterations): 983 | iterations = iterations * -1 984 | if masks.ndim > 3: 985 | regions = [] 986 | for mask in masks: 987 | mask_np = np.clip(255. * mask.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) 988 | pil_image = Image.fromarray(mask_np, mode="L") 989 | region_mask = erode_region(pil_image, iterations) 990 | region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1) 991 | regions.append(region_tensor) 992 | regions_tensor = torch.cat(regions, dim=0) 993 | return regions_tensor 994 | else: 995 | mask_np = np.clip(255. * masks.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) 996 | pil_image = Image.fromarray(mask_np, mode="L") 997 | region_mask = erode_region(pil_image, iterations) 998 | region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1) 999 | return region_tensor 1000 | 1001 | def dilate(masks, iterations): 1002 | if masks.ndim > 3: 1003 | regions = [] 1004 | for mask in masks: 1005 | mask_np = np.clip(255. * mask.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) 1006 | pil_image = Image.fromarray(mask_np, mode="L") 1007 | region_mask = dilate_region(pil_image, iterations) 1008 | region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1) 1009 | regions.append(region_tensor) 1010 | regions_tensor = torch.cat(regions, dim=0) 1011 | return regions_tensor 1012 | else: 1013 | mask_np = np.clip(255. * masks.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) 1014 | pil_image = Image.fromarray(mask_np, mode="L") 1015 | region_mask = dilate_region(pil_image, iterations) 1016 | region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1) 1017 | return region_tensor 1018 | 1019 | def separate(mask, separate_flag=1): 1020 | if separate_flag == 0: 1021 | # Create an unseparated mapping tensor of the same length as the batch dimension 1022 | mapping = torch.arange(mask.shape[0], device=mask.device, dtype=torch.int) 1023 | return mask, mapping 1024 | 1025 | mask = tensor2mask(mask) 1026 | 1027 | thresholded = torch.gt(mask, 0).unsqueeze(1) 1028 | B, H, W = mask.shape 1029 | components = torch.arange(B * H * W, device=mask.device, dtype=mask.dtype).reshape(B, 1, H, W) + 1 1030 | components[~thresholded] = 0 1031 | 1032 | while True: 1033 | previous_components = components 1034 | components = F.max_pool2d(components, kernel_size=3, stride=1, padding=1) 1035 | components[~thresholded] = 0 1036 | if torch.equal(previous_components, components): 1037 | break 1038 | 1039 | components = components.reshape(B, H, W) 1040 | segments = torch.unique(components) 1041 | result = torch.zeros([len(segments) - 1, H, W]) 1042 | index = 0 1043 | mapping = torch.zeros([len(segments) - 1], device=mask.device, dtype=torch.int) 1044 | for i in range(len(segments)): 1045 | segment = segments[i].item() 1046 | if segment == 0: 1047 | continue 1048 | image_index = int((segment - 1) // (H * W)) 1049 | segment_mask = (components[image_index,:,:] == segment) 1050 | result[index][segment_mask] = mask[image_index][segment_mask] 1051 | mapping[index] = image_index 1052 | index += 1 1053 | 1054 | return result, mapping 1055 | 1056 | image = tensor2pil(image) 1057 | 1058 | use_text = int(round(use_text)) 1059 | 
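# When use_text is 1, the mask is generated from the text prompt with a CLIPSeg
# model (CIDAS/clipseg-rd64-refined, cached under the ComfyUI models dir in
# 'clipseg') and then smoothed with sigma = text_sigma; otherwise the optional
# mask input is used, falling back to a full-frame mask when none is supplied.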
1060 | if use_text == 1: 1061 | 1062 | # CLIPSeg Model Loader 1063 | from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation 1064 | model = "CIDAS/clipseg-rd64-refined" 1065 | cache = os.path.join(MODELS_DIR, 'clipseg') 1066 | 1067 | inputs = CLIPSegProcessor.from_pretrained(model, cache_dir=cache) 1068 | model = CLIPSegForImageSegmentation.from_pretrained(model, cache_dir=cache) 1069 | 1070 | image = image.convert('RGB') 1071 | cache = os.path.join(MODELS_DIR, 'clipseg') 1072 | 1073 | with torch.no_grad(): 1074 | result = model(**inputs(text=text, images=image, padding=True, return_tensors="pt")) 1075 | 1076 | tensor = torch.sigmoid(result[0]) 1077 | mask = 1. - (tensor - tensor.min()) / tensor.max() 1078 | mask = mask.unsqueeze(0) 1079 | mask = tensor2pil(mask).convert("L") 1080 | mask = mask.resize(image.size) 1081 | 1082 | sigma = text_sigma 1083 | mask = pil2mask(mask) 1084 | 1085 | if mask.ndim > 3: 1086 | regions = [] 1087 | for mk in mask: 1088 | mask_np = np.clip(255. * mk.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) 1089 | pil_image = Image.fromarray(mask_np, mode="L") 1090 | region_mask = smooth_region(pil_image, sigma) 1091 | region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1) 1092 | regions.append(region_tensor) 1093 | mask = torch.cat(regions, dim=0) 1094 | 1095 | else: 1096 | mask_np = np.clip(255. * mask.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) 1097 | pil_image = Image.fromarray(mask_np, mode="L") 1098 | region_mask = smooth_region(pil_image, sigma) 1099 | mask = pil2mask(region_mask).unsqueeze(0).unsqueeze(1) 1100 | 1101 | else: 1102 | if mask is None: 1103 | # Create a full mask for the entire image 1104 | mask_shape = (image.size[1], image.size[0]) # Assuming image is in (H, W, C) format 1105 | mask = torch.ones(mask_shape, dtype=torch.float32) 1106 | else: 1107 | mask = mask 1108 | 1109 | if shrink_grow < 0: 1110 | mask = erode(mask, shrink_grow) 1111 | elif shrink_grow > 0: 1112 | mask = dilate(mask, shrink_grow) 1113 | 1114 | invert = int(round(invert)) 1115 | if invert == 1: 1116 | mask = 1.0 - mask 1117 | 1118 | #Invert Mask 1119 | Mask_Inv = mask 1120 | #Convert Inverted Mask to Image 1121 | Inv_Mask_2_Img = Mask_Inv.reshape((-1, 1, Mask_Inv.shape[-2], Mask_Inv.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3) 1122 | 1123 | #Convert Mask to Image 1124 | Mask_2_Img = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3) 1125 | 1126 | #Image Blend by Mask 1127 | # Convert images to PIL 1128 | img_a = tensor2pil(Inv_Mask_2_Img) 1129 | 1130 | 1131 | img_b = image 1132 | 1133 | 1134 | mask = ImageOps.invert(tensor2pil(Mask_2_Img).convert('L')) 1135 | 1136 | # Mask image 1137 | masked_img = Image.composite(img_a, img_b, mask.resize(img_a.size)) 1138 | 1139 | # Blend image 1140 | blend_mask = Image.new(mode="L", size=img_a.size, 1141 | color=(round(blend_percentage * 255))) 1142 | blend_mask = ImageOps.invert(blend_mask) 1143 | Blended_Image = Image.composite(img_a, masked_img, blend_mask) 1144 | 1145 | Blended_Image = pil2tensor(Blended_Image) 1146 | 1147 | del img_a, img_b, blend_mask, mask 1148 | 1149 | #Image Levels Adjustment 1150 | # Convert image to PIL 1151 | tensor_images = [] 1152 | for img in Blended_Image: 1153 | img = tensor2pil(img) 1154 | img = img.convert("RGB") 1155 | levels = self.AdjustLevels(black_level, mid_level, white_level) 1156 | tensor_images.append(pil2tensor(levels.adjust(img))) 1157 | tensor_images = torch.cat(tensor_images, dim=0) 1158 | 1159 | #Convert Image to Mask 
1160 | masks = tensor_images[0, :, :, channels.index(channel)] 1161 | 1162 | if masks.ndim > 3: 1163 | regions = [] 1164 | for mask in masks: 1165 | mask_np = np.clip(255. * mask.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) 1166 | pil_image = Image.fromarray(mask_np, mode="L") 1167 | region_mask = gaussian_region(pil_image, blur_radius) 1168 | region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1) 1169 | regions.append(region_tensor) 1170 | result = torch.cat(regions, dim=0) 1171 | 1172 | else: 1173 | mask_np = np.clip(255. * masks.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) 1174 | pil_image = Image.fromarray(mask_np, mode="L") 1175 | region_mask = gaussian_region(pil_image, blur_radius) 1176 | result = pil2mask(region_mask).unsqueeze(0).unsqueeze(1) 1177 | 1178 | result = result.reshape((-1, 1, result.shape[-2], result.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3) 1179 | 1180 | if invert == 1: 1181 | result = 1.0 - result 1182 | 1183 | result, mapping = separate(result, separate_mask) 1184 | 1185 | if invert == 1: 1186 | result = 1.0 - result 1187 | 1188 | return (result, result, mapping,) 1189 | 1190 | class AdjustLevels: 1191 | def __init__(self, min_level, mid_level, max_level): 1192 | self.min_level = min_level 1193 | self.mid_level = mid_level 1194 | self.max_level = max_level 1195 | 1196 | def adjust(self, im): 1197 | 1198 | im_arr = np.array(im) 1199 | im_arr[im_arr < self.min_level] = self.min_level 1200 | im_arr = (im_arr - self.min_level) * \ 1201 | (255 / (self.max_level - self.min_level)) 1202 | im_arr[im_arr < 0] = 0 1203 | im_arr[im_arr > 255] = 255 1204 | im_arr = im_arr.astype(np.uint8) 1205 | 1206 | im = Image.fromarray(im_arr) 1207 | im = ImageOps.autocontrast(im, cutoff=self.max_level) 1208 | 1209 | return im 1210 | 1211 | class Color_Correction: 1212 | def __init__(self): 1213 | pass 1214 | 1215 | @classmethod 1216 | def INPUT_TYPES(cls): 1217 | return { 1218 | "required": { 1219 | "source_image": ("IMAGE",), 1220 | "target_image": ("IMAGE",), 1221 | "no_of_colors": ("INT", {"default": 6, "min": 0, "max": 256, "step": 1}), 1222 | "blur_radius": ("INT", {"default": 2, "min": 0, "max": 100, "step": 1}), 1223 | "blur_amount": ("INT", {"default": 2, "min": 0, "max": 100, "step": 1}), 1224 | "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 2.0, "step": 0.1}), 1225 | "gamma": ("FLOAT", {"default": 1.0, "min": 0.10, "max": 2.0, "step": 0.1}), 1226 | "contrast": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 3.0, "step": 0.1}), 1227 | "brightness": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.1}), 1228 | }, 1229 | "optional": { 1230 | "mask": ("MASK",), 1231 | }, 1232 | } 1233 | 1234 | CATEGORY = "I2I" 1235 | 1236 | RETURN_TYPES = ("IMAGE", ) 1237 | RETURN_NAMES = ("image", ) 1238 | FUNCTION = "ColorXfer2" 1239 | 1240 | def ColorXfer2(cls, source_image, target_image, no_of_colors, blur_radius, blur_amount, strength, gamma, contrast, brightness, mask=None): 1241 | if mask is not None: 1242 | if torch.is_tensor(mask): 1243 | # Convert to grayscale if it's a 3-channel image 1244 | if mask.shape[-1] == 3: 1245 | mask = torch.mean(mask, dim=-1) 1246 | 1247 | # Remove batch dimension if present 1248 | if mask.dim() == 3: 1249 | mask = mask.squeeze(0) 1250 | 1251 | mask_np1 = (mask.cpu().numpy() * 255).astype(np.uint8) 1252 | else: 1253 | mask_np1 = (mask * 255).astype(np.uint8) 1254 | 1255 | mask_np = mask_np1 / 255.0 1256 | mask_torch = torch.tensor(mask_np).to(device) 1257 | 1258 | # If the source_image is a tensor, convert it to a 
numpy array 1259 | if torch.is_tensor(source_image): 1260 | Source_np = (source_image[0].cpu().numpy() * 255).astype(np.uint8) 1261 | else: 1262 | Source_np = (source_image * 255).astype(np.uint8) 1263 | 1264 | # If the source_image is a tensor, convert it to a numpy array 1265 | if torch.is_tensor(target_image): 1266 | Target_np = (target_image[0].cpu().numpy() * 255).astype(np.uint8) 1267 | else: 1268 | Target_np = (target_image * 255).astype(np.uint8) 1269 | 1270 | # Load the source image and convert to torch tensor 1271 | Source_np = cv2.cvtColor(Source_np, cv2.COLOR_BGR2RGB) 1272 | Source = torch.from_numpy(Source_np).float().to(device) 1273 | 1274 | # Extract colors from the source image 1275 | colors1 = extract(Source, no_of_colors, mask_np=mask_torch) 1276 | 1277 | # Load the target image 1278 | Target_np = cv2.cvtColor(Target_np, cv2.COLOR_BGR2RGB) 1279 | Target = torch.from_numpy(Target_np).float().to(device=device) 1280 | 1281 | # Extract colors from the target image 1282 | colors2 = extract(Target, no_of_colors) 1283 | 1284 | min_length = min(len(colors1), len(colors2)) 1285 | colors1 = colors1[:min_length] 1286 | colors2 = colors2[:min_length] 1287 | 1288 | # Segment the image 1289 | segmented_np = segment_image(Source, colors1, mask_torch=mask_torch, threshold=1) 1290 | 1291 | matched_pairs = luminance_match(colors1, colors2) 1292 | 1293 | result_rgb = refined_replace_and_blend_colors(Source.cpu().numpy(), segmented_np, colors1, matched_pairs, blur_radius, blur_amount, mask_torch=mask_torch) 1294 | 1295 | luminance_np = retain_luminance_hsv_swap(Source.cpu().numpy(), result_rgb, strength) 1296 | 1297 | gamma_contrast_np = adjust_gamma_contrast(luminance_np, gamma, contrast, brightness, mask_np=mask_np1) 1298 | 1299 | final_img_np_rgb = cv2.cvtColor(gamma_contrast_np, cv2.COLOR_BGR2RGB) 1300 | 1301 | # Convert the numpy array back to a PyTorch tensor 1302 | final_img_tensor = torch.tensor(final_img_np_rgb).float().to(device) 1303 | 1304 | final_img_tensor = final_img_tensor.unsqueeze(0) 1305 | 1306 | if final_img_tensor.max() > 1.0: 1307 | final_img_tensor /= 255.0 1308 | 1309 | return (final_img_tensor, ) 1310 | 1311 | class MaskToRegion: 1312 | def __init__(self): 1313 | pass 1314 | 1315 | @classmethod 1316 | def INPUT_TYPES(cls): 1317 | return { 1318 | "required": { 1319 | "image": ("IMAGE",), 1320 | "mask": ("IMAGE",), 1321 | "force_resize_width": ("INT", {"default": 1024, "min": 0, "max": VERY_BIG_SIZE, "step": 1}), 1322 | "force_resize_height": ("INT", {"default": 1024, "min": 0, "max": VERY_BIG_SIZE, "step": 1}), 1323 | "kind": (["mask", "RGB", "RGBA"],), 1324 | "padding": ("INT", {"default": 3, "min": 0, "max": VERY_BIG_SIZE, "step": 1}), 1325 | "constraints": (["keep_ratio", "keep_ratio_divisible", "multiple_of", "ignore"],), 1326 | "constraint_x": ("INT", {"default": 64, "min": 2, "max": VERY_BIG_SIZE, "step": 1}), 1327 | "constraint_y": ("INT", {"default": 64, "min": 2, "max": VERY_BIG_SIZE, "step": 1}), 1328 | "min_width": ("INT", {"default": 0, "min": 0, "max": VERY_BIG_SIZE, "step": 1}), 1329 | "min_height": ("INT", {"default": 0, "min": 0, "max": VERY_BIG_SIZE, "step": 1}), 1330 | "batch_behavior": (["match_ratio", "match_size"],), 1331 | }, 1332 | "optional": { 1333 | "mask_mapping_optional": ("MASK_MAPPING",), 1334 | }, 1335 | } 1336 | 1337 | RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", ) 1338 | RETURN_NAMES = ("cut image", "cut mask", "region") 1339 | FUNCTION = "get_region" 1340 | 1341 | CATEGORY = "I2I" 1342 | 1343 | def get_region(self, image, mask, 
force_resize_width, force_resize_height, kind, padding, constraints, constraint_x, constraint_y, min_width, min_height, batch_behavior, mask_mapping_optional = None): 1344 | mask2 = tensor2mask(mask) 1345 | mask_size = mask2.size() 1346 | mask_width = int(mask_size[2]) 1347 | mask_height = int(mask_size[1]) 1348 | 1349 | # masks_to_boxes errors if the tensor is all zeros, so we'll add a single pixel and zero it out at the end 1350 | is_empty = ~torch.gt(torch.max(torch.reshape(mask2,[mask_size[0], mask_width * mask_height]), dim=1).values, 0.) 1351 | mask2[is_empty,0,0] = 1. 1352 | boxes = masks_to_boxes(mask2) 1353 | mask2[is_empty,0,0] = 0. 1354 | 1355 | # Account for padding 1356 | min_x = torch.max(boxes[:,0] - padding, torch.tensor(0.)) 1357 | min_y = torch.max(boxes[:,1] - padding, torch.tensor(0.)) 1358 | max_x = torch.min(boxes[:,2] + padding, torch.tensor(mask_width)) 1359 | max_y = torch.min(boxes[:,3] + padding, torch.tensor(mask_height)) 1360 | 1361 | width = max_x - min_x 1362 | height = max_y - min_y 1363 | 1364 | # Make sure the width and height are big enough 1365 | target_width = torch.max(width, torch.tensor(min_width)) 1366 | target_height = torch.max(height, torch.tensor(min_height)) 1367 | 1368 | if constraints == "keep_ratio": 1369 | target_width = torch.max(target_width, target_height * constraint_x // constraint_y) 1370 | target_height = torch.max(target_height, target_width * constraint_y // constraint_x) 1371 | elif constraints == "keep_ratio_divisible": 1372 | # Probably a more efficient way to do this, but given the bounds it's not too bad 1373 | max_factors = torch.min(constraint_x // target_width, constraint_y // target_height) 1374 | max_factor = int(torch.max(max_factors).item()) 1375 | for i in range(1, max_factor+1): 1376 | divisible = constraint_x % i == 0 and constraint_y % i == 0 1377 | if divisible: 1378 | big_enough = ~torch.lt(target_width, constraint_x // i) * ~torch.lt(target_height, constraint_y // i) 1379 | target_width[big_enough] = constraint_x // i 1380 | target_height[big_enough] = constraint_y // i 1381 | elif constraints == "multiple_of": 1382 | target_width[torch.gt(target_width % constraint_x, 0)] = (target_width // constraint_x + 1) * constraint_x 1383 | target_height[torch.gt(target_height % constraint_y, 0)] = (target_height // constraint_y + 1) * constraint_y 1384 | 1385 | if batch_behavior == "match_size": 1386 | target_width[:] = torch.max(target_width) 1387 | target_height[:] = torch.max(target_height) 1388 | elif batch_behavior == "match_ratio": 1389 | # We'll target the ratio that's closest to 1:1, but don't want to take into account empty masks 1390 | ratios = torch.abs(target_width / target_height - 1) 1391 | ratios[is_empty] = 10000 1392 | match_ratio = torch.min(ratios,dim=0).indices.item() 1393 | target_width = torch.max(target_width, target_height * target_width[match_ratio] // target_height[match_ratio]) 1394 | target_height = torch.max(target_height, target_width * target_height[match_ratio] // target_width[match_ratio]) 1395 | 1396 | missing = target_width - width 1397 | min_x = min_x - missing // 2 1398 | max_x = max_x + (missing - missing // 2) 1399 | 1400 | missing = target_height - height 1401 | min_y = min_y - missing // 2 1402 | max_y = max_y + (missing - missing // 2) 1403 | 1404 | # Move the region into range if needed 1405 | bad = torch.lt(min_x,0) 1406 | max_x[bad] -= min_x[bad] 1407 | min_x[bad] = 0 1408 | 1409 | bad = torch.lt(min_y,0) 1410 | max_y[bad] -= min_y[bad] 1411 | min_y[bad] = 0 1412 | 1413 | bad = 
torch.gt(max_x, mask_width) 1414 | min_x[bad] -= (max_x[bad] - mask_width) 1415 | max_x[bad] = mask_width 1416 | 1417 | bad = torch.gt(max_y, mask_height) 1418 | min_y[bad] -= (max_y[bad] - mask_height) 1419 | max_y[bad] = mask_height 1420 | 1421 | region = torch.zeros((mask_size[0], mask_height, mask_width)) 1422 | for i in range(0, mask_size[0]): 1423 | if not is_empty[i]: 1424 | ymin = int(min_y[i].item()) 1425 | ymax = int(max_y[i].item()) 1426 | xmin = int(min_x[i].item()) 1427 | xmax = int(max_x[i].item()) 1428 | region[i, ymin:ymax+1, xmin:xmax+1] = 1 1429 | 1430 | Cut_Image = CutByMask(image, region, force_resize_width, force_resize_height, mask_mapping_optional) 1431 | 1432 | #Change Channels >>>> OUTPUT TO VAE ENCODE 1433 | if kind == "mask": 1434 | Cut_Image = tensor2mask(Cut_Image) 1435 | elif kind == "RGBA": 1436 | Cut_Image = tensor2rgba(Cut_Image) 1437 | else: # RGB 1438 | Cut_Image = tensor2rgb(Cut_Image) 1439 | 1440 | Cut_Mask = CutByMask(mask, region, force_resize_width, force_resize_height, mask_mapping_optional = None) 1441 | 1442 | return (Cut_Image, Cut_Mask, region, ) 1443 | 1444 | class Combine_And_Paste_Op: 1445 | def __init__(self): 1446 | pass 1447 | 1448 | @classmethod 1449 | def INPUT_TYPES(cls): 1450 | return { 1451 | "required": { 1452 | "decoded_vae": ("IMAGE",), 1453 | "Original_Image": ("IMAGE",), 1454 | "Cut_Image": ("IMAGE",), 1455 | "Cut_Mask": ("IMAGE",), 1456 | "region": ("IMAGE",), 1457 | "color_xfer_factor": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 2.0, "step": 0.1}), 1458 | "op": (["union (max)", "intersection (min)", "difference", "multiply", "multiply_alpha", "add", "greater_or_equal", "greater"],), 1459 | "clamp_result": (["yes", "no"],), 1460 | "round_result": (["no", "yes"],), 1461 | "resize_behavior": (["resize", "keep_ratio_fill", "keep_ratio_fit", "source_size", "source_size_unmasked"],), 1462 | }, 1463 | "optional": { 1464 | "mask_mapping_optional": ("MASK_MAPPING",), 1465 | }, 1466 | } 1467 | 1468 | RETURN_TYPES = ("IMAGE", ) 1469 | RETURN_NAMES = ("FinalOut", ) 1470 | FUNCTION = "com_paste_op" 1471 | 1472 | CATEGORY = "I2I" 1473 | 1474 | def com_paste_op(self, decoded_vae, Original_Image, Cut_Image, Cut_Mask, region, color_xfer_factor, op, clamp_result, round_result, resize_behavior, mask_mapping_optional = None): 1475 | 1476 | Combined_Decoded = combine(decoded_vae, Cut_Mask, op, clamp_result, round_result) 1477 | 1478 | Combined_Originals = combine(Cut_Image, Cut_Mask, op, clamp_result, round_result) 1479 | 1480 | Cx_Decoded = apply_color_correction(Combined_Decoded, Combined_Originals, color_xfer_factor) 1481 | 1482 | Cx_Decode_Mask = combine(Cx_Decoded, Cut_Mask, op, clamp_result, round_result) 1483 | 1484 | FinalOut = PasteByMask(Original_Image, Cx_Decode_Mask, region, resize_behavior, mask_mapping_optional) 1485 | 1486 | return (FinalOut, ) 1487 | 1488 | NODE_CLASS_MAPPINGS = { 1489 | "Color Transfer": Color_Correction, 1490 | "Mask Ops": Mask_Ops, 1491 | "Inpaint Segments": MaskToRegion, 1492 | "Combine and Paste": Combine_And_Paste_Op, 1493 | } 1494 | 1495 | NODE_DISPLAY_NAME_MAPPINGS = { 1496 | "Color Transfer": "Color Transfer", 1497 | "Mask Ops": "Mask Ops", 1498 | "Inpaint Segments": "Inpaint Segments", 1499 | "Combine and Paste": "Combine and Paste", 1500 | } 1501 | -------------------------------------------------------------------------------- /Guide_Images/Blend_Contrast.JPG: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/Blend_Contrast.JPG -------------------------------------------------------------------------------- /Guide_Images/Blend_Percentage.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/Blend_Percentage.JPG -------------------------------------------------------------------------------- /Guide_Images/Blur.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/Blur.JPG -------------------------------------------------------------------------------- /Guide_Images/Channel.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/Channel.JPG -------------------------------------------------------------------------------- /Guide_Images/Color_Transfer.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/Color_Transfer.JPG -------------------------------------------------------------------------------- /Guide_Images/Combine_and_Paste.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/Combine_and_Paste.JPG -------------------------------------------------------------------------------- /Guide_Images/ComfyShop/Access.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/ComfyShop/Access.jpg -------------------------------------------------------------------------------- /Guide_Images/ComfyShop/Brush_Menu.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/ComfyShop/Brush_Menu.jpg -------------------------------------------------------------------------------- /Guide_Images/ComfyShop/RGB-A.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/ComfyShop/RGB-A.jpg -------------------------------------------------------------------------------- /Guide_Images/ComfyShop/Save_To_Node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/ComfyShop/Save_To_Node.jpg -------------------------------------------------------------------------------- /Guide_Images/ComfyShop/Saving_Outputs.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/ComfyShop/Saving_Outputs.jpg -------------------------------------------------------------------------------- /Guide_Images/ComfyShop/Zoom_In.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/ComfyShop/Zoom_In.jpg -------------------------------------------------------------------------------- /Guide_Images/ComfyShop/Zoom_Out.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/ComfyShop/Zoom_Out.jpg -------------------------------------------------------------------------------- /Guide_Images/Inpaint_Segments.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/Inpaint_Segments.JPG -------------------------------------------------------------------------------- /Guide_Images/Invert.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/Invert.JPG -------------------------------------------------------------------------------- /Guide_Images/ShrinkGrow.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/ShrinkGrow.JPG -------------------------------------------------------------------------------- /Guide_Images/Use-Mask-1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/Use-Mask-1.jpg -------------------------------------------------------------------------------- /Guide_Images/Use-Mask-2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/Use-Mask-2.jpg -------------------------------------------------------------------------------- /Guide_Images/Use_Text_Sigma 1.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/Use_Text_Sigma 1.JPG -------------------------------------------------------------------------------- /Guide_Images/Use_Text_Sigma 2.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/Use_Text_Sigma 2.JPG -------------------------------------------------------------------------------- /Guide_Images/V2/ColorXferworkflow.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/V2/ColorXferworkflow.JPG -------------------------------------------------------------------------------- /Guide_Images/V2/blur_amount.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/V2/blur_amount.JPG -------------------------------------------------------------------------------- /Guide_Images/V2/blur_radius.JPG: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/V2/blur_radius.JPG -------------------------------------------------------------------------------- /Guide_Images/V2/brightness.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/V2/brightness.JPG -------------------------------------------------------------------------------- /Guide_Images/V2/contrast.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/V2/contrast.JPG -------------------------------------------------------------------------------- /Guide_Images/V2/empty_mask.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/V2/empty_mask.JPG -------------------------------------------------------------------------------- /Guide_Images/V2/gamma.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/V2/gamma.JPG -------------------------------------------------------------------------------- /Guide_Images/V2/masked_xfer.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/V2/masked_xfer.JPG -------------------------------------------------------------------------------- /Guide_Images/V2/multi_xfer.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/V2/multi_xfer.JPG -------------------------------------------------------------------------------- /Guide_Images/V2/no_of_colors.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/V2/no_of_colors.JPG -------------------------------------------------------------------------------- /Guide_Images/V2/separate_mask.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/V2/separate_mask.JPG -------------------------------------------------------------------------------- /Guide_Images/V2/strength.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/V2/strength.JPG -------------------------------------------------------------------------------- /Guide_Images/V2/xfer_across_channels.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/V2/xfer_across_channels.JPG -------------------------------------------------------------------------------- /Guide_Images/Workflow.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/Guide_Images/Workflow.jpg -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | # By ManglerFTW (Discord: ManglerFTW) 2 | # 3 | # Copyright 2023 Peter Mango (ManglerFTW) 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to 6 | # deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 | # and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 8 | # 9 | # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 10 | # 11 | # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 12 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 13 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 14 | # THE SOFTWARE. 15 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ComfyI2I 2 | 3 | A set of custom nodes to perform image to image functions in ComfyUI. 4 | 5 | If you find these nodes useful, please consider contributing to its further development. All donations are much appreciated. [Buy me a coffee](https://bmc.link/ManglerFTW) 6 | 7 | ## ComfyShop Small Bugfix and Reframe Update - 9/30/2023 8 | - Fixed a bug that was causing a loss of image quality when saved. 9 | - Large images should handle better as you now have 2 framing options: the 'f' hotkey will frame to full scale and shift + f will now fit to screen. 10 | - You can now close the brush menu with right click in addition to closing it by drawing on the canvas. 11 | 12 | ## ComfyShop Update - 9/25/2023 13 | 14 | ## New Features 15 | ComfyShop has been introduced to the ComfyI2I family. Enjoy a comfortable and intuitive painting app. ComfyShop phase 1 is to establish the basic painting features for ComfyUI. 16 | To open ComfyShop, simply right click on any image node that outputs an image and mask and you will see the ComfyShop option much in the same way you would see MaskEditor. 17 | Current features of ComfyShop include: 18 | # 19 | - Paint both mask ('greyscale') and color and output accordingly. 20 | - Controls (shift + right click) for brush size (mouse wheel), opacity (shift + alt + mouse wheel), softness (alt + mouse wheel), and color.
21 | - Undo (ctrl + z) / Redo (shift + ctrl + z) 22 | - Zoom In/Out (ctrl + space + mouse move left/right) 23 | - Pan (ctrl + space + click and drag image canvas with mouse) 24 | - Autofocus (f) 25 | 26 | ## V2 Update - 9/2/2023 27 | 28 | ## New Features 29 | - Mask_Ops node will now output the whole image if mask = None and use_text = 0 30 | - Mask_Ops node now has a separate_mask option that, if set to 0, will keep all mask islands in 1 image vs separating them into their own images when set to 1 (use 0 for color transfer) 31 | - New Color Transfer and Multi-Color Transfer Workflows added 32 | - Significantly improved Color_Transfer node 33 | - Extract up to 256 colors from each image (generally between 5-20 is fine), then segment the source image by the extracted palette and replace the colors in each segment 34 | - Set a blur on the created segments 35 | - Control the strength of the color transfer function 36 | - Controls for Gamma, Contrast, and Brightness 37 | 38 | ## Installation 39 | If you're running on Linux, or a non-admin account on Windows, you'll want to ensure /ComfyUI/custom_nodes, ComfyUI_I2I, and ComfyI2I.py have write permissions. 40 | 41 | There is an install.bat you can run that installs to the portable build if one is detected. Otherwise it will default to the system install and assume you followed ComfyUI's manual installation steps. 42 | 43 | Navigate to your /ComfyUI/custom_nodes/ folder 44 | Run git clone https://github.com/ManglerFTW/ComfyI2I/ 45 | Navigate to your ComfyUI_I2I folder 46 | Run pip install -r requirements.txt 47 | Start ComfyUI 48 | Tools will be located in the I2I menu. 49 | 50 | ## Features: 51 | 52 | ### ComfyShop 53 | #### Access ComfyShop: 54 | Right click on any image node that has 'IMAGE' and 'MASK' as outputs and select ComfyShop in the pop-up menu. 55 | 56 | Access ComfyShop 57 | 58 | #### Brush Menu: 59 | To access the brush menu, press shift and the right mouse button anywhere on the canvas. Brush settings can also be changed with keyboard shortcuts, which is recommended. 60 | 61 | Brush Menu 62 | 63 | #### Zoom In/Out: 64 | Gain more control over your image editing with the ability to zoom in and out, pan, and autofocus your canvas. 65 | 66 | Zoom Out 67 | Zoom Out 68 | 69 | #### Save to node: 70 | Save to node will save anything painted in greyscale to the 'MASK' output and anything painted in RGB mode to the 'IMAGE' output. 71 | 72 | Zoom Out 73 | 74 | ## 75 | 76 | ### Color Transfer Node (improved for V2) 77 | This is a standalone node that can take the colors of one image and transfer them to another. 78 | 79 | ### Variables: 80 | #### No_of_Colors: 81 | Choose the number of colors you would like to extract from each image. Usually between 5-20 is fine. For smaller masked regions you can bring it down to 1-5 and it will only extract those that are most dominant. 82 | 83 | no_of_colors 84 | 85 | #### Blur_Radius and Blur_Amount: 86 | These controls affect how the edges of the separated color segments in the source image are blurred. 87 | 88 | blur_radius 89 | 90 | #### Strength: 91 | Adjust Strength to control how weak or strong you would like the color transfer effect to be. 92 | 93 | strength 94 | 95 | #### Gamma: 96 | Adjust Gamma to control the gamma of the resulting image. 97 | 98 | gamma 99 | 100 | #### Contrast: 101 | Adjust Contrast to control the contrast of the resulting image. 102 | 103 | contrast 104 | 105 | #### Brightness: 106 | Adjust Brightness to control the brightness of the resulting image.
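To make these three controls concrete, here is a minimal, illustrative sketch (not the node's actual implementation) of how gamma, contrast, and brightness adjustments are commonly applied to an image tensor. The function name and the assumption of float images in the 0-1 range are only for this example.

```python
# Illustrative sketch only - not the ComfyI2I code.
# Assumes `image` is a float tensor in [0, 1], e.g. shape (B, H, W, C).
import torch

def adjust_image(image: torch.Tensor, gamma: float = 1.0,
                 contrast: float = 1.0, brightness: float = 1.0) -> torch.Tensor:
    out = image.clamp(0.0, 1.0) ** gamma      # gamma: non-linear tone curve
    out = (out - 0.5) * contrast + 0.5        # contrast: scale around mid-grey
    out = out * brightness                    # brightness: linear gain
    return out.clamp(0.0, 1.0)
```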
107 | 108 | brightness 109 | 110 | #### Masked Color Transfer: 111 | The Color Transfer node now works with masked regions, giving you more control over which areas to transfer color to. 112 | 113 | masked_xfer 114 | 115 | #### Interoperability with the Mask_Ops node: 116 | The Color Transfer node works well with masks created with the Mask_Ops node. The example below shows the color transfer difference across each individual R, G, and B channel. 117 | 118 | xfer_across_channels 119 | 120 | ## 121 | 122 | ### Mask Ops Node (improved for V2) 123 | The Mask Ops node performs various mask operations on a mask created either from an image or a text prompt. 124 | 125 | ### Variables: 126 | #### Separate_Mask: 127 | The Separate_Mask option will tell the node whether to separate the mask by each island, or keep all islands in 1 image. Use 0 if you want to connect to the Color Transfer node. 128 | 129 | separate_mask 130 | 131 | #### Text: 132 | Type a prompt to create a mask from your image (make sure use_text is set to 1). 133 | 134 | #### Text_Sigma: 135 | The sigma factor can smooth out a mask that has been created from text. The model being used is clipseg and it might not always come out perfectly from the start. You can sometimes adjust sigma to smooth out errors. 136 | 137 | Sigma 0 138 | Sigma 90 139 | 140 | #### Use_Text: 141 | 0 to input a mask and 1 to use a text prompt. 142 | 143 | #### Blend_Percentage: 144 | You can adjust this parameter to blend your solid mask with a black and white image of what's underneath it. 145 | 146 | Blend Percentage 147 | 148 | #### Black Level, Mid Level, White Level: 149 | Adjust these settings to change the levels of your mask. 150 | 151 | Levels 152 | 153 | #### Channel: 154 | Affect the red, green, or blue channel of the underlying image. 155 | 156 | #### Shrink_Grow: 157 | Shrink or grow your mask using these settings. 158 | 159 | Shrink_Grow 160 | 161 | #### Invert: 162 | Invert your mask. 163 | 164 | Invert 165 | 166 | #### Blur_Radius: 167 | Blur your mask. 168 | 169 | Blur_Radius 170 | 171 | ## 172 | 173 | ### Inpaint Segments Node 174 | This node will essentially segment and crop your mask and your image based on the mapped bounding boxes of each mask, then upscale them to 1024x1024 or a custom size of your choice. The images then go to a VAE Encode node to be processed. 175 | 176 | Inpaint Segments 177 | 178 | ## 179 | 180 | ### Combine and Paste Node 181 | The Combine and Paste node will take the new images from the VAE Decode node, resize them to the bounding boxes of your mask, and paste them over the original image. Use color_xfer_factor to adjust the effects of the color transfer. A simplified sketch of this crop-and-paste round trip is shown after the workflow descriptions below. 182 | 183 | Combine and Paste 184 | 185 | ## 186 | 187 | ### Workflow 188 | A basic workflow with all of the nodes combined has been included in the workflows directory under I2I workflow.json. Use this as a reference to see how they are all connected. 189 | 190 | Workflow 191 | 192 | ### Color Transfer Workflow 193 | A basic workflow for Color Transfer has been included in the workflows directory under Color Xfer Workflow.json. Use this as a reference to see how it works. 194 | 195 | ColorXferworkflow 196 | 197 | ### Multi Color Transfer Workflow 198 | A basic workflow for Multi-Color Transfer has been included in the workflows directory under Multi_XFer_Workflow.json. Use this as a reference to see how it works.
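As referenced in the Combine and Paste section above, the following is a minimal, illustrative sketch (not the actual node code) of the crop-by-bounding-box, upscale, and paste-back round trip that Inpaint Segments and Combine and Paste perform. The function names and the NumPy float-image conventions are assumptions made only for this example.

```python
# Illustrative sketch only - not the ComfyI2I implementation.
# Assumes float images in [0, 1] with shape (H, W, C) and a float mask of shape (H, W).
import numpy as np
import cv2

def crop_masked_region(image: np.ndarray, mask: np.ndarray, size: int = 1024):
    """Crop the mask's bounding box out of the image and upscale it for processing."""
    ys, xs = np.nonzero(mask > 0)
    y0, y1, x0, x1 = ys.min(), ys.max() + 1, xs.min(), xs.max() + 1
    crop = cv2.resize(image[y0:y1, x0:x1], (size, size), interpolation=cv2.INTER_LANCZOS4)
    return crop, (y0, y1, x0, x1)

def paste_processed_region(original: np.ndarray, processed: np.ndarray,
                           mask: np.ndarray, box) -> np.ndarray:
    """Resize the processed crop back to the bounding box and composite it over the original."""
    y0, y1, x0, x1 = box
    patch = cv2.resize(processed, (x1 - x0, y1 - y0), interpolation=cv2.INTER_LANCZOS4)
    alpha = mask[y0:y1, x0:x1, None].astype(np.float32)
    out = original.copy()
    out[y0:y1, x0:x1] = patch * alpha + out[y0:y1, x0:x1] * (1.0 - alpha)
    return out
```

In the actual workflow, the cropped region is what goes through VAE Encode, sampling, and VAE Decode before being pasted back over the original image.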
199 | 200 | Multi ColorXferworkflow 201 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | @author: ManglerFTW 3 | @title: ComfyI2I 4 | """ 5 | 6 | import shutil 7 | import folder_paths 8 | import os 9 | import sys 10 | 11 | comfy_path = os.path.dirname(folder_paths.__file__) 12 | comfyi2i_path = os.path.join(os.path.dirname(__file__)) 13 | 14 | def setup_js(): 15 | # remove garbage 16 | old_js_path = os.path.join(comfy_path, "web", "extensions", "core", "ComfyShop.js") 17 | if os.path.exists(old_js_path): 18 | os.remove(old_js_path) 19 | 20 | old_ip_path = os.path.join(comfy_path, "web", "extensions", "core", "imageProcessorWorker.js") 21 | if os.path.exists(old_ip_path): 22 | os.remove(old_ip_path) 23 | 24 | # setup js 25 | js_dest_path = os.path.join(comfy_path, "web", "extensions", "ComfyI2I") 26 | if not os.path.exists(js_dest_path): 27 | os.makedirs(js_dest_path) 28 | 29 | js_src_path = os.path.join(comfyi2i_path, "js", "ComfyShop.js") 30 | shutil.copy(js_src_path, js_dest_path) 31 | 32 | # setup ip 33 | ip_dest_path = os.path.join(comfy_path, "web", "extensions", "ComfyI2I") 34 | if not os.path.exists(ip_dest_path): 35 | os.makedirs(ip_dest_path) 36 | 37 | ip_src_path = os.path.join(comfyi2i_path, "js", "imageProcessorWorker.js") 38 | shutil.copy(ip_src_path, ip_dest_path) 39 | 40 | setup_js() 41 | 42 | 43 | 44 | from .ComfyI2I import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS 45 | 46 | __all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS'] 47 | -------------------------------------------------------------------------------- /__pycache__/ComfyI2I.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/__pycache__/ComfyI2I.cpython-310.pyc -------------------------------------------------------------------------------- /__pycache__/__init__.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ManglerFTW/ComfyI2I/2bd011b043f87023a2d86a82cd23d594a9793e10/__pycache__/__init__.cpython-310.pyc -------------------------------------------------------------------------------- /install.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | 3 | set "requirements_txt=%~dp0\requirements.txt" 4 | set "python_exec=......\python_embeded\python.exe" 5 | 6 | echo Installing ComfyI2I ... 
7 | 8 | if exist "%python_exec%" ( 9 | echo Installing with ComfyUI Portable 10 | %python_exec% -s -m pip install -r %requirements_txt% 11 | ) else ( 12 | echo Installing with system Python 13 | pip install -r %requirements_txt% 14 | ) 15 | 16 | pause 17 | -------------------------------------------------------------------------------- /js/ComfyShop.js: -------------------------------------------------------------------------------- 1 | import { app } from "../../scripts/app.js"; 2 | import { ComfyDialog, $el } from "../../scripts/ui.js"; 3 | import { ComfyApp } from "../../scripts/app.js"; 4 | import { api } from "../../scripts/api.js" 5 | import { ClipspaceDialog } from "../../extensions/core/clipspace.js"; 6 | 7 | //// 8 | function addMenuHandler(nodeType, cb) { 9 | const getOpts = nodeType.prototype.getExtraMenuOptions; 10 | nodeType.prototype.getExtraMenuOptions = function () { 11 | const r = getOpts.apply(this, arguments); 12 | cb.apply(this, arguments); 13 | return r; 14 | }; 15 | } 16 | 17 | async function uploadMask(filepath, formData) { 18 | await api.fetchApi('/upload/mask', { 19 | method: 'POST', 20 | body: formData 21 | }).then(response => {}).catch(error => { 22 | console.error('Error:', error); 23 | }); 24 | 25 | ComfyApp.clipspace.imgs[ComfyApp.clipspace['selectedIndex']] = new Image(); 26 | ComfyApp.clipspace.imgs[ComfyApp.clipspace['selectedIndex']].src = api.apiURL("/view?" + new URLSearchParams(filepath).toString() + app.getPreviewFormatParam()); 27 | if(ComfyApp.clipspace.images) 28 | ComfyApp.clipspace.images[ComfyApp.clipspace['selectedIndex']] = filepath; 29 | 30 | ClipspaceDialog.invalidatePreview(); 31 | } 32 | 33 | // Helper function to convert a data URL to a Blob object 34 | function dataURLToBlob(dataURL) { 35 | const parts = dataURL.split(';base64,'); 36 | const contentType = parts[0].split(':')[1]; 37 | const byteString = atob(parts[1]); 38 | const arrayBuffer = new ArrayBuffer(byteString.length); 39 | const uint8Array = new Uint8Array(arrayBuffer); 40 | for (let i = 0; i < byteString.length; i++) { 41 | uint8Array[i] = byteString.charCodeAt(i); 42 | } 43 | return new Blob([arrayBuffer], { type: contentType }); 44 | } 45 | 46 | 47 | function getActualCoordinates(event, element, zoomLevel) { 48 | const rect = element.getBoundingClientRect(); 49 | const offsetX = (event.clientX - rect.left) / zoomLevel; 50 | const offsetY = (event.clientY - rect.top) / zoomLevel; 51 | return { x: offsetX, y: offsetY }; 52 | } 53 | 54 | function loadedImageToBlob(image) { 55 | return new Promise((resolve, reject) => { 56 | const canvas = document.createElement('canvas'); 57 | 58 | canvas.width = image.width; 59 | canvas.height = image.height; 60 | 61 | const ctx = canvas.getContext('2d', { willReadFrequently: true }); 62 | 63 | image.onload = () => { 64 | ctx.drawImage(image, 0, 0); 65 | 66 | const dataURL = canvas.toDataURL('image/png', 1); 67 | 68 | const blob = dataURLToBlob(dataURL); 69 | resolve(blob); // Resolving the promise with the blob 70 | }; 71 | 72 | image.onerror = () => { 73 | console.error('Error loading image'); 74 | reject(new Error('Error loading image')); // Rejecting the promise with an error 75 | }; 76 | }); 77 | } 78 | 79 | 80 | function prepareRGB(image, backupCanvas, backupCtx) { 81 | // paste mask data into alpha channel 82 | backupCtx.drawImage(image, 0, 0, backupCanvas.width, backupCanvas.height); 83 | const backupData = backupCtx.getImageData(0, 0, backupCanvas.width, backupCanvas.height); 84 | 85 | // refine mask image 86 | for (let i = 0; i < 
backupData.data.length; i += 4) { 87 | if(backupData.data[i+3] == 255) 88 | backupData.data[i+3] = 0; 89 | else 90 | backupData.data[i+3] = 255; 91 | 92 | backupData.data[i] = 0; 93 | backupData.data[i+1] = 0; 94 | backupData.data[i+2] = 0; 95 | } 96 | 97 | backupCtx.globalCompositeOperation = 'source-over'; 98 | backupCtx.putImageData(backupData, 0, 0); 99 | } 100 | 101 | class PaintAction { 102 | constructor(previousImageData, actionType, points, color, canvasType) { 103 | this.previousImageData = previousImageData; 104 | this.actionType = actionType; 105 | this.points = points; 106 | this.color = color; 107 | this.canvasType = canvasType; // 'imgCanvas' or 'maskCanvas' 108 | } 109 | } 110 | 111 | class ComfyShopDialog extends ComfyDialog { 112 | static instance = null; 113 | 114 | static getInstance() { 115 | if(!ComfyShopDialog.instance) { 116 | ComfyShopDialog.instance = new ComfyShopDialog(app); 117 | } 118 | 119 | return ComfyShopDialog.instance; 120 | } 121 | 122 | is_layout_created = false; 123 | 124 | constructor() { 125 | if(ComfyShopDialog.instance) { 126 | throw new Error("Use ComfyShopDialog.getInstance() to get an instance of this class."); 127 | } 128 | 129 | super(); 130 | this.eventHandlers = { 131 | contextmenu: (event) => { event.preventDefault(); }, 132 | wheel: (event) => this.handleWheelEvent(this, event), 133 | keydown: (event) => this.handleKeyDown(this, event), 134 | keyup: (event) => this.handleKeyUp(this, event), 135 | pointerdown: (event) => this.handlePointerDown(this, event), 136 | pointerup: (event) => ComfyShopDialog.handlePointerUp(event), 137 | pointermove: (event) => this.draw_move(this, event), 138 | touchmove: (event) => this.draw_move(this, event), 139 | pointerout: (event) => this.handlePointerOut(event), 140 | pointerover: (event) => this.handlePointerOver(event), 141 | documentPointerMove: (event) => ComfyShopDialog.getInstance().handlePointerMove(event), 142 | childContainerMouseMove: (event) => this.handlePointerMove(event, this.childContainer) 143 | }; 144 | 145 | this.rgbBlob = null; 146 | this.rgbItem = null; 147 | 148 | const styleSheet = document.createElement("style"); 149 | styleSheet.type = "text/css"; 150 | styleSheet.innerText = '.brush-preview { transition: opacity 0.1s ease; }'; // Adjust the transition time to a smaller value to reduce flickering 151 | document.head.appendChild(styleSheet); 152 | 153 | this.isGreyscale = 'greyscale' 154 | var brush = document.createElement("div"); 155 | brush.id = "brush"; 156 | brush.style.backgroundColor = "transparent"; 157 | brush.style.outline = "1px dashed black"; 158 | brush.style.boxShadow = "0 0 0 1px white"; 159 | brush.style.borderRadius = "50%"; 160 | brush.style.MozBorderRadius = "50%"; 161 | brush.style.WebkitBorderRadius = "50%"; 162 | brush.style.position = "absolute"; 163 | brush.style.zIndex = 8889; 164 | brush.style.pointerEvents = "none"; 165 | this.brush_softness = 0.1 166 | this.brushColor = "#000000"; // Default brush color 167 | this.lastRGBColor = "#000000"; 168 | this.isAltDown = false; 169 | this.isCtrlDown = false; 170 | this.isSpaceDown = false; 171 | this.initialMouseX = null; 172 | this.initialMouseY = null; 173 | this.previousMousePosition = { x: 0, y: 0 }; 174 | this.zoomLevel = 1.0; 175 | this.zoomCenterX = 0; 176 | this.zoomCenterY = 0; 177 | this.brush = brush; 178 | this.brushVisible = true; 179 | this.brushOpacity = 1.0; // a value between 0.0 and 1.0 180 | this.brush_size = 50; // Initialize brush size 181 | this.contextMenu = this.createContextMenu(); 182 | 
this.isDragging = false; 183 | this.dragOffsetX = 0; 184 | this.dragOffsetY = 0; 185 | this.initialOffsetX = 0; 186 | this.initialOffsetY = 0; 187 | 188 | this.lastPointerMoveTime = 0; 189 | this.pointerMoveThrottleTime = 16; // roughly 60 fps 190 | 191 | this.handleKeyDown = this.handleKeyDown.bind(this); 192 | this.handleKeyUp = this.handleKeyUp.bind(this); 193 | 194 | document.removeEventListener('keydown', this.handleKeyDown); 195 | document.removeEventListener('keyup', this.handleKeyUp); 196 | 197 | document.addEventListener('keydown', this.handleKeyDown); 198 | document.addEventListener('keyup', this.handleKeyUp); 199 | 200 | this.isDrawing = false; 201 | 202 | this.currentAction = null; 203 | this.rgbActionStack = []; 204 | this.rgbUndoStack = []; 205 | this.greyActionStack = []; 206 | this.greyUndoStack = []; 207 | 208 | 209 | this.left = "0"; 210 | this.top = "0"; 211 | 212 | this.element = $el("div.comfy-modal", { parent: document.body }, 213 | [ $el("div.comfy-modal-content", 214 | [...this.createButtons()]), 215 | ]); 216 | } 217 | 218 | createButtons() { 219 | return []; 220 | } 221 | 222 | createButton(name, callback) { 223 | var button = document.createElement("button"); 224 | button.innerText = name; 225 | button.addEventListener("click", callback); 226 | return button; 227 | } 228 | 229 | createLeftButton(name, callback) { 230 | var button = this.createButton(name, callback); 231 | button.style.cssFloat = "left"; 232 | button.style.marginRight = "4px"; 233 | return button; 234 | } 235 | 236 | createRightButton(name, callback) { 237 | var button = this.createButton(name, callback); 238 | button.style.cssFloat = "right"; 239 | button.style.marginLeft = "4px"; 240 | return button; 241 | } 242 | 243 | createContextMenu() { 244 | // Create a div element for the context menu 245 | const contextMenu = document.createElement('div'); 246 | contextMenu.id = 'canvasContextMenu'; 247 | contextMenu.style.position = 'absolute'; 248 | contextMenu.style.display = 'none'; 249 | contextMenu.style.zIndex = '99999'; // Set a high z-index value 250 | 251 | // Create a div for the dark grey parent box 252 | const menuBox = document.createElement('div'); 253 | menuBox.style.backgroundColor = '#333'; // Dark grey background color 254 | menuBox.style.padding = '5px'; // Add some padding for spacing 255 | menuBox.style.borderRadius = '5px'; // Rounded border 256 | menuBox.style.border = '2px solid #777'; // 4px light grey border 257 | 258 | const colorPickerWrapper = this.createColorPickerWrapper(); 259 | 260 | menuBox.appendChild(colorPickerWrapper); 261 | 262 | const thicknessSlider = this.createSlider("Thickness", (event) => { 263 | // Update brush size 264 | this.brush_size = event.target.value; 265 | // Handle any additional logic related to brush size change 266 | 267 | // Update the brush preview 268 | this.updateBrushPreview(this); 269 | }, this.brush_size); 270 | thicknessSlider.id = "thicknessSlider"; 271 | 272 | const opacitySlider = this.createSlider("Opacity", (event) => { 273 | // Update brush opacity (dividing by 100 to get a value between 0 and 1) 274 | this.brushOpacity = event.target.value / 100; 275 | 276 | // Update the brush preview 277 | this.updateBrushPreview(this); 278 | }, this.brushOpacity * 100); // Note the multiplication here to set the initial slider position 279 | opacitySlider.id = "opacitySlider"; 280 | 281 | // Create a div for the "Softness" slider 282 | const softnessSlider = this.createSlider("Softness", (event) => { 283 | // Update brush softness 284 | const 
sliderValue = parseFloat(event.target.value); // assuming the slider value is between 0 and 1 285 | this.brush_softness = sliderValue * 1; // map the slider value to the range [0, 1] 286 | 287 | // Handle any additional logic related to brush softness change 288 | 289 | // Update the brush preview 290 | this.updateBrushPreview(this); 291 | }, this.brush_softness / 1); // normalize the initial brush softness to the slider's range 292 | softnessSlider.id = "softnessSlider"; 293 | 294 | // Append sliders to the dark grey parent box 295 | menuBox.appendChild(thicknessSlider); 296 | menuBox.appendChild(opacitySlider); 297 | menuBox.appendChild(softnessSlider); 298 | 299 | // Append the dark grey parent box to the context menu 300 | contextMenu.appendChild(menuBox); 301 | 302 | // Append the context menu to the body or another container (if available) 303 | document.body.appendChild(contextMenu); 304 | 305 | return contextMenu; // Return the context menu element for future reference 306 | } 307 | 308 | createSlider(name, callback, defaultValue) { 309 | const sliderDiv = document.createElement('div'); 310 | 311 | // Create a label for the slider 312 | const labelElement = document.createElement('label'); 313 | labelElement.textContent = name; 314 | 315 | // Create the slider input element 316 | const sliderInput = document.createElement('input'); 317 | sliderInput.setAttribute('type', 'range'); 318 | sliderInput.setAttribute('min', '1'); 319 | sliderInput.setAttribute('max', '100'); 320 | sliderInput.setAttribute('value', defaultValue || '10'); // Set the default value using the provided defaultValue 321 | 322 | // Add an event listener to the slider 323 | sliderInput.addEventListener("input", callback); 324 | 325 | // Append label and slider input to the slider div 326 | sliderDiv.appendChild(labelElement); 327 | sliderDiv.appendChild(sliderInput); 328 | 329 | return sliderDiv; 330 | } 331 | 332 | createColorPickerWrapper() { 333 | this.colorPickerWrapper = document.createElement('div'); 334 | this.colorPickerWrapper.id = 'colorPickerWrapper'; 335 | 336 | this.colorPickerWrapper.style.position = 'relative'; 337 | this.colorPickerWrapper.style.display = 'inline-block'; 338 | this.colorPickerWrapper.style.borderRadius = '5px'; 339 | this.colorPickerWrapper.style.border = '2px solid #777'; 340 | this.colorPickerWrapper.style.overflow = 'hidden'; 341 | 342 | this.colorPicker = document.createElement('input'); 343 | 344 | this.colorPicker.type = 'color'; 345 | this.colorPicker.style.borderRadius = '10px'; 346 | this.colorPicker.style.border = 'none'; 347 | this.colorPicker.style.outline = 'none'; 348 | this.colorPicker.style.backgroundColor = 'transparent'; 349 | this.colorPickerWrapper.appendChild(this.colorPicker); 350 | this.colorPicker.value = this.lastRGBColor || "#000000"; 351 | 352 | this.colorPickerWrapper.addEventListener('click', (event) => { 353 | this.colorPicker.click(); 354 | }); 355 | 356 | this.colorPicker.addEventListener('input', (event) => { 357 | this.brushColor = event.target.value; 358 | this.lastRGBColor = this.colorPicker.value; 359 | this.updateBrushPreview(this); 360 | }); 361 | 362 | return this.colorPickerWrapper; 363 | } 364 | 365 | createComboBox() { 366 | var comboBox = document.createElement("select"); 367 | comboBox.style.cssFloat = "right"; 368 | comboBox.style.marginLeft = "4px"; 369 | comboBox.style.marginRight = "4px"; 370 | comboBox.style.height = "29px"; 371 | 372 | var option1 = document.createElement("option"); 373 | option1.value = "greyscale"; 374 | 
option1.text = "Greyscale"; 375 | comboBox.appendChild(option1); 376 | 377 | var option2 = document.createElement("option"); 378 | option2.value = "rgb"; 379 | option2.text = "RGB"; 380 | comboBox.appendChild(option2); 381 | 382 | comboBox.addEventListener('change', (event) => { 383 | if (event.target.value === 'greyscale') { 384 | this.isGreyscale = 'greyscale'; 385 | this.unbindEventsFromCanvas(this.imgCanvas); 386 | this.setEventHandler(this.maskCanvas); 387 | this.maskCanvas.style.visibility = 'visible'; 388 | this.colorPicker.type = 'range'; 389 | this.colorPicker.value = "#000000"; 390 | this.brushColor = "#000000"; 391 | this.colorPickerWrapper.style.backgroundColor = "#000000"; 392 | } else { 393 | this.isGreyscale = 'rgb'; 394 | this.colorPicker.type = 'color'; 395 | // Switch to the RGB canvas and update the visibility 396 | this.unbindEventsFromCanvas(this.maskCanvas); 397 | this.setEventHandler(this.imgCanvas); 398 | this.maskCanvas.style.visibility = 'hidden'; 399 | this.colorPicker.value = this.lastRGBColor; 400 | this.brushColor = this.lastRGBColor; 401 | } 402 | }); 403 | 404 | return comboBox; 405 | } 406 | 407 | setlayout(imgCanvas, maskCanvas) { 408 | const self = this; 409 | 410 | // If it is specified as relative, using it only as a hidden placeholder for padding is recommended 411 | // to prevent anomalies where it exceeds a certain size and goes outside of the window. 412 | var placeholder = document.createElement("div"); 413 | placeholder.style.position = "relative"; 414 | placeholder.style.height = "50px"; 415 | 416 | // Update brush references to this.brush 417 | this.brush.id = "brush"; 418 | this.brush.style.backgroundColor = "transparent"; 419 | this.brush.style.outline = "1px dashed black"; 420 | this.brush.style.boxShadow = "0 0 0 1px white"; 421 | this.brush.style.borderRadius = "50%"; 422 | this.brush.style.MozBorderRadius = "50%"; 423 | this.brush.style.WebkitBorderRadius = "50%"; 424 | this.brush.style.position = "absolute"; 425 | this.brush.style.zIndex = 99999; 426 | this.brush.style.pointerEvents = "none"; 427 | 428 | // Create a child container 429 | this.childContainer = document.createElement("div"); 430 | this.childContainer.id = "childContainer"; 431 | this.childContainer.style.position = "relative"; 432 | 433 | // Step 1: Create a wrapper container 434 | var wrapperDiv = document.createElement("div"); 435 | wrapperDiv.id = "wrapperDiv"; 436 | wrapperDiv.style.position = "relative"; 437 | wrapperDiv.style.overflow = "hidden"; 438 | wrapperDiv.style.maxWidth = "100%"; 439 | wrapperDiv.style.maxHeight = "100%"; 440 | 441 | wrapperDiv.style.border = "2px solid #777"; 442 | wrapperDiv.style.borderRadius = "10px"; 443 | 444 | var bottom_panel = document.createElement("div"); 445 | bottom_panel.style.position = "absolute"; 446 | bottom_panel.style.bottom = "0px"; 447 | bottom_panel.style.left = "20px"; 448 | bottom_panel.style.right = "20px"; 449 | bottom_panel.style.height = "50px"; 450 | bottom_panel.style.display = "flex"; 451 | bottom_panel.style.justifyContent = "space-between"; 452 | 453 | // Append imgCanvas and maskCanvas to the child container 454 | this.childContainer.appendChild(imgCanvas); 455 | this.childContainer.appendChild(maskCanvas); 456 | wrapperDiv.appendChild(this.childContainer); 457 | this.element.appendChild(wrapperDiv); 458 | this.element.appendChild(placeholder); // must below z-index than bottom_panel to avoid covering button 459 | this.element.appendChild(bottom_panel); 460 | document.body.appendChild(this.brush); 461 | 462 | 
var clearButton = this.createLeftButton("Clear", 463 | () => { 464 | self.maskCtx.clearRect(0, 0, self.maskCanvas.width, self.maskCanvas.height); 465 | self.backupCtx.clearRect(0, 0, self.backupCanvas.width, self.backupCanvas.height); 466 | }); 467 | 468 | var cancelButton = this.createRightButton("Cancel", () => { 469 | this.zoomLevel = 1; // Reset zoom level to 1 470 | this.childContainer.style.transform = `scale(${this.zoomLevel})`; // Resetting zoom level in DOM 471 | this.childContainer.style.left = this.left; 472 | this.childContainer.style.top = this.top; 473 | 474 | // Hide the context menu 475 | const contextMenu = document.getElementById('canvasContextMenu'); 476 | contextMenu.style.display = 'none'; 477 | 478 | self.close(); 479 | }); 480 | 481 | 482 | this.saveButton = this.createRightButton("Save", () => { 483 | document.removeEventListener('keydown', this.boundHandleKeyDown); 484 | document.removeEventListener('keyup', this.boundHandleKeyUp); 485 | this.maskCanvas.removeEventListener('pointerdown', this.handlePointerDown); 486 | document.removeEventListener('pointerup', this.handlePointerUp); 487 | this.zoomLevel = 1; // Reset zoom level to 1 488 | this.childContainer.style.transform = `scale(${this.zoomLevel})`; // Resetting zoom level in DOM 489 | this.childContainer.removeEventListener('mousemove', this.handlePointerMove); 490 | this.childContainer.style.left = this.left; 491 | this.childContainer.style.top = this.top; 492 | 493 | // Hide the context menu 494 | const contextMenu = document.getElementById('canvasContextMenu'); 495 | contextMenu.style.display = 'none'; 496 | 497 | this.save() 498 | }); 499 | 500 | var leftGroup = document.createElement("div"); 501 | leftGroup.style.display = "flex"; 502 | leftGroup.style.alignItems = "center"; 503 | leftGroup.appendChild(clearButton); 504 | 505 | var rightGroup = document.createElement("div"); 506 | rightGroup.style.display = "flex"; 507 | rightGroup.style.alignItems = "center"; 508 | rightGroup.appendChild(cancelButton); 509 | 510 | var comboBox = this.createComboBox(); 511 | rightGroup.appendChild(comboBox); 512 | 513 | rightGroup.appendChild(this.saveButton); 514 | 515 | bottom_panel.appendChild(leftGroup); 516 | bottom_panel.appendChild(rightGroup); 517 | 518 | imgCanvas.style.position = "relative"; 519 | imgCanvas.style.top = "200"; 520 | imgCanvas.style.left = "0"; 521 | 522 | maskCanvas.style.position = "absolute"; 523 | } 524 | 525 | show() { 526 | if(!this.is_layout_created) { 527 | // layout 528 | const imgCanvas = document.createElement('canvas'); 529 | const maskCanvas = document.createElement('canvas'); 530 | const backupCanvas = document.createElement('canvas'); 531 | 532 | imgCanvas.id = "imageCanvas"; 533 | maskCanvas.id = "maskCanvas"; 534 | backupCanvas.id = "backupCanvas"; 535 | 536 | this.setlayout(imgCanvas, maskCanvas); 537 | 538 | // prepare content 539 | this.imgCanvas = imgCanvas; 540 | this.maskCanvas = maskCanvas; 541 | this.backupCanvas = backupCanvas; 542 | this.maskCtx = maskCanvas.getContext('2d', { willReadFrequently: true }); 543 | this.backupCtx = backupCanvas.getContext('2d', { willReadFrequently: true }); 544 | 545 | this.setEventHandler(maskCanvas); 546 | 547 | document.addEventListener('keydown', this.boundHandleKeyDown); 548 | document.addEventListener('keyup', this.boundHandleKeyUp); 549 | 550 | 551 | 552 | this.is_layout_created = true; 553 | 554 | const self = this; 555 | const observer = new MutationObserver(function(mutations) { 556 | mutations.forEach(function(mutation) { 557 | if 
(mutation.type === 'attributes' && mutation.attributeName === 'style') { 558 | if(self.last_display_style && self.last_display_style != 'none' && self.element.style.display == 'none') { 559 | ComfyApp.onClipspaceEditorClosed(); 560 | } 561 | 562 | self.last_display_style = self.element.style.display; 563 | } 564 | }); 565 | }); 566 | 567 | const config = { attributes: true }; 568 | observer.observe(this.element, config); 569 | } 570 | 571 | this.setImages(this.imgCanvas, this.backupCanvas); 572 | 573 | if(ComfyApp.clipspace_return_node) { 574 | this.saveButton.innerText = "Save to node"; 575 | } 576 | else { 577 | this.saveButton.innerText = "Save"; 578 | } 579 | this.saveButton.disabled = false; 580 | 581 | this.element.style.display = "block"; 582 | this.element.style.zIndex = 8888; // NOTE: alert dialog must be high priority. 583 | } 584 | 585 | isOpened() { 586 | return this.element.style.display == "block"; 587 | } 588 | 589 | resizeHandler = () => { 590 | // Ensure the image is fully loaded 591 | if (!this.image.complete) { 592 | console.error('Image not yet loaded'); 593 | return; 594 | } 595 | 596 | const originalWidth = this.image.width; 597 | const originalHeight = this.image.height; 598 | 599 | // Get the viewport size 600 | const viewportWidth = window.innerWidth; 601 | const viewportHeight = window.innerHeight; 602 | 603 | // Get available screen width and height 604 | var availablescreenWidth = screen.availWidth; 605 | var availablescreenHeight = screen.availHeight; 606 | 607 | // Calculate the available size for the wrapperDiv while leaving space for other elements 608 | const availableWidth = viewportWidth * 0.9; 609 | const availableHeight = viewportHeight * 0.78; 610 | 611 | wrapperDiv.style.width = `${originalWidth}px`; 612 | wrapperDiv.style.height = `${originalHeight}px`; 613 | 614 | wrapperDiv.style.maxWidth = `${availableWidth}px`; 615 | wrapperDiv.style.maxHeight = `${availableHeight}px`; 616 | 617 | // Set the canvas sizes to match the image size 618 | this.imgCanvas.width = originalWidth; 619 | this.imgCanvas.height = originalHeight; 620 | this.maskCanvas.width = originalWidth; 621 | this.maskCanvas.height = originalHeight; 622 | 623 | // Draw the image onto the imgCanvas 624 | this.imgCtx.clearRect(0, 0, originalWidth, originalHeight); 625 | this.imgCtx.drawImage(this.image, 0, 0, originalWidth, originalHeight); 626 | 627 | // Updating the mask 628 | this.maskCanvas.style.top = this.imgCanvas.offsetTop + "px"; 629 | this.maskCanvas.style.left = this.imgCanvas.offsetLeft + "px"; 630 | 631 | // You may need to adapt the following lines depending on your exact requirements 632 | this.backupCtx.drawImage(this.maskCanvas, 0, 0, originalWidth, originalHeight, 0, 0, originalWidth, originalHeight); 633 | this.maskCtx.drawImage(this.backupCanvas, 0, 0, originalWidth, originalHeight, 0, 0, originalWidth, originalHeight); 634 | 635 | this.fitAndCenterImage(); 636 | }; 637 | 638 | setImages(imgCanvas, backupCanvas) { 639 | this.imgCtx = imgCanvas.getContext('2d', { willReadFrequently: true }); 640 | const backupCtx = backupCanvas.getContext('2d', { willReadFrequently: true }); 641 | const maskCtx = this.maskCtx; 642 | const maskCanvas = this.maskCanvas; 643 | 644 | let origRetryCount = 0; 645 | let touchedRetryCount = 0; 646 | const maxRetries = 3; 647 | 648 | backupCtx.clearRect(0, 0, this.backupCanvas.width, this.backupCanvas.height); 649 | this.imgCtx.clearRect(0, 0, this.imgCanvas.width, this.imgCanvas.height); 650 | maskCtx.clearRect(0, 0, this.maskCanvas.width, 
this.maskCanvas.height); 651 | 652 | // image load 653 | const orig_image = new Image(); 654 | window.removeEventListener("resize", this.resizeHandler); 655 | window.addEventListener("resize", this.resizeHandler); 656 | 657 | const touched_image = new Image(); 658 | 659 | touched_image.onload = function() { 660 | backupCanvas.width = touched_image.width; 661 | backupCanvas.height = touched_image.height; 662 | 663 | prepareRGB(touched_image, backupCanvas, backupCtx); 664 | }; 665 | 666 | touched_image.onerror = (errorEvent) => { 667 | console.error('Failed to load the touched image:', errorEvent); 668 | if (touchedRetryCount < maxRetries) { 669 | touched_image.src = touched_image.src; 670 | touchedRetryCount++; 671 | } else { 672 | alert('We encountered an issue loading the touched image multiple times. Please try again later.'); 673 | } 674 | }; 675 | 676 | const alpha_url = new URL(ComfyApp.clipspace.imgs[ComfyApp.clipspace['selectedIndex']].src); 677 | alpha_url.searchParams.delete('channel'); 678 | alpha_url.searchParams.delete('preview'); 679 | alpha_url.searchParams.set('channel', 'a'); 680 | touched_image.src = alpha_url.href; 681 | 682 | // Preloading the image for better perceived performance 683 | const preloadedImage = new Image(); 684 | preloadedImage.src = alpha_url.href; 685 | 686 | orig_image.onload = () => { 687 | this.originalWidth = orig_image.width; 688 | this.originalHeight = orig_image.height; 689 | 690 | // Calculate grid size based on image dimensions 691 | const gridSize = Math.min( 692 | this.originalWidth / Math.round(this.originalWidth / 20), 693 | this.originalHeight / Math.round(this.originalHeight / 20) 694 | ); 695 | 696 | // Update the grid background with the new gridSize 697 | wrapperDiv.style.background = ` 698 | repeating-linear-gradient( 699 | 0deg, 700 | transparent, 701 | transparent ${gridSize - 1}px, 702 | #777 ${gridSize - 1}px, 703 | #777 ${gridSize}px 704 | ), 705 | repeating-linear-gradient( 706 | 90deg, 707 | transparent, 708 | transparent ${gridSize - 1}px, 709 | #777 ${gridSize - 1}px, 710 | #777 ${gridSize}px 711 | )`; 712 | 713 | window.dispatchEvent(new Event('resize')); 714 | }; 715 | 716 | orig_image.onerror = (errorEvent) => { 717 | console.error('Failed to load the original image:', errorEvent); 718 | if (origRetryCount < maxRetries) { 719 | orig_image.src = orig_image.src; 720 | origRetryCount++; 721 | } else { 722 | alert('We encountered an issue loading the original image multiple times. 
Please try again later.'); 723 | } 724 | }; 725 | 726 | const rgb_url = new URL(ComfyApp.clipspace.imgs[ComfyApp.clipspace['selectedIndex']].src); 727 | rgb_url.searchParams.delete('channel'); 728 | rgb_url.searchParams.set('channel', 'rgb'); 729 | orig_image.src = rgb_url.href; 730 | this.image = orig_image; 731 | } 732 | 733 | setEventHandler(canvas) { 734 | Object.keys(this.eventHandlers).forEach(eventName => { 735 | if (eventName === 'documentPointerMove') { 736 | this.childContainer.addEventListener('pointermove', this.eventHandlers[eventName]); 737 | } else if (eventName === 'childContainerMouseMove') { 738 | this.childContainer.addEventListener('mousemove', this.eventHandlers[eventName]); 739 | } else if (eventName === 'wheel') { 740 | this.childContainer.addEventListener('wheel', this.eventHandlers.wheel, { passive: false }); 741 | } else if (eventName === 'touchmove') { 742 | this.childContainer.addEventListener('touchmove', this.eventHandlers.touchmove, { passive: true }); 743 | } else { 744 | canvas.addEventListener(eventName, this.eventHandlers[eventName]); 745 | } 746 | }); 747 | 748 | document.addEventListener('contextmenu', (event) => { 749 | event.preventDefault(); 750 | event.stopPropagation(); 751 | }); 752 | 753 | canvas.addEventListener('contextmenu', (event) => { 754 | if (event.altKey || event.shiftKey) { 755 | event.preventDefault(); 756 | event.stopPropagation(); 757 | 758 | const mouseX = event.clientX; 759 | const mouseY = event.clientY; 760 | 761 | const contextMenu = document.getElementById('canvasContextMenu'); 762 | contextMenu.style.left = mouseX + 'px'; 763 | contextMenu.style.top = mouseY + 'px'; 764 | contextMenu.style.display = 'block'; 765 | } 766 | }); 767 | } 768 | 769 | unbindEventsFromCanvas(canvas) { 770 | Object.keys(this.eventHandlers).forEach(eventName => { 771 | if (eventName === 'documentPointerMove') { 772 | document.removeEventListener('pointermove', this.eventHandlers[eventName]); 773 | } else if (eventName === 'childContainerMouseMove') { 774 | this.childContainer.removeEventListener('mousemove', this.eventHandlers[eventName]); 775 | } else if (eventName === 'wheel') { 776 | this.childContainer.removeEventListener('wheel', this.eventHandlers.wheel, { passive: true }); 777 | } else if (eventName === 'touchmove') { 778 | this.childContainer.removeEventListener('touchmove', this.eventHandlers.touchmove, { passive: true }); 779 | } else { 780 | canvas.removeEventListener(eventName, this.eventHandlers[eventName]); 781 | } 782 | }); 783 | } 784 | 785 | handlePointerOut(event) { 786 | this.drawing_mode = false; 787 | this.cursorX = event.pageX; 788 | this.cursorY = event.pageY; 789 | this.updateBrushPreview(this); 790 | this.brush.style.display = 'none'; 791 | } 792 | 793 | handlePointerOver(event) { 794 | this.brush.style.display = 'block'; 795 | if (event.buttons === 1 || event.buttons === 2) { 796 | this.drawing_mode = true; 797 | } 798 | this.cursorX = event.pageX; 799 | this.cursorY = event.pageY; 800 | this.updateBrushPreview(this); 801 | } 802 | 803 | handlePointerMove(event) { 804 | if (this.isCtrlDown && this.isSpaceDown) { 805 | if (event.buttons === 1) { 806 | if (this.isDragging) { 807 | this.handleDragMove(event); 808 | } 809 | return; // Prevent zooming when dragging 810 | } 811 | 812 | const currentTime = Date.now(); 813 | if (currentTime - this.lastPointerMoveTime < 16) { 814 | return; 815 | } 816 | 817 | if (this.initialMouseX === null) { 818 | this.initialMouseX = event.clientX; 819 | const rect = 
this.childContainer.getBoundingClientRect(); 820 | const style = window.getComputedStyle(this.childContainer); 821 | const matrix = new DOMMatrix(style.transform); 822 | this.zoomCenterX = (event.clientX - rect.left) / matrix.a - parseFloat(style.left); 823 | this.zoomCenterY = (event.clientY - rect.top) / matrix.d - parseFloat(style.top); 824 | } 825 | 826 | // Adjust to use movementX and movementY for more immediate response to direction changes 827 | this.adjustZoomLevel(event.movementX > 0 || event.movementY > 0 ? 'in' : 'out'); 828 | 829 | requestAnimationFrame(() => { 830 | this.applyZoom(); 831 | }); 832 | 833 | this.lastPointerMoveTime = currentTime; 834 | } else { 835 | this.initialMouseX = null; // Reset the initialMouseX when Ctrl+Space is not pressed 836 | } 837 | 838 | this.cursorX = event.pageX; 839 | this.cursorY = event.pageY; 840 | this.updateBrushPreview(this); 841 | } 842 | 843 | handleDragMove(event) { 844 | if (this.isDragging) { 845 | const deltaX = event.clientX - this.initialMouseX; 846 | const deltaY = event.clientY - this.initialMouseY; 847 | 848 | this.childContainer.style.left = `${this.initialOffsetX + deltaX}px`; 849 | this.childContainer.style.top = `${this.initialOffsetY + deltaY}px`; 850 | } 851 | } 852 | 853 | adjustZoomLevel(direction) { 854 | if (this.isDragging) { 855 | return; // Prevent zooming if a drag move is in progress 856 | } 857 | 858 | let zoomStep = 0.05 * this.zoomLevel; // Increased the zoom step multiplier for greater sensitivity 859 | if (direction === 'in') { 860 | this.zoomLevel = Math.min(this.zoomLevel + zoomStep, 5); 861 | } else if (direction === 'out') { 862 | this.zoomLevel = Math.max(this.zoomLevel - zoomStep, 0.1); 863 | } 864 | } 865 | 866 | applyZoom() { 867 | if (this.isDragging) { 868 | return; // Prevent zooming if a drag move is in progress 869 | } 870 | 871 | this.childContainer.style.transform = `scale(${this.zoomLevel})`; 872 | this.childContainer.style.transformOrigin = `${this.zoomCenterX}px ${this.zoomCenterY}px`; 873 | this.childContainer.style.willChange = 'transform'; // Hint the browser to optimize for transform changes 874 | } 875 | 876 | 877 | brush_size = 10; 878 | drawing_mode = false; 879 | lastx = -1; 880 | lasty = -1; 881 | lasttime = 0; 882 | 883 | handleKeyDown(event) { 884 | const self = ComfyShopDialog.instance; 885 | if(event.key === 'Control') { 886 | self.isCtrlDown = true; 887 | } 888 | if(event.key === ' ') { // Spacebar key event 889 | self.isSpaceDown = true; 890 | } 891 | if(event.key === 'Alt') { // Alt key event 892 | self.isAltDown = true; 893 | } 894 | if(event.key === 'w') { // Spacebar key event 895 | console.log('w Down'); 896 | console.log('nodeData Available', this.nodeData.input.required.image[0][0]); 897 | 898 | this.copyToClipspace(); 899 | } 900 | if(event.key === 'f') { 901 | self.zoomLevel = 1; 902 | self.childContainer.style.transform = `scale(${self.zoomLevel})`; // Resetting zoom level in DOM 903 | self.childContainer.style.left = self.left; 904 | self.childContainer.style.top = self.top; 905 | 906 | // Reset the size of wrapperDiv to match the original image dimensions 907 | const imageWidth = self.image.width; 908 | const imageHeight = self.image.height; 909 | wrapperDiv.style.width = `${imageWidth}px`; 910 | wrapperDiv.style.height = `${imageHeight}px`; 911 | } 912 | if (event.key === ']') { 913 | self.brush_size = Math.min(self.brush_size+2, 100); 914 | } else if (event.key === '[') { 915 | self.brush_size = Math.max(self.brush_size-2, 1); 916 | } else if(event.key === 
'Enter') { 917 | self.save(); 918 | } 919 | 920 | if(event.key === 'Shift') { 921 | self.isShiftDown = true; 922 | } 923 | 924 | if(self.isShiftDown && event.key === 'F') { // Shift + F key event 925 | this.fitAndCenterImage(); 926 | } 927 | 928 | if (event.ctrlKey && event.key.toLowerCase() === 'z') { 929 | if (event.shiftKey) { 930 | // If Shift is also pressed, redo 931 | self.redo(); 932 | } else { 933 | // Otherwise, just undo 934 | self.undo(); 935 | } 936 | 937 | // Prevent the default behavior for Ctrl+Z and Ctrl+Shift+Z 938 | event.preventDefault(); 939 | } else if (event.ctrlKey && event.key.toLowerCase() === 'y') { 940 | // Redo for Ctrl+Y 941 | self.redo(); 942 | 943 | // Prevent the default behavior for Ctrl+Y 944 | event.preventDefault(); 945 | } 946 | 947 | self.updateBrushPreview(self); 948 | } 949 | 950 | fitAndCenterImage = () => { 951 | // Get the dimensions of wrapperDiv 952 | const wrapperWidth = wrapperDiv.clientWidth; 953 | const wrapperHeight = wrapperDiv.clientHeight; 954 | 955 | // Get the dimensions of the original image 956 | const imageWidth = this.image.width; 957 | const imageHeight = this.image.height; 958 | 959 | // Calculate the scale needed to fit the image within the wrapperDiv 960 | const scaleX = wrapperWidth / imageWidth; 961 | const scaleY = wrapperHeight / imageHeight; 962 | const scale = Math.min(scaleX, scaleY); 963 | 964 | // Update the zoomLevel 965 | this.zoomLevel = scale; 966 | 967 | // Calculate the scaled dimensions of the image 968 | const scaledWidth = imageWidth * scale; 969 | const scaledHeight = imageHeight * scale; 970 | 971 | // Adjust the wrapperDiv size to match the scaled dimensions of the image 972 | wrapperDiv.style.width = `${scaledWidth}px`; 973 | wrapperDiv.style.height = `${scaledHeight}px`; 974 | 975 | // The wrapperDiv now matches the scaled image exactly, so no centering offset is needed 976 | const centeredLeft = 0; 977 | const centeredTop = 0; 978 | 979 | // Reset the transform origin and apply the calculated scale and position to the childContainer 980 | this.childContainer.style.transformOrigin = '0 0'; 981 | this.childContainer.style.transform = `scale(${scale})`; 982 | this.childContainer.style.left = `${centeredLeft}px`; 983 | this.childContainer.style.top = `${centeredTop}px`; 984 | } 985 | 986 | copyToClipspace() { 987 | try { 988 | const maskCtx = this.maskCanvas.getContext('2d', { willReadFrequently: true }); 989 | const backupCtx = this.backupCanvas.getContext('2d', { willReadFrequently: true }); 990 | 991 | const maskImageData = maskCtx.getImageData(0, 0, this.maskCanvas.width, this.maskCanvas.height); 992 | const backupImageData = backupCtx.getImageData(0, 0, this.backupCanvas.width, this.backupCanvas.height); 993 | 994 | ComfyApp.clipspace = { 995 | 'maskImageData': maskImageData, 996 | 'backupImageData': backupImageData, 997 | }; 998 | } catch (error) { 999 | console.error('Error copying data to clipspace', error); 1000 | } 1001 | } 1002 | 1003 | handleKeyUp(event) { 1004 | event.preventDefault(); 1005 | event.stopPropagation(); 1006 | 1007 | const self = ComfyShopDialog.instance; 1008 | 1009 | if (event.key === 'Control') { 1010 | self.isCtrlDown = false; 1011 | } 1012 | 1013 | if (event.key === ' ') { 1014 | self.isSpaceDown = false; 1015 | } 1016 | 1017 | if (event.key === 'Alt') { 1018 | self.isAltDown = false; 1019 | } 1020 | 1021 | if(event.key === 'Shift') { 1022 | self.isShiftDown = false; 1023 | } 1024 | 1025 | } 1026 | 1027 | get currentActionStack() 
{ 1028 | return this.isGreyscale === 'rgb' ? this.rgbActionStack : this.greyActionStack; 1029 | } 1030 | 1031 | get currentUndoStack() { 1032 | return this.isGreyscale === 'rgb' ? this.rgbUndoStack : this.greyUndoStack; 1033 | } 1034 | 1035 | undo() { 1036 | if (this.currentActionStack.length === 0) { 1037 | return; 1038 | } 1039 | const actionToUndo = this.currentActionStack.pop(); 1040 | 1041 | if (!actionToUndo) { 1042 | return; 1043 | } 1044 | 1045 | if (actionToUndo.previousImageData) { 1046 | const context = actionToUndo.canvasType === 'imgCanvas' ? this.imgCtx : this.maskCtx; 1047 | context.putImageData(actionToUndo.previousImageData, 0, 0); 1048 | } 1049 | this.currentUndoStack.push(actionToUndo); 1050 | } 1051 | 1052 | redo() { 1053 | if (this.currentUndoStack.length === 0) { 1054 | return; 1055 | } 1056 | 1057 | const actionToRedo = this.currentUndoStack.pop(); 1058 | 1059 | const context = actionToRedo.canvasType === 'imgCanvas' ? this.imgCtx : this.maskCtx; 1060 | 1061 | // Use postActionImageData for redos 1062 | if (actionToRedo.postActionImageData) { 1063 | context.putImageData(actionToRedo.postActionImageData, 0, 0); 1064 | } 1065 | 1066 | // Add the action back to currentActionStack 1067 | this.currentActionStack.push(actionToRedo); 1068 | } 1069 | 1070 | applyAction(action) { 1071 | const context = action.canvasType === 'imgCanvas' ? this.imgCtx : this.maskCtx; 1072 | 1073 | // Implement logic to apply the action here based on its type. 1074 | switch (action.actionType) { 1075 | case 'drawLine': 1076 | this.drawLines(context, action.points, action.color, action.brushSize); 1077 | break; 1078 | // Add more cases for other action types if needed. 1079 | } 1080 | } 1081 | 1082 | drawLines(context, points, color, brushSize) { 1083 | // Set the drawing style properties 1084 | context.strokeStyle = color; 1085 | context.lineWidth = brushSize; 1086 | context.lineJoin = 'round'; 1087 | context.lineCap = 'round'; 1088 | 1089 | // Begin the path for drawing 1090 | context.beginPath(); 1091 | 1092 | // Move to the first point (the starting point of the line) 1093 | context.moveTo(points[0].x, points[0].y); 1094 | 1095 | // Loop through the points and draw line segments 1096 | for (let i = 1; i < points.length; i++) { 1097 | context.lineTo(points[i].x, points[i].y); 1098 | } 1099 | 1100 | // Stroke the path to draw the lines 1101 | context.stroke(); 1102 | 1103 | // Close the path (optional) 1104 | context.closePath(); 1105 | } 1106 | 1107 | drawFromAction(action) { 1108 | if (action.actionType === 'drawLine') { 1109 | const ctx = action.canvasType === 'imgCanvas' ? this.imgCanvas.getContext('2d', { willReadFrequently: true }) : this.maskCanvas.getContext('2d', { willReadFrequently: true }); 1110 | ctx.strokeStyle = action.color; 1111 | ctx.lineWidth = action.brushSize; 1112 | ctx.lineJoin = "round"; 1113 | ctx.lineCap = "round"; 1114 | 1115 | ctx.beginPath(); 1116 | for (let i = 0; i < action.points.length - 1; i++) { 1117 | ctx.moveTo(action.points[i].x, action.points[i].y); 1118 | ctx.lineTo(action.points[i + 1].x, action.points[i + 1].y); 1119 | ctx.stroke(); 1120 | } 1121 | } 1122 | } 1123 | 1124 | drawLine(points, color, brushSize, canvasType) { 1125 | if (points.length === 0) { 1126 | return; 1127 | } 1128 | 1129 | // Determine the drawing context based on the canvasType parameter 1130 | const drawingContext = canvasType === 'imgCanvas' ? 
this.imgCtx : this.maskCtx; 1131 | 1132 | // Set up the drawing context 1133 | drawingContext.globalCompositeOperation = 'source-over'; 1134 | drawingContext.imageSmoothingEnabled = true; 1135 | drawingContext.imageSmoothingQuality = 'high'; 1136 | drawingContext.lineJoin = 'round'; 1137 | drawingContext.lineCap = 'round'; 1138 | drawingContext.strokeStyle = color; 1139 | drawingContext.lineWidth = brushSize; 1140 | 1141 | // Begin a new path 1142 | drawingContext.beginPath(); 1143 | 1144 | // Move to the first point 1145 | drawingContext.moveTo(points[0].x, points[0].y); 1146 | 1147 | // Draw lines to subsequent points 1148 | for (let i = 1; i < points.length; i++) { 1149 | drawingContext.lineTo(points[i].x, points[i].y); 1150 | } 1151 | 1152 | // Stroke the path 1153 | drawingContext.stroke(); 1154 | } 1155 | 1156 | static handlePointerUp(event) { 1157 | event.preventDefault(); 1158 | 1159 | const instance = ComfyShopDialog.instance; 1160 | 1161 | instance.drawing_mode = false; 1162 | 1163 | const drawingContext = instance.isGreyscale === 'rgb' 1164 | ? instance.imgCanvas.getContext('2d', { willReadFrequently: true }) 1165 | : instance.maskCanvas.getContext('2d', { willReadFrequently: true }); 1166 | 1167 | drawingContext.globalAlpha = 1; 1168 | 1169 | if (instance.isDragging) { 1170 | instance.isDragging = false; 1171 | instance.initialMouseX = null; // Reset the initial mouse coordinates 1172 | instance.initialMouseY = null; 1173 | } 1174 | 1175 | if (ComfyShopDialog.instance.currentUndoStack.length > 0) { 1176 | ComfyShopDialog.instance.currentUndoStack.splice(0, ComfyShopDialog.instance.currentUndoStack.length); 1177 | } 1178 | 1179 | // Capture the postActionImageData 1180 | if (instance.currentAction) { 1181 | instance.currentAction.postActionImageData = drawingContext.getImageData(0, 0, 1182 | (instance.isGreyscale === 'rgb' ? instance.imgCanvas : instance.maskCanvas).width, 1183 | (instance.isGreyscale === 'rgb' ? 
instance.imgCanvas : instance.maskCanvas).height); 1184 | } 1185 | 1186 | if (instance.currentAction) { 1187 | 1188 | instance.currentActionStack.push(instance.currentAction); 1189 | 1190 | instance.currentAction = null; // Reset the current action for the next one 1191 | } 1192 | } 1193 | 1194 | updateBrushPreview(self) { 1195 | requestAnimationFrame(() => { 1196 | const brush = self.brush; 1197 | 1198 | var centerX = self.cursorX; 1199 | var centerY = self.cursorY; 1200 | 1201 | const brushColorRgb = this.hexToRgb(self.brushColor); 1202 | 1203 | brush.style.width = self.brush_size * 2 + "px"; 1204 | brush.style.height = self.brush_size * 2 + "px"; 1205 | brush.style.left = (centerX - self.brush_size) + "px"; 1206 | brush.style.top = (centerY - self.brush_size) + "px"; 1207 | brush.style.borderRadius = "50%"; 1208 | brush.style.background = `rgba(${brushColorRgb.r}, ${brushColorRgb.g}, ${brushColorRgb.b}, ${self.brushOpacity})`; 1209 | brush.style.boxShadow = `0 0 ${self.brush_softness}px rgba(${brushColorRgb.r}, ${brushColorRgb.g}, ${brushColorRgb.b}, ${self.brush_softness})`; 1210 | brush.style.transition = 'top 0.01s, left 0.01s'; 1211 | brush.style.transform = 'translate3d(0, 0, 0) scale(1)'; 1212 | brush.style.boxSizing = 'border-box'; 1213 | 1214 | // Hide the brush preview while dragging or zooming 1215 | if (self.isDragging || self.isZooming) { 1216 | brush.style.visibility = 'hidden'; 1217 | } else { 1218 | brush.style.visibility = 'visible'; 1219 | } 1220 | }); 1221 | } 1222 | 1223 | handleWheelEvent(self, event) { 1224 | // Target the actual sliders inside the divs by using querySelector 1225 | const thicknessSlider = document.querySelector("#thicknessSlider input[type='range']"); 1226 | const opacitySlider = document.querySelector("#opacitySlider input[type='range']"); 1227 | const softnessSlider = document.querySelector("#softnessSlider input[type='range']"); 1228 | 1229 | if (self.isShiftDown && self.isAltDown) { 1230 | // Adjust the brush opacity 1231 | if (event.deltaY > 0) { 1232 | self.brushOpacity = Math.min(self.brushOpacity + 0.05, 1); 1233 | } else { 1234 | self.brushOpacity = Math.max(self.brushOpacity - 0.05, 0); 1235 | } 1236 | if (opacitySlider) { 1237 | opacitySlider.value = self.brushOpacity * 100; // Note the multiplication here 1238 | } 1239 | self.updateBrushPreview(self); 1240 | event.preventDefault(); 1241 | } else if (self.isAltDown) { 1242 | // Adjust the brush softness 1243 | const wheelAdjustment = 0.01; 1244 | if(event.deltaY < 0) { 1245 | self.brush_softness = Math.min(self.brush_softness + wheelAdjustment * 50, 50); 1246 | } else { 1247 | self.brush_softness = Math.max(self.brush_softness - wheelAdjustment * 50, 0); 1248 | } 1249 | if (softnessSlider) { 1250 | softnessSlider.value = `${self.brush_softness / 50 * 100}`; // Convert to string and assuming the max value is 100 1251 | } 1252 | 1253 | event.preventDefault(); 1254 | } else { 1255 | // Adjust brush size 1256 | if(event.deltaY < 0) { 1257 | self.brush_size = Math.min(self.brush_size + 2, 100); 1258 | } else { 1259 | self.brush_size = Math.max(self.brush_size - 2, 1); 1260 | } 1261 | if (thicknessSlider) { 1262 | thicknessSlider.value = `${self.brush_size}`; // Convert to string 1263 | } 1264 | event.preventDefault(); 1265 | } 1266 | 1267 | self.updateBrushPreview(self); 1268 | } 1269 | 1270 | hexToRgb(hex) { 1271 | let result = /^#?([a-f\d]{2})([a-f\d]{2})([a-f\d]{2})$/i.exec(hex); 1272 | return result ? 
{ 1273 | r: parseInt(result[1], 16), 1274 | g: parseInt(result[2], 16), 1275 | b: parseInt(result[3], 16) 1276 | } : { r: 0, g: 0, b: 0 }; 1277 | } 1278 | 1279 | draw_move(self, event) { 1280 | if (event) { 1281 | event.preventDefault(); 1282 | } 1283 | 1284 | const drawingContext = this.isGreyscale === 'rgb' ? self.imgCtx : self.maskCtx; 1285 | const { x, y } = getActualCoordinates(event, self.maskCanvas, self.zoomLevel); 1286 | let brush_size = this.brush_size / self.zoomLevel; 1287 | 1288 | if (this.isCtrlDown && this.isSpaceDown && event.buttons === 1) { 1289 | return; 1290 | } 1291 | 1292 | if (!self.drawing_mode) { 1293 | return; 1294 | } 1295 | 1296 | if (event instanceof PointerEvent && event.pointerType == 'pen') { 1297 | brush_size *= event.pressure; 1298 | this.last_pressure = event.pressure; 1299 | } else if (window.TouchEvent && event instanceof TouchEvent) { 1300 | brush_size *= this.last_pressure; 1301 | } 1302 | 1303 | const brushColorRgb = this.hexToRgb(this.brushColor); 1304 | 1305 | if (this.brushVisible) { 1306 | this.updateBrushPreview(self); 1307 | } 1308 | 1309 | this.cursorX = event.pageX; 1310 | this.cursorY = event.pageY; 1311 | self.updateBrushPreview(self); 1312 | 1313 | const operation = event.buttons === 1 ? "source-over" : event.buttons === 2 ? "destination-out" : null; 1314 | const currentBrushOpacity = this.brushOpacity; 1315 | 1316 | const softFactor = Math.max(0.1, 1 - self.brush_softness / 100); // Normalize brush_softness to [0,1] 1317 | const innerRadius = brush_size * softFactor; 1318 | const outerRadius = brush_size; 1319 | 1320 | const gradient = drawingContext.createRadialGradient(x, y, innerRadius, x, y, outerRadius); 1321 | 1322 | if (operation === "destination-out") { 1323 | gradient.addColorStop(0, `rgba(255, 255, 255, 1)`); 1324 | gradient.addColorStop(1 - softFactor, `rgba(255, 255, 255, ${currentBrushOpacity * softFactor})`); 1325 | gradient.addColorStop(1, `rgba(255, 255, 255, 0)`); // This ensures the outer edge is fully transparent 1326 | } else { 1327 | gradient.addColorStop(0, `rgba(${brushColorRgb.r}, ${brushColorRgb.g}, ${brushColorRgb.b}, ${currentBrushOpacity})`); 1328 | gradient.addColorStop(1 - softFactor, `rgba(${brushColorRgb.r}, ${brushColorRgb.g}, ${brushColorRgb.b}, ${currentBrushOpacity * softFactor})`); 1329 | gradient.addColorStop(1, `rgba(${brushColorRgb.r}, ${brushColorRgb.g}, ${brushColorRgb.b}, 0)`); // This ensures the outer edge is fully transparent 1330 | } 1331 | 1332 | if (operation && !this.isDrawing) { 1333 | this.isDrawing = true; 1334 | 1335 | 1336 | requestAnimationFrame(() => { 1337 | drawingContext.globalCompositeOperation = operation; 1338 | drawingContext.imageSmoothingEnabled = true; 1339 | drawingContext.imageSmoothingQuality = 'high'; 1340 | drawingContext.lineJoin = 'round'; 1341 | drawingContext.lineCap = 'round'; 1342 | 1343 | 1344 | if (self.lastx !== null && self.lasty !== null) { 1345 | const distance = Math.hypot(x - self.lastx, y - self.lasty); 1346 | const pointsToFill = Math.ceil(distance / 0.01); 1347 | 1348 | drawingContext.globalAlpha = this.brushOpacity; 1349 | drawingContext.strokeStyle = gradient; 1350 | drawingContext.beginPath(); 1351 | drawingContext.moveTo(self.lastx, self.lasty); 1352 | 1353 | const controlPoint1 = { x: (self.lastx + x) / 2, y: self.lasty }; 1354 | const controlPoint2 = { x: (self.lastx + x) / 2, y: y }; 1355 | 1356 | drawingContext.bezierCurveTo(controlPoint1.x, controlPoint1.y, controlPoint2.x, controlPoint2.y, x, y); 1357 | drawingContext.lineWidth = brush_size 
* 2; 1358 | drawingContext.stroke(); 1359 | 1360 | self.lastx = x; 1361 | self.lasty = y; 1362 | 1363 | this.currentAction = { 1364 | actionType: 'drawLine', 1365 | points: [ { x: self.lastx, y: self.lasty }, { x, y } ], 1366 | color: gradient, 1367 | brushSize: brush_size, 1368 | canvasType: this.isGreyscale === 'rgb' ? 'imgCanvas' : 'maskCanvas', 1369 | previousImageData: this.previousImageData 1370 | }; 1371 | } 1372 | self.lasttime = performance.now(); 1373 | this.isDrawing = false; 1374 | }); 1375 | } 1376 | } 1377 | 1378 | handlePointerDown(self, event) { 1379 | let skipDrawing = false; 1380 | // Hide the context menu before any drawing operation 1381 | this.contextMenu.style.display = 'none'; 1382 | 1383 | // Check for Alt + right mouse button click to show/hide context menu 1384 | if ((event.altKey || event.shiftKey) && event.button === 2) { 1385 | event.preventDefault(); 1386 | event.stopPropagation(); 1387 | 1388 | skipDrawing = true; 1389 | 1390 | // Toggle the context menu visibility 1391 | if (this.contextMenu.style.display === 'block') { 1392 | this.contextMenu.style.display = 'none'; 1393 | } else { 1394 | const mouseX = event.clientX; 1395 | const mouseY = event.clientY; 1396 | this.contextMenu.style.left = mouseX + 'px'; 1397 | this.contextMenu.style.top = mouseY + 'px'; 1398 | this.contextMenu.style.display = 'block'; 1399 | 1400 | this.createContextMenu(); 1401 | } 1402 | 1403 | } 1404 | if (!skipDrawing) { 1405 | if (this.isCtrlDown && this.isSpaceDown && event.buttons === 1) { 1406 | this.isDragging = true; 1407 | 1408 | this.initialMouseX = event.clientX; 1409 | this.initialMouseY = event.clientY; 1410 | 1411 | this.initialOffsetX = this.childContainer.offsetLeft; 1412 | this.initialOffsetY = this.childContainer.offsetTop; 1413 | 1414 | // Store the initial dragging offsets 1415 | this.dragStartX = this.dragOffsetX; 1416 | this.dragStartY = this.dragOffsetY; 1417 | 1418 | // Set drawing_mode to false when Ctrl+Spacebar are pressed and dragging 1419 | this.drawing_mode = false; 1420 | 1421 | return; // prevent drawing when Ctrl+Spacebar are pressed and dragging 1422 | } 1423 | } 1424 | 1425 | const drawingContext = this.isGreyscale === 'rgb' 1426 | ? this.imgCanvas.getContext('2d', { willReadFrequently: true }) 1427 | : this.maskCanvas.getContext('2d', { willReadFrequently: true }); 1428 | 1429 | // Always capture the current state of the canvas for undo functionality. 1430 | this.previousImageData = drawingContext.getImageData(0, 0, (this.isGreyscale === 'rgb' ? this.imgCanvas : this.maskCanvas).width, (this.isGreyscale === 'rgb' ? this.imgCanvas : this.maskCanvas).height); 1431 | 1432 | const { x, y } = getActualCoordinates(event, this.isGreyscale === 'rgb' ? this.imgCanvas : this.maskCanvas, self.zoomLevel); 1433 | 1434 | var brush_size = this.brush_size / self.zoomLevel; 1435 | if (event instanceof PointerEvent && event.pointerType == 'pen') { 1436 | brush_size *= event.pressure; 1437 | this.last_pressure = event.pressure; 1438 | } else if (window.TouchEvent && event instanceof TouchEvent) { 1439 | brush_size *= this.last_pressure; 1440 | } 1441 | 1442 | const ColorContext = this.isGreyscale === 'rgb' ? 
self.brushColor : '#000000'; 1443 | const brushColorRgb = this.hexToRgb(ColorContext); 1444 | const currentBrushOpacity = this.brushOpacity; 1445 | 1446 | const softFactor = Math.max(0.1, 1 - self.brush_softness); 1447 | const innerRadius = brush_size * softFactor; 1448 | const outerRadius = brush_size; 1449 | 1450 | const gradient = drawingContext.createRadialGradient(x, y, innerRadius, x, y, outerRadius); 1451 | gradient.addColorStop(0, `rgba(${brushColorRgb.r}, ${brushColorRgb.g}, ${brushColorRgb.b}, ${currentBrushOpacity})`); 1452 | gradient.addColorStop(1, `rgba(${brushColorRgb.r}, ${brushColorRgb.g}, ${brushColorRgb.b}, ${currentBrushOpacity * softFactor})`); 1453 | 1454 | drawingContext.beginPath(); 1455 | drawingContext.arc(x, y, brush_size, 0, 2 * Math.PI); 1456 | drawingContext.fillStyle = gradient; 1457 | drawingContext.fill(); 1458 | 1459 | self.lastx = x; 1460 | self.lasty = y; 1461 | 1462 | this.drawing_mode = true; 1463 | this.brushVisible = true; 1464 | 1465 | this.draw_move(this, event); 1466 | 1467 | this.currentAction = { 1468 | actionType: 'drawDot', 1469 | points: [ { x, y } ], 1470 | color: gradient, 1471 | brushSize: brush_size, 1472 | canvasType: this.isGreyscale === 'rgb' ? 'imgCanvas' : 'maskCanvas', 1473 | previousImageData: this.previousImageData 1474 | 1475 | }; 1476 | } 1477 | 1478 | async saveRGBImage() { 1479 | const imgFilename = "clipspace-rgba-" + performance.now() + ".png"; 1480 | try { 1481 | const self = this; 1482 | 1483 | const offscreen = new OffscreenCanvas(this.originalWidth, this.originalHeight); 1484 | const offscreenCtx = offscreen.getContext('2d', { willReadFrequently: true }); 1485 | 1486 | offscreenCtx.drawImage(this.imgCanvas, 0, 0, this.imgCanvas.width, this.imgCanvas.height, 0, 0, this.originalWidth, this.originalHeight); 1487 | 1488 | if (typeof this.originalWidth !== 'number' || typeof this.originalHeight !== 'number') { 1489 | throw new Error("Original image dimensions are not properly set."); 1490 | } 1491 | 1492 | // Image Processing and Uploading Promises 1493 | let imageProcessingPromise = new Promise((resolve, reject) => { 1494 | offscreen.convertToBlob({type: 'image/png'}).then(blob => { 1495 | 1496 | const imgFile = new File([blob], imgFilename, { type: "image/png" }); 1497 | const original_ref = URL.createObjectURL(blob); 1498 | 1499 | // Image processing for RGB 1500 | const imgFormData = new FormData(); 1501 | 1502 | const imgForm = { 1503 | "filename": imgFilename, 1504 | "subfolder": "clipspace", 1505 | "type": "input", 1506 | }; 1507 | 1508 | if(ComfyApp.clipspace.images) 1509 | ComfyApp.clipspace.images[0] = imgForm; 1510 | 1511 | if(ComfyApp.clipspace.widgets) { 1512 | const index1 = ComfyApp.clipspace.widgets.findIndex(obj => obj.name === 'image'); 1513 | 1514 | if(index1 >= 0) 1515 | ComfyApp.clipspace.widgets[index1].value = imgForm; 1516 | } 1517 | 1518 | imgFormData.append('image', imgFile, imgFilename); 1519 | imgFormData.append('type', "input"); 1520 | imgFormData.append('subfolder', "clipspace"); 1521 | 1522 | // Create a new image element and set its src attribute to the object URL of the Blob 1523 | const image = new Image(); 1524 | image.src = original_ref; 1525 | image.onload = async () => { 1526 | try { 1527 | await self.processAndUploadImage(imgFile, imgForm, imgFormData); 1528 | resolve({ original_ref, imageProcessingPromise, imgFilename }); 1529 | } catch (error) { 1530 | console.error('Error converting RGB image to blob:', error); 1531 | reject(error); 1532 | } finally { 1533 | 
URL.revokeObjectURL(original_ref); // Revoke the blob URL here 1534 | } 1535 | }; 1536 | 1537 | image.onerror = (error) => { 1538 | console.error('RGB Image OnLoad Error:', error); 1539 | reject(error); 1540 | URL.revokeObjectURL(original_ref); // Revoke the blob URL here as well 1541 | }; 1542 | 1543 | }).catch(error => { 1544 | console.error('Error converting RGB offscreen canvas to blob:', error); 1545 | reject(error); 1546 | }); 1547 | }); 1548 | 1549 | return imageProcessingPromise; 1550 | 1551 | } catch (error) { 1552 | console.error('Error in saving RGB image:', error); 1553 | throw error; 1554 | } 1555 | } 1556 | 1557 | 1558 | async save() { 1559 | try { 1560 | const { imgFilename } = await this.saveRGBImage(); 1561 | ComfyApp.onClipspaceEditorSave(); 1562 | await this.save_mask(imgFilename); 1563 | } catch (error) { 1564 | console.error('Error in save function:', error); 1565 | } 1566 | } 1567 | 1568 | async processAndUploadImage(image, item, formData, isMask = false) { 1569 | try { 1570 | formData.append('image', image, item.filename); 1571 | 1572 | this.saveButton.innerText = "Saving..."; 1573 | this.saveButton.disabled = true; 1574 | 1575 | // Upload image and return the promise 1576 | return await this.uploadMask(item, formData, isMask); // Passing isMask to indicate whether this is a mask 1577 | } catch (error) { 1578 | console.error("Error in processAndUploadImage function:", error); 1579 | 1580 | this.saveButton.innerText = "Save"; // Reset button text 1581 | this.saveButton.disabled = false; // Enable the button again 1582 | 1583 | throw error; // Propagate the error up the call stack 1584 | } 1585 | } 1586 | 1587 | async uploadMask(filepath, formData, isMask) { 1588 | const uploadUrl = isMask ? '/upload/mask' : '/upload/image'; 1589 | 1590 | try { 1591 | const response = await api.fetchApi(uploadUrl, { 1592 | method: 'POST', 1593 | body: formData 1594 | }); 1595 | 1596 | if (response && response.ok) { 1597 | ComfyApp.clipspace.imgs[ComfyApp.clipspace['selectedIndex']] = new Image(); 1598 | ComfyApp.clipspace.imgs[ComfyApp.clipspace['selectedIndex']].src = api.apiURL("/view?" 
+ new URLSearchParams(filepath).toString() + app.getPreviewFormatParam()); 1599 | 1600 | if (ComfyApp.clipspace.images) { 1601 | ComfyApp.clipspace.images[ComfyApp.clipspace['selectedIndex']] = filepath; 1602 | } else { 1603 | } 1604 | 1605 | ClipspaceDialog.invalidatePreview(); 1606 | } else { 1607 | throw new Error('Unexpected response from the server'); 1608 | } 1609 | } catch (error) { 1610 | console.error('Error:', error); 1611 | 1612 | // Cleanup code 1613 | formData = null; // Allow formData to be garbage collected 1614 | if (ComfyApp.clipspace.imgs && ComfyApp.clipspace.imgs[ComfyApp.clipspace['selectedIndex']]) { 1615 | ComfyApp.clipspace.imgs[ComfyApp.clipspace['selectedIndex']].src = ''; // Release the reference to the image URL 1616 | ComfyApp.clipspace.imgs[ComfyApp.clipspace['selectedIndex']] = null; // Allow the Image object to be garbage collected 1617 | } 1618 | 1619 | throw error; // Re-throw the error to propagate it up the call stack 1620 | } 1621 | } 1622 | 1623 | async save_mask(imgFilenameFromRGB) { 1624 | const offscreen = new OffscreenCanvas(this.originalWidth, this.originalHeight); 1625 | const offscreenCtx = offscreen.getContext('2d', { willReadFrequently: true }); 1626 | 1627 | const original_ref = { filename: imgFilenameFromRGB }; 1628 | 1629 | offscreenCtx.drawImage(this.maskCanvas, 0, 0, this.maskCanvas.width, this.maskCanvas.height, 0, 0, offscreen.width, offscreen.height); 1630 | 1631 | // paste mask data into alpha channel 1632 | const imageData = offscreenCtx.getImageData(0, 0, offscreen.width, offscreen.height); 1633 | 1634 | // Create a worker instance 1635 | const worker = new Worker("../../extensions/ComfyI2I/imageProcessorWorker.js"); 1636 | 1637 | // Sending imageData to worker 1638 | worker.postMessage({ imageData }); 1639 | 1640 | worker.onmessage = async (e) => { 1641 | // Receive the processed data back from the worker 1642 | const processedData = e.data.processedData; 1643 | 1644 | // Put the processed data back onto the offscreen canvas 1645 | offscreenCtx.putImageData(processedData, 0, 0); 1646 | 1647 | const formData = new FormData(); 1648 | const filename = "clipspace-mask-" + performance.now() + ".png"; 1649 | 1650 | const item = { 1651 | "filename": filename, 1652 | "subfolder": "clipspace", 1653 | "type": "input", 1654 | }; 1655 | 1656 | if(ComfyApp.clipspace.images) 1657 | ComfyApp.clipspace.images[0] = item; 1658 | 1659 | if(ComfyApp.clipspace.widgets) { 1660 | const index = ComfyApp.clipspace.widgets.findIndex(obj => obj.name === 'image'); 1661 | 1662 | if(index >= 0) 1663 | ComfyApp.clipspace.widgets[index].value = item; 1664 | } 1665 | 1666 | const blob = await offscreen.convertToBlob({ type: 'image/png' }); 1667 | 1668 | // You can manually set these values or pass them as parameters to the function 1669 | let original_subfolder = "clipspace"; 1670 | if(original_subfolder) 1671 | original_ref.subfolder = original_subfolder; 1672 | 1673 | let original_type = "input"; 1674 | if(original_type) 1675 | original_ref.type = original_type; 1676 | 1677 | formData.append('image', blob, filename); 1678 | formData.append('original_ref', JSON.stringify(original_ref)); 1679 | formData.append('type', "input"); 1680 | formData.append('subfolder', "clipspace"); 1681 | 1682 | this.saveButton.innerText = "Saving..."; 1683 | this.saveButton.disabled = true; 1684 | 1685 | try { 1686 | await this.uploadMask(item, formData, true); // Ensure this is awaited 1687 | ComfyApp.onClipspaceEditorSave(); 1688 | this.close(); 1689 | } catch (error) { 1690 | 
console.error('Error in save_mask function:', error); 1691 | this.saveButton.innerText = "Save"; // Revert the button text 1692 | this.saveButton.disabled = false; // Re-enable the save button 1693 | } 1694 | 1695 | // Terminate the worker after use 1696 | worker.terminate(); 1697 | }; 1698 | 1699 | worker.onerror = (error) => { 1700 | console.error('Error in worker:', error); 1701 | }; 1702 | } 1703 | } 1704 | 1705 | 1706 | app.registerExtension({ 1707 | name: "Comfy.ComfyI2I.ComfyShop", 1708 | init(app) { 1709 | const callback = 1710 | function () { 1711 | let dlg = ComfyShopDialog.getInstance(); 1712 | dlg.show(); 1713 | }; 1714 | 1715 | const context_predicate = () => ComfyApp.clipspace && ComfyApp.clipspace.imgs && ComfyApp.clipspace.imgs.length > 0 1716 | ClipspaceDialog.registerButton("ComfyShop", context_predicate, callback); 1717 | }, 1718 | 1719 | async beforeRegisterNodeDef(nodeType, nodeData, app) { 1720 | if (nodeData.output.includes("MASK") && nodeData.output.includes("IMAGE")) { 1721 | addMenuHandler(nodeType, function (_, options) { 1722 | options.unshift({ 1723 | content: "Open in ComfyShop", 1724 | callback: () => { 1725 | ComfyApp.copyToClipspace(this); 1726 | ComfyApp.clipspace_return_node = this; 1727 | 1728 | let dlg = ComfyShopDialog.getInstance(); 1729 | dlg.show(); 1730 | }, 1731 | }); 1732 | }); 1733 | } 1734 | } 1735 | }); 1736 | -------------------------------------------------------------------------------- /js/imageProcessorWorker.js: -------------------------------------------------------------------------------- 1 | self.onmessage = function(e) { 2 | const { imageData } = e.data; 3 | 4 | // Your image processing code here, for example: 5 | for (let i = 0; i < imageData.data.length; i += 4) { 6 | // Invert the alpha channel 7 | imageData.data[i+3] = 255 - imageData.data[i+3]; 8 | 9 | // Setting the color channels to 0 to keep it a grayscale image 10 | imageData.data[i] = 0; 11 | imageData.data[i+1] = 0; 12 | imageData.data[i+2] = 0; 13 | } 14 | 15 | // Return the processed data back to the main thread 16 | self.postMessage({ processedData: imageData }); 17 | }; -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | torch 3 | torchvision 4 | tensorflow 5 | scikit-image 6 | scipy 7 | opencv-python 8 | -------------------------------------------------------------------------------- /workflows/Color Xfer Workflow.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 397, 3 | "last_link_id": 59335, 4 | "nodes": [ 5 | { 6 | "id": 87, 7 | "type": "LoadImage", 8 | "pos": [ 9 | 164, 10 | 1053 11 | ], 12 | "size": [ 13 | 794.0693724097752, 14 | 916.5139546278999 15 | ], 16 | "flags": {}, 17 | "order": 0, 18 | "mode": 0, 19 | "inputs": [], 20 | "outputs": [ 21 | { 22 | "name": "IMAGE", 23 | "type": "IMAGE", 24 | "links": [ 25 | 59268 26 | ], 27 | "shape": 3, 28 | "slot_index": 0 29 | }, 30 | { 31 | "name": "MASK", 32 | "type": "MASK", 33 | "links": [], 34 | "shape": 3, 35 | "slot_index": 1 36 | } 37 | ], 38 | "properties": { 39 | "Node name for S&R": "LoadImage" 40 | }, 41 | "widgets_values": [ 42 | "14387-3274465582-Photo of fusion of a huge savage rabbit monster with an alien in the space, style of laurie greasley, studio ghibli, akira toriy.png", 43 | "image" 44 | ] 45 | }, 46 | { 47 | "id": 338, 48 | "type": "LoadImage", 49 | "pos": [ 50 | 171, 51 | 58 52 | ], 53 | 
"size": [ 54 | 782.6728711721954, 55 | 899.2087522146759 56 | ], 57 | "flags": {}, 58 | "order": 1, 59 | "mode": 0, 60 | "outputs": [ 61 | { 62 | "name": "IMAGE", 63 | "type": "IMAGE", 64 | "links": [ 65 | 59266, 66 | 59306 67 | ], 68 | "shape": 3, 69 | "slot_index": 0 70 | }, 71 | { 72 | "name": "MASK", 73 | "type": "MASK", 74 | "links": [], 75 | "shape": 3, 76 | "slot_index": 1 77 | } 78 | ], 79 | "properties": { 80 | "Node name for S&R": "LoadImage" 81 | }, 82 | "widgets_values": [ 83 | "clipspace/clipspace-mask-6355470.899999976.png [input]", 84 | "image" 85 | ] 86 | }, 87 | { 88 | "id": 374, 89 | "type": "PreviewImage", 90 | "pos": [ 91 | 1555, 92 | 1104 93 | ], 94 | "size": { 95 | "0": 315.37579345703125, 96 | "1": 317.5821228027344 97 | }, 98 | "flags": {}, 99 | "order": 3, 100 | "mode": 0, 101 | "inputs": [ 102 | { 103 | "name": "images", 104 | "type": "IMAGE", 105 | "link": 59308 106 | } 107 | ], 108 | "properties": { 109 | "Node name for S&R": "PreviewImage" 110 | } 111 | }, 112 | { 113 | "id": 387, 114 | "type": "SaveImage", 115 | "pos": [ 116 | 2242, 117 | 278 118 | ], 119 | "size": [ 120 | 1130.873684716073, 121 | 1392.548730847608 122 | ], 123 | "flags": { 124 | "collapsed": false 125 | }, 126 | "order": 5, 127 | "mode": 0, 128 | "inputs": [ 129 | { 130 | "name": "images", 131 | "type": "IMAGE", 132 | "link": 59298 133 | } 134 | ], 135 | "properties": {}, 136 | "widgets_values": [ 137 | "ComfyUI" 138 | ] 139 | }, 140 | { 141 | "id": 391, 142 | "type": "Mask Ops", 143 | "pos": [ 144 | 1025, 145 | 384 146 | ], 147 | "size": { 148 | "0": 315, 149 | "1": 362 150 | }, 151 | "flags": {}, 152 | "order": 2, 153 | "mode": 0, 154 | "inputs": [ 155 | { 156 | "name": "image", 157 | "type": "IMAGE", 158 | "link": 59306 159 | }, 160 | { 161 | "name": "mask", 162 | "type": "MASK", 163 | "link": null 164 | } 165 | ], 166 | "outputs": [ 167 | { 168 | "name": "mask_image", 169 | "type": "IMAGE", 170 | "links": [ 171 | 59308 172 | ], 173 | "shape": 3, 174 | "slot_index": 0 175 | }, 176 | { 177 | "name": "mask", 178 | "type": "MASK", 179 | "links": [ 180 | 59321 181 | ], 182 | "shape": 3, 183 | "slot_index": 1 184 | }, 185 | { 186 | "name": "mask mapping", 187 | "type": "MASK_MAPPING", 188 | "links": null, 189 | "shape": 3 190 | } 191 | ], 192 | "properties": { 193 | "Node name for S&R": "Mask Ops" 194 | }, 195 | "widgets_values": [ 196 | "", 197 | 0, 198 | 30, 199 | 0, 200 | 1, 201 | 0, 202 | 127.5, 203 | 255, 204 | "green", 205 | 0, 206 | 0, 207 | 5 208 | ] 209 | }, 210 | { 211 | "id": 371, 212 | "type": "Color Transfer", 213 | "pos": [ 214 | 1842, 215 | 403 216 | ], 217 | "size": { 218 | "0": 315, 219 | "1": 242 220 | }, 221 | "flags": {}, 222 | "order": 4, 223 | "mode": 0, 224 | "inputs": [ 225 | { 226 | "name": "source_image", 227 | "type": "IMAGE", 228 | "link": 59266 229 | }, 230 | { 231 | "name": "target_image", 232 | "type": "IMAGE", 233 | "link": 59268 234 | }, 235 | { 236 | "name": "mask", 237 | "type": "MASK", 238 | "link": 59321 239 | } 240 | ], 241 | "outputs": [ 242 | { 243 | "name": "image", 244 | "type": "IMAGE", 245 | "links": [ 246 | 59298 247 | ], 248 | "shape": 3, 249 | "slot_index": 0 250 | } 251 | ], 252 | "properties": { 253 | "Node name for S&R": "Color Transfer" 254 | }, 255 | "widgets_values": [ 256 | 13, 257 | 10, 258 | 2, 259 | 1.3, 260 | 1, 261 | 1, 262 | 1 263 | ] 264 | } 265 | ], 266 | "links": [ 267 | [ 268 | 59266, 269 | 338, 270 | 0, 271 | 371, 272 | 0, 273 | "IMAGE" 274 | ], 275 | [ 276 | 59268, 277 | 87, 278 | 0, 279 | 371, 280 | 1, 281 | "IMAGE" 282 | ], 283 
| [ 284 | 59298, 285 | 371, 286 | 0, 287 | 387, 288 | 0, 289 | "IMAGE" 290 | ], 291 | [ 292 | 59306, 293 | 338, 294 | 0, 295 | 391, 296 | 0, 297 | "IMAGE" 298 | ], 299 | [ 300 | 59308, 301 | 391, 302 | 0, 303 | 374, 304 | 0, 305 | "IMAGE" 306 | ], 307 | [ 308 | 59321, 309 | 391, 310 | 1, 311 | 371, 312 | 2, 313 | "MASK" 314 | ] 315 | ], 316 | "groups": [], 317 | "config": {}, 318 | "extra": {}, 319 | "version": 0.4 320 | } -------------------------------------------------------------------------------- /workflows/I2I workflow.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 343, 3 | "last_link_id": 59216, 4 | "nodes": [ 5 | { 6 | "id": 108, 7 | "type": "CLIPTextEncodeSDXLRefiner", 8 | "pos": [ 9 | -3270, 10 | 1300 11 | ], 12 | "size": { 13 | "0": 344.3999938964844, 14 | "1": 126 15 | }, 16 | "flags": {}, 17 | "order": 12, 18 | "mode": 0, 19 | "inputs": [ 20 | { 21 | "name": "clip", 22 | "type": "CLIP", 23 | "link": 59044 24 | }, 25 | { 26 | "name": "text", 27 | "type": "STRING", 28 | "link": 59046, 29 | "widget": { 30 | "name": "text", 31 | "config": [ 32 | "STRING", 33 | { 34 | "multiline": true 35 | } 36 | ] 37 | } 38 | } 39 | ], 40 | "outputs": [ 41 | { 42 | "name": "CONDITIONING", 43 | "type": "CONDITIONING", 44 | "links": [ 45 | 59047 46 | ], 47 | "shape": 3, 48 | "slot_index": 0 49 | } 50 | ], 51 | "title": "CLIPTextEncodeSDXLRefiner Negative Prompt", 52 | "properties": { 53 | "Node name for S&R": "CLIPTextEncodeSDXLRefiner" 54 | }, 55 | "widgets_values": [ 56 | 2.5, 57 | 1024, 58 | 1024, 59 | "terrible quality, bad anatomy, crossed eyes" 60 | ] 61 | }, 62 | { 63 | "id": 107, 64 | "type": "CLIPTextEncodeSDXLRefiner", 65 | "pos": [ 66 | -3270, 67 | 1130 68 | ], 69 | "size": { 70 | "0": 344.3999938964844, 71 | "1": 126 72 | }, 73 | "flags": {}, 74 | "order": 14, 75 | "mode": 0, 76 | "inputs": [ 77 | { 78 | "name": "clip", 79 | "type": "CLIP", 80 | "link": 59041 81 | }, 82 | { 83 | "name": "text", 84 | "type": "STRING", 85 | "link": 59042, 86 | "widget": { 87 | "name": "text", 88 | "config": [ 89 | "STRING", 90 | { 91 | "multiline": true 92 | } 93 | ] 94 | } 95 | } 96 | ], 97 | "outputs": [ 98 | { 99 | "name": "CONDITIONING", 100 | "type": "CONDITIONING", 101 | "links": [ 102 | 59043 103 | ], 104 | "shape": 3, 105 | "slot_index": 0 106 | } 107 | ], 108 | "title": "CLIPTextEncodeSDXLRefiner Positive Prompt", 109 | "properties": { 110 | "Node name for S&R": "CLIPTextEncodeSDXLRefiner" 111 | }, 112 | "widgets_values": [ 113 | 6, 114 | 1024, 115 | 1024, 116 | "a highly detailed 1700's painting of a colonial person, highly detailed face, highly detailed eyes, best quality, masterpiece" 117 | ] 118 | }, 119 | { 120 | "id": 44, 121 | "type": "VAELoader", 122 | "pos": [ 123 | -3270, 124 | 1470 125 | ], 126 | "size": { 127 | "0": 346.82275390625, 128 | "1": 58 129 | }, 130 | "flags": {}, 131 | "order": 0, 132 | "mode": 0, 133 | "outputs": [ 134 | { 135 | "name": "VAE", 136 | "type": "VAE", 137 | "links": [ 138 | 59033, 139 | 59034 140 | ], 141 | "shape": 3, 142 | "slot_index": 0 143 | } 144 | ], 145 | "properties": { 146 | "Node name for S&R": "VAELoader" 147 | }, 148 | "widgets_values": [ 149 | "sdxl_vae.safetensors" 150 | ] 151 | }, 152 | { 153 | "id": 18, 154 | "type": "CheckpointLoaderSimple", 155 | "pos": [ 156 | -3690, 157 | 930 158 | ], 159 | "size": { 160 | "0": 317.6700134277344, 161 | "1": 98 162 | }, 163 | "flags": { 164 | "collapsed": false 165 | }, 166 | "order": 1, 167 | "mode": 0, 168 | "outputs": [ 169 | { 170 | 
"name": "MODEL", 171 | "type": "MODEL", 172 | "links": [ 173 | 59030 174 | ], 175 | "slot_index": 0 176 | }, 177 | { 178 | "name": "CLIP", 179 | "type": "CLIP", 180 | "links": [ 181 | 59041, 182 | 59044 183 | ], 184 | "slot_index": 1 185 | }, 186 | { 187 | "name": "VAE", 188 | "type": "VAE", 189 | "links": [], 190 | "slot_index": 2 191 | } 192 | ], 193 | "title": "Load Refiner", 194 | "properties": { 195 | "Node name for S&R": "CheckpointLoaderSimple" 196 | }, 197 | "widgets_values": [ 198 | "sd_xl_refiner_1.0.safetensors" 199 | ], 200 | "color": "#322", 201 | "bgcolor": "#533" 202 | }, 203 | { 204 | "id": 4, 205 | "type": "CheckpointLoaderSimple", 206 | "pos": [ 207 | -3690, 208 | 790 209 | ], 210 | "size": { 211 | "0": 314.88055419921875, 212 | "1": 98 213 | }, 214 | "flags": {}, 215 | "order": 2, 216 | "mode": 0, 217 | "outputs": [ 218 | { 219 | "name": "MODEL", 220 | "type": "MODEL", 221 | "links": [ 222 | 59027 223 | ], 224 | "slot_index": 0 225 | }, 226 | { 227 | "name": "CLIP", 228 | "type": "CLIP", 229 | "links": [ 230 | 59035, 231 | 59038 232 | ], 233 | "slot_index": 1 234 | }, 235 | { 236 | "name": "VAE", 237 | "type": "VAE", 238 | "links": [], 239 | "slot_index": 2 240 | } 241 | ], 242 | "properties": { 243 | "Node name for S&R": "CheckpointLoaderSimple" 244 | }, 245 | "widgets_values": [ 246 | "sd_xl_base_1.0.safetensors" 247 | ], 248 | "color": "#223", 249 | "bgcolor": "#335" 250 | }, 251 | { 252 | "id": 34, 253 | "type": "PrimitiveNode", 254 | "pos": [ 255 | -3560, 256 | 1430 257 | ], 258 | "size": { 259 | "0": 210, 260 | "1": 82 261 | }, 262 | "flags": {}, 263 | "order": 3, 264 | "mode": 0, 265 | "outputs": [ 266 | { 267 | "name": "FLOAT", 268 | "type": "FLOAT", 269 | "links": [ 270 | 41410, 271 | 41419 272 | ], 273 | "slot_index": 0, 274 | "widget": { 275 | "name": "cfg", 276 | "config": [ 277 | "FLOAT", 278 | { 279 | "default": 8, 280 | "min": 0, 281 | "max": 100 282 | } 283 | ] 284 | } 285 | } 286 | ], 287 | "title": "CFG", 288 | "properties": {}, 289 | "widgets_values": [ 290 | 7, 291 | "fixed" 292 | ] 293 | }, 294 | { 295 | "id": 72, 296 | "type": "CLIPTextEncode", 297 | "pos": [ 298 | -3280, 299 | 1030 300 | ], 301 | "size": { 302 | "0": 344.9279479980469, 303 | "1": 54 304 | }, 305 | "flags": { 306 | "collapsed": false 307 | }, 308 | "order": 11, 309 | "mode": 0, 310 | "inputs": [ 311 | { 312 | "name": "clip", 313 | "type": "CLIP", 314 | "link": 59038 315 | }, 316 | { 317 | "name": "text", 318 | "type": "STRING", 319 | "link": 59039, 320 | "widget": { 321 | "name": "text", 322 | "config": [ 323 | "STRING", 324 | { 325 | "multiline": true 326 | } 327 | ] 328 | } 329 | } 330 | ], 331 | "outputs": [ 332 | { 333 | "name": "CONDITIONING", 334 | "type": "CONDITIONING", 335 | "links": [ 336 | 59040 337 | ], 338 | "shape": 3, 339 | "slot_index": 0 340 | } 341 | ], 342 | "title": "CLIP Text Encode (Negative Prompt)", 343 | "properties": { 344 | "Node name for S&R": "CLIPTextEncode" 345 | }, 346 | "widgets_values": [ 347 | "terrible quality, bad anatomy, crossed eyes" 348 | ], 349 | "color": "#323", 350 | "bgcolor": "#535" 351 | }, 352 | { 353 | "id": 24, 354 | "type": "PrimitiveNode", 355 | "pos": [ 356 | -3800, 357 | 1430 358 | ], 359 | "size": { 360 | "0": 210, 361 | "1": 82 362 | }, 363 | "flags": {}, 364 | "order": 4, 365 | "mode": 0, 366 | "outputs": [ 367 | { 368 | "name": "INT", 369 | "type": "INT", 370 | "links": [ 371 | 41411, 372 | 41418 373 | ], 374 | "slot_index": 0, 375 | "widget": { 376 | "name": "noise_seed", 377 | "config": [ 378 | "INT", 379 | { 380 | 
"default": 0, 381 | "min": 0, 382 | "max": 18446744073709552000 383 | } 384 | ] 385 | } 386 | } 387 | ], 388 | "title": "Seed", 389 | "properties": {}, 390 | "widgets_values": [ 391 | 1053970276754110, 392 | "fixed" 393 | ] 394 | }, 395 | { 396 | "id": 87, 397 | "type": "LoadImage", 398 | "pos": [ 399 | -4210, 400 | 1810 401 | ], 402 | "size": { 403 | "0": 480.041015625, 404 | "1": 644.239501953125 405 | }, 406 | "flags": {}, 407 | "order": 6, 408 | "mode": 0, 409 | "inputs": [], 410 | "outputs": [ 411 | { 412 | "name": "IMAGE", 413 | "type": "IMAGE", 414 | "links": [ 415 | 59177, 416 | 59194, 417 | 59198, 418 | 59206 419 | ], 420 | "shape": 3, 421 | "slot_index": 0 422 | }, 423 | { 424 | "name": "MASK", 425 | "type": "MASK", 426 | "links": [ 427 | 59178 428 | ], 429 | "shape": 3, 430 | "slot_index": 1 431 | } 432 | ], 433 | "properties": { 434 | "Node name for S&R": "LoadImage" 435 | }, 436 | "widgets_values": [ 437 | "clipspace/clipspace-mask-3469647.899999976.png [input]", 438 | "image" 439 | ] 440 | }, 441 | { 442 | "id": 333, 443 | "type": "Inpaint Segments", 444 | "pos": [ 445 | -3200, 446 | 1750 447 | ], 448 | "size": { 449 | "0": 393, 450 | "1": 314 451 | }, 452 | "flags": {}, 453 | "order": 15, 454 | "mode": 0, 455 | "inputs": [ 456 | { 457 | "name": "image", 458 | "type": "IMAGE", 459 | "link": 59206 460 | }, 461 | { 462 | "name": "mask", 463 | "type": "IMAGE", 464 | "link": 59207 465 | }, 466 | { 467 | "name": "mask_mapping_optional", 468 | "type": "MASK_MAPPING", 469 | "link": 59184 470 | } 471 | ], 472 | "outputs": [ 473 | { 474 | "name": "cut image", 475 | "type": "IMAGE", 476 | "links": [ 477 | 59193, 478 | 59199 479 | ], 480 | "shape": 3, 481 | "slot_index": 0 482 | }, 483 | { 484 | "name": "cut mask", 485 | "type": "IMAGE", 486 | "links": [ 487 | 59192 488 | ], 489 | "shape": 3, 490 | "slot_index": 1 491 | }, 492 | { 493 | "name": "region", 494 | "type": "IMAGE", 495 | "links": [ 496 | 59190 497 | ], 498 | "shape": 3, 499 | "slot_index": 2 500 | } 501 | ], 502 | "properties": { 503 | "Node name for S&R": "Inpaint Segments" 504 | }, 505 | "widgets_values": [ 506 | 1024, 507 | 1024, 508 | "RGB", 509 | 3, 510 | "keep_ratio", 511 | 64, 512 | 64, 513 | 0, 514 | 0, 515 | "match_ratio" 516 | ] 517 | }, 518 | { 519 | "id": 332, 520 | "type": "Mask Ops", 521 | "pos": [ 522 | -3610, 523 | 2150 524 | ], 525 | "size": { 526 | "0": 315, 527 | "1": 318 528 | }, 529 | "flags": {}, 530 | "order": 10, 531 | "mode": 0, 532 | "inputs": [ 533 | { 534 | "name": "image", 535 | "type": "IMAGE", 536 | "link": 59177 537 | }, 538 | { 539 | "name": "mask", 540 | "type": "MASK", 541 | "link": 59178 542 | } 543 | ], 544 | "outputs": [ 545 | { 546 | "name": "mask", 547 | "type": "IMAGE", 548 | "links": [ 549 | 59207 550 | ], 551 | "shape": 3, 552 | "slot_index": 0 553 | }, 554 | { 555 | "name": "mask mapping", 556 | "type": "MASK_MAPPING", 557 | "links": [ 558 | 59184, 559 | 59191 560 | ], 561 | "shape": 3, 562 | "slot_index": 1 563 | } 564 | ], 565 | "properties": { 566 | "Node name for S&R": "Mask Ops" 567 | }, 568 | "widgets_values": [ 569 | "face", 570 | 0, 571 | 0, 572 | 0, 573 | 0, 574 | 145, 575 | 255, 576 | "red", 577 | 4, 578 | 0, 579 | 3.9444458007812497 580 | ] 581 | }, 582 | { 583 | "id": 110, 584 | "type": "PrimitiveNode", 585 | "pos": [ 586 | -4160, 587 | 1200 588 | ], 589 | "size": { 590 | "0": 317.7752685546875, 591 | "1": 132.7703399658203 592 | }, 593 | "flags": {}, 594 | "order": 7, 595 | "mode": 0, 596 | "outputs": [ 597 | { 598 | "name": "STRING", 599 | "type": "STRING", 600 | 
"links": [ 601 | 59039, 602 | 59046 603 | ], 604 | "slot_index": 0, 605 | "widget": { 606 | "name": "text_2", 607 | "config": [ 608 | "STRING", 609 | { 610 | "multiline": true 611 | } 612 | ] 613 | } 614 | } 615 | ], 616 | "title": "Negative Prompt", 617 | "properties": {}, 618 | "widgets_values": [ 619 | "terrible quality, bad anatomy, crossed eyes" 620 | ] 621 | }, 622 | { 623 | "id": 71, 624 | "type": "CLIPTextEncode", 625 | "pos": [ 626 | -3270, 627 | 940 628 | ], 629 | "size": { 630 | "0": 345.9279479980469, 631 | "1": 54 632 | }, 633 | "flags": { 634 | "collapsed": false 635 | }, 636 | "order": 13, 637 | "mode": 0, 638 | "inputs": [ 639 | { 640 | "name": "clip", 641 | "type": "CLIP", 642 | "link": 59035 643 | }, 644 | { 645 | "name": "text", 646 | "type": "STRING", 647 | "link": 59036, 648 | "widget": { 649 | "name": "text", 650 | "config": [ 651 | "STRING", 652 | { 653 | "multiline": true 654 | } 655 | ] 656 | } 657 | } 658 | ], 659 | "outputs": [ 660 | { 661 | "name": "CONDITIONING", 662 | "type": "CONDITIONING", 663 | "links": [ 664 | 59037 665 | ], 666 | "shape": 3, 667 | "slot_index": 0 668 | } 669 | ], 670 | "title": "CLIP Text Encode (Positive Prompt)", 671 | "properties": { 672 | "Node name for S&R": "CLIPTextEncode" 673 | }, 674 | "widgets_values": [ 675 | "a highly detailed 1700's painting of a colonial person, highly detailed face, highly detailed eyes, best quality, masterpiece" 676 | ], 677 | "color": "#232", 678 | "bgcolor": "#353" 679 | }, 680 | { 681 | "id": 74, 682 | "type": "VAEDecode", 683 | "pos": [ 684 | -1580, 685 | 2060 686 | ], 687 | "size": { 688 | "0": 210, 689 | "1": 46 690 | }, 691 | "flags": {}, 692 | "order": 19, 693 | "mode": 0, 694 | "inputs": [ 695 | { 696 | "name": "samples", 697 | "type": "LATENT", 698 | "link": 41412 699 | }, 700 | { 701 | "name": "vae", 702 | "type": "VAE", 703 | "link": 59034 704 | } 705 | ], 706 | "outputs": [ 707 | { 708 | "name": "IMAGE", 709 | "type": "IMAGE", 710 | "links": [ 711 | 59195 712 | ], 713 | "shape": 3, 714 | "slot_index": 0 715 | } 716 | ], 717 | "properties": { 718 | "Node name for S&R": "VAEDecode" 719 | } 720 | }, 721 | { 722 | "id": 339, 723 | "type": "Color Transfer", 724 | "pos": [ 725 | -300, 726 | 1870 727 | ], 728 | "size": { 729 | "0": 315, 730 | "1": 78 731 | }, 732 | "flags": {}, 733 | "order": 22, 734 | "mode": 0, 735 | "inputs": [ 736 | { 737 | "name": "source_image", 738 | "type": "IMAGE", 739 | "link": 59209 740 | }, 741 | { 742 | "name": "target_image", 743 | "type": "IMAGE", 744 | "link": 59210 745 | } 746 | ], 747 | "outputs": [ 748 | { 749 | "name": "i", 750 | "type": "IMAGE", 751 | "links": [ 752 | 59211 753 | ], 754 | "shape": 3, 755 | "slot_index": 0 756 | } 757 | ], 758 | "properties": { 759 | "Node name for S&R": "Color Transfer" 760 | }, 761 | "widgets_values": [ 762 | 0.5 763 | ] 764 | }, 765 | { 766 | "id": 340, 767 | "type": "SaveImage", 768 | "pos": [ 769 | 230, 770 | 1870 771 | ], 772 | "size": [ 773 | 881.9852305151353, 774 | 1089.0461233630367 775 | ], 776 | "flags": { 777 | "collapsed": false 778 | }, 779 | "order": 23, 780 | "mode": 0, 781 | "inputs": [ 782 | { 783 | "name": "images", 784 | "type": "IMAGE", 785 | "link": 59211 786 | } 787 | ], 788 | "properties": {}, 789 | "widgets_values": [ 790 | "ComfyUI" 791 | ] 792 | }, 793 | { 794 | "id": 338, 795 | "type": "LoadImage", 796 | "pos": [ 797 | -1480, 798 | 1280 799 | ], 800 | "size": [ 801 | 653.5256699865715, 802 | 623.6300641461175 803 | ], 804 | "flags": {}, 805 | "order": 5, 806 | "mode": 0, 807 | "outputs": [ 808 | { 809 
| "name": "IMAGE", 810 | "type": "IMAGE", 811 | "links": [ 812 | 59209 813 | ], 814 | "shape": 3, 815 | "slot_index": 0 816 | }, 817 | { 818 | "name": "MASK", 819 | "type": "MASK", 820 | "links": null, 821 | "shape": 3 822 | } 823 | ], 824 | "properties": { 825 | "Node name for S&R": "LoadImage" 826 | }, 827 | "widgets_values": [ 828 | "14387-3274465582-Photo of fusion of a huge savage rabbit monster with an alien in the space, style of laurie greasley, studio ghibli, akira toriy.png", 829 | "image" 830 | ] 831 | }, 832 | { 833 | "id": 109, 834 | "type": "PrimitiveNode", 835 | "pos": [ 836 | -4170, 837 | 1000 838 | ], 839 | "size": { 840 | "0": 319.213134765625, 841 | "1": 126.30355072021484 842 | }, 843 | "flags": {}, 844 | "order": 8, 845 | "mode": 0, 846 | "outputs": [ 847 | { 848 | "name": "STRING", 849 | "type": "STRING", 850 | "links": [ 851 | 59036, 852 | 59042 853 | ], 854 | "slot_index": 0, 855 | "widget": { 856 | "name": "text", 857 | "config": [ 858 | "STRING", 859 | { 860 | "multiline": true 861 | } 862 | ] 863 | } 864 | } 865 | ], 866 | "title": "Positive Prompt", 867 | "properties": {}, 868 | "widgets_values": [ 869 | "a highly detailed 1700's painting of a colonial person, highly detailed face, highly detailed eyes, best quality, masterpiece" 870 | ] 871 | }, 872 | { 873 | "id": 335, 874 | "type": "PreviewImage", 875 | "pos": [ 876 | -4780, 877 | 1830 878 | ], 879 | "size": { 880 | "0": 533.1870727539062, 881 | "1": 548.014404296875 882 | }, 883 | "flags": {}, 884 | "order": 9, 885 | "mode": 0, 886 | "inputs": [ 887 | { 888 | "name": "images", 889 | "type": "IMAGE", 890 | "link": 59198 891 | } 892 | ], 893 | "properties": { 894 | "Node name for S&R": "PreviewImage" 895 | } 896 | }, 897 | { 898 | "id": 105, 899 | "type": "VAEEncode", 900 | "pos": [ 901 | -2714, 902 | 1676 903 | ], 904 | "size": { 905 | "0": 210, 906 | "1": 46 907 | }, 908 | "flags": {}, 909 | "order": 16, 910 | "mode": 0, 911 | "inputs": [ 912 | { 913 | "name": "pixels", 914 | "type": "IMAGE", 915 | "link": 59199 916 | }, 917 | { 918 | "name": "vae", 919 | "type": "VAE", 920 | "link": 59033 921 | } 922 | ], 923 | "outputs": [ 924 | { 925 | "name": "LATENT", 926 | "type": "LATENT", 927 | "links": [ 928 | 59208 929 | ], 930 | "shape": 3, 931 | "slot_index": 0 932 | } 933 | ], 934 | "properties": { 935 | "Node name for S&R": "VAEEncode" 936 | } 937 | }, 938 | { 939 | "id": 130, 940 | "type": "KSampler", 941 | "pos": [ 942 | -2392, 943 | 1160 944 | ], 945 | "size": { 946 | "0": 315, 947 | "1": 262 948 | }, 949 | "flags": {}, 950 | "order": 17, 951 | "mode": 0, 952 | "inputs": [ 953 | { 954 | "name": "model", 955 | "type": "MODEL", 956 | "link": 59027 957 | }, 958 | { 959 | "name": "positive", 960 | "type": "CONDITIONING", 961 | "link": 59037 962 | }, 963 | { 964 | "name": "negative", 965 | "type": "CONDITIONING", 966 | "link": 59040 967 | }, 968 | { 969 | "name": "latent_image", 970 | "type": "LATENT", 971 | "link": 59208 972 | }, 973 | { 974 | "name": "seed", 975 | "type": "INT", 976 | "link": 41418, 977 | "widget": { 978 | "name": "seed", 979 | "config": [ 980 | "INT", 981 | { 982 | "default": 0, 983 | "min": 0, 984 | "max": 18446744073709552000 985 | } 986 | ] 987 | } 988 | }, 989 | { 990 | "name": "cfg", 991 | "type": "FLOAT", 992 | "link": 41419, 993 | "widget": { 994 | "name": "cfg", 995 | "config": [ 996 | "FLOAT", 997 | { 998 | "default": 8, 999 | "min": 0, 1000 | "max": 100 1001 | } 1002 | ] 1003 | } 1004 | } 1005 | ], 1006 | "outputs": [ 1007 | { 1008 | "name": "LATENT", 1009 | "type": "LATENT", 1010 | 
"links": [ 1011 | 41406 1012 | ], 1013 | "shape": 3, 1014 | "slot_index": 0 1015 | } 1016 | ], 1017 | "title": "Checkpoint KSampler", 1018 | "properties": { 1019 | "Node name for S&R": "KSampler" 1020 | }, 1021 | "widgets_values": [ 1022 | 1053970276754110, 1023 | "fixed", 1024 | 7, 1025 | 7, 1026 | "dpmpp_sde_gpu", 1027 | "simple", 1028 | 0.55 1029 | ], 1030 | "color": "#223", 1031 | "bgcolor": "#335" 1032 | }, 1033 | { 1034 | "id": 111, 1035 | "type": "KSamplerAdvanced", 1036 | "pos": [ 1037 | -2010, 1038 | 1463 1039 | ], 1040 | "size": { 1041 | "0": 315, 1042 | "1": 334 1043 | }, 1044 | "flags": {}, 1045 | "order": 18, 1046 | "mode": 0, 1047 | "inputs": [ 1048 | { 1049 | "name": "model", 1050 | "type": "MODEL", 1051 | "link": 59030 1052 | }, 1053 | { 1054 | "name": "positive", 1055 | "type": "CONDITIONING", 1056 | "link": 59043 1057 | }, 1058 | { 1059 | "name": "negative", 1060 | "type": "CONDITIONING", 1061 | "link": 59047 1062 | }, 1063 | { 1064 | "name": "latent_image", 1065 | "type": "LATENT", 1066 | "link": 41406 1067 | }, 1068 | { 1069 | "name": "cfg", 1070 | "type": "FLOAT", 1071 | "link": 41410, 1072 | "widget": { 1073 | "name": "cfg", 1074 | "config": [ 1075 | "FLOAT", 1076 | { 1077 | "default": 8, 1078 | "min": 0, 1079 | "max": 100 1080 | } 1081 | ] 1082 | } 1083 | }, 1084 | { 1085 | "name": "noise_seed", 1086 | "type": "INT", 1087 | "link": 41411, 1088 | "widget": { 1089 | "name": "noise_seed", 1090 | "config": [ 1091 | "INT", 1092 | { 1093 | "default": 0, 1094 | "min": 0, 1095 | "max": 18446744073709552000 1096 | } 1097 | ] 1098 | } 1099 | } 1100 | ], 1101 | "outputs": [ 1102 | { 1103 | "name": "LATENT", 1104 | "type": "LATENT", 1105 | "links": [ 1106 | 41412 1107 | ], 1108 | "shape": 3, 1109 | "slot_index": 0 1110 | } 1111 | ], 1112 | "title": "Refiner KSampler (Advanced)", 1113 | "properties": { 1114 | "Node name for S&R": "KSamplerAdvanced" 1115 | }, 1116 | "widgets_values": [ 1117 | "enable", 1118 | 26169760911060, 1119 | "randomize", 1120 | 10, 1121 | 7, 1122 | "dpmpp_sde_gpu", 1123 | "simple", 1124 | 7, 1125 | 10, 1126 | "enable" 1127 | ], 1128 | "color": "#322", 1129 | "bgcolor": "#533" 1130 | }, 1131 | { 1132 | "id": 334, 1133 | "type": "Combine and Paste", 1134 | "pos": [ 1135 | -1240, 1136 | 2330 1137 | ], 1138 | "size": { 1139 | "0": 380.4000244140625, 1140 | "1": 254 1141 | }, 1142 | "flags": {}, 1143 | "order": 20, 1144 | "mode": 0, 1145 | "inputs": [ 1146 | { 1147 | "name": "decoded_vae", 1148 | "type": "IMAGE", 1149 | "link": 59195 1150 | }, 1151 | { 1152 | "name": "Original_Image", 1153 | "type": "IMAGE", 1154 | "link": 59194 1155 | }, 1156 | { 1157 | "name": "Cut_Image", 1158 | "type": "IMAGE", 1159 | "link": 59193 1160 | }, 1161 | { 1162 | "name": "Cut_Mask", 1163 | "type": "IMAGE", 1164 | "link": 59192 1165 | }, 1166 | { 1167 | "name": "region", 1168 | "type": "IMAGE", 1169 | "link": 59190 1170 | }, 1171 | { 1172 | "name": "mask_mapping_optional", 1173 | "type": "MASK_MAPPING", 1174 | "link": 59191 1175 | } 1176 | ], 1177 | "outputs": [ 1178 | { 1179 | "name": "FinalOut", 1180 | "type": "IMAGE", 1181 | "links": [ 1182 | 59196, 1183 | 59210 1184 | ], 1185 | "shape": 3, 1186 | "slot_index": 0 1187 | } 1188 | ], 1189 | "properties": { 1190 | "Node name for S&R": "Combine and Paste" 1191 | }, 1192 | "widgets_values": [ 1193 | 1, 1194 | "multiply_alpha", 1195 | "yes", 1196 | "no", 1197 | "resize" 1198 | ] 1199 | }, 1200 | { 1201 | "id": 75, 1202 | "type": "SaveImage", 1203 | "pos": [ 1204 | -754, 1205 | 2625 1206 | ], 1207 | "size": [ 1208 | 787.3301374877929, 
1209 | 1004.6297954711918 1210 | ], 1211 | "flags": { 1212 | "collapsed": false 1213 | }, 1214 | "order": 21, 1215 | "mode": 0, 1216 | "inputs": [ 1217 | { 1218 | "name": "images", 1219 | "type": "IMAGE", 1220 | "link": 59196 1221 | } 1222 | ], 1223 | "properties": {}, 1224 | "widgets_values": [ 1225 | "ComfyUI" 1226 | ] 1227 | } 1228 | ], 1229 | "links": [ 1230 | [ 1231 | 41406, 1232 | 130, 1233 | 0, 1234 | 111, 1235 | 3, 1236 | "LATENT" 1237 | ], 1238 | [ 1239 | 41410, 1240 | 34, 1241 | 0, 1242 | 111, 1243 | 4, 1244 | "FLOAT" 1245 | ], 1246 | [ 1247 | 41411, 1248 | 24, 1249 | 0, 1250 | 111, 1251 | 5, 1252 | "INT" 1253 | ], 1254 | [ 1255 | 41412, 1256 | 111, 1257 | 0, 1258 | 74, 1259 | 0, 1260 | "LATENT" 1261 | ], 1262 | [ 1263 | 41418, 1264 | 24, 1265 | 0, 1266 | 130, 1267 | 4, 1268 | "INT" 1269 | ], 1270 | [ 1271 | 41419, 1272 | 34, 1273 | 0, 1274 | 130, 1275 | 5, 1276 | "FLOAT" 1277 | ], 1278 | [ 1279 | 59027, 1280 | 4, 1281 | 0, 1282 | 130, 1283 | 0, 1284 | "MODEL" 1285 | ], 1286 | [ 1287 | 59030, 1288 | 18, 1289 | 0, 1290 | 111, 1291 | 0, 1292 | "MODEL" 1293 | ], 1294 | [ 1295 | 59033, 1296 | 44, 1297 | 0, 1298 | 105, 1299 | 1, 1300 | "VAE" 1301 | ], 1302 | [ 1303 | 59034, 1304 | 44, 1305 | 0, 1306 | 74, 1307 | 1, 1308 | "VAE" 1309 | ], 1310 | [ 1311 | 59035, 1312 | 4, 1313 | 1, 1314 | 71, 1315 | 0, 1316 | "CLIP" 1317 | ], 1318 | [ 1319 | 59036, 1320 | 109, 1321 | 0, 1322 | 71, 1323 | 1, 1324 | "STRING" 1325 | ], 1326 | [ 1327 | 59037, 1328 | 71, 1329 | 0, 1330 | 130, 1331 | 1, 1332 | "CONDITIONING" 1333 | ], 1334 | [ 1335 | 59038, 1336 | 4, 1337 | 1, 1338 | 72, 1339 | 0, 1340 | "CLIP" 1341 | ], 1342 | [ 1343 | 59039, 1344 | 110, 1345 | 0, 1346 | 72, 1347 | 1, 1348 | "STRING" 1349 | ], 1350 | [ 1351 | 59040, 1352 | 72, 1353 | 0, 1354 | 130, 1355 | 2, 1356 | "CONDITIONING" 1357 | ], 1358 | [ 1359 | 59041, 1360 | 18, 1361 | 1, 1362 | 107, 1363 | 0, 1364 | "CLIP" 1365 | ], 1366 | [ 1367 | 59042, 1368 | 109, 1369 | 0, 1370 | 107, 1371 | 1, 1372 | "STRING" 1373 | ], 1374 | [ 1375 | 59043, 1376 | 107, 1377 | 0, 1378 | 111, 1379 | 1, 1380 | "CONDITIONING" 1381 | ], 1382 | [ 1383 | 59044, 1384 | 18, 1385 | 1, 1386 | 108, 1387 | 0, 1388 | "CLIP" 1389 | ], 1390 | [ 1391 | 59046, 1392 | 110, 1393 | 0, 1394 | 108, 1395 | 1, 1396 | "STRING" 1397 | ], 1398 | [ 1399 | 59047, 1400 | 108, 1401 | 0, 1402 | 111, 1403 | 2, 1404 | "CONDITIONING" 1405 | ], 1406 | [ 1407 | 59177, 1408 | 87, 1409 | 0, 1410 | 332, 1411 | 0, 1412 | "IMAGE" 1413 | ], 1414 | [ 1415 | 59178, 1416 | 87, 1417 | 1, 1418 | 332, 1419 | 1, 1420 | "MASK" 1421 | ], 1422 | [ 1423 | 59184, 1424 | 332, 1425 | 1, 1426 | 333, 1427 | 2, 1428 | "MASK_MAPPING" 1429 | ], 1430 | [ 1431 | 59190, 1432 | 333, 1433 | 2, 1434 | 334, 1435 | 4, 1436 | "IMAGE" 1437 | ], 1438 | [ 1439 | 59191, 1440 | 332, 1441 | 1, 1442 | 334, 1443 | 5, 1444 | "MASK_MAPPING" 1445 | ], 1446 | [ 1447 | 59192, 1448 | 333, 1449 | 1, 1450 | 334, 1451 | 3, 1452 | "IMAGE" 1453 | ], 1454 | [ 1455 | 59193, 1456 | 333, 1457 | 0, 1458 | 334, 1459 | 2, 1460 | "IMAGE" 1461 | ], 1462 | [ 1463 | 59194, 1464 | 87, 1465 | 0, 1466 | 334, 1467 | 1, 1468 | "IMAGE" 1469 | ], 1470 | [ 1471 | 59195, 1472 | 74, 1473 | 0, 1474 | 334, 1475 | 0, 1476 | "IMAGE" 1477 | ], 1478 | [ 1479 | 59196, 1480 | 334, 1481 | 0, 1482 | 75, 1483 | 0, 1484 | "IMAGE" 1485 | ], 1486 | [ 1487 | 59198, 1488 | 87, 1489 | 0, 1490 | 335, 1491 | 0, 1492 | "IMAGE" 1493 | ], 1494 | [ 1495 | 59199, 1496 | 333, 1497 | 0, 1498 | 105, 1499 | 0, 1500 | "IMAGE" 1501 | ], 1502 | [ 1503 | 59206, 1504 | 87, 1505 | 0, 1506 | 333, 1507 
| 0, 1508 | "IMAGE" 1509 | ], 1510 | [ 1511 | 59207, 1512 | 332, 1513 | 0, 1514 | 333, 1515 | 1, 1516 | "IMAGE" 1517 | ], 1518 | [ 1519 | 59208, 1520 | 105, 1521 | 0, 1522 | 130, 1523 | 3, 1524 | "LATENT" 1525 | ], 1526 | [ 1527 | 59209, 1528 | 338, 1529 | 0, 1530 | 339, 1531 | 0, 1532 | "IMAGE" 1533 | ], 1534 | [ 1535 | 59210, 1536 | 334, 1537 | 0, 1538 | 339, 1539 | 1, 1540 | "IMAGE" 1541 | ], 1542 | [ 1543 | 59211, 1544 | 339, 1545 | 0, 1546 | 340, 1547 | 0, 1548 | "IMAGE" 1549 | ] 1550 | ], 1551 | "groups": [], 1552 | "config": {}, 1553 | "extra": {}, 1554 | "version": 0.4 1555 | } -------------------------------------------------------------------------------- /workflows/Multi_XFer_Workflow.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 407, 3 | "last_link_id": 59363, 4 | "nodes": [ 5 | { 6 | "id": 374, 7 | "type": "PreviewImage", 8 | "pos": [ 9 | 194, 10 | 880 11 | ], 12 | "size": { 13 | "0": 500.72332763671875, 14 | "1": 363.1986389160156 15 | }, 16 | "flags": {}, 17 | "order": 7, 18 | "mode": 0, 19 | "inputs": [ 20 | { 21 | "name": "images", 22 | "type": "IMAGE", 23 | "link": 59308 24 | } 25 | ], 26 | "properties": { 27 | "Node name for S&R": "PreviewImage" 28 | } 29 | }, 30 | { 31 | "id": 407, 32 | "type": "PreviewImage", 33 | "pos": [ 34 | 651, 35 | -64 36 | ], 37 | "size": [ 38 | 685.6106591057126, 39 | 709.0367810860923 40 | ], 41 | "flags": {}, 42 | "order": 3, 43 | "mode": 0, 44 | "inputs": [ 45 | { 46 | "name": "images", 47 | "type": "IMAGE", 48 | "link": 59358 49 | } 50 | ], 51 | "properties": { 52 | "Node name for S&R": "PreviewImage" 53 | } 54 | }, 55 | { 56 | "id": 391, 57 | "type": "Mask Ops", 58 | "pos": [ 59 | -175, 60 | 891 61 | ], 62 | "size": { 63 | "0": 315, 64 | "1": 362 65 | }, 66 | "flags": {}, 67 | "order": 5, 68 | "mode": 0, 69 | "inputs": [ 70 | { 71 | "name": "image", 72 | "type": "IMAGE", 73 | "link": 59306 74 | }, 75 | { 76 | "name": "mask", 77 | "type": "MASK", 78 | "link": 59345 79 | } 80 | ], 81 | "outputs": [ 82 | { 83 | "name": "mask_image", 84 | "type": "IMAGE", 85 | "links": [ 86 | 59308 87 | ], 88 | "shape": 3, 89 | "slot_index": 0 90 | }, 91 | { 92 | "name": "mask", 93 | "type": "MASK", 94 | "links": [ 95 | 59321 96 | ], 97 | "shape": 3, 98 | "slot_index": 1 99 | }, 100 | { 101 | "name": "mask mapping", 102 | "type": "MASK_MAPPING", 103 | "links": null, 104 | "shape": 3 105 | } 106 | ], 107 | "properties": { 108 | "Node name for S&R": "Mask Ops" 109 | }, 110 | "widgets_values": [ 111 | "", 112 | 0, 113 | 30, 114 | 0, 115 | 0.4111105346679689, 116 | 0, 117 | 146.55555419921873, 118 | 255, 119 | "blue", 120 | 0, 121 | 0, 122 | 5 123 | ] 124 | }, 125 | { 126 | "id": 338, 127 | "type": "LoadImage", 128 | "pos": [ 129 | -196, 130 | -92 131 | ], 132 | "size": { 133 | "0": 782.6728515625, 134 | "1": 899.208740234375 135 | }, 136 | "flags": {}, 137 | "order": 0, 138 | "mode": 0, 139 | "outputs": [ 140 | { 141 | "name": "IMAGE", 142 | "type": "IMAGE", 143 | "links": [ 144 | 59306, 145 | 59356, 146 | 59358, 147 | 59363 148 | ], 149 | "shape": 3, 150 | "slot_index": 0 151 | }, 152 | { 153 | "name": "MASK", 154 | "type": "MASK", 155 | "links": [ 156 | 59337, 157 | 59345 158 | ], 159 | "shape": 3, 160 | "slot_index": 1 161 | } 162 | ], 163 | "properties": { 164 | "Node name for S&R": "LoadImage" 165 | }, 166 | "widgets_values": [ 167 | "clipspace/clipspace-mask-37543423.099999964.png [input]", 168 | "image" 169 | ] 170 | }, 171 | { 172 | "id": 401, 173 | "type": "SaveImage", 174 | "pos": [ 175 
| 2412, 176 | -85 177 | ], 178 | "size": [ 179 | 873.1226779115714, 180 | 930.0452261887258 181 | ], 182 | "flags": { 183 | "collapsed": false 184 | }, 185 | "order": 11, 186 | "mode": 0, 187 | "inputs": [ 188 | { 189 | "name": "images", 190 | "type": "IMAGE", 191 | "link": 59342 192 | } 193 | ], 194 | "properties": {}, 195 | "widgets_values": [ 196 | "ComfyUI" 197 | ] 198 | }, 199 | { 200 | "id": 387, 201 | "type": "SaveImage", 202 | "pos": [ 203 | 1472, 204 | -83 205 | ], 206 | "size": [ 207 | 877.7262370513931, 208 | 926.4544252689561 209 | ], 210 | "flags": { 211 | "collapsed": false 212 | }, 213 | "order": 9, 214 | "mode": 0, 215 | "inputs": [ 216 | { 217 | "name": "images", 218 | "type": "IMAGE", 219 | "link": 59298 220 | } 221 | ], 222 | "properties": {}, 223 | "widgets_values": [ 224 | "ComfyUI" 225 | ] 226 | }, 227 | { 228 | "id": 398, 229 | "type": "Mask Ops", 230 | "pos": [ 231 | -175, 232 | 1311 233 | ], 234 | "size": { 235 | "0": 315, 236 | "1": 362 237 | }, 238 | "flags": {}, 239 | "order": 4, 240 | "mode": 0, 241 | "inputs": [ 242 | { 243 | "name": "image", 244 | "type": "IMAGE", 245 | "link": 59356 246 | }, 247 | { 248 | "name": "mask", 249 | "type": "MASK", 250 | "link": 59337 251 | } 252 | ], 253 | "outputs": [ 254 | { 255 | "name": "mask_image", 256 | "type": "IMAGE", 257 | "links": [ 258 | 59346 259 | ], 260 | "shape": 3, 261 | "slot_index": 0 262 | }, 263 | { 264 | "name": "mask", 265 | "type": "MASK", 266 | "links": [ 267 | 59339 268 | ], 269 | "shape": 3, 270 | "slot_index": 1 271 | }, 272 | { 273 | "name": "mask mapping", 274 | "type": "MASK_MAPPING", 275 | "links": [], 276 | "shape": 3, 277 | "slot_index": 2 278 | } 279 | ], 280 | "properties": { 281 | "Node name for S&R": "Mask Ops" 282 | }, 283 | "widgets_values": [ 284 | "", 285 | 0, 286 | 30, 287 | 0, 288 | 0.6777777099609374, 289 | 0, 290 | 127.5, 291 | 255, 292 | "blue", 293 | 0, 294 | 1, 295 | 5 296 | ] 297 | }, 298 | { 299 | "id": 402, 300 | "type": "PreviewImage", 301 | "pos": [ 302 | 185, 303 | 1306 304 | ], 305 | "size": { 306 | "0": 479.34649658203125, 307 | "1": 409.572998046875 308 | }, 309 | "flags": {}, 310 | "order": 6, 311 | "mode": 0, 312 | "inputs": [ 313 | { 314 | "name": "images", 315 | "type": "IMAGE", 316 | "link": 59346 317 | } 318 | ], 319 | "properties": { 320 | "Node name for S&R": "PreviewImage" 321 | } 322 | }, 323 | { 324 | "id": 87, 325 | "type": "LoadImage", 326 | "pos": [ 327 | 734, 328 | 1311 329 | ], 330 | "size": { 331 | "0": 319.9939880371094, 332 | "1": 390.1728210449219 333 | }, 334 | "flags": {}, 335 | "order": 1, 336 | "mode": 0, 337 | "inputs": [], 338 | "outputs": [ 339 | { 340 | "name": "IMAGE", 341 | "type": "IMAGE", 342 | "links": [ 343 | 59362 344 | ], 345 | "shape": 3, 346 | "slot_index": 0 347 | }, 348 | { 349 | "name": "MASK", 350 | "type": "MASK", 351 | "links": [], 352 | "shape": 3, 353 | "slot_index": 1 354 | } 355 | ], 356 | "properties": { 357 | "Node name for S&R": "LoadImage" 358 | }, 359 | "widgets_values": [ 360 | "claire-lin-img-7677.jpg", 361 | "image" 362 | ] 363 | }, 364 | { 365 | "id": 399, 366 | "type": "LoadImage", 367 | "pos": [ 368 | 730, 369 | 791 370 | ], 371 | "size": [ 372 | 336.8473351609832, 373 | 464.35929617921215 374 | ], 375 | "flags": {}, 376 | "order": 2, 377 | "mode": 0, 378 | "inputs": [], 379 | "outputs": [ 380 | { 381 | "name": "IMAGE", 382 | "type": "IMAGE", 383 | "links": [ 384 | 59361 385 | ], 386 | "shape": 3, 387 | "slot_index": 0 388 | }, 389 | { 390 | "name": "MASK", 391 | "type": "MASK", 392 | "links": [], 393 | "shape": 3, 
394 | "slot_index": 1 395 | } 396 | ], 397 | "properties": { 398 | "Node name for S&R": "LoadImage" 399 | }, 400 | "widgets_values": [ 401 | "big-chief.jpg", 402 | "image" 403 | ] 404 | }, 405 | { 406 | "id": 400, 407 | "type": "Color Transfer", 408 | "pos": [ 409 | 1541, 410 | 1173 411 | ], 412 | "size": { 413 | "0": 315, 414 | "1": 242 415 | }, 416 | "flags": {}, 417 | "order": 10, 418 | "mode": 0, 419 | "inputs": [ 420 | { 421 | "name": "source_image", 422 | "type": "IMAGE", 423 | "link": 59357 424 | }, 425 | { 426 | "name": "target_image", 427 | "type": "IMAGE", 428 | "link": 59362 429 | }, 430 | { 431 | "name": "mask", 432 | "type": "MASK", 433 | "link": 59339 434 | } 435 | ], 436 | "outputs": [ 437 | { 438 | "name": "image", 439 | "type": "IMAGE", 440 | "links": [ 441 | 59342 442 | ], 443 | "shape": 3, 444 | "slot_index": 0 445 | } 446 | ], 447 | "properties": { 448 | "Node name for S&R": "Color Transfer" 449 | }, 450 | "widgets_values": [ 451 | 8, 452 | 5, 453 | 10, 454 | 1, 455 | 0.9, 456 | 1, 457 | 1 458 | ] 459 | }, 460 | { 461 | "id": 371, 462 | "type": "Color Transfer", 463 | "pos": [ 464 | 1097, 465 | 859 466 | ], 467 | "size": { 468 | "0": 315, 469 | "1": 242 470 | }, 471 | "flags": {}, 472 | "order": 8, 473 | "mode": 0, 474 | "inputs": [ 475 | { 476 | "name": "source_image", 477 | "type": "IMAGE", 478 | "link": 59363 479 | }, 480 | { 481 | "name": "target_image", 482 | "type": "IMAGE", 483 | "link": 59361 484 | }, 485 | { 486 | "name": "mask", 487 | "type": "MASK", 488 | "link": 59321 489 | } 490 | ], 491 | "outputs": [ 492 | { 493 | "name": "image", 494 | "type": "IMAGE", 495 | "links": [ 496 | 59298, 497 | 59357 498 | ], 499 | "shape": 3, 500 | "slot_index": 0 501 | } 502 | ], 503 | "properties": { 504 | "Node name for S&R": "Color Transfer" 505 | }, 506 | "widgets_values": [ 507 | 8, 508 | 15, 509 | 4, 510 | 1, 511 | 0.9, 512 | 1.1, 513 | 0.9999999999999992 514 | ] 515 | } 516 | ], 517 | "links": [ 518 | [ 519 | 59298, 520 | 371, 521 | 0, 522 | 387, 523 | 0, 524 | "IMAGE" 525 | ], 526 | [ 527 | 59306, 528 | 338, 529 | 0, 530 | 391, 531 | 0, 532 | "IMAGE" 533 | ], 534 | [ 535 | 59308, 536 | 391, 537 | 0, 538 | 374, 539 | 0, 540 | "IMAGE" 541 | ], 542 | [ 543 | 59321, 544 | 391, 545 | 1, 546 | 371, 547 | 2, 548 | "MASK" 549 | ], 550 | [ 551 | 59337, 552 | 338, 553 | 1, 554 | 398, 555 | 1, 556 | "MASK" 557 | ], 558 | [ 559 | 59339, 560 | 398, 561 | 1, 562 | 400, 563 | 2, 564 | "MASK" 565 | ], 566 | [ 567 | 59342, 568 | 400, 569 | 0, 570 | 401, 571 | 0, 572 | "IMAGE" 573 | ], 574 | [ 575 | 59345, 576 | 338, 577 | 1, 578 | 391, 579 | 1, 580 | "MASK" 581 | ], 582 | [ 583 | 59346, 584 | 398, 585 | 0, 586 | 402, 587 | 0, 588 | "IMAGE" 589 | ], 590 | [ 591 | 59356, 592 | 338, 593 | 0, 594 | 398, 595 | 0, 596 | "IMAGE" 597 | ], 598 | [ 599 | 59357, 600 | 371, 601 | 0, 602 | 400, 603 | 0, 604 | "IMAGE" 605 | ], 606 | [ 607 | 59358, 608 | 338, 609 | 0, 610 | 407, 611 | 0, 612 | "IMAGE" 613 | ], 614 | [ 615 | 59361, 616 | 399, 617 | 0, 618 | 371, 619 | 1, 620 | "IMAGE" 621 | ], 622 | [ 623 | 59362, 624 | 87, 625 | 0, 626 | 400, 627 | 1, 628 | "IMAGE" 629 | ], 630 | [ 631 | 59363, 632 | 338, 633 | 0, 634 | 371, 635 | 0, 636 | "IMAGE" 637 | ] 638 | ], 639 | "groups": [], 640 | "config": {}, 641 | "extra": {}, 642 | "version": 0.4 643 | } --------------------------------------------------------------------------------
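
Note (not part of the repository files): the exported workflow JSON above describes a node graph, and from the data it appears that each entry in its "links" array has the shape [link_id, source_node_id, source_output_slot, target_node_id, target_input_slot, type] (for example, link 59363 connects LoadImage node 338, output 0, to Color Transfer node 371, input 0, as "IMAGE"). The following is a minimal sketch of how such a file could be inspected with plain Python; the function names and the local path are illustrative assumptions, not APIs provided by ComfyI2I or ComfyUI.

    import json

    def slot_name(node: dict, key: str, slot: int):
        """Return a slot's display name from the node's "inputs"/"outputs" list, else its index."""
        entries = node.get(key) or []
        if 0 <= slot < len(entries):
            return entries[slot].get("name", slot)
        return slot

    def print_workflow_edges(path: str) -> None:
        """Print every link in an exported ComfyUI workflow as 'Source.output --TYPE--> Target.input'."""
        with open(path, "r", encoding="utf-8") as f:
            workflow = json.load(f)

        # Index nodes by id so each link's endpoints can be resolved to node types.
        nodes = {node["id"]: node for node in workflow.get("nodes", [])}

        # Assumed link layout: [link_id, src_node, src_slot, dst_node, dst_slot, type].
        for link_id, src_id, src_slot, dst_id, dst_slot, link_type in workflow.get("links", []):
            src, dst = nodes.get(src_id, {}), nodes.get(dst_id, {})
            print(f"[{link_id}] {src.get('type', src_id)}.{slot_name(src, 'outputs', src_slot)} "
                  f"--{link_type}--> {dst.get('type', dst_id)}.{slot_name(dst, 'inputs', dst_slot)}")

    if __name__ == "__main__":
        # Hypothetical local path; point it at any of the JSON files in the workflows folder.
        print_workflow_edges("workflows/Multi_XFer_Workflow.json")

Run against Multi_XFer_Workflow.json, a listing like this makes the wiring easy to follow, e.g. LoadImage.IMAGE feeding both Mask Ops nodes and the two chained Color Transfer nodes before the SaveImage outputs.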