├── requirements.txt
├── explain
│   ├── 微调1.png
│   ├── LOGO1.png
│   ├── LOGO2.png
│   ├── 丢失细节.png
│   ├── 保存细节.png
│   ├── 变异参数.png
│   ├── 局部重绘.png
│   ├── 变异种子微调.png
│   ├── 变异种子微调实例.png
│   ├── 局部CN应用.png
│   ├── 局部CN应用实例.png
│   ├── 局部CN应用结果.png
│   ├── 局部裁剪参数.png
│   ├── 局部重绘参数.png
│   ├── 局部重绘采样器.png
│   └── 通过变异种子对图像进行微调.png
├── pyproject.toml
├── __init__.py
├── .github
│   └── workflows
│       └── publish.yml
├── README.md
├── EGJBCHCYQTXCJ.py
├── EGBYCYQ.py
├── EGJBCHCYQ.py
└── workflow
    └── CNworkflow.json

--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | torch
2 | numpy
3 | colorama
4 | pillow
--------------------------------------------------------------------------------
/explain/微调1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/11dogzi/Comfyui-ergouzi-samplers/HEAD/explain/微调1.png
--------------------------------------------------------------------------------
/explain/LOGO1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/11dogzi/Comfyui-ergouzi-samplers/HEAD/explain/LOGO1.png
--------------------------------------------------------------------------------
/explain/LOGO2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/11dogzi/Comfyui-ergouzi-samplers/HEAD/explain/LOGO2.png
--------------------------------------------------------------------------------
/explain/丢失细节.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/11dogzi/Comfyui-ergouzi-samplers/HEAD/explain/丢失细节.png
--------------------------------------------------------------------------------
/explain/保存细节.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/11dogzi/Comfyui-ergouzi-samplers/HEAD/explain/保存细节.png
--------------------------------------------------------------------------------
/explain/变异参数.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/11dogzi/Comfyui-ergouzi-samplers/HEAD/explain/变异参数.png
--------------------------------------------------------------------------------
/explain/局部重绘.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/11dogzi/Comfyui-ergouzi-samplers/HEAD/explain/局部重绘.png
--------------------------------------------------------------------------------
/explain/变异种子微调.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/11dogzi/Comfyui-ergouzi-samplers/HEAD/explain/变异种子微调.png
--------------------------------------------------------------------------------
/explain/变异种子微调实例.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/11dogzi/Comfyui-ergouzi-samplers/HEAD/explain/变异种子微调实例.png
--------------------------------------------------------------------------------
/explain/局部CN应用.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/11dogzi/Comfyui-ergouzi-samplers/HEAD/explain/局部CN应用.png
--------------------------------------------------------------------------------
/explain/局部CN应用实例.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/11dogzi/Comfyui-ergouzi-samplers/HEAD/explain/局部CN应用实例.png
--------------------------------------------------------------------------------
/explain/局部CN应用结果.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/11dogzi/Comfyui-ergouzi-samplers/HEAD/explain/局部CN应用结果.png
--------------------------------------------------------------------------------
/explain/局部裁剪参数.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/11dogzi/Comfyui-ergouzi-samplers/HEAD/explain/局部裁剪参数.png
--------------------------------------------------------------------------------
/explain/局部重绘参数.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/11dogzi/Comfyui-ergouzi-samplers/HEAD/explain/局部重绘参数.png
--------------------------------------------------------------------------------
/explain/局部重绘采样器.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/11dogzi/Comfyui-ergouzi-samplers/HEAD/explain/局部重绘采样器.png
--------------------------------------------------------------------------------
/explain/通过变异种子对图像进行微调.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/11dogzi/Comfyui-ergouzi-samplers/HEAD/explain/通过变异种子对图像进行微调.png
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "comfyui-ergouzi-samplers"
3 | description = "Partial redraw sampler and variation seed sampler"
4 | version = "1.0.0"
5 | license = { file = "LICENSE" }
6 | dependencies = ["torch", "numpy", "colorama", "pillow"]
7 | 
8 | [project.urls]
9 | Repository = "https://github.com/11dogzi/Comfyui-ergouzi-samplers"
10 | # Used by Comfy Registry https://comfyregistry.org
11 | 
12 | [tool.comfy]
13 | PublisherId = ""
14 | DisplayName = "Comfyui-ergouzi-samplers"
15 | Icon = ""
16 | 
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
1 | from .EGBYCYQ import EGBYZZCYQ
2 | from .EGJBCHCYQ import EGCYQJB
3 | from .EGJBCHCYQTXCJ import EGCYQJBCJ
4 | 
5 | # A dictionary that contains all nodes you want to export with their names
6 | NODE_CLASS_MAPPINGS = {
7 |     "EGBYZZCYQ": EGBYZZCYQ,
8 |     "EGCYQJB": EGCYQJB,
9 |     "EGCYQJBCJ": EGCYQJBCJ
10 | }
11 | 
12 | # A dictionary that contains the friendly/human-readable titles for the nodes
13 | NODE_DISPLAY_NAME_MAPPINGS = {
14 |     "EGBYZZCYQ": "2🐕Mutant seed sampler",
15 |     "EGCYQJB": "2🐕Local sampler",
16 |     "EGCYQJBCJ": "2🐕Local Image"
17 | }
--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | name: Publish to Comfy registry
2 | on:
3 |   workflow_dispatch:
4 |   push:
5 |     branches:
6 |       - main
7 |       - master
8 |     paths:
9 |       - "pyproject.toml"
10 | 
11 | jobs:
12 |   publish-node:
13 |     name: Publish Custom Node to registry
14 |     runs-on: ubuntu-latest
15 |     steps:
16 |       - name: Check out code
17 |         uses: actions/checkout@v4
18 |       - name: Publish Custom Node
19 |         uses: Comfy-Org/publish-node-action@main
20 |         with:
21 |           ## Add your own personal access token to your GitHub Repository secrets and reference it here.
22 |           personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }}
23 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | 哈喽!我是二狗子(2🐕)!仅局部重绘采样器与变异种子采样器来啦!
2 | Hello! I am Ergouzi (2🐕)! The "only masked" local redraw sampler and the variation seed sampler are here!
3 | 
4 | 如果你没有代码基础,不会安装依赖,或者安装节点时不想敲pip,可以使用我制作的环境安装器(If you have no coding background, can't install dependencies, or don't want to type pip commands when installing nodes, you can use my environment installer):
5 | [二狗环境便捷安装器](https://github.com/11dogzi/Comfyuinodes-HJGL)
6 | 
7 | ## 局部重绘采样器
8 | Local redraw sampler
9 | 实现仅对蒙版区域进行需求分辨率的重绘(类似WebUi的仅局部重绘模式)
10 | Redraws only the masked area, at the resolution you request (similar to WebUI's "only masked" inpaint mode)
11 | ![局部重绘采样器](explain/局部重绘采样器.png "局部重绘采样器")
12 | ![局部重绘](explain/局部重绘.png "局部重绘")
13 | 
14 | 并且保留还原非重绘区域像素空间的原像素,常规重绘模式中非重绘区域由于压缩导致像素信息丢失
15 | It also restores the original pixels of the non-redrawn area; in conventional inpainting modes the non-redrawn area loses pixel information to latent-space compression
16 | ![丢失细节](explain/丢失细节.png "丢失细节")
17 | 
18 | 局部重绘采样器还原非重绘区域细节(按照原图像素)
19 | The local redraw sampler restores detail in the non-redrawn area (using the original image's pixels)
20 | ![保存细节](explain/保存细节.png "保存细节")
21 | 
22 | ## 变异种子采样器
23 | Variation seed sampler
24 | 通过对原噪声图seed进行组合微调实现在不改变图像构图的情况下实现微调生成
25 | It blends the original seed's noise with a second variation-seed noise, fine-tuning the result without changing the image composition
26 | ![通过变异种子对图像进行微调](explain/通过变异种子对图像进行微调.png "通过变异种子对图像进行微调")
27 | ![微调1](explain/微调1.png "微调1")
28 | ![变异种子微调](explain/变异种子微调.png "变异种子微调")
29 | ![变异种子微调实例](explain/变异种子微调实例.png "变异种子微调实例")
30 | 
31 | ## 局部重绘采样器的CN实现
32 | ControlNet (CN) support for the local redraw sampler
33 | 通过2🐕Local Image节点对CN控制网进行裁剪处理以匹配局部重绘采样器
34 | 具体连接方式可使用实例工作流
35 | The 2🐕Local Image node crops the ControlNet hint image to match the region cropped by the local redraw sampler
36 | For the exact connections, see the bundled example workflow (workflow/CNworkflow.json)
37 | ![局部CN应用](explain/局部CN应用.png "局部CN应用")
38 | 通过线稿进行局部重绘
39 | Local redraw guided by line art
40 | ![局部CN应用结果](explain/局部CN应用结果.png "局部CN应用结果")
41 | 
42 | ## 局部重绘采样器参数
43 | Local redraw sampler parameters
44 | ![局部重绘参数](explain/局部重绘参数.png "局部重绘参数")
45 | 
46 | ## 局部重绘采样器CN控制网参数
47 | Local redraw sampler ControlNet crop parameters
48 | ![局部裁剪参数](explain/局部裁剪参数.png "局部裁剪参数")
49 | 
50 | ## 变异种子采样器参数
51 | Variation seed sampler parameters
52 | ![变异参数](explain/变异参数.png "变异参数")
53 | 
54 | 
55 | ## 更多SD免费教程
56 | More free SD tutorials
57 | 灵仙儿和二狗子的Bilibili空间,欢迎访问:
58 | Lingxian'er and Ergouzi's Bilibili space, welcome to visit:
59 | [灵仙儿二狗子的Bilibili空间](https://space.bilibili.com/19723588?spm_id_from=333.1007.0.0)
60 | 欢迎加入我们的QQ频道,点击这里进入:
61 | Welcome to join our QQ channel, click here to enter:
62 | [二狗子的QQ频道](https://pd.qq.com/s/3d9ys5wpr)
63 | ![LOGO1](explain/LOGO1.png "LOGO1")![LOGO1](explain/LOGO1.png "LOGO1")![LOGO1](explain/LOGO1.png "LOGO1")
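64 | 
65 | ## 采样原理示意
66 | How the samplers work (minimal sketches)
67 | 以下两段代码仅为原理示意,并非节点的完整实现(完整逻辑见 EGBYCYQ.py 与 EGJBCHCYQTXCJ.py),函数名与参数仅作说明用途
68 | The two snippets below are minimal sketches of what the nodes do internally, not the full implementations (see EGBYCYQ.py and EGJBCHCYQTXCJ.py); the function names and parameters are illustrative only
69 | 变异种子采样器的核心是把基础种子噪声与变异种子噪声按强度线性混合:
70 | The variation seed sampler's core step is a linear blend of the base-seed noise and the variation-seed noise:
71 | ```python
72 | import torch
73 | 
74 | def blend_variation_noise(seed, variation_seed, strength, shape=(1, 4, 64, 64)):
75 |     # two reproducible noise tensors, one per seed
76 |     base = torch.randn(shape, generator=torch.Generator().manual_seed(seed))
77 |     variation = torch.randn(shape, generator=torch.Generator().manual_seed(variation_seed))
78 |     # strength = 0.0 reproduces the base image; small values give gentle variations
79 |     return (1.0 - strength) * base + strength * variation
80 | ```
81 | 实际节点还可以通过 variation_width/variation_height 只在画面中心区域内混合(the actual node can additionally restrict the blend to a centered region via variation_width/variation_height)
82 | 局部重绘采样器先取蒙版的外接矩形,按 Region_Extension 向外扩展并裁剪,重绘后再贴回原图:
83 | The local redraw sampler first takes the mask's bounding box, pads it by Region_Extension, crops and redraws it, then pastes the result back:
84 | ```python
85 | import numpy as np
86 | 
87 | def mask_bbox(mask, region_extension):
88 |     # bounding box of the masked pixels, padded and clamped to the image
89 |     ys, xs = np.where(mask > 0)
90 |     if ys.size == 0:
91 |         return None  # empty mask: nothing to redraw
92 |     h, w = mask.shape
93 |     return (max(int(xs.min()) - region_extension, 0), max(int(ys.min()) - region_extension, 0),
94 |             min(int(xs.max()) + region_extension, w), min(int(ys.max()) + region_extension, h))
95 | ```
--------------------------------------------------------------------------------
/EGJBCHCYQTXCJ.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from PIL import Image
3 | import numpy as np
4 | 
5 | def tensor2pil(image):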
6 |     return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))
7 | 
8 | def pil2tensor(image):
9 |     return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)
10 | 
11 | class EGCYQJBCJ:
12 |     def __init__(self):
13 |         pass
14 | 
15 |     @classmethod
16 |     def INPUT_TYPES(s):
17 |         return {"required": {
18 |             "reference_image": ("IMAGE",),
19 |             "image": ("IMAGE",),
20 |             "mask": ("MASK",),
21 |             "region_extension": ("INT", {"default": 50, "min": 0}),
22 |             "partial_size": ("INT", {"default": 512, "min": 0, "max": 2048, "step": 1})
23 |         }}
24 | 
25 |     RETURN_TYPES = ("IMAGE", "MASK")
26 |     FUNCTION = "local_crop"
27 |     CATEGORY = "2🐕sampler"
28 | 
29 |     def resize_to_reference(self, image_pil, reference_image_pil):
30 |         return image_pil.resize(reference_image_pil.size, Image.LANCZOS)
31 | 
32 |     def local_crop(self, image, mask, reference_image, region_extension, partial_size):
33 |         image_pil = tensor2pil(image)
34 |         mask_pil = tensor2pil(mask)
35 |         reference_image_pil = tensor2pil(reference_image)
36 | 
37 |         # Resize the input image and mask to match the reference image size
38 |         image_pil = self.resize_to_reference(image_pil, reference_image_pil)
39 |         mask_pil = self.resize_to_reference(mask_pil, reference_image_pil)
40 | 
41 |         mask_array = np.array(mask_pil) > 0
42 |         coords = np.where(mask_array)
43 | 
44 |         if coords[0].size == 0 or coords[1].size == 0:  # empty mask: return the resized inputs unchanged
45 |             return (pil2tensor(image_pil), pil2tensor(mask_pil))
46 | 
47 |         x0, y0, x1, y1 = coords[1].min(), coords[0].min(), coords[1].max(), coords[0].max()
48 |         x0 -= region_extension
49 |         y0 -= region_extension
50 |         x1 += region_extension
51 |         y1 += region_extension
52 |         x0 = max(x0, 0)
53 |         y0 = max(y0, 0)
54 |         x1 = min(x1, image_pil.width)
55 |         y1 = min(y1, image_pil.height)
56 | 
57 |         cropped_image_pil = image_pil.crop((x0, y0, x1, y1))
58 |         cropped_mask_pil = mask_pil.crop((x0, y0, x1, y1))
59 | 
60 |         if partial_size > 0:
61 |             min_size = min(cropped_image_pil.size)
62 |             if min_size != partial_size:  # rescale so the short side equals partial_size
63 |                 scale_ratio = partial_size / min_size
64 |                 new_size = (int(cropped_image_pil.width * scale_ratio), int(cropped_image_pil.height * scale_ratio))
65 |                 cropped_image_pil = cropped_image_pil.resize(new_size, Image.LANCZOS)
66 |                 cropped_mask_pil = cropped_mask_pil.resize(new_size, Image.LANCZOS)
67 | 
68 |         cropped_image_tensor = pil2tensor(cropped_image_pil)
69 |         cropped_mask_tensor = pil2tensor(cropped_mask_pil)
70 | 
71 |         return (cropped_image_tensor, cropped_mask_tensor)
72 | 
73 | NODE_CLASS_MAPPINGS = {
74 |     "EGCYQJBCJ": EGCYQJBCJ
75 | }
76 | 
77 | NODE_DISPLAY_NAME_MAPPINGS = {
78 |     "EGCYQJBCJ": "2🐕Local Image"
79 | }
80 | 
--------------------------------------------------------------------------------
/EGBYCYQ.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 | import comfy.sample
4 | import comfy.utils
5 | import comfy.samplers  # sampler/scheduler name lists used in INPUT_TYPES
6 | import latent_preview
7 | 
8 | class EGBYZZCYQ:
9 | 
10 |     @classmethod
11 |     def INPUT_TYPES(s):
12 |         return {
13 |             "required": {
14 |                 "model": ("MODEL",),
15 |                 "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
16 |                 "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
17 |                 "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step": 0.1, "round": 0.01}),
18 |                 "sampler_name": (comfy.samplers.KSampler.SAMPLERS,),
19 |                 "scheduler": (comfy.samplers.KSampler.SCHEDULERS,),
20 |                 "positive": ("CONDITIONING",),
21 |                 "negative": ("CONDITIONING",),
22 |                 "latent_image": ("LATENT",),
23 |                 "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
"min": 0.0, "max": 1.0, "step": 0.01}), 24 | "variation_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), 25 | "variation_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}), 26 | "variation_width": ("INT", {"default": 0, "min": 0, "max": 512}), 27 | "variation_height": ("INT", {"default": 0, "min": 0, "max": 512}), 28 | "device": (["GPU", "CPU"], {"default": "CPU"}), 29 | } 30 | } 31 | 32 | RETURN_TYPES = ("LATENT",) 33 | FUNCTION = "sample" 34 | CATEGORY = "2🐕sampler" 35 | 36 | def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0, 37 | variation_seed=0, variation_strength=0.0, variation_width=0, variation_height=0, device="GPU"): 38 | return self.common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, 39 | denoise, variation_seed, variation_strength, variation_width, variation_height, 40 | device) 41 | 42 | def common_ksampler(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, 43 | variation_seed=0, variation_strength=0.0, variation_width=0, variation_height=0, device="GPU", 44 | disable_noise=False, start_step=None, last_step=None, force_full_denoise=False): 45 | latent_image = latent["samples"] 46 | if disable_noise: 47 | noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu") 48 | else: 49 | batch_inds = latent["batch_index"] if "batch_index" in latent else None 50 | noise = self.prepare_noise(latent_image, seed, variation_seed, variation_strength, variation_width, 51 | variation_height, device, batch_inds) 52 | 53 | noise_mask = None 54 | if "noise_mask" in latent: 55 | noise_mask = latent["noise_mask"] 56 | 57 | callback = latent_preview.prepare_callback(model, steps) 58 | disable_pbar = not comfy.utils.PROGRESS_BAR_ENABLED 59 | samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, 60 | latent_image, 61 | denoise=denoise, disable_noise=disable_noise, start_step=start_step, 62 | last_step=last_step, 63 | force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, 64 | disable_pbar=disable_pbar, seed=seed) 65 | out = latent.copy() 66 | out["samples"] = samples 67 | return (out,) 68 | 69 | def prepare_noise(self, latent_image, seed, variation_seed, variation_strength, variation_width, variation_height, 70 | device, noise_inds=None): 71 | device = "cuda" if device == "GPU" and torch.cuda.is_available() else "cpu" 72 | base_generator = torch.Generator(device=device).manual_seed(seed) 73 | base_noise = torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, 74 | generator=base_generator, device=device) 75 | 76 | if variation_strength > 0: 77 | variation_generator = torch.Generator(device=device).manual_seed(variation_seed) 78 | variation_noise = torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, 79 | generator=variation_generator, device=device) 80 | 81 | # Calculate the center region to apply the variation 82 | width, height = latent_image.shape[-1], latent_image.shape[-2] 83 | v_width = min(max(variation_width, 0), width) 84 | v_height = min(max(variation_height, 0), height) 85 | v_width = min(v_width, width) # Ensure does not exceed latent width 86 | v_height = min(v_height, height) # Ensure does not exceed latent height 87 | v_width = (v_width // 8) * 8 # Ensure multiple of 8 88 | v_height = (v_height // 8) * 
89 | 
90 |             start_x = (width - v_width) // 2
91 |             start_y = (height - v_height) // 2
92 | 
93 |             combined_noise = base_noise.clone()
94 |             if v_width > 0 and v_height > 0:
95 |                 combined_noise[..., start_y:start_y + v_height, start_x:start_x + v_width] = \
96 |                     (1 - variation_strength) * base_noise[..., start_y:start_y + v_height, start_x:start_x + v_width] + \
97 |                     variation_strength * variation_noise[..., start_y:start_y + v_height, start_x:start_x + v_width]
98 |         else:
99 |             combined_noise = base_noise
100 | 
101 |         if noise_inds is not None:
102 |             unique_inds, inverse = np.unique(noise_inds, return_inverse=True)
103 |             noises = []
104 |             for i in range(unique_inds[-1] + 1):
105 |                 single_base_noise = combined_noise[0].unsqueeze(0)  # NOTE: every batch index reuses the first noise slice
106 |                 noises.append(single_base_noise)
107 |             combined_noise = torch.cat([noises[i] for i in inverse], axis=0)
108 | 
109 |         return combined_noise
110 | 
--------------------------------------------------------------------------------
/EGJBCHCYQ.py:
--------------------------------------------------------------------------------
1 | import base64
2 | import math
3 | 
4 | import torch
5 | import numpy as np
6 | from colorama import Fore
7 | from PIL import Image, ImageFilter
8 | 
9 | import comfy.samplers
10 | import comfy.sample
11 | import comfy.utils
12 | import latent_preview
13 | 
14 | 
15 | def tensor2pil(image):
16 |     return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))
17 | 
18 | def pil2tensor(image):
19 |     return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)
20 | 
21 | def batch_tensor_to_pil(img_tensor):
22 |     return [tensor2pil(img_tensor[i]) for i in range(img_tensor.shape[0])]
23 | 
24 | def batched_pil_to_tensor(images):
25 |     return torch.cat([pil2tensor(image) for image in images], dim=0)
26 | 
27 | def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
28 |     latent_image = latent["samples"]
29 |     if disable_noise:
30 |         noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
31 |     else:
32 |         batch_inds = latent["batch_index"] if "batch_index" in latent else None
33 |         noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds)
34 | 
35 |     noise_mask = None
36 |     if "noise_mask" in latent:
37 |         noise_mask = latent["noise_mask"]
38 | 
39 |     callback = latent_preview.prepare_callback(model, steps)
40 |     disable_pbar = not comfy.utils.PROGRESS_BAR_ENABLED
41 |     samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
42 |                                   denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step,
43 |                                   force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed)
44 |     out = latent.copy()
45 |     out["samples"] = samples
46 |     return (out, )
47 | 
48 | def mask2image(input_mask_pil):
49 |     input_mask_tensor = pil2tensor(input_mask_pil)
50 |     result_tensor = input_mask_tensor.expand(-1, 3, -1, -1)
51 |     return result_tensor
52 | 
53 | statement = 'Ouino+mUgeabtOWkmuWunei0teaXtuWFie+8jOWOu+WBmuabtOacieaEj+S5ieeahOS6i+aDhe+8jOi/meaJjeaYr0FJLS0tLS0tLS1C56uZQOeBteS7meWEv+WSjOS6jOeLl+WtkA=='
54 | EGSMWBA = base64.b64decode(statement.encode('utf-8')).decode('utf-8')
55 | tstatement = 'Q29tZnl1aS1lcmdvdXppLXNhbXBsZXJz'
56 | EGSMWBB = base64.b64decode(tstatement.encode('utf-8')).decode('utf-8')
57 | 
58 | red_part = EGSMWBB
59 | yellow_part = EGSMWBA.replace(red_part, "")
60 | print(Fore.RED + red_part + Fore.YELLOW + yellow_part + Fore.RESET)
61 | 
62 | class EGCYQJB:
63 |     def __init__(self):
64 |         pass
65 | 
66 |     @classmethod
67 |     def INPUT_TYPES(s):
68 |         return {"required": {
69 |             "model": ("MODEL",),
70 |             "image": ("IMAGE",),
71 |             "vae": ("VAE",),
72 |             "mask": ("MASK",),
73 |             "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
74 |             "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
75 |             "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step": 0.1, "round": 0.01}),
76 |             "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
77 |             "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
78 |             "positive": ("CONDITIONING", ),
79 |             "negative": ("CONDITIONING", ),
80 |             "denoise": ("FLOAT", {"default": 0.35, "min": 0.0, "max": 1.0, "step": 0.01}),
81 |             "Redraw_Mode": (["Original_image", "Filling"],),
82 |             "Mask_extension": ("INT", {"default": 6, "min": 0, "max": 64, "step": 1}),
83 |             "Local_Only": ("BOOLEAN", {"default": True}),
84 |             "Partial_size": ("INT", {"default": 512, "min": 0, "max": 2048, "step": 1}),
85 |             "Region_Extension": ("INT", {"default": 50, "min": 0}),
86 |             "Mask_Feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}),
87 |             "TEXT": ("STRING", {"default": "2🐕"}),
88 |         }}
89 | 
90 |     RETURN_TYPES = ("LATENT", "IMAGE", "IMAGE", "MASK")
91 |     RETURN_NAMES = ('LATENT', 'IMAGE', 'LocalIMAGE', 'PartialMask')
92 |     FUNCTION = "sample"
93 |     CATEGORY = "2🐕sampler"
94 | 
95 |     def mask_crop(self, image, mask, Region_Extension, Partial_size=0):
96 |         image_pil = tensor2pil(image)
97 |         mask_pil = tensor2pil(mask)
98 |         mask_array = np.array(mask_pil) > 0
99 |         coords = np.where(mask_array)
100 |         if coords[0].size == 0 or coords[1].size == 0:
101 |             return (image, mask, None, image, mask)  # empty mask: nothing to crop
102 |         x0, y0, x1, y1 = coords[1].min(), coords[0].min(), coords[1].max(), coords[0].max()
103 |         x0 -= Region_Extension
104 |         y0 -= Region_Extension
105 |         x1 += Region_Extension
106 |         y1 += Region_Extension
107 |         x0 = max(x0, 0)
108 |         y0 = max(y0, 0)
109 |         x1 = min(x1, image_pil.width)
110 |         y1 = min(y1, image_pil.height)
111 |         cropped_image_pil = image_pil.crop((x0, y0, x1, y1))
112 |         cropped_mask_pil = mask_pil.crop((x0, y0, x1, y1))
113 |         if Partial_size > 0:
114 |             min_size = min(cropped_image_pil.size)
115 |             if min_size != Partial_size:  # rescale so the short side equals Partial_size
116 |                 scale_ratio = Partial_size / min_size
117 |                 new_size = (int(cropped_image_pil.width * scale_ratio), int(cropped_image_pil.height * scale_ratio))
118 |                 cropped_image_pil = cropped_image_pil.resize(new_size, Image.LANCZOS)
119 |                 cropped_mask_pil = cropped_mask_pil.resize(new_size, Image.LANCZOS)
120 | 
121 |         cropped_image_tensor = pil2tensor(cropped_image_pil)
122 |         cropped_mask_tensor = pil2tensor(cropped_mask_pil)
123 |         qtch = image
124 |         qtzz = mask
125 |         return (cropped_image_tensor, cropped_mask_tensor, (y0, y1, x0, x1), qtch, qtzz)
126 | 
127 |     def encode(self, vae, image, mask, Mask_extension=6, Redraw_Mode="Filling"):
128 |         x = (image.shape[1] // 8) * 8
129 |         y = (image.shape[2] // 8) * 8
130 |         mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])),
131 |                                                size=(image.shape[1], image.shape[2]), mode="bilinear")
132 |         if Redraw_Mode == "Filling":
133 |             image = image.clone()
134 |         if image.shape[1] != x or image.shape[2] != y:
135 |             x_offset = (image.shape[1] %
8) // 2 136 | y_offset = (image.shape[2] % 8) // 2 137 | image = image[:, x_offset:x + x_offset, y_offset:y + y_offset, :] 138 | mask = mask[:, :, x_offset:x + x_offset, y_offset:y + y_offset] 139 | if Mask_extension == 0: 140 | mask_erosion = mask 141 | else: 142 | kernel_tensor = torch.ones((1, 1, Mask_extension, Mask_extension)) 143 | padding = math.ceil((Mask_extension - 1) / 2) 144 | mask_erosion = torch.clamp(torch.nn.functional.conv2d(mask.round(), kernel_tensor, padding=padding), 0, 1) 145 | 146 | m = (1.0 - mask.round()).squeeze(1) 147 | if Redraw_Mode == "Filling": 148 | for i in range(3): 149 | image[:, :, :, i] -= 0.5 150 | image[:, :, :, i] *= m 151 | image[:, :, :, i] += 0.5 152 | t = vae.encode(image) 153 | return {"samples": t, "noise_mask": (mask_erosion[:, :, :x, :y].round())}, None 154 | 155 | def paste_cropped_image_with_mask(self, original_image, cropped_image, crop_coords, mask, MHmask, Mask_Feather): 156 | y0, y1, x0, x1 = crop_coords 157 | original_image_pil = tensor2pil(original_image) 158 | cropped_image_pil = tensor2pil(cropped_image) 159 | mask_pil = tensor2pil(mask) 160 | crop_width = x1 - x0 161 | crop_height = y1 - y0 162 | crop_size = (crop_width, crop_height) 163 | 164 | cropped_image_pil = cropped_image_pil.resize(crop_size, Image.LANCZOS) 165 | mask_pil = mask_pil.resize(crop_size, Image.LANCZOS) 166 | 167 | mask_binary = mask_pil.convert('L') 168 | mask_rgba = mask_binary.convert('RGBA') 169 | blurred_mask = mask_rgba 170 | transparent_mask = mask_binary 171 | blurred_mask = mask_binary 172 | cropped_image_pil = cropped_image_pil.convert('RGBA') 173 | original_image_pil = original_image_pil.convert('RGBA') 174 | original_image_pil.paste(cropped_image_pil, (x0, y0), mask=blurred_mask) 175 | ZT_image_pil=original_image_pil.convert('RGB') 176 | IMAGEEE = pil2tensor(ZT_image_pil) 177 | mask_ecmhpil= tensor2pil(MHmask) 178 | mask_ecmh = mask_ecmhpil.convert('L') 179 | mask_ecrgba = tensor2pil(MHmask) 180 | maskecmh = None 181 | if Mask_Feather is not None: 182 | if Mask_Feather > -1: 183 | maskecmh = mask_ecrgba.filter(ImageFilter.GaussianBlur(Mask_Feather)) 184 | dyzz = pil2tensor(maskecmh) 185 | maskeccmh = pil2tensor(maskecmh) 186 | destination = original_image 187 | source = IMAGEEE 188 | dyyt = source 189 | multiplier = 8 190 | resize_source = True 191 | mask = dyzz 192 | destination = destination.clone().movedim(-1, 1) 193 | source=source.clone().movedim(-1, 1) 194 | source = source.to(destination.device) 195 | if resize_source: 196 | source = torch.nn.functional.interpolate(source, size=(destination.shape[2], destination.shape[3]), mode="bilinear") 197 | 198 | source = comfy.utils.repeat_to_batch_size(source, destination.shape[0]) 199 | x=0 200 | y=0 201 | x = int(x) 202 | y = int(y) 203 | x = max(-source.shape[3] * multiplier, min(x, destination.shape[3] * multiplier)) 204 | y = max(-source.shape[2] * multiplier, min(y, destination.shape[2] * multiplier)) 205 | 206 | left, top = (x // multiplier, y // multiplier) 207 | right, bottom = (left + source.shape[3], top + source.shape[2],) 208 | 209 | if mask is None: 210 | mask = torch.ones_like(source) 211 | else: 212 | mask = mask.to(destination.device, copy=True) 213 | mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(source.shape[2], source.shape[3]), mode="bilinear") 214 | mask = comfy.utils.repeat_to_batch_size(mask, source.shape[0]) 215 | visible_width, visible_height = (destination.shape[3] - left + min(0, x), destination.shape[2] - top + min(0, y),) 
216 | 217 | mask = mask[:, :, :visible_height, :visible_width] 218 | inverse_mask = torch.ones_like(mask) - mask 219 | 220 | source_portion = mask * source[:, :, :visible_height, :visible_width] 221 | destination_portion = inverse_mask * destination[:, :, top:bottom, left:right] 222 | 223 | destination[:, :, top:bottom, left:right] = source_portion + destination_portion 224 | zztx = destination.movedim(1, -1) 225 | return zztx,dyzz,dyyt 226 | 227 | def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, image, vae, mask, Mask_extension=6, Redraw_Mode="Filling", denoise=1.0, Local_Only=False, Region_Extension=0, Partial_size=0, Mask_Feather=1, TEXT="2🐕" ): 228 | original_image = image 229 | hqccimage = tensor2pil(image) 230 | sfmask = tensor2pil(mask) 231 | sfhmask = sfmask.resize(hqccimage.size, Image.LANCZOS) 232 | mask = pil2tensor(sfhmask) 233 | 234 | MHmask = mask 235 | 236 | if Local_Only: 237 | image, mask, crop_coords,bytx, byzz = self.mask_crop(image, mask, Region_Extension, Partial_size) 238 | latent_image, _ = self.encode(vae, image, mask, Mask_extension, Redraw_Mode) 239 | samples = common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise) 240 | decoded_image = vae.decode(samples[0]["samples"]) 241 | final_image,dyzz,dyyt = self.paste_cropped_image_with_mask(original_image, decoded_image, crop_coords, mask, MHmask, Mask_Feather) 242 | return (samples[0], final_image,decoded_image,dyzz) 243 | else: 244 | image, mask, crop_coords,bytx, byzz = self.mask_crop(image, mask, Region_Extension, Partial_size) 245 | latent_image, _ = self.encode(vae, image, mask, Mask_extension, Redraw_Mode) 246 | samples = common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise) 247 | decoded_image = vae.decode(samples[0]["samples"]) 248 | 249 | mask_ecrgba = tensor2pil(mask) 250 | 251 | maskecmh = None 252 | if Mask_Feather is not None: 253 | if Mask_Feather > -1: 254 | maskecmh = mask_ecrgba.filter(ImageFilter.GaussianBlur(Mask_Feather)) 255 | dyzz = pil2tensor(maskecmh) 256 | maskeccmh = pil2tensor(maskecmh) 257 | mask = maskeccmh 258 | destination = original_image 259 | source = decoded_image 260 | multiplier = 8 261 | resize_source = True 262 | mask = dyzz 263 | destination = destination.clone().movedim(-1, 1) 264 | source=source.clone().movedim(-1, 1) 265 | source = source.to(destination.device) 266 | if resize_source: 267 | source = torch.nn.functional.interpolate(source, size=(destination.shape[2], destination.shape[3]), mode="bilinear") 268 | 269 | source = comfy.utils.repeat_to_batch_size(source, destination.shape[0]) 270 | x=0 271 | y=0 272 | x = int(x) 273 | y = int(y) 274 | x = max(-source.shape[3] * multiplier, min(x, destination.shape[3] * multiplier)) 275 | y = max(-source.shape[2] * multiplier, min(y, destination.shape[2] * multiplier)) 276 | 277 | left, top = (x // multiplier, y // multiplier) 278 | right, bottom = (left + source.shape[3], top + source.shape[2],) 279 | 280 | if mask is None: 281 | mask = torch.ones_like(source) 282 | else: 283 | mask = mask.to(destination.device, copy=True) 284 | mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(source.shape[2], source.shape[3]), mode="bilinear") 285 | mask = comfy.utils.repeat_to_batch_size(mask, source.shape[0]) 286 | visible_width, visible_height = (destination.shape[3] - left + min(0, x), destination.shape[2] - top + min(0, y),) 287 | 
mask = mask[:, :, :visible_height, :visible_width] 288 | inverse_mask = torch.ones_like(mask) - mask 289 | source_portion = mask * source[:, :, :visible_height, :visible_width] 290 | destination_portion = inverse_mask * destination[:, :, top:bottom, left:right] 291 | destination[:, :, top:bottom, left:right] = source_portion + destination_portion 292 | zztx = destination.movedim(1, -1) 293 | return (samples[0], zztx, decoded_image, dyzz) 294 | -------------------------------------------------------------------------------- /workflow/CNworkflow.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 32, 3 | "last_link_id": 40, 4 | "nodes": [ 5 | { 6 | "id": 22, 7 | "type": "LineArtPreprocessor", 8 | "pos": [ 9 | -331, 10 | 951 11 | ], 12 | "size": { 13 | "0": 315, 14 | "1": 82 15 | }, 16 | "flags": {}, 17 | "order": 5, 18 | "mode": 0, 19 | "inputs": [ 20 | { 21 | "name": "image", 22 | "type": "IMAGE", 23 | "link": 36, 24 | "label": "图像", 25 | "slot_index": 0 26 | } 27 | ], 28 | "outputs": [ 29 | { 30 | "name": "IMAGE", 31 | "type": "IMAGE", 32 | "links": [ 33 | 31, 34 | 33 35 | ], 36 | "shape": 3, 37 | "label": "图像", 38 | "slot_index": 0 39 | } 40 | ], 41 | "properties": { 42 | "Node name for S&R": "LineArtPreprocessor" 43 | }, 44 | "widgets_values": [ 45 | "disable", 46 | 512 47 | ] 48 | }, 49 | { 50 | "id": 27, 51 | "type": "EGCYQJBCJ", 52 | "pos": [ 53 | -1, 54 | 956 55 | ], 56 | "size": { 57 | "0": 315, 58 | "1": 122 59 | }, 60 | "flags": {}, 61 | "order": 7, 62 | "mode": 0, 63 | "inputs": [ 64 | { 65 | "name": "reference_image", 66 | "type": "IMAGE", 67 | "link": 32, 68 | "label": "reference_image" 69 | }, 70 | { 71 | "name": "image", 72 | "type": "IMAGE", 73 | "link": 33, 74 | "label": "image" 75 | }, 76 | { 77 | "name": "mask", 78 | "type": "MASK", 79 | "link": 34, 80 | "label": "mask" 81 | } 82 | ], 83 | "outputs": [ 84 | { 85 | "name": "IMAGE", 86 | "type": "IMAGE", 87 | "links": [ 88 | 35, 89 | 37 90 | ], 91 | "shape": 3, 92 | "label": "IMAGE", 93 | "slot_index": 0 94 | }, 95 | { 96 | "name": "MASK", 97 | "type": "MASK", 98 | "links": [ 99 | 38 100 | ], 101 | "shape": 3, 102 | "label": "MASK", 103 | "slot_index": 1 104 | } 105 | ], 106 | "properties": { 107 | "Node name for S&R": "EGCYQJBCJ" 108 | }, 109 | "widgets_values": [ 110 | 100, 111 | 1024 112 | ] 113 | }, 114 | { 115 | "id": 26, 116 | "type": "PreviewImage", 117 | "pos": [ 118 | -5, 119 | 1147 120 | ], 121 | "size": { 122 | "0": 210, 123 | "1": 246 124 | }, 125 | "flags": {}, 126 | "order": 6, 127 | "mode": 0, 128 | "inputs": [ 129 | { 130 | "name": "images", 131 | "type": "IMAGE", 132 | "link": 31, 133 | "label": "图像" 134 | } 135 | ], 136 | "properties": { 137 | "Node name for S&R": "PreviewImage" 138 | } 139 | }, 140 | { 141 | "id": 28, 142 | "type": "PreviewImage", 143 | "pos": [ 144 | 239, 145 | 1146 146 | ], 147 | "size": { 148 | "0": 210, 149 | "1": 246 150 | }, 151 | "flags": {}, 152 | "order": 9, 153 | "mode": 0, 154 | "inputs": [ 155 | { 156 | "name": "images", 157 | "type": "IMAGE", 158 | "link": 37, 159 | "label": "图像" 160 | } 161 | ], 162 | "properties": { 163 | "Node name for S&R": "PreviewImage" 164 | } 165 | }, 166 | { 167 | "id": 30, 168 | "type": "PreviewImage", 169 | "pos": [ 170 | 470, 171 | 1147 172 | ], 173 | "size": { 174 | "0": 210, 175 | "1": 246 176 | }, 177 | "flags": {}, 178 | "order": 12, 179 | "mode": 0, 180 | "inputs": [ 181 | { 182 | "name": "images", 183 | "type": "IMAGE", 184 | "link": 39, 185 | "label": "图像" 186 | } 187 | ], 188 | 
"properties": { 189 | "Node name for S&R": "PreviewImage" 190 | } 191 | }, 192 | { 193 | "id": 29, 194 | "type": "MaskToImage", 195 | "pos": [ 196 | 401, 197 | 1436 198 | ], 199 | "size": { 200 | "0": 210, 201 | "1": 26 202 | }, 203 | "flags": {}, 204 | "order": 10, 205 | "mode": 0, 206 | "inputs": [ 207 | { 208 | "name": "mask", 209 | "type": "MASK", 210 | "link": 38, 211 | "label": "遮罩" 212 | } 213 | ], 214 | "outputs": [ 215 | { 216 | "name": "IMAGE", 217 | "type": "IMAGE", 218 | "links": [ 219 | 39 220 | ], 221 | "shape": 3, 222 | "label": "图像", 223 | "slot_index": 0 224 | } 225 | ], 226 | "properties": { 227 | "Node name for S&R": "MaskToImage" 228 | } 229 | }, 230 | { 231 | "id": 24, 232 | "type": "ControlNetApply", 233 | "pos": [ 234 | 344, 235 | 960 236 | ], 237 | "size": { 238 | "0": 317.4000244140625, 239 | "1": 98 240 | }, 241 | "flags": {}, 242 | "order": 8, 243 | "mode": 0, 244 | "inputs": [ 245 | { 246 | "name": "conditioning", 247 | "type": "CONDITIONING", 248 | "link": 27, 249 | "label": "条件", 250 | "slot_index": 0 251 | }, 252 | { 253 | "name": "control_net", 254 | "type": "CONTROL_NET", 255 | "link": 29, 256 | "label": "ControlNet", 257 | "slot_index": 1 258 | }, 259 | { 260 | "name": "image", 261 | "type": "IMAGE", 262 | "link": 35, 263 | "label": "图像", 264 | "slot_index": 2 265 | } 266 | ], 267 | "outputs": [ 268 | { 269 | "name": "CONDITIONING", 270 | "type": "CONDITIONING", 271 | "links": [ 272 | 28 273 | ], 274 | "shape": 3, 275 | "label": "条件", 276 | "slot_index": 0 277 | } 278 | ], 279 | "properties": { 280 | "Node name for S&R": "ControlNetApply" 281 | }, 282 | "widgets_values": [ 283 | 1 284 | ] 285 | }, 286 | { 287 | "id": 25, 288 | "type": "ControlNetLoader", 289 | "pos": [ 290 | 13, 291 | 806 292 | ], 293 | "size": { 294 | "0": 315, 295 | "1": 58 296 | }, 297 | "flags": {}, 298 | "order": 0, 299 | "mode": 0, 300 | "outputs": [ 301 | { 302 | "name": "CONTROL_NET", 303 | "type": "CONTROL_NET", 304 | "links": [ 305 | 29 306 | ], 307 | "shape": 3, 308 | "label": "ControlNet" 309 | } 310 | ], 311 | "properties": { 312 | "Node name for S&R": "ControlNetLoader" 313 | }, 314 | "widgets_values": [ 315 | "control_v11p_sd15_lineart.pth" 316 | ] 317 | }, 318 | { 319 | "id": 7, 320 | "type": "CLIPTextEncode", 321 | "pos": [ 322 | -100, 323 | 574 324 | ], 325 | "size": { 326 | "0": 425.27801513671875, 327 | "1": 180.6060791015625 328 | }, 329 | "flags": {}, 330 | "order": 4, 331 | "mode": 0, 332 | "inputs": [ 333 | { 334 | "name": "clip", 335 | "type": "CLIP", 336 | "link": 5, 337 | "label": "CLIP" 338 | } 339 | ], 340 | "outputs": [ 341 | { 342 | "name": "CONDITIONING", 343 | "type": "CONDITIONING", 344 | "links": [ 345 | 15 346 | ], 347 | "slot_index": 0, 348 | "label": "条件" 349 | } 350 | ], 351 | "properties": { 352 | "Node name for S&R": "CLIPTextEncode" 353 | }, 354 | "widgets_values": [ 355 | "text, watermark" 356 | ] 357 | }, 358 | { 359 | "id": 4, 360 | "type": "CheckpointLoaderSimple", 361 | "pos": [ 362 | -668, 363 | 380 364 | ], 365 | "size": { 366 | "0": 315, 367 | "1": 98 368 | }, 369 | "flags": {}, 370 | "order": 1, 371 | "mode": 0, 372 | "outputs": [ 373 | { 374 | "name": "MODEL", 375 | "type": "MODEL", 376 | "links": [ 377 | 13 378 | ], 379 | "slot_index": 0, 380 | "label": "模型" 381 | }, 382 | { 383 | "name": "CLIP", 384 | "type": "CLIP", 385 | "links": [ 386 | 3, 387 | 5 388 | ], 389 | "slot_index": 1, 390 | "label": "CLIP" 391 | }, 392 | { 393 | "name": "VAE", 394 | "type": "VAE", 395 | "links": [ 396 | 12 397 | ], 398 | "slot_index": 2, 399 | "label": 
"VAE" 400 | } 401 | ], 402 | "properties": { 403 | "Node name for S&R": "CheckpointLoaderSimple" 404 | }, 405 | "widgets_values": [ 406 | "majicMIX realistic 麦橘写实_v6(此模型为展示模型跟多模型请自行下载).safetensors" 407 | ] 408 | }, 409 | { 410 | "id": 6, 411 | "type": "CLIPTextEncode", 412 | "pos": [ 413 | -101, 414 | 353 415 | ], 416 | "size": { 417 | "0": 422.84503173828125, 418 | "1": 164.31304931640625 419 | }, 420 | "flags": {}, 421 | "order": 3, 422 | "mode": 0, 423 | "inputs": [ 424 | { 425 | "name": "clip", 426 | "type": "CLIP", 427 | "link": 3, 428 | "label": "CLIP" 429 | } 430 | ], 431 | "outputs": [ 432 | { 433 | "name": "CONDITIONING", 434 | "type": "CONDITIONING", 435 | "links": [ 436 | 27 437 | ], 438 | "slot_index": 0, 439 | "label": "条件" 440 | } 441 | ], 442 | "properties": { 443 | "Node name for S&R": "CLIPTextEncode" 444 | }, 445 | "widgets_values": [ 446 | "1girl" 447 | ] 448 | }, 449 | { 450 | "id": 10, 451 | "type": "EGCYQJB", 452 | "pos": [ 453 | 457, 454 | 384 455 | ], 456 | "size": { 457 | "0": 315, 458 | "1": 470 459 | }, 460 | "flags": {}, 461 | "order": 11, 462 | "mode": 0, 463 | "inputs": [ 464 | { 465 | "name": "model", 466 | "type": "MODEL", 467 | "link": 13, 468 | "label": "model" 469 | }, 470 | { 471 | "name": "image", 472 | "type": "IMAGE", 473 | "link": 11, 474 | "label": "image" 475 | }, 476 | { 477 | "name": "vae", 478 | "type": "VAE", 479 | "link": 12, 480 | "label": "vae" 481 | }, 482 | { 483 | "name": "mask", 484 | "type": "MASK", 485 | "link": 10, 486 | "label": "mask" 487 | }, 488 | { 489 | "name": "positive", 490 | "type": "CONDITIONING", 491 | "link": 28, 492 | "label": "positive" 493 | }, 494 | { 495 | "name": "negative", 496 | "type": "CONDITIONING", 497 | "link": 15, 498 | "label": "negative" 499 | } 500 | ], 501 | "outputs": [ 502 | { 503 | "name": "LATENT", 504 | "type": "LATENT", 505 | "links": null, 506 | "shape": 3, 507 | "label": "LATENT" 508 | }, 509 | { 510 | "name": "IMAGE", 511 | "type": "IMAGE", 512 | "links": [ 513 | 40 514 | ], 515 | "shape": 3, 516 | "label": "IMAGE", 517 | "slot_index": 1 518 | }, 519 | { 520 | "name": "LocalIMAGE", 521 | "type": "IMAGE", 522 | "links": [ 523 | 23 524 | ], 525 | "shape": 3, 526 | "label": "LocalIMAGE", 527 | "slot_index": 2 528 | }, 529 | { 530 | "name": "PartialMask", 531 | "type": "MASK", 532 | "links": [ 533 | 24 534 | ], 535 | "shape": 3, 536 | "label": "PartialMask", 537 | "slot_index": 3 538 | } 539 | ], 540 | "properties": { 541 | "Node name for S&R": "EGCYQJB" 542 | }, 543 | "widgets_values": [ 544 | 1, 545 | "fixed", 546 | 20, 547 | 8, 548 | "euler", 549 | "normal", 550 | 1, 551 | "Original_image", 552 | 6, 553 | true, 554 | 1024, 555 | 100, 556 | 5, 557 | "2🐕" 558 | ] 559 | }, 560 | { 561 | "id": 31, 562 | "type": "SaveImage", 563 | "pos": [ 564 | 852, 565 | 362 566 | ], 567 | "size": [ 568 | 315, 569 | 270.00000858306885 570 | ], 571 | "flags": {}, 572 | "order": 13, 573 | "mode": 0, 574 | "inputs": [ 575 | { 576 | "name": "images", 577 | "type": "IMAGE", 578 | "link": 40, 579 | "label": "图像" 580 | } 581 | ], 582 | "properties": {}, 583 | "widgets_values": [ 584 | "ComfyUI" 585 | ] 586 | }, 587 | { 588 | "id": 20, 589 | "type": "MaskToImage", 590 | "pos": [ 591 | 865, 592 | 988 593 | ], 594 | "size": { 595 | "0": 210, 596 | "1": 26 597 | }, 598 | "flags": {}, 599 | "order": 15, 600 | "mode": 0, 601 | "inputs": [ 602 | { 603 | "name": "mask", 604 | "type": "MASK", 605 | "link": 24, 606 | "label": "遮罩" 607 | } 608 | ], 609 | "outputs": [ 610 | { 611 | "name": "IMAGE", 612 | "type": "IMAGE", 613 | 
"links": [ 614 | 25 615 | ], 616 | "shape": 3, 617 | "label": "图像", 618 | "slot_index": 0 619 | } 620 | ], 621 | "properties": { 622 | "Node name for S&R": "MaskToImage" 623 | } 624 | }, 625 | { 626 | "id": 21, 627 | "type": "PreviewImage", 628 | "pos": [ 629 | 858, 630 | 1072 631 | ], 632 | "size": [ 633 | 308.7182840032124, 634 | 334.3786692953404 635 | ], 636 | "flags": {}, 637 | "order": 16, 638 | "mode": 0, 639 | "inputs": [ 640 | { 641 | "name": "images", 642 | "type": "IMAGE", 643 | "link": 25, 644 | "label": "图像" 645 | } 646 | ], 647 | "properties": { 648 | "Node name for S&R": "PreviewImage" 649 | } 650 | }, 651 | { 652 | "id": 13, 653 | "type": "LoadImage", 654 | "pos": [ 655 | -693, 656 | 648 657 | ], 658 | "size": { 659 | "0": 315, 660 | "1": 314 661 | }, 662 | "flags": {}, 663 | "order": 2, 664 | "mode": 0, 665 | "outputs": [ 666 | { 667 | "name": "IMAGE", 668 | "type": "IMAGE", 669 | "links": [ 670 | 11, 671 | 32, 672 | 36 673 | ], 674 | "shape": 3, 675 | "label": "图像", 676 | "slot_index": 0 677 | }, 678 | { 679 | "name": "MASK", 680 | "type": "MASK", 681 | "links": [ 682 | 10, 683 | 34 684 | ], 685 | "shape": 3, 686 | "label": "遮罩", 687 | "slot_index": 1 688 | } 689 | ], 690 | "properties": { 691 | "Node name for S&R": "LoadImage" 692 | }, 693 | "widgets_values": [ 694 | "clipspace/clipspace-mask-132714.79999995232.png [input]", 695 | "image" 696 | ] 697 | }, 698 | { 699 | "id": 19, 700 | "type": "SaveImage", 701 | "pos": [ 702 | 854, 703 | 674 704 | ], 705 | "size": [ 706 | 308.17133651617564, 707 | 268.5210477596013 708 | ], 709 | "flags": {}, 710 | "order": 14, 711 | "mode": 0, 712 | "inputs": [ 713 | { 714 | "name": "images", 715 | "type": "IMAGE", 716 | "link": 23, 717 | "label": "图像" 718 | } 719 | ], 720 | "properties": {}, 721 | "widgets_values": [ 722 | "ComfyUI" 723 | ] 724 | } 725 | ], 726 | "links": [ 727 | [ 728 | 3, 729 | 4, 730 | 1, 731 | 6, 732 | 0, 733 | "CLIP" 734 | ], 735 | [ 736 | 5, 737 | 4, 738 | 1, 739 | 7, 740 | 0, 741 | "CLIP" 742 | ], 743 | [ 744 | 10, 745 | 13, 746 | 1, 747 | 10, 748 | 3, 749 | "MASK" 750 | ], 751 | [ 752 | 11, 753 | 13, 754 | 0, 755 | 10, 756 | 1, 757 | "IMAGE" 758 | ], 759 | [ 760 | 12, 761 | 4, 762 | 2, 763 | 10, 764 | 2, 765 | "VAE" 766 | ], 767 | [ 768 | 13, 769 | 4, 770 | 0, 771 | 10, 772 | 0, 773 | "MODEL" 774 | ], 775 | [ 776 | 15, 777 | 7, 778 | 0, 779 | 10, 780 | 5, 781 | "CONDITIONING" 782 | ], 783 | [ 784 | 23, 785 | 10, 786 | 2, 787 | 19, 788 | 0, 789 | "IMAGE" 790 | ], 791 | [ 792 | 24, 793 | 10, 794 | 3, 795 | 20, 796 | 0, 797 | "MASK" 798 | ], 799 | [ 800 | 25, 801 | 20, 802 | 0, 803 | 21, 804 | 0, 805 | "IMAGE" 806 | ], 807 | [ 808 | 27, 809 | 6, 810 | 0, 811 | 24, 812 | 0, 813 | "CONDITIONING" 814 | ], 815 | [ 816 | 28, 817 | 24, 818 | 0, 819 | 10, 820 | 4, 821 | "CONDITIONING" 822 | ], 823 | [ 824 | 29, 825 | 25, 826 | 0, 827 | 24, 828 | 1, 829 | "CONTROL_NET" 830 | ], 831 | [ 832 | 31, 833 | 22, 834 | 0, 835 | 26, 836 | 0, 837 | "IMAGE" 838 | ], 839 | [ 840 | 32, 841 | 13, 842 | 0, 843 | 27, 844 | 0, 845 | "IMAGE" 846 | ], 847 | [ 848 | 33, 849 | 22, 850 | 0, 851 | 27, 852 | 1, 853 | "IMAGE" 854 | ], 855 | [ 856 | 34, 857 | 13, 858 | 1, 859 | 27, 860 | 2, 861 | "MASK" 862 | ], 863 | [ 864 | 35, 865 | 27, 866 | 0, 867 | 24, 868 | 2, 869 | "IMAGE" 870 | ], 871 | [ 872 | 36, 873 | 13, 874 | 0, 875 | 22, 876 | 0, 877 | "IMAGE" 878 | ], 879 | [ 880 | 37, 881 | 27, 882 | 0, 883 | 28, 884 | 0, 885 | "IMAGE" 886 | ], 887 | [ 888 | 38, 889 | 27, 890 | 1, 891 | 29, 892 | 0, 893 | "MASK" 894 | ], 895 | [ 896 | 39, 897 | 29, 
898 | 0, 899 | 30, 900 | 0, 901 | "IMAGE" 902 | ], 903 | [ 904 | 40, 905 | 10, 906 | 1, 907 | 31, 908 | 0, 909 | "IMAGE" 910 | ] 911 | ], 912 | "groups": [], 913 | "config": {}, 914 | "extra": { 915 | "ds": { 916 | "scale": 0.6934334949441372, 917 | "offset": [ 918 | 776.3883386300583, 919 | 76.61081209891313 920 | ] 921 | } 922 | }, 923 | "version": 0.4 924 | } --------------------------------------------------------------------------------