├── .gitattributes
├── .github
│   └── workflows
│       └── publish_action.yml
├── .gitignore
├── LICENSE
├── __init__.py
├── crop_by_mask.py
├── example
│   └── InpaintEasy-CY-V1.png
├── image_and_mask_resize.py
├── image_crop_merge.py
├── inpaint_easy_model.py
├── pyproject.toml
├── readme.md
└── requirements.txt

/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
3 | 
--------------------------------------------------------------------------------
/.github/workflows/publish_action.yml:
--------------------------------------------------------------------------------
1 | name: Publish to Comfy registry
2 | on:
3 |   workflow_dispatch:
4 |   push:
5 |     branches:
6 |       - main
7 |     paths:
8 |       - "pyproject.toml"
9 | 
10 | jobs:
11 |   publish-node:
12 |     name: Publish Custom Node to registry
13 |     runs-on: ubuntu-latest
14 |     steps:
15 |       - name: Check out code
16 |         uses: actions/checkout@v4
17 |       - name: Publish Custom Node
18 |         uses: Comfy-Org/publish-node-action@main
19 |         with:
20 |           personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }} ## Add your own personal access token to your Github Repository secrets and reference it here.
21 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | 
2 | /__pycache__
3 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2024 CYCHENYUE
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 | 
3 | from .inpaint_easy_model import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS
4 | from .image_and_mask_resize import NODE_CLASS_MAPPINGS as RESIZE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS as RESIZE_DISPLAY_MAPPINGS
5 | from .crop_by_mask import NODE_CLASS_MAPPINGS as CROP_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS as CROP_DISPLAY_MAPPINGS
6 | from .image_crop_merge import NODE_CLASS_MAPPINGS as MERGE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS as MERGE_DISPLAY_MAPPINGS
7 | 
8 | NODE_CLASS_MAPPINGS = {
9 |     **NODE_CLASS_MAPPINGS,
10 |     **RESIZE_CLASS_MAPPINGS,
11 |     **CROP_CLASS_MAPPINGS,
12 |     **MERGE_CLASS_MAPPINGS
13 | }
14 | 
15 | NODE_DISPLAY_NAME_MAPPINGS = {
16 |     **NODE_DISPLAY_NAME_MAPPINGS,
17 |     **RESIZE_DISPLAY_MAPPINGS,
18 |     **CROP_DISPLAY_MAPPINGS,
19 |     **MERGE_DISPLAY_MAPPINGS
20 | }
21 | 
22 | __all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS']
23 | 
24 | 
--------------------------------------------------------------------------------
/crop_by_mask.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 | from PIL import Image
4 | 
5 | class CropByMask:
6 |     def __init__(self):
7 |         pass
8 | 
9 |     @classmethod
10 |     def INPUT_TYPES(cls):
11 |         return {
12 |             "required": {
13 |                 "image": ("IMAGE",),
14 |                 "mask": ("MASK",),
15 |                 "padding": ("INT", {
16 |                     "default": 64,
17 |                     "min": 0,
18 |                     "max": 512,
19 |                     "step": 8,
20 |                     "display_step": 8,
21 |                     "display": "slider"
22 |                 })
23 |             }
24 |         }
25 | 
26 |     RETURN_TYPES = ("IMAGE", "MASK", "INT", "INT", "INT", "INT")
27 |     RETURN_NAMES = ("image", "mask", "crop_x", "crop_y", "original_width", "original_height")
28 |     FUNCTION = "process"
29 |     CATEGORY = "InpaintEasy"
30 | 
31 |     def process(self, image, mask, padding):
32 |         """
33 |         image: torch.Tensor [B, H, W, C], values in 0-1
34 |         mask: torch.Tensor [B, H, W], values in 0-1
35 |         Returns:
36 |         - the cropped image and mask
37 |         - x coordinate of the crop's top-left corner in the original image
38 |         - y coordinate of the crop's top-left corner in the original image
39 |         - original crop width
40 |         - original crop height
41 |         """
42 |         # Get the bounding box of the non-zero region of the mask
43 |         mask_np = mask.squeeze(0).cpu().numpy()
44 |         nonzero_indices = np.nonzero(mask_np)
45 |         if len(nonzero_indices[0]) == 0:
46 |             raise ValueError("Mask is empty")
47 | 
48 |         # Bounding box coordinates
49 |         min_y, max_y = np.min(nonzero_indices[0]), np.max(nonzero_indices[0])
50 |         min_x, max_x = np.min(nonzero_indices[1]), np.max(nonzero_indices[1])
51 | 
52 |         # Longest side of the masked region
53 |         mask_size = max(max_x - min_x + 1, max_y - min_y + 1)
54 | 
55 |         # Original image size
56 |         original_height, original_width = mask_np.shape
57 | 
58 |         # Add padding and round up to a multiple of 8
59 |         target_size = mask_size + (2 * padding)
60 |         target_size = ((target_size + 7) // 8) * 8
61 | 
62 |         # Clamp width and height independently
63 |         crop_width = min(target_size, original_width)
64 |         crop_height = min(target_size, original_height)
65 | 
66 |         # Center of the masked region
67 |         center_x = (min_x + max_x) // 2
68 |         center_y = (min_y + max_y) // 2
69 | 
70 |         # Top-left corner of the crop
71 |         crop_x = center_x - (crop_width // 2)
72 |         crop_y = center_y - (crop_height // 2)
73 | 
74 |         # Keep the crop inside the image bounds
75 |         crop_x = max(0, min(crop_x, original_width - crop_width))
76 |         crop_y = max(0, min(crop_y, original_height - crop_height))
77 | 
78 |         # Crop the image and the mask
79 |         cropped_image = image[:, crop_y:crop_y+crop_height, crop_x:crop_x+crop_width, :]
80 |         cropped_mask = mask[:, crop_y:crop_y+crop_height, crop_x:crop_x+crop_width]
81 | 
82 |         return (cropped_image, cropped_mask, int(crop_x), int(crop_y), 
83 |                 int(crop_width), int(crop_height))
84 | 
85 | NODE_CLASS_MAPPINGS = {
86 |     "CropByMask": CropByMask
87 | }
88 | 
89 | NODE_DISPLAY_NAME_MAPPINGS = {
90 |     "CropByMask": "Crop By Mask"
91 | }
--------------------------------------------------------------------------------
/example/InpaintEasy-CY-V1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CY-CHENYUE/ComfyUI-InpaintEasy/d631a03dea2397db27042f5e9ec34fce34b2cfb6/example/InpaintEasy-CY-V1.png
--------------------------------------------------------------------------------
/image_and_mask_resize.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn.functional as F
3 | import comfy.utils
4 | 
5 | class ImageAndMaskResizeNode:
6 |     DESCRIPTION = "InpaintEasy - resize the image and the mask together"
7 |     upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]
8 |     crop_methods = ["disabled", "center", "top_left", "top_right", "bottom_left", "bottom_right"]
9 | 
10 | 
11 |     def __init__(self):
12 |         self.type = "ImageMaskResize"
13 |         self.output_node = True
14 | 
15 |     @classmethod
16 |     def INPUT_TYPES(s):
17 |         return {
18 |             "required": {
19 |                 "image": ("IMAGE",),
20 |                 "mask": ("MASK",),
21 |                 "width": ("INT", {
22 |                     "default": 512,
23 |                     "min": 64,
24 |                     "max": 8192,
25 |                     "step": 8
26 |                 }),
27 |                 "height": ("INT", {
28 |                     "default": 512,
29 |                     "min": 64,
30 |                     "max": 8192,
31 |                     "step": 8
32 |                 }),
33 |                 "resize_method": (s.upscale_methods, {"default": "lanczos"}),
34 |                 "crop": (s.crop_methods, {"default": "disabled"}),
35 |                 "mask_blur_radius": ("INT", {
36 |                     "default": 10,
37 |                     "min": 0,
38 |                     "max": 64,
39 |                     "step": 1
40 |                 }),
41 |             }
42 |         }
43 | 
44 |     RETURN_TYPES = ("IMAGE", "MASK",)
45 |     RETURN_NAMES = ("image", "mask",)
46 |     FUNCTION = "resize_image_and_mask"
47 | 
48 |     CATEGORY = "InpaintEasy"
49 | 
50 |     def resize_image_and_mask(self, image, mask, width, height, resize_method="lanczos", crop="disabled", mask_blur_radius=0):
51 |         # Handle the case where both width and height are 0
52 |         if width == 0 and height == 0:
53 |             return (image, mask)
54 | 
55 |         # Image handling
56 |         samples = image.movedim(-1, 1)  # NHWC -> NCHW
57 |         if width == 0:
58 |             width = max(1, round(samples.shape[3] * height / samples.shape[2]))
59 |         elif height == 0:
60 |             height = max(1, round(samples.shape[2] * width / samples.shape[3]))
61 | 
62 |         # Scale and crop using torch.nn.functional directly
63 |         if crop != "disabled":
64 |             old_width = samples.shape[3]
65 |             old_height = samples.shape[2]
66 | 
67 |             # Scale factor
68 |             scale = max(width / old_width, height / old_height)
69 |             scaled_width = int(old_width * scale)
70 |             scaled_height = int(old_height * scale)
71 | 
72 |             # Upscale with common_upscale
73 |             samples = comfy.utils.common_upscale(samples, scaled_width, scaled_height, resize_method, crop="disabled")
74 | 
75 |             # The mask always uses bilinear interpolation
76 |             mask = F.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(scaled_height, scaled_width), mode='bilinear', align_corners=True)
77 | 
78 |             # Crop position
79 |             crop_x = 0
80 |             crop_y = 0
81 | 
82 |             if crop == "center":
83 |                 crop_x = (scaled_width - width) // 2
84 |                 crop_y = (scaled_height - height) // 2
85 |             elif crop == "top_left":
86 |                 crop_x = 0
87 |                 crop_y = 0
88 |             elif crop == "top_right":
89 |                 crop_x = scaled_width - width
90 |                 crop_y = 0
91 |             elif crop == "bottom_left":
92 |                 crop_x = 0
93 |                 crop_y = scaled_height - height
94 |             elif crop == "bottom_right":
95 |                 crop_x = scaled_width - width
96 |                 crop_y = scaled_height - height
97 |             elif crop == "random":
98 |                 crop_x = torch.randint(0, max(1, scaled_width - width), (1,)).item()
99 |                 crop_y = torch.randint(0, max(1, scaled_height - height), (1,)).item()
100 | 
101 |             # Apply the crop
102 |             samples = samples[:, :, crop_y:crop_y + height, crop_x:crop_x + width]
103 |             mask = mask[:, :, crop_y:crop_y + height, crop_x:crop_x + width]
104 |         else:
105 |             # Resize directly with common_upscale
106 |             samples = comfy.utils.common_upscale(samples, width, height, resize_method, crop="disabled")
107 |             mask = F.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(height, width), mode='bilinear', align_corners=True)
108 | 
109 |         image_resized = samples.movedim(1, -1)  # NCHW -> NHWC
110 |         mask_resized = mask.squeeze(1)  # NCHW -> NHW
111 | 
112 |         # Apply Gaussian blur to the mask before returning
113 |         if mask_blur_radius > 0:
114 |             # Build the Gaussian kernel
115 |             kernel_size = mask_blur_radius * 2 + 1
116 |             x = torch.arange(kernel_size, dtype=torch.float32, device=mask_resized.device)
117 |             x = x - (kernel_size - 1) / 2
118 |             gaussian = torch.exp(-(x ** 2) / (2 * (mask_blur_radius / 3) ** 2))
119 |             gaussian = gaussian / gaussian.sum()
120 | 
121 |             # Expand the kernel to 2D
122 |             gaussian_2d = gaussian.view(1, -1) * gaussian.view(-1, 1)
123 |             gaussian_2d = gaussian_2d.view(1, 1, kernel_size, kernel_size)
124 | 
125 |             # Apply the Gaussian blur
126 |             mask_for_blur = mask_resized.unsqueeze(1)  # Add channel dimension
127 |             # Pad the borders in reflect mode to avoid edge artifacts
128 |             padding = kernel_size // 2
129 |             mask_padded = F.pad(mask_for_blur, (padding, padding, padding, padding), mode='reflect')
130 |             mask_resized = F.conv2d(mask_padded, gaussian_2d.to(mask_resized.device), padding=0).squeeze(1)
131 | 
132 |             # Keep values in the 0-1 range
133 |             mask_resized = torch.clamp(mask_resized, 0, 1)
134 | 
135 |         return (image_resized, mask_resized)
136 | 
137 | NODE_CLASS_MAPPINGS = {
138 |     "ImageAndMaskResizeNode": ImageAndMaskResizeNode
139 | }
140 | 
141 | NODE_DISPLAY_NAME_MAPPINGS = {
142 |     "ImageAndMaskResizeNode": "Image and Mask Resize"
143 | }
--------------------------------------------------------------------------------
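A standalone sketch of the mask feathering performed in `ImageAndMaskResizeNode.resize_image_and_mask` above (this block is an illustrative addition, not a file in the repository): the node builds a `(2*radius+1)`-wide Gaussian with sigma = radius/3, forms the 2D kernel as an outer product, reflect-pads the mask, convolves it, and clamps to 0-1. Only torch is assumed.

```python
# Illustrative sketch, not repository code: reproduce the mask feathering of
# ImageAndMaskResizeNode with plain torch so the kernel construction is easy
# to inspect.
import torch
import torch.nn.functional as F

def feather_mask(mask: torch.Tensor, radius: int) -> torch.Tensor:
    """mask: [B, H, W] in 0-1; returns a blurred mask of the same shape."""
    if radius <= 0:
        return mask
    kernel_size = radius * 2 + 1
    x = torch.arange(kernel_size, dtype=torch.float32, device=mask.device)
    x = x - (kernel_size - 1) / 2
    gaussian = torch.exp(-(x ** 2) / (2 * (radius / 3) ** 2))   # sigma = radius / 3
    gaussian = gaussian / gaussian.sum()
    kernel_2d = (gaussian.view(-1, 1) * gaussian.view(1, -1)).view(1, 1, kernel_size, kernel_size)

    # Reflect-pad by the radius so the output keeps the input size, then convolve.
    padded = F.pad(mask.unsqueeze(1), (radius, radius, radius, radius), mode="reflect")
    blurred = F.conv2d(padded, kernel_2d).squeeze(1)
    return torch.clamp(blurred, 0, 1)

hard_mask = torch.zeros(1, 64, 64)
hard_mask[:, 16:48, 16:48] = 1.0
soft_mask = feather_mask(hard_mask, radius=10)
print(soft_mask.min().item(), soft_mask.max().item())  # edges now fall off smoothly
```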
/image_crop_merge.py:
--------------------------------------------------------------------------------
1 | from comfy.utils import common_upscale
2 | 
3 | class ImageCropMerge:
4 |     DESCRIPTION = "InpaintEasy - merge the cropped image back into the original image"
5 |     @classmethod
6 |     def INPUT_TYPES(s):
7 |         return {
8 |             "required": {
9 |                 "cropped_image": ("IMAGE",),  # the cropped and processed image
10 |                 "original_image": ("IMAGE",),  # the original full image to merge into
11 |                 "crop_x": ("INT", {"default": 0, "min": 0, "max": 4096, "forceInput": True}),
12 |                 "crop_y": ("INT", {"default": 0, "min": 0, "max": 4096, "forceInput": True}),
13 |                 "cropped_original_width": ("INT", {"default": 512, "min": 1, "max": 4096, "forceInput": True}),
14 |                 "cropped_original_height": ("INT", {"default": 512, "min": 1, "max": 4096, "forceInput": True}),
15 |                 "resize_method": (["nearest-exact", "bilinear", "area", "bicubic", "lanczos"], {"default": "lanczos"}),
16 |             }
17 |         }
18 | 
19 |     RETURN_TYPES = ("IMAGE",)
20 |     FUNCTION = "merge_images"
21 |     CATEGORY = "InpaintEasy"
22 | 
23 |     def merge_images(self, cropped_image, original_image, cropped_original_width, cropped_original_height, crop_x, crop_y, resize_method):
24 |         # First resize the cropped image back to its original crop size
25 |         samples = cropped_image.movedim(-1, 1)
26 |         resized_image = common_upscale(samples, cropped_original_width, cropped_original_height, resize_method, "disabled")
27 |         resized_image = resized_image.movedim(1, -1)
28 | 
29 |         # Paste the resized image into the original image at the crop position
30 |         result = original_image.clone()
31 |         result[:, crop_y:crop_y+cropped_original_height, crop_x:crop_x+cropped_original_width] = resized_image
32 | 
33 |         return (result,)
34 | 
35 | 
36 | NODE_CLASS_MAPPINGS = {
37 |     "ImageCropMerge": ImageCropMerge
38 | }
39 | NODE_DISPLAY_NAME_MAPPINGS = {
40 |     "ImageCropMerge": "Image Crop Merge"
41 | }
--------------------------------------------------------------------------------
/inpaint_easy_model.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import node_helpers
3 | 
4 | class InpaintEasyModel:
5 |     DESCRIPTION = "InpaintEasy - handles the inpaint conditioning and ControlNet"
6 |     def __init__(self):
7 |         self.type = "InpaintEasyModel"
8 |         self.output_node = True
9 |         self.description = "InpaintEasyModel - handles the inpaint conditioning and ControlNet"
10 | 
11 |     @classmethod
12 |     def INPUT_TYPES(s):
13 |         return {
14 |             "required": {
15 |                 "positive": ("CONDITIONING",),
16 |                 "negative": ("CONDITIONING",),
17 |                 "inpaint_image": ("IMAGE",),
18 |                 "mask": ("MASK",),
19 |                 "vae": ("VAE",),
20 |                 "strength": ("FLOAT", {
21 |                     "default": 0.5,
22 |                     "min": 0.0,
23 |                     "max": 10.0,
24 |                     "step": 0.01
25 |                 }),
26 |                 "start_percent": ("FLOAT", {
27 |                     "default": 0.0,
28 |                     "min": 0.0,
29 |                     "max": 1.0,
30 |                     "step": 0.001
31 |                 }),
32 |                 "end_percent": ("FLOAT", {
33 |                     "default": 1.0,
34 |                     "min": 0.0,
35 |                     "max": 1.0,
36 |                     "step": 0.001
37 |                 }),
38 |             },
39 |             "optional": {
40 |                 "control_net": ("CONTROL_NET",),
41 |                 "control_image": ("IMAGE",),
42 |             }
43 |         }
44 | 
45 |     RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT",)
46 |     RETURN_NAMES = ("positive", "negative", "latent",)
47 |     FUNCTION = "combine_conditioning"
48 | 
49 |     CATEGORY = "InpaintEasy"
50 | 
51 |     def combine_conditioning(self, positive, negative, inpaint_image, mask, vae,
52 |                              strength=1.0, start_percent=0.0, end_percent=1.0,
53 |                              control_net=None, control_image=None):
54 |         x = (inpaint_image.shape[1] // 8) * 8
55 |         y = (inpaint_image.shape[2] // 8) * 8
56 |         mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])),
57 |                                                size=(inpaint_image.shape[1], inpaint_image.shape[2]), mode="bilinear")
58 | 
59 |         orig_pixels = inpaint_image
60 |         pixels = orig_pixels.clone()
61 |         if pixels.shape[1] != x or pixels.shape[2] != y:
62 |             x_offset = (pixels.shape[1] % 8) // 2
63 |             y_offset = (pixels.shape[2] % 8) // 2
64 |             pixels = pixels[:,x_offset:x + x_offset, y_offset:y + y_offset,:]
65 |             mask = mask[:,:,x_offset:x + x_offset, y_offset:y + y_offset]
66 | 
67 |         m = (1.0 - mask.round()).squeeze(1)
68 |         for i in range(3):
69 |             pixels[:,:,:,i] -= 0.5
70 |             pixels[:,:,:,i] *= m
71 |             pixels[:,:,:,i] += 0.5
72 | 
73 |         concat_latent = vae.encode(pixels)
74 |         orig_latent = vae.encode(orig_pixels)
75 | 
76 |         out_latent = {
77 |             "samples": orig_latent,
78 |             "noise_mask": mask
79 |         }
80 | 
81 |         inpaint_conditioning = []
82 |         for conditioning in [positive, negative]:
83 |             c = node_helpers.conditioning_set_values(conditioning, {
84 |                 "concat_latent_image": concat_latent,
85 |                 "concat_mask": mask
86 |             })
87 |             inpaint_conditioning.append(c)
88 | 
89 |         if strength == 0 or control_net is None or control_image is None:
90 |             return (inpaint_conditioning[0], inpaint_conditioning[1], out_latent)
91 | 
92 |         control_hint = control_image.movedim(-1,1)
93 |         cnets = {}
94 | 
95 |         out = []
96 |         for conditioning in inpaint_conditioning:
97 |             c = []
98 |             for t in conditioning:
99 |                 d = t[1].copy()
100 | 
101 |                 prev_cnet = d.get('control', None)
102 |                 if prev_cnet in cnets:
103 |                     c_net = cnets[prev_cnet]
104 |                 else:
105 |                     c_net = control_net.copy().set_cond_hint(control_hint, strength,
106 |                                                              (start_percent, end_percent),
107 |                                                              vae=vae)
108 |                     c_net.set_previous_controlnet(prev_cnet)
109 |                     cnets[prev_cnet] = c_net
110 | 
111 |                 d['control'] = c_net
112 |                 d['control_apply_to_uncond'] = False
113 |                 n = [t[0], d]
114 |                 c.append(n)
115 |             out.append(c)
116 | 
117 |         return (out[0], out[1], out_latent)
118 | 
119 | NODE_CLASS_MAPPINGS = {
120 |     "InpaintEasyModel": InpaintEasyModel
121 | }
122 | 
123 | NODE_DISPLAY_NAME_MAPPINGS = {
124 |     "InpaintEasyModel": "Inpaint Model"
125 | }
--------------------------------------------------------------------------------
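A small illustrative snippet (not a repository file) of the pixel neutralisation step in `InpaintEasyModel.combine_conditioning` above: before the concat latent is VAE-encoded, pixels under the rounded mask are pushed to 0.5 gray so the original content of the masked area does not leak into the inpaint conditioning. Only torch is assumed; the tensor shapes follow the ComfyUI IMAGE/MASK conventions used in the node.

```python
# Illustrative sketch, not repository code: the masked-pixel graying step on a
# tiny dummy tensor.
import torch

pixels = torch.rand(1, 8, 8, 3)       # IMAGE: [B, H, W, C], values in 0-1
mask = torch.zeros(1, 1, 8, 8)        # mask after interpolation: [B, 1, H, W]
mask[:, :, 2:6, 2:6] = 1.0            # the hole to be repainted

m = (1.0 - mask.round()).squeeze(1)   # 0 inside the hole, 1 outside
for i in range(3):
    pixels[:, :, :, i] -= 0.5
    pixels[:, :, :, i] *= m
    pixels[:, :, :, i] += 0.5

print(pixels[0, 3, 3])                # a masked pixel is now [0.5, 0.5, 0.5]
print(pixels[0, 0, 0])                # an unmasked pixel keeps its value
```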
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "comfyui-inpainteasy"
3 | description = "InpaintEasy is a set of optimized local repainting (Inpaint) nodes that provide a simpler and more powerful local repainting workflow. It makes local repainting work easier and more efficient with intelligent cropping and merging functions."
4 | version = "1.0.2"
5 | license = {file = "LICENSE"}
6 | dependencies = ["numpy"]
7 | 
8 | [project.urls]
9 | Repository = "https://github.com/CY-CHENYUE/ComfyUI-InpaintEasy"
10 | # Used by Comfy Registry https://comfyregistry.org
11 | 
12 | [tool.comfy]
13 | PublisherId = "cychenyue"
14 | DisplayName = "ComfyUI-InpaintEasy"
15 | Icon = ""
16 | 
--------------------------------------------------------------------------------
/readme.md:
--------------------------------------------------------------------------------
1 | # ComfyUI InpaintEasy
2 | 
3 | ComfyUI InpaintEasy is a set of optimized inpainting nodes that provide a simpler and more powerful inpainting workflow. Smart cropping and merging make inpainting work more convenient and efficient.
4 | 
5 | ## Features
6 | 
7 | - All-in-one inpaint model node
8 | - Smart cropping that automatically detects the repaint area and picks the optimal crop
9 | - Synchronized image and mask resizing with multiple scaling algorithms
10 | - Mask blurring for more natural inpainting results
11 | - Smart merging that seamlessly blends the inpainted result back into the original image
12 | 
13 | ## Installation
14 | 
15 | Option 1:
16 | 
17 | Search for `ComfyUI-InpaintEasy` in ComfyUI-Manager, install it, and restart ComfyUI.
18 | 
19 | Option 2:
20 | 
21 | 1. Open ComfyUI's `custom_nodes` folder
22 | 2. Clone the repository with:
23 | ```bash
24 | git clone https://github.com/CY-CHENYUE/ComfyUI-InpaintEasy.git
25 | ```
26 | ## Workflow
27 | ![alt text](example/InpaintEasy-CY-V1.png)
28 | ## Nodes
29 | 
30 | ### 1. Inpaint Model
31 | The all-in-one inpaint model node.
32 | 
33 | Inputs:
34 | - `positive`: positive prompt conditioning
35 | - `negative`: negative prompt conditioning
36 | - `inpaint_image`: the image to inpaint
37 | - `control_net`: the ControlNet model (optional)
38 | - `control_image`: the input image for the ControlNet (optional)
39 | - `mask`: the inpaint mask
40 | - `vae`: the VAE model
41 | - `strength`: ControlNet strength; adjust it to the situation. If the inpainted result drifts away from the original content, raise the value; if you want more creative freedom, lower it.
42 | e.g.:
43 | 1. When adding a new object to the scene (out of nothing), a smaller value works well.
44 | 2. When changing an object that already exists in the scene, raise it somewhat.
45 | - `start_percent`: where the ControlNet guidance starts
46 | - `end_percent`: where the ControlNet guidance ends
47 | 
48 | ### 2. Image and Mask Resize
49 | Resizes the image and the mask together.
50 | 
51 | Inputs:
52 | - `image`: input image
53 | - `mask`: input mask
54 | - `width`: target width
55 | - `height`: target height
56 | - `resize_method`: scaling method ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]
57 | - `crop`: crop mode ["disabled", "center", "top_left", "top_right", "bottom_left", "bottom_right"]
58 | - `mask_blur_radius`: mask blur radius
59 | 
60 | ### 3. Crop By Mask
61 | Smart-crops the image around the mask, automatically computing the best crop region.
62 | 
63 | Inputs:
64 | - `image`: input image
65 | - `mask`: input mask
66 | - `padding`: extra margin around the crop region. It controls how much surrounding context the inpaint sees: the larger the padding, the more the generated content relates to the original image. Too much padding, however, can keep new content from being generated and costs more VRAM and RAM.
67 | 
68 | 
69 | Outputs:
70 | - `image`: the cropped image
71 | - `mask`: the cropped mask
72 | - `crop_x`: X coordinate of the crop region
73 | - `crop_y`: Y coordinate of the crop region
74 | - `original_width`: original crop width
75 | - `original_height`: original crop height
76 | 
77 | ### 4. Image Crop Merge
78 | Merges the processed crop back into the original image.
79 | 
80 | Inputs:
81 | - `cropped_image`: the cropped and processed image
82 | - `original_image`: the original full image to merge into
83 | - `crop_x`: X coordinate of the crop region
84 | - `crop_y`: Y coordinate of the crop region
85 | - `cropped_original_width`: original crop width
86 | - `cropped_original_height`: original crop height
87 | - `resize_method`: scaling method ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]
88 | 
89 | ## Usage Tips
90 | 
91 | 1. Use the Crop By Mask node to smart-crop the repaint area first; it makes processing more efficient.
92 | 2. The Image and Mask Resize node is useful when the image size needs adjusting and supports several scaling algorithms.
93 | 3. When using the Image Crop Merge node, make sure its inputs match the outputs of the Crop By Mask node.
94 | 4. Adjust the mask_blur_radius parameter to get a more natural edge transition.
95 | 5. Make the mask a bit larger than the target object so the inpaint has room to work; if the repainted region needs to change a lot, control it by raising the strength.
96 | 
97 | 
98 | 
99 | 
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | 
--------------------------------------------------------------------------------
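As a quick sanity check of the crop and merge arithmetic described in the readme, the sketch below (an illustrative addition, not a repository file) runs `CropByMask` on dummy tensors outside of a ComfyUI graph and pastes the crop back with plain tensor indexing, mirroring what `ImageCropMerge` does after resizing the crop with `comfy.utils.common_upscale`. It assumes torch, numpy, and Pillow are installed and that it is run from the repository root.

```python
# Illustrative sketch, not repository code: exercise CropByMask on dummy data
# and paste the (unmodified) crop back into the original image.
import torch

from crop_by_mask import CropByMask

image = torch.rand(1, 512, 512, 3)    # IMAGE: [B, H, W, C], values in 0-1
mask = torch.zeros(1, 512, 512)       # MASK:  [B, H, W], values in 0-1
mask[:, 200:280, 180:260] = 1.0       # mark an 80x80 region for inpainting

cropped_image, cropped_mask, crop_x, crop_y, crop_w, crop_h = CropByMask().process(
    image, mask, padding=64
)
# 80 px of mask plus 2*64 px of padding, rounded up to a multiple of 8 -> 208x208 crop
print(cropped_image.shape, (crop_x, crop_y, crop_w, crop_h))

# Paste the crop back into a copy of the original image; this mirrors the
# indexing ImageCropMerge uses once the crop has been resized back to
# crop_w x crop_h. With an unmodified crop the round trip is lossless.
merged = image.clone()
merged[:, crop_y:crop_y + crop_h, crop_x:crop_x + crop_w, :] = cropped_image
assert torch.equal(merged, image)
```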