├── requirements.txt
├── nodes
│   ├── __pycache__
│   │   └── ImageStitchAdvance.cpython-311.pyc
│   ├── RBGPadPro.py
│   └── ImageStitchAdvance.py
├── web
│   ├── js
│   │   └── RBGSuitePack.js
│   └── nodes
│       ├── RBGPadPro.js
│       └── RBGImageStitchPlus.js
├── pyproject.toml
├── __init__.py
└── README.md
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | Pillow
3 |
--------------------------------------------------------------------------------
/nodes/__pycache__/ImageStitchAdvance.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RamonGuthrie/ComfyUI-RBG-ImageStitchPlus/HEAD/nodes/__pycache__/ImageStitchAdvance.cpython-311.pyc
--------------------------------------------------------------------------------
/web/js/RBGSuitePack.js:
--------------------------------------------------------------------------------
1 |
2 | (function() {
3 | const link = document.createElement('link');
4 | link.rel = 'stylesheet';
5 | link.href = 'https://fonts.googleapis.com/icon?family=Material+Icons';
6 | document.head.appendChild(link);
7 | })();
8 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
 1 | [project]
 2 | name = "comfyui-rbg-imagestitchplus"
 3 | version = "2.1.0"
 4 | description = "An advanced image stitching node for ComfyUI."
 5 | readme = "README.md"
 6 | requires-python = ">=3.8"
 7 | license = { text = "MIT" }
 8 | authors = [
 9 |     { name = "Ramon Guthrie" },
10 | ]
11 | classifiers = [
12 |     "Development Status :: 4 - Beta",
13 |     "Intended Audience :: Developers",
14 |     "License :: OSI Approved :: MIT License",
15 |     "Programming Language :: Python :: 3",
16 |     "Programming Language :: Python :: 3.8",
17 |     "Programming Language :: Python :: 3.9",
18 |     "Programming Language :: Python :: 3.10",
19 |     "Programming Language :: Python :: 3.11",
20 |     "Topic :: Multimedia :: Graphics",
21 |     "Topic :: Scientific/Engineering :: Image Processing",
22 | ]
23 | 
24 | dependencies = [
25 |     "numpy",
26 |     "Pillow",
27 | ]
28 | 
29 | # Used by Comfy Registry https://comfyregistry.org
30 | [build-system]
31 | requires = ["setuptools", "wheel"]
32 | build-backend = "setuptools.build_meta"
33 | 
34 | [project.urls]
35 | "Homepage" = "https://github.com/RamonGuthrie/ComfyUI-RBG-ImageStitchPlus"
36 | "Bug Tracker" = "https://github.com/RamonGuthrie/ComfyUI-RBG-ImageStitchPlus/issues"
37 | "LinkedIn" = "https://www.linkedin.com/in/ramonguthrie"
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
1 | from .nodes.ImageStitchAdvance import RBGImageStitchPlus
2 | from .nodes.RBGPadPro import RBGPadPro
3 |
4 | NODE_CLASS_MAPPINGS = {
5 | "RBGImageStitchPlus": RBGImageStitchPlus,
6 | "RBGPadPro": RBGPadPro
7 | }
8 |
9 | NODE_DISPLAY_NAME_MAPPINGS = {
10 | "RBGImageStitchPlus": "RBG Image Stitch Plus 🧩",
11 | "RBGPadPro": "RBG Pad Pro 🛠️"
12 | }
13 |
14 | WEB_DIRECTORY = "./web"
15 |
16 | print("### Loading: ComfyUI-RBG-ImageStitchPlus ###")
17 |
18 | # Add the Google Material Icons stylesheet via a generated JS loader
19 | import os
20 | 
21 |
22 | # Get the directory of the current script
23 | dir_path = os.path.dirname(os.path.realpath(__file__))
24 | js_path = os.path.join(dir_path, "web", "js")
25 |
26 | # Create the directory if it doesn't exist
27 | os.makedirs(js_path, exist_ok=True)
28 |
29 | # Create a new JS file to load the stylesheet
30 | js_file_path = os.path.join(js_path, "RBGSuitePack.js")
31 | with open(js_file_path, "w") as f:
32 | f.write("""
33 | (function() {
34 | const link = document.createElement('link');
35 | link.rel = 'stylesheet';
36 | link.href = 'https://fonts.googleapis.com/icon?family=Material+Icons';
37 | document.head.appendChild(link);
38 | })();
39 | """)
40 |
41 | __all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS", "WEB_DIRECTORY"]
--------------------------------------------------------------------------------
/web/nodes/RBGPadPro.js:
--------------------------------------------------------------------------------
1 | import { app } from "/scripts/app.js";
2 |
3 | app.registerExtension({
4 | name: "RBGSuitePack.PadPro.Tooltips",
5 | async beforeRegisterNodeDef(nodeType, nodeData, app) {
6 | if (nodeData.name === "RBGPadPro") {
7 | const onNodeCreated = nodeType.prototype.onNodeCreated;
8 | nodeType.prototype.onNodeCreated = function () {
9 | if (onNodeCreated) onNodeCreated.apply(this, arguments);
10 |
11 | const tooltips = {
12 |                     pad_mode: "How to fill the new area: 'pad' uses the solid pad_color, 'pad_edge' uses the average colour of the image's border pixels, and 'transparent_fill' leaves the padded area fully transparent.",
13 | pad_left: "Amount of padding to add to the left side.",
14 | pad_right: "Amount of padding to add to the right side.",
15 | pad_top: "Amount of padding to add to the top.",
16 | pad_bottom: "Amount of padding to add to the bottom.",
17 | pad_feathering: "Blends the image edges with the background for a smoother transition.",
18 | pad_color: "The color of the padded area.",
19 | image_position: "The base position of the image on the canvas.",
20 | image_offset_x: "Fine-tune the image's horizontal position.",
21 | image_offset_y: "Fine-tune the image's vertical position.",
22 | image_scale: "Scale the image before padding. The canvas size is not affected.",
23 | fill_transparent_background: "Fill transparent areas with a solid color.",
24 | transparent_fill_color: "The color to use for transparent areas.",
25 | pad_aspect_ratio: "Automatically adjust padding to match a specific aspect ratio.",
26 | resize_mode: "Choose how to resize the final image. 'resize_longer_side' and 'resize_shorter_side' maintain the aspect ratio.",
27 | target_size: "The target size in pixels for the selected side.",
28 | resample_filter: "The interpolation method to use for resizing."
29 | };
30 |
31 | for (const widget of this.widgets) {
32 | if (tooltips[widget.name] && widget.canvas) {
33 | widget.canvas.title = tooltips[widget.name];
34 | }
35 | }
36 | };
37 | }
38 | }
39 | });
40 |
--------------------------------------------------------------------------------
/web/nodes/RBGImageStitchPlus.js:
--------------------------------------------------------------------------------
1 | import { app } from "/scripts/app.js";
2 |
3 | app.registerExtension({
4 | name: "RBGSuitePack.ImageStitchPlus.Icons",
5 | async beforeRegisterNodeDef(nodeType, nodeData, app) {
6 | if (nodeData.name === "RBGImageStitchPlus") {
7 |
8 | const WIDGETS_TO_UPDATE = ["direction", "keep_proportion", "crop_position", "spacing_color"];
9 |
10 | // --- OnConfigure: Apply icons to widgets ---
11 | const onConfigure = nodeType.prototype.onConfigure;
12 | nodeType.prototype.onConfigure = function () {
13 | if (onConfigure) onConfigure.apply(this, arguments);
14 |
15 | const updateWidget = (widgetName, map, isColor = false) => {
16 | const widget = this.widgets.find(w => w.name === widgetName);
17 | if (!widget) return;
18 |
19 | // Add a class to the widget's element for styling
20 | if (widget.inputEl && widget.inputEl.parentElement) {
21 | widget.inputEl.parentElement.classList.add("rbg-styled-widget");
22 | }
23 |
24 | // Check if the widget has already been updated
25 | if (widget.options.values && typeof widget.options.values[0] === 'object') {
26 | return;
27 | }
28 |
29 | const values = widget.options.values || [];
30 |
31 | widget.options.values = values.map(value => {
32 | const item = map[value];
33 | if (item) {
34 | let content;
35 | const wrapperStyle = `display: inline-block; width: calc(100% - 20px); white-space: nowrap; overflow: hidden; text-overflow: ellipsis;`;
36 | if (isColor) {
37 | let style = `display: inline-block; width: 12px; height: 12px; border-radius: 50%; margin-right: 5px; vertical-align: middle; border: 1px solid #555;`;
38 | if (value === 'custom') {
39 | style += ` background: conic-gradient(red, yellow, lime, aqua, blue, magenta, red);`;
40 | } else {
41 | style += ` background-color: ${item};`;
42 | }
43 |                                     content = `<span style="${wrapperStyle}"><span style="${style}"></span>${value}</span>`;
44 | } else {
45 |                                     content = `<span style="${wrapperStyle}"><span class="material-icons" style="font-size: 14px; vertical-align: middle; margin-right: 5px;">${item}</span>${value}</span>`;
46 | }
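                            // Rich combo entry: the dropdown renders `content` as HTML
                            // (icon/swatch plus label), while toString/toJSON keep the
                            // widget's displayed and serialized value as the plain string.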
47 | return {
48 | content: content,
49 | value: value,
50 | text: value,
51 | toString: function() { return this.text; },
52 | toJSON: function() { return this.value; }
53 | };
54 | }
55 | return value;
56 | });
57 | };
58 |
59 | const keepProportionIcons = { "resize": "photo_size_select_large", "pad": "aspect_ratio", "pad_edge": "fullscreen_exit", "crop": "crop" };
60 | const directionIcons = { "right": "arrow_forward", "down": "arrow_downward", "left": "arrow_back", "up": "arrow_upward", "H_then_V_down": "south_east", "H_then_V_up": "north_east", "V_then_H_right": "south_east", "V_then_H_left": "south_west", "Grid_2x2": "grid_view" };
61 | const cropPositionIcons = { "center": "center_focus_strong", "top": "vertical_align_top", "bottom": "vertical_align_bottom", "left": "align_horizontal_left", "right": "align_horizontal_right" };
62 | const spacingColorMap = { "white": "#FFFFFF", "black": "#000000", "red": "#FF0000", "green": "#00FF00", "blue": "#0000FF", "custom": "custom" };
63 |
64 | updateWidget("keep_proportion", keepProportionIcons);
65 | updateWidget("direction", directionIcons);
66 | updateWidget("crop_position", cropPositionIcons);
67 | updateWidget("spacing_color", spacingColorMap, true);
68 | };
69 |
70 | // Inject CSS to ensure arrows are clickable
71 | const style = document.createElement('style');
72 | style.innerHTML = `
73 | .rbg-styled-widget .combo-arrow {
74 | pointer-events: auto !important;
75 | z-index: 100 !important;
76 | }
77 | `;
78 | document.head.appendChild(style);
79 | }
80 | },
81 | });
82 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ComfyUI-RBG-ImageStitchPlus 🧩
2 | 

3 |
4 | ## An Advanced Image Stitching Node for ComfyUI ✨
5 |
6 | This project provides a powerful and flexible image stitching node for ComfyUI, designed to seamlessly combine multiple images into a single canvas. Whether you're creating panoramas, grids, or complex layouts, the RBG Image Stitch Plus node offers the tools you need to get the job done.
7 |
8 | ---
9 | 
10 | ## Feature List 🚀
11 |
12 | - **Advanced Stitching Directions:** Combine up to three images with multiple layout options:
13 | - **Simple:** `right`, `down`, `left`, `up`.
14 | - **Compound:** `H_then_V_down`, `H_then_V_up`, `V_then_H_right`, `V_then_H_left`.
15 | - **Grid:** `Grid_2x2` for a four-quadrant layout.
16 | - **Intelligent Proportion Control:** Choose how to handle images of different sizes:
17 | - `resize`: Resizes images to match the dimensions required by the layout.
18 | - `pad`: Adds padding to smaller images to match the largest image in the relevant dimension.
19 | - `pad_edge`: A unique padding mode that analyses the border pixels of an image and uses their average colour for a seamless extension.
20 |   - `crop`: Crops images to a uniform size. You can control the crop's origin with `crop_position` (`center`, `top`, `left`, etc.) for precise framing.
21 | - **Customizable Spacing:** Add a visual separator between stitched images with a custom width and colour.
22 | - **Background Fill:** Fill transparent areas of the final canvas with a solid colour, perfect for ensuring consistency in your final output.
23 | - **Advanced Resizing & Anti-Aliasing:**
24 |   - **Final Resize:** Scale the entire stitched canvas based on its longer or shorter side (`resize_longer_side` / `resize_shorter_side`) to fit specific dimensions.
25 | - **Supersampling:** Apply high-quality anti-aliasing by rendering the image at a higher resolution (`supersample_factor`) and then downscaling it.
26 | - **Interpolation Control:** Select specific resampling filters (e.g., `lanczos`, `bicubic`, `area`) for both resizing and the final downsample, giving you full control over the final texture and sharpness.
27 | - **Clarity Adjustment:** Enhance or soften the midtone contrast of the final image via `clarity_strength`: positive values make the image "pop" with punchy detail, while negative values give it a soft, dreamlike feel. A sketch of the operation follows this list.
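
For reference, the clarity pass is essentially an unsharp mask applied to midtones: blur the image, treat the difference as "detail", and add a scaled copy of that detail back. A minimal standalone sketch mirroring the logic in `nodes/ImageStitchAdvance.py` (the helper name `apply_clarity` is ours):

```python
import torch
from torchvision.transforms import functional as TF

def apply_clarity(image_bhwc: torch.Tensor, strength: float) -> torch.Tensor:
    """Midtone-contrast adjustment: add back a scaled high-frequency residual."""
    x = image_bhwc.movedim(-1, 1)                      # B,H,W,C -> B,C,H,W
    sigma = max(1.0, min(x.shape[2], x.shape[3]) / 50.0)
    kernel = 2 * int(round(3 * sigma)) + 1             # odd kernel covering ~3 sigma
    blurred = TF.gaussian_blur(x, [kernel, kernel], [sigma, sigma])
    detail = x - blurred                               # high frequencies the blur removed
    return (x + detail * strength).clamp(0.0, 1.0).movedim(1, -1)
```
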
28 | ---
29 | ## Watch the ImageStitchPlus Feature Showcase 📺
30 | https://github.com/user-attachments/assets/52eec166-9c79-4583-9c89-d83c2dcbe986
31 |
32 | ---
33 |
34 | ## Update: 14/07/2025 - RBG Pad Pro Node 🛠️
35 |
36 | The `RBGPadPro` node is a powerful and versatile tool designed for advanced image padding and resizing operations. It goes far beyond simple padding by offering intelligent features like aspect ratio correction, edge-aware colour filling, and feathered blending, making it an essential utility for image composition, outpainting preparation, and layout adjustments.
37 |
38 | The node takes a primary image and an optional mask, applies a series of transformations based on user-defined parameters, and outputs the modified image and its corresponding mask, ensuring they remain perfectly aligned.
39 |
40 | ## Feature List 🚀
41 |
42 | - **Multi-Mode Operation:** The node can operate in three main modes:
43 | - `pad`: Adds padding around the image using a solid colour.
44 |   - `pad_edge`: A smart padding mode that samples the image's border to create a seamless, context-aware fill colour, which is excellent for extending backgrounds (see the sketch after this list).
45 |   - `transparent_fill`: Places the image on a fully transparent canvas, leaving the padded area transparent for later compositing or outpainting.
46 | - **Automatic Aspect Ratio Padding:** Automatically calculates and adds the padding needed to conform the image to a standard aspect ratio (e.g., 16:9, 4:3, 1:1), which is useful for standardising outputs.
47 | - **Feathered Edges:** Includes a `pad_feathering` option to create a soft, blended transition between the original image and the padded area, avoiding hard lines.
48 | - **Flexible Image Placement:** Provides granular control over the original image's position (center, top, left, etc.) and fine-tuning offsets on the final canvas.
49 | - **Alpha Channel Handling:** Correctly processes RGBA images, either by filling the transparent background with a solid colour or preserving it.
50 | - **Final Resizing:** Lets you specify a final target resolution, resizing the entire canvas after all padding operations are complete.
51 |
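Under the hood, `pad_edge` derives its fill colour by averaging the outermost ring of border pixels. A condensed sketch of `get_edge_color` from `nodes/RBGPadPro.py` (the standalone `edge_color` helper name is ours):

```python
import torch

def edge_color(image_bhwc: torch.Tensor) -> list:
    """Mean colour of the image's border pixels, as 0-255 RGB values."""
    edges = torch.cat([
        image_bhwc[:, 0, :, :3].reshape(-1, 3),   # top row
        image_bhwc[:, -1, :, :3].reshape(-1, 3),  # bottom row
        image_bhwc[:, :, 0, :3].reshape(-1, 3),   # left column
        image_bhwc[:, :, -1, :3].reshape(-1, 3),  # right column
    ], dim=0)
    return (edges.mean(dim=0) * 255.0).int().tolist()
```
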
52 | ---
53 |
54 | https://github.com/user-attachments/assets/a4264baf-06bf-452c-bd74-6f1dfd930853
55 |
56 |
57 |
58 | ## Installation 🛠️
59 |
60 | 1. Clone this repository into your `ComfyUI/custom_nodes` directory:
61 | ```bash
62 | git clone https://github.com/RamonGuthrie/ComfyUI-RBG-ImageStitchPlus.git
63 | ```
64 | 2. Install the required dependencies by running the following command from the cloned `ComfyUI-RBG-ImageStitchPlus` directory:
65 | ```bash
66 | pip install -r requirements.txt
67 | ```
68 | 3. Restart ComfyUI.
69 |
70 | ---
71 |
72 | ## Usage 🚀
73 |
74 | After installation, you can find the `RBG Image Stitch Plus` node under the `RBG/ImageStitchPlus` category and the `RBG Pad Pro` node under the `RBG-Suite-Pack` category in ComfyUI. Connect up to three images and configure the settings to create your desired composition.
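
Outside of a graph, the node class can also be smoke-tested directly. A minimal sketch, assuming a Python environment where ComfyUI's `comfy` package is importable and this repository's root is the working directory:

```python
import torch
from nodes.ImageStitchAdvance import RBGImageStitchPlus

node = RBGImageStitchPlus()
img1 = torch.rand(1, 512, 512, 3)  # ComfyUI IMAGE tensors are B,H,W,C in [0, 1]
img2 = torch.rand(1, 512, 768, 3)
(stitched,) = node.stitch(
    direction="right", keep_proportion="resize", pad_color="#FFFFFF",
    crop_position="center", spacing_width=16, spacing_color="white",
    custom_spacing_color="#FFFFFF", fill_transparent_background=False,
    transparent_fill_color="#000000", final_resize_mode="none",
    final_target_size=1024, resample_filter="bicubic",
    image1=img1, image2=img2,
)
print(stitched.shape)  # torch.Size([1, 512, 1296, 4]): 512 + 16 spacing + 768 wide, RGBA
```
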
75 |
76 | ---
77 |
78 | ## Contributing ❤️
79 |
80 | Contributions are always welcome! If you have any suggestions, improvements, or new ideas, please feel free to submit a pull request or open an issue.
81 |
82 | ---
83 |
84 | ## License 📜
85 |
86 | This project is licensed under the MIT License.
87 |
--------------------------------------------------------------------------------
/nodes/RBGPadPro.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import comfy.utils
3 | from torchvision.transforms import functional as TF
4 |
5 | MAX_RESOLUTION = 8192
6 |
7 | class RBGPadPro:
8 | ASPECT_RATIOS = [
9 | "custom",
10 | "1:1 Square (Instagram, Facebook)",
11 | "2:3 Portrait (35mm Film)",
12 | "3:4 Portrait (Pinterest, Mobile)",
13 | "5:8 Portrait (Editorial/Magazine)",
14 | "9:16 Portrait (Instagram Stories, TikTok)",
15 | "9:21 Portrait (Cinematic Widescreen)",
16 | "4:3 Landscape (Classic TV, iPad)",
17 | "3:2 Landscape (35mm Film, DSLRs)",
18 | "8:5 Landscape (Widescreen Laptop)",
19 | "16:9 Landscape (HDTV, YouTube)",
20 | "21:9 Landscape (Cinematic Widescreen)",
21 | ]
22 |
23 | upscale_methods = ["lanczos", "bicubic", "nearest-exact", "bilinear", "area"]
24 |
25 | @classmethod
26 | def INPUT_TYPES(s):
27 | return {
28 | "required": {
29 | "image": ("IMAGE",),
30 | "pad_mode": (["pad", "pad_edge", "transparent_fill"], {"default": "pad"}),
31 | "pad_left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
32 | "pad_right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
33 | "pad_top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
34 | "pad_bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
35 | "pad_feathering": ("INT", {"default": 0, "min": 0, "max": 100, "step": 1}),
36 | "pad_color": ("STRING", {"default": "#FFFFFF"}),
37 | "image_position": (["center", "left", "right", "top", "bottom"], {"default": "center"}),
38 | "image_offset_x": ("INT", {"default": 0, "min": -MAX_RESOLUTION, "max": MAX_RESOLUTION, "step": 1}),
39 | "image_offset_y": ("INT", {"default": 0, "min": -MAX_RESOLUTION, "max": MAX_RESOLUTION, "step": 1}),
40 | "image_scale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
41 | "fill_transparent_background": ("BOOLEAN", {"default": False}),
42 | "transparent_fill_color": ("STRING", {"default": "#000000"}),
43 | "pad_aspect_ratio": (s.ASPECT_RATIOS, {"default": "custom"}),
44 | "resize_mode": (["none", "resize_longer_side", "resize_shorter_side"], { "default": "none" }),
45 | "target_size": ("INT", {"default": 1024, "min": 64, "max": 8192, "step": 8}),
46 | "resample_filter": (s.upscale_methods, {"default": "bicubic"}),
47 | },
48 | "optional": {
49 | "mask": ("MASK",),
50 | }
51 | }
52 |
53 | RETURN_TYPES = ("IMAGE", "MASK")
54 | FUNCTION = "pad_image"
55 | CATEGORY = "RBG-Suite-Pack"
56 |
57 | def hex_to_rgb(self, hex_color):
58 | hex_color = hex_color.lstrip('#')
59 | return tuple(int(hex_color[i:i+2], 16) for i in (0, 2, 4))
60 |
61 | def _ensure_rgba(self, image_tensor):
62 | if image_tensor.shape[-1] == 3:
63 | alpha_channel = torch.ones(*image_tensor.shape[:-1], 1, device=image_tensor.device, dtype=image_tensor.dtype)
64 | return torch.cat([image_tensor, alpha_channel], dim=-1)
65 | return image_tensor
66 |
67 | def get_edge_color(self, image_tensor):
68 | top_edge = image_tensor[:, 0, :, :3]
69 | bottom_edge = image_tensor[:, -1, :, :3]
70 | left_edge = image_tensor[:, :, 0, :3]
71 | right_edge = image_tensor[:, :, -1, :3]
72 | all_edges = torch.cat([
73 | top_edge.reshape(-1, 3), bottom_edge.reshape(-1, 3),
74 | left_edge.reshape(-1, 3), right_edge.reshape(-1, 3)
75 | ], dim=0)
76 | mean_color = torch.mean(all_edges, dim=0) * 255.0
77 | return mean_color.cpu().numpy().astype(int).tolist()
78 |
79 | def _calculate_gaussian_kernel_size(self, sigma: float) -> int:
80 | radius = int(round(3 * sigma))
81 | kernel_s = 2 * radius + 1
82 | return max(1, kernel_s)
83 |
84 | def pad_image(self, image, pad_mode, pad_left, pad_right, pad_top, pad_bottom, pad_feathering, pad_color, image_position, image_offset_x, image_offset_y, image_scale, fill_transparent_background, transparent_fill_color, pad_aspect_ratio, resize_mode, target_size, resample_filter, mask=None):
85 | image = self._ensure_rgba(image)
86 | B, orig_H, orig_W, C = image.shape
87 |
88 | aspect_pad_h = 0
89 | aspect_pad_w = 0
90 | if pad_aspect_ratio != "custom":
91 | ratio_str = pad_aspect_ratio.split(' ')[0]
92 | w_ratio, h_ratio = map(int, ratio_str.split(':'))
93 | target_ratio = w_ratio / h_ratio
94 | current_ratio = orig_W / orig_H
95 |
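            # If the image is narrower than the target ratio, add width; otherwise add height.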
96 | if abs(current_ratio - target_ratio) > 1e-6:
97 | if current_ratio < target_ratio:
98 | new_width = round(orig_H * target_ratio)
99 | aspect_pad_w = new_width - orig_W
100 | else:
101 | new_height = round(orig_W / target_ratio)
102 | aspect_pad_h = new_height - orig_H
103 |
104 | final_width = orig_W + pad_left + pad_right + aspect_pad_w
105 | final_height = orig_H + pad_top + pad_bottom + aspect_pad_h
106 |
107 | scaled_image = image
108 | scaled_mask = mask
109 | if image_scale != 1.0:
110 | new_width = int(orig_W * image_scale)
111 | new_height = int(orig_H * image_scale)
112 | if new_width > 0 and new_height > 0:
113 | scaled_image = comfy.utils.common_upscale(image.movedim(-1,1), new_width, new_height, "lanczos", "disabled").movedim(1,-1)
114 | if mask is not None:
115 | scaled_mask = comfy.utils.common_upscale(mask.unsqueeze(1), new_width, new_height, "bilinear", "disabled").squeeze(1)
116 |
117 | B, scaled_H, scaled_W, C = scaled_image.shape
118 |
119 | if scaled_mask is None:
120 | scaled_mask = torch.ones((B, scaled_H, scaled_W), device=image.device, dtype=torch.float32)
121 | else:
122 | scaled_mask = scaled_mask.to(image.device)
123 | if scaled_mask.shape[1] != scaled_H or scaled_mask.shape[2] != scaled_W:
124 | scaled_mask = comfy.utils.common_upscale(scaled_mask.unsqueeze(1), scaled_W, scaled_H, "bilinear", "disabled").squeeze(1)
125 |
126 | if pad_mode == 'transparent_fill':
127 | # Create a fully transparent canvas
128 | final_canvas = torch.zeros((B, final_height, final_width, 4), device=image.device, dtype=image.dtype)
129 |
130 | # Ensure the input image has an alpha channel
131 | scaled_image = self._ensure_rgba(scaled_image)
132 |
133 | # Calculate position with offset
134 | if image_position == 'center':
135 | pos_x = (final_width - scaled_W) // 2
136 | pos_y = (final_height - scaled_H) // 2
137 | elif image_position == 'left':
138 | pos_x = 0
139 | pos_y = (final_height - scaled_H) // 2
140 | elif image_position == 'right':
141 | pos_x = final_width - scaled_W
142 | pos_y = (final_height - scaled_H) // 2
143 | elif image_position == 'top':
144 | pos_x = (final_width - scaled_W) // 2
145 | pos_y = 0
146 | elif image_position == 'bottom':
147 | pos_x = (final_width - scaled_W) // 2
148 | pos_y = final_height - scaled_H
149 |
150 | pos_x += image_offset_x
151 | pos_y += image_offset_y
152 |
153 | # Safe copy from scaled_image to the transparent canvas
154 | img_start_x = max(0, -pos_x)
155 | img_start_y = max(0, -pos_y)
156 | can_start_x = max(0, pos_x)
157 | can_start_y = max(0, pos_y)
158 |
159 | copy_width = min(scaled_W - img_start_x, final_width - can_start_x)
160 | copy_height = min(scaled_H - img_start_y, final_height - can_start_y)
161 |
162 | if copy_width > 0 and copy_height > 0:
163 | final_canvas[:, can_start_y:can_start_y + copy_height, can_start_x:can_start_x + copy_width, :] = \
164 | scaled_image[:, img_start_y:img_start_y + copy_height, img_start_x:img_start_x + copy_width, :]
165 |
166 | final_mask = final_canvas[..., 3].clone()
167 |
168 | return (final_canvas, final_mask)
169 |
170 | color_val = self.get_edge_color(image) if pad_mode == 'pad_edge' else self.hex_to_rgb(pad_color)
171 | color_tensor = torch.tensor(color_val, device=image.device, dtype=image.dtype).div(255.0)
172 |
173 | canvas = torch.zeros((B, final_height, final_width, C), device=image.device, dtype=image.dtype)
174 | mask_canvas = torch.zeros((B, final_height, final_width), device=image.device, dtype=torch.float32)
175 |
176 | if C == 4:
177 | canvas_rgb = color_tensor.view(1, 1, 1, 3).repeat(B, final_height, final_width, 1)
178 | canvas_alpha = torch.ones((B, final_height, final_width, 1), device=image.device, dtype=image.dtype)
179 | canvas = torch.cat([canvas_rgb, canvas_alpha], dim=-1)
180 | else:
181 | canvas = color_tensor.view(1, 1, 1, C).repeat(B, final_height, final_width, 1)
182 |
183 | if image_position == 'center':
184 | base_x = (final_width - scaled_W) // 2
185 | base_y = (final_height - scaled_H) // 2
186 | elif image_position == 'left':
187 | base_x = pad_left
188 | base_y = (final_height - scaled_H) // 2
189 | elif image_position == 'right':
190 | base_x = final_width - scaled_W - pad_right
191 | base_y = (final_height - scaled_H) // 2
192 | elif image_position == 'top':
193 | base_x = (final_width - scaled_W) // 2
194 | base_y = pad_top
195 | elif image_position == 'bottom':
196 | base_x = (final_width - scaled_W) // 2
197 | base_y = final_height - scaled_H - pad_bottom
198 |
199 | canvas_x = base_x + image_offset_x
200 | canvas_y = base_y + image_offset_y
201 |
202 | img_start_x = max(0, -canvas_x)
203 | img_start_y = max(0, -canvas_y)
204 | can_start_x = max(0, canvas_x)
205 | can_start_y = max(0, canvas_y)
206 |
207 | copy_width = min(scaled_W - img_start_x, final_width - can_start_x)
208 | copy_height = min(scaled_H - img_start_y, final_height - can_start_y)
209 |
210 | if copy_width > 0 and copy_height > 0:
211 | # Get the slices
212 | canvas_slice = canvas[:, can_start_y:can_start_y + copy_height, can_start_x:can_start_x + copy_width, :]
213 | image_slice = scaled_image[:, img_start_y:img_start_y + copy_height, img_start_x:img_start_x + copy_width, :]
214 | mask_slice = scaled_mask[:, img_start_y:img_start_y + copy_height, img_start_x:img_start_x + copy_width]
215 |
216 | # Update mask_canvas
217 | mask_canvas[:, can_start_y:can_start_y + copy_height, can_start_x:can_start_x + copy_width] = mask_slice
218 |
219 | # Blend the image onto the canvas
220 | blended_slice = canvas_slice * (1 - mask_slice.unsqueeze(-1)) + image_slice * mask_slice.unsqueeze(-1)
221 |
222 | # Put the blended slice back
223 | canvas[:, can_start_y:can_start_y + copy_height, can_start_x:can_start_x + copy_width, :] = blended_slice
224 |
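        # Feathering: blur the placement mask, then re-blend the RGB against the
        # pad colour so the image fades smoothly into the padded area.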
225 | if pad_feathering > 0:
226 | sigma = pad_feathering / 10.0
227 | kernel_size = self._calculate_gaussian_kernel_size(sigma)
228 | blurred_mask = TF.gaussian_blur(mask_canvas.unsqueeze(1), kernel_size=[kernel_size, kernel_size], sigma=[sigma, sigma]).squeeze(1)
229 |
230 | background_color_tensor = color_tensor
231 | background = background_color_tensor.view(1, 1, 1, 3).repeat(B, final_height, final_width, 1)
232 |
233 | blended_rgb = canvas[..., :3] * blurred_mask.unsqueeze(-1) + background * (1 - blurred_mask.unsqueeze(-1))
234 | canvas = torch.cat([blended_rgb, canvas[..., 3:]], dim=-1)
235 | mask_canvas = blurred_mask
236 |
237 | if fill_transparent_background and C == 4:
238 | fill_color_rgb = self.hex_to_rgb(transparent_fill_color)
239 | fill_color_tensor = torch.tensor(fill_color_rgb, device=image.device, dtype=image.dtype).div(255.0)
240 | background = fill_color_tensor.view(1, 1, 1, 3).repeat(B, final_height, final_width, 1)
241 | alpha = canvas[..., 3:]
242 | blended_rgb = canvas[..., :3] * alpha + background * (1 - alpha)
243 | canvas = torch.cat([blended_rgb, torch.ones_like(alpha)], dim=-1)
244 |
245 |         # Final resizing of the canvas and mask (this node does not supersample)
246 | if resize_mode != "none":
247 | B, H, W, C = canvas.shape
248 | if W > 0 and H > 0:
249 |                 # Determine the final target dimensions
250 | if resize_mode == "resize_longer_side":
251 | ratio = target_size / max(W, H)
252 | else: # resize_shorter_side
253 | ratio = target_size / min(W, H)
254 |
255 | target_width = round(W * ratio)
256 | target_height = round(H * ratio)
257 |
258 | # Use a safe resample filter for the mask if lanczos is selected
259 | mask_resample_filter = resample_filter
260 | if mask_resample_filter == "lanczos":
261 | mask_resample_filter = "bicubic"
262 |
263 |                 # Resize the canvas and mask to the target size
264 | canvas = comfy.utils.common_upscale(canvas.movedim(-1,1), target_width, target_height, resample_filter, "disabled").movedim(1,-1)
265 | mask_canvas = comfy.utils.common_upscale(mask_canvas.unsqueeze(1), target_width, target_height, mask_resample_filter, "disabled").squeeze(1)
266 |
267 | return (canvas, mask_canvas)
268 |
269 | NODE_CLASS_MAPPINGS = {
270 | "RBGPadPro": RBGPadPro
271 | }
272 |
273 | NODE_DISPLAY_NAME_MAPPINGS = {
274 | "RBGPadPro": "RBG Pad Pro"
275 | }
276 |
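# Example usage (a hypothetical smoke test, assuming a Python environment where
# ComfyUI's `comfy` package is importable):
#
#   import torch
#   node = RBGPadPro()
#   img = torch.rand(1, 512, 512, 3)  # B,H,W,C in [0, 1]
#   padded, mask = node.pad_image(
#       img, "pad_edge", 64, 64, 64, 64, 10, "#FFFFFF", "center", 0, 0, 1.0,
#       False, "#000000", "custom", "none", 1024, "bicubic")
#   # padded: (1, 640, 640, 4) RGBA canvas; mask marks the (feathered) image region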
--------------------------------------------------------------------------------
/nodes/ImageStitchAdvance.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import comfy.utils
3 | from torchvision.transforms import functional as TF
4 |
5 | class RBGImageStitchPlus:
6 |     # Resampling filters supported by comfy.utils.common_upscale.
7 |     upscale_methods = ["lanczos", "bicubic", "nearest-exact", "bilinear", "area"]
8 |
9 | @classmethod
10 | def INPUT_TYPES(s):
11 | return {
12 | "required": {
13 | "direction": (["right", "down", "left", "up", "H_then_V_down", "H_then_V_up", "V_then_H_right", "V_then_H_left", "Grid_2x2"], {"default": "right"}),
14 | "keep_proportion": (["resize", "pad", "pad_edge", "crop"], { "default": "resize" }),
15 | "pad_color": ("STRING", {"default": "#FFFFFF", "tooltip": "Color to use for padding (R,G,B)."}),
16 | "crop_position": (["center", "top", "bottom", "left", "right"], { "default": "center" }),
17 | "spacing_width": ("INT", {"default": 0, "min": 0, "max": 1024, "step": 2}),
18 | "spacing_color": (["white", "black", "red", "green", "blue", "custom"], {"default": "white"}),
19 | "custom_spacing_color": ("STRING", {"default": "#FFFFFF"}),
20 | "fill_transparent_background": ("BOOLEAN", {"default": False, "tooltip": "If true, transparent areas will be filled with the specified color."}),
21 | "transparent_fill_color": ("STRING", {"default": "#000000", "tooltip": "Color to fill transparent areas (R,G,B). Only used if 'fill_transparent_background' is true."}),
22 |
23 | # Final Resizing Options
24 | "final_resize_mode": (["none", "resize_longer_side", "resize_shorter_side"], { "default": "none" }),
25 | "final_target_size": ("INT", {"default": 1024, "min": 64, "max": 8192, "step": 8}),
26 | "resample_filter": (s.upscale_methods, {"default": "bicubic", "tooltip": "Interpolation for general resizing and the upscaling part of supersampling."}),
27 |
28 | # Supersampling for anti-aliasing
29 | "supersample_factor": ("FLOAT", {"default": 1.0, "min": 1.0, "max": 4.0, "step": 0.1, "tooltip": "Upscales then downscales the final image for anti-aliasing. Factor > 1 enables it."}),
30 | "final_downsample_interpolation": (s.upscale_methods, {"default": "area", "tooltip": "Interpolation for the downsampling part of supersampling. 'area' is often best for this."}),
31 |
32 | # Clarity (Midtone Contrast)
33 |                 "clarity_strength": ("FLOAT", {"default": 0.0, "min": -1.0, "max": 1.0, "step": 0.01, "tooltip": "Adjusts midtone contrast. Negative values soften for a dreamlike look, positive values add punch. -1.0 = softest, +1.0 = punchiest."}),
34 | },
35 | "optional": {
36 | "image1": ("IMAGE",),
37 | "image2": ("IMAGE",),
38 | "image3": ("IMAGE",),
39 | }
40 | }
41 |
42 | RETURN_TYPES = ("IMAGE",)
43 | FUNCTION = "stitch"
44 | CATEGORY = "RBG/ImageStitchPlus"
45 |
46 | def hex_to_rgb(self, hex_color):
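        """Convert '#RRGGBB' to an (R, G, B, 255) tuple (alpha fully opaque)."""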
47 | hex_color = hex_color.lstrip('#')
48 | rgb = tuple(int(hex_color[i:i+2], 16) for i in (0, 2, 4))
49 | return rgb + (255,)
50 |
51 | def _ensure_rgba(self, image_tensor):
52 | if image_tensor.shape[-1] == 3:
53 | alpha_channel = torch.ones(*image_tensor.shape[:-1], 1, device=image_tensor.device, dtype=image_tensor.dtype)
54 | image_tensor = torch.cat([image_tensor, alpha_channel], dim=-1)
55 | return image_tensor
56 |
57 | def get_edge_color(self, image_tensor):
58 | top_edge = image_tensor[:, 0, :, :]
59 | bottom_edge = image_tensor[:, -1, :, :]
60 | left_edge = image_tensor[:, :, 0, :]
61 | right_edge = image_tensor[:, :, -1, :]
62 | all_edges = torch.cat([top_edge.reshape(-1, image_tensor.shape[-1]),
63 | bottom_edge.reshape(-1, image_tensor.shape[-1]),
64 | left_edge.reshape(-1, image_tensor.shape[-1]),
65 | right_edge.reshape(-1, image_tensor.shape[-1])], dim=0)
66 | mean_color = torch.mean(all_edges, dim=0) * 255.0
67 | color_list = mean_color.cpu().numpy().astype(int).tolist()
68 | if len(color_list) == 3: color_list.append(255)
69 | return color_list
70 |
71 | def pad_to_match(self, tensors, concat_dim):
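        """Zero-pad each tensor on its trailing edges (right/bottom) so all
        shapes agree on every dimension except concat_dim, allowing torch.cat."""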
72 | valid_tensors = [t for t in tensors if t is not None]
73 | if not valid_tensors: return []
74 |
75 | ref_shape = list(valid_tensors[0].shape)
76 | for t in valid_tensors[1:]:
77 | for d in range(len(ref_shape)):
78 | if d != concat_dim:
79 | ref_shape[d] = max(ref_shape[d], t.shape[d])
80 |
81 | padded = []
82 | for t in valid_tensors:
83 | pad_spec = []
84 | for d in reversed(range(len(ref_shape))):
85 | if d == concat_dim:
86 | pad_spec.extend([0, 0])
87 | else:
88 | diff = ref_shape[d] - t.shape[d]
89 | pad_spec.extend([0, diff])
90 | padded.append(torch.nn.functional.pad(t, pad_spec))
91 | return padded
92 |
93 | def _perform_stitch(self, img1, img2, direction, spacing_width, spacing_color, custom_spacing_color):
94 | if img1 is None and img2 is None: return None
95 | if img1 is None: return self._ensure_rgba(img2)
96 | if img2 is None: return self._ensure_rgba(img1)
97 |
98 | img1, img2 = self._ensure_rgba(img1), self._ensure_rgba(img2)
99 |
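        # If batch sizes differ, repeat the last frame of the smaller batch.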
100 | if img1.shape[0] != img2.shape[0]:
101 | max_batch = max(img1.shape[0], img2.shape[0])
102 | if img1.shape[0] < max_batch: img1 = torch.cat([img1, img1[-1:].repeat(max_batch - img1.shape[0], 1, 1, 1)])
103 | if img2.shape[0] < max_batch: img2 = torch.cat([img2, img2[-1:].repeat(max_batch - img2.shape[0], 1, 1, 1)])
104 |
105 | spacing = None
106 | if spacing_width > 0:
107 | color_map = {"white": (255,255,255,255), "black": (0,0,0,255), "red": (255,0,0,255), "green": (0,255,0,255), "blue": (0,0,255,255)}
108 | color_val = self.hex_to_rgb(custom_spacing_color) if spacing_color == "custom" else color_map[spacing_color]
109 |
110 | num_channels = img1.shape[-1]
111 | spacing_shape = (img1.shape[0], max(img1.shape[1], img2.shape[1]), spacing_width, num_channels) if direction in ["left", "right"] else (img1.shape[0], spacing_width, max(img1.shape[2], img2.shape[2]), num_channels)
112 | spacing = torch.full(spacing_shape, 0.0, device=img1.device, dtype=img1.dtype)
113 | for j, c in enumerate(color_val):
114 | if j < num_channels: spacing[..., j] = c / 255.0
115 | if num_channels == 4: spacing[..., 3] = 1.0
116 |
117 | temp_images = [img2, img1] if direction in ["left", "up"] else [img1, img2]
118 | if spacing is not None: temp_images.insert(1, spacing)
119 |
120 | concat_dim = 2 if direction in ["left", "right"] else 1
121 | temp_images = self.pad_to_match(temp_images, concat_dim)
122 | return torch.cat(temp_images, dim=concat_dim)
123 |
124 | def _calculate_gaussian_kernel_size(self, sigma: float) -> int:
125 | """Calculates an appropriate odd kernel size for Gaussian blur."""
126 | radius = int(round(3 * sigma))
127 | kernel_s = 2 * radius + 1
128 | return max(1, kernel_s)
129 | def stitch(self, direction, keep_proportion, pad_color, crop_position, spacing_width, spacing_color, custom_spacing_color, fill_transparent_background, transparent_fill_color, final_resize_mode, final_target_size, resample_filter, supersample_factor=1.0, final_downsample_interpolation="area", clarity_strength=0.0, image1=None, image2=None, image3=None):
130 | all_images_input = [img for img in [image1, image2, image3] if img is not None and img.shape[0] > 0]
131 | if not all_images_input: raise ValueError("At least one image must be provided.")
132 | if len(all_images_input) == 1: return (self._ensure_rgba(all_images_input[0]),)
133 |
134 | # Initialize progress bar with 5 potential major steps
135 | pbar = comfy.utils.ProgressBar(5)
136 |
137 | current_batch_size = max(img.shape[0] for img in all_images_input)
138 | def unify_image(img_tensor):
139 | if img_tensor is None: return None
140 | img_tensor = self._ensure_rgba(img_tensor)
141 | if img_tensor.shape[0] < current_batch_size:
142 | return torch.cat([img_tensor, img_tensor[-1:].repeat(current_batch_size - img_tensor.shape[0], 1, 1, 1)])
143 | return img_tensor
144 |
145 | image1, image2, image3 = unify_image(image1), unify_image(image2), unify_image(image3)
146 | present_images = [img for img in [image1, image2, image3] if img is not None]
147 | pbar.update(1) # Step 1: Image unification and prep complete
148 |
149 | def get_processed_image(img_tensor, target_w, target_h, prop_mode):
150 | if img_tensor is None: return None
151 | return self.resize(img_tensor, target_w, target_h, prop_mode, "lanczos", 2, pad_color, crop_position)[0]
152 |
153 | stitched_image = None
154 | if keep_proportion == "crop":
155 | if direction in ["right", "down", "left", "up"]:
156 | max_dim = max(min(img.shape[1], img.shape[2]) for img in present_images)
157 | processed = [get_processed_image(img, max_dim, max_dim, "crop") for img in present_images]
158 | stitch_dir = "right" if direction in ["right", "left"] else "down"
159 | if direction in ["left", "up"]: processed.reverse()
160 | stitched_image = processed[0]
161 | for i in range(1, len(processed)):
162 | stitched_image = self._perform_stitch(stitched_image, processed[i], stitch_dir, spacing_width, spacing_color, custom_spacing_color)
163 |
164 | elif direction == "Grid_2x2":
165 | max_dim = max(min(img.shape[1], img.shape[2]) for img in present_images)
166 | p_img1 = get_processed_image(image1, max_dim, max_dim, "crop")
167 | p_img2 = get_processed_image(image2, max_dim, max_dim, "crop")
168 | p_img3 = get_processed_image(image3, max_dim, max_dim, "crop")
169 | row1 = self._perform_stitch(p_img1, p_img2, "right", spacing_width, spacing_color, custom_spacing_color)
170 | row2 = self._perform_stitch(p_img3, None, "right", spacing_width, spacing_color, custom_spacing_color)
171 | stitched_image = self._perform_stitch(row1, row2, "down", spacing_width, spacing_color, custom_spacing_color)
172 |
173 | else: # Compound crop
174 | is_vertical_first = direction.startswith("V_then_H")
175 | main_stitch_dir = "down" if is_vertical_first else "right"
176 | secondary_stitch_dir = "right" if is_vertical_first else "down"
177 | if direction.endswith("left"): secondary_stitch_dir = "left"
178 | if direction.endswith("up"): secondary_stitch_dir = "up"
179 |
180 | primary_pair = [img for img in [image1, image2] if img is not None]
181 | if not primary_pair:
182 | if image3 is not None:
183 | img3_proc = get_processed_image(image3, min(image3.shape[1], image3.shape[2]), min(image3.shape[1], image3.shape[2]), "crop")
184 | stitched_image = img3_proc
185 | else: raise ValueError("No images provided for stitching.")
186 |
187 | elif image3 is None:
188 |                 return self.stitch(main_stitch_dir, keep_proportion, pad_color, crop_position, spacing_width, spacing_color, custom_spacing_color, fill_transparent_background, transparent_fill_color, final_resize_mode, final_target_size, resample_filter, supersample_factor, final_downsample_interpolation, clarity_strength, image1, image2, None)
189 |
190 | else:
191 | max_dim_primary = max(min(img.shape[1], img.shape[2]) for img in primary_pair)
192 | p_img1 = get_processed_image(image1, max_dim_primary, max_dim_primary, "crop")
193 | p_img2 = get_processed_image(image2, max_dim_primary, max_dim_primary, "crop")
194 | primary_stitch = self._perform_stitch(p_img1, p_img2, main_stitch_dir, spacing_width, spacing_color, custom_spacing_color)
195 | target_dim_secondary = primary_stitch.shape[1] if is_vertical_first else primary_stitch.shape[2]
196 | p_img3 = get_processed_image(image3, target_dim_secondary, target_dim_secondary, "crop")
197 | stitched_image = self._perform_stitch(primary_stitch, p_img3, secondary_stitch_dir, spacing_width, spacing_color, custom_spacing_color)
198 |         else: # Logic for "resize", "pad", and "pad_edge"
199 | if direction in ["right", "down", "left", "up"]:
200 | max_h = max(img.shape[1] for img in present_images)
201 | max_w = max(img.shape[2] for img in present_images)
202 |
203 | target_h, target_w = (max_h, max_w) if keep_proportion.startswith("pad") else (max_h if direction in ["right", "left"] else 0, max_w if direction in ["down", "up"] else 0)
204 |
205 | processed = [get_processed_image(img, target_w, target_h, keep_proportion) for img in present_images]
206 | stitch_dir = "right" if direction in ["right", "left"] else "down"
207 | if direction in ["left", "up"]: processed.reverse()
208 |
209 | stitched_image = processed[0]
210 | for i in range(1, len(processed)):
211 | stitched_image = self._perform_stitch(stitched_image, processed[i], stitch_dir, spacing_width, spacing_color, custom_spacing_color)
212 |
213 | elif direction == "Grid_2x2":
214 | max_h = max(img.shape[1] for img in present_images)
215 | max_w = max(img.shape[2] for img in present_images)
216 | p_img1 = get_processed_image(image1, max_w, max_h, keep_proportion)
217 | p_img2 = get_processed_image(image2, max_w, max_h, keep_proportion)
218 | p_img3 = get_processed_image(image3, max_w, max_h, keep_proportion)
219 | row1 = self._perform_stitch(p_img1, p_img2, "right", spacing_width, spacing_color, custom_spacing_color)
220 | row2 = self._perform_stitch(p_img3, None, "right", spacing_width, spacing_color, custom_spacing_color)
221 | stitched_image = self._perform_stitch(row1, row2, "down", spacing_width, spacing_color, custom_spacing_color)
222 |
223 | else: # Compound layouts
224 | is_vertical_first = direction.startswith("V_then_H")
225 | main_stitch_dir, secondary_stitch_dir = ("down", "right") if is_vertical_first else ("right", "down")
226 | if direction.endswith("left"): secondary_stitch_dir = "left"
227 | if direction.endswith("up"): secondary_stitch_dir = "up"
228 |
229 | primary_imgs = [img for img in [image1, image2] if img is not None]
230 | primary_stitch = None
231 | if primary_imgs:
232 | max_h1 = max(img.shape[1] for img in primary_imgs)
233 | max_w1 = max(img.shape[2] for img in primary_imgs)
234 |
235 | target_h_primary, target_w_primary = (max_h1, max_w1) if keep_proportion.startswith("pad") else ((0, max_w1) if is_vertical_first else (max_h1, 0))
236 |
237 | p_img1 = get_processed_image(image1, target_w_primary, target_h_primary, keep_proportion)
238 | p_img2 = get_processed_image(image2, target_w_primary, target_h_primary, keep_proportion)
239 | primary_stitch = self._perform_stitch(p_img1, p_img2, main_stitch_dir, spacing_width, spacing_color, custom_spacing_color)
240 |
241 | if primary_stitch is not None and image3 is not None:
242 | target_h3, target_w3 = (primary_stitch.shape[1], 0) if is_vertical_first else (0, primary_stitch.shape[2])
243 | p_img3 = get_processed_image(image3, target_w3, target_h3, keep_proportion)
244 | stitched_image = self._perform_stitch(primary_stitch, p_img3, secondary_stitch_dir, spacing_width, spacing_color, custom_spacing_color)
245 | else:
246 | stitched_image = primary_stitch if primary_stitch is not None else image3
247 |
248 | pbar.update(1) # Step 2: Stitching logic complete
249 |
250 | if stitched_image is None: raise ValueError("Stitching failed.")
251 |
252 | if fill_transparent_background and stitched_image.shape[-1] == 4:
253 | fill_color_rgba = self.hex_to_rgb(transparent_fill_color)
254 | fill_color_tensor = torch.tensor(fill_color_rgba, device=stitched_image.device, dtype=stitched_image.dtype) / 255.0
255 | background = torch.full_like(stitched_image, 0.0)
256 | background[..., :3], background[..., 3] = fill_color_tensor[:3], fill_color_tensor[3] if len(fill_color_rgba) == 4 else 1.0
257 | alpha, stitched_image_rgb = stitched_image[..., 3:], stitched_image[..., :3]
258 | blended_rgb = (stitched_image_rgb * alpha) + (background[..., :3] * (1 - alpha))
259 | stitched_image = torch.cat([blended_rgb, torch.ones_like(alpha)], dim=-1)
260 |
261 | pbar.update(1) # Step 3: Background fill complete
262 |
263 | # Final Resizing and Supersampling
264 | if final_resize_mode != "none":
265 | B, H, W, C = stitched_image.shape
266 | if W > 0 and H > 0:
267 | # 1. Determine final target dimensions
268 | if final_resize_mode == "resize_longer_side":
269 | ratio = final_target_size / max(W, H)
270 | else: # resize_shorter_side
271 | ratio = final_target_size / min(W, H)
272 |
273 | target_width = round(W * ratio)
274 | target_height = round(H * ratio)
275 |
276 | # 2. Apply supersampling if enabled
277 | if supersample_factor > 1.0:
278 | # Upscale to supersampled dimensions first
279 | ss_width = int(target_width * supersample_factor)
280 | ss_height = int(target_height * supersample_factor)
281 |
282 | # Upscale the original stitched image to the supersampled size using the main interpolation method
283 | temp_image = comfy.utils.common_upscale(stitched_image.movedim(-1,1), ss_width, ss_height, resample_filter, "disabled").movedim(1,-1)
284 |
285 | # Downscale to the final target size for anti-aliasing using the new downsample interpolation method
286 | stitched_image = comfy.utils.common_upscale(temp_image.movedim(-1,1), target_width, target_height, final_downsample_interpolation, "disabled").movedim(1,-1)
287 | else:
288 | # No supersampling, just resize to target
289 | stitched_image = comfy.utils.common_upscale(stitched_image.movedim(-1,1), target_width, target_height, resample_filter, "disabled").movedim(1,-1)
290 | elif supersample_factor > 1.0: # Supersampling without final resize (just upscale)
291 | ss_H = int(stitched_image.shape[1] * supersample_factor)
292 | ss_W = int(stitched_image.shape[2] * supersample_factor)
293 | stitched_image = comfy.utils.common_upscale(stitched_image.movedim(-1,1), ss_W, ss_H, resample_filter, "disabled").movedim(1,-1)
294 |
295 | pbar.update(1) # Step 4: Final resizing/supersampling complete
296 |
297 | # Apply Clarity (Midtone Contrast)
298 | if abs(clarity_strength) > 1e-6:
299 | # Convert to B, C, H, W for TF.gaussian_blur
300 | image_bchw = stitched_image.movedim(-1, 1)
301 |
302 | clarity_blur_sigma = max(1.0, min(image_bchw.shape[2], image_bchw.shape[3]) / 50.0)
303 | kernel_size_clarity = self._calculate_gaussian_kernel_size(clarity_blur_sigma)
304 |
305 | blurred_image_for_clarity = TF.gaussian_blur(image_bchw, kernel_size=(kernel_size_clarity, kernel_size_clarity), sigma=(clarity_blur_sigma, clarity_blur_sigma))
306 | detail_for_clarity = image_bchw - blurred_image_for_clarity
307 | clarity_effect_scale = 1.0
308 | image_bchw = image_bchw + detail_for_clarity * clarity_strength * clarity_effect_scale
309 |
310 | stitched_image = image_bchw.clamp(0.0, 1.0).movedim(1, -1)
311 |
312 | pbar.update(1) # Step 5: Clarity adjustment complete
313 |
314 | return (stitched_image,)
315 |
316 | def resize(self, image, width, height, keep_proportion, upscale_method, divisible_by, pad_color, crop_position):
317 | B, H, W, C = image.shape
318 | if width == 0 and height == 0: return (image.clone(), W, H)
319 |
320 | original_image = image.clone()
321 | target_W, target_H = width, height
322 |
323 | if keep_proportion == "crop":
324 | square_dim = min(W, H)
325 | x_crop, y_crop = {
326 | "center": ((W - square_dim) // 2, (H - square_dim) // 2),
327 | "top": ((W - square_dim) // 2, 0), "bottom": ((W - square_dim) // 2, H - square_dim),
328 | "left": (0, (H - square_dim) // 2), "right": (W - square_dim, (H - square_dim) // 2)
329 | }[crop_position]
330 | image = image.narrow(-3, y_crop, square_dim).narrow(-2, x_crop, square_dim)
331 | B, H, W, C = image.shape
332 |
333 | if keep_proportion == "stretch":
334 | new_width, new_height = target_W if target_W!=0 else W, target_H if target_H!=0 else H
335 | else:
336 | if W == 0 or H == 0: return (torch.zeros((B, target_H, target_W, C), device=image.device), target_W, target_H)
337 | ratio = 1.0
338 | if target_W == 0 and target_H != 0: ratio = target_H / H
339 | elif target_H == 0 and target_W != 0: ratio = target_W / W
340 | elif target_W != 0 and target_H != 0: ratio = min(target_W / W, target_H / H)
341 | new_width, new_height = round(W * ratio), round(H * ratio)
342 |
343 | if divisible_by > 1:
344 | new_width, new_height = new_width - (new_width % divisible_by), new_height - (new_height % divisible_by)
345 |
346 | out_image = comfy.utils.common_upscale(image.movedim(-1,1), new_width, new_height, upscale_method, crop="disabled").movedim(1,-1)
347 |
348 | if keep_proportion.startswith("pad"):
349 | pad_w, pad_h = (target_W if target_W!=0 else new_width), (target_H if target_H!=0 else new_height)
350 | if (pad_w != new_width) or (pad_h != new_height):
351 | pad_top, pad_left = (pad_h - new_height) // 2, (pad_w - new_width) // 2
352 |
353 | if keep_proportion == "pad":
354 | color_val = self.hex_to_rgb(pad_color)
355 | else: # pad_edge
356 | color_val = self.get_edge_color(original_image)
357 |
358 | color_tensor = torch.tensor(color_val, device=out_image.device, dtype=out_image.dtype).div(255.0)
359 | canvas = color_tensor[:C].view(1, 1, 1, C).repeat(B, pad_h, pad_w, 1)
360 |
361 | canvas[:, pad_top:pad_top+new_height, pad_left:pad_left+new_width, :] = out_image
362 | out_image = canvas
363 |
364 | return (out_image, out_image.shape[2], out_image.shape[1],)
365 |
--------------------------------------------------------------------------------