├── .github
│   └── workflows
│       └── publish.yml
├── .gitignore
├── Compositor3.py
├── CompositorColorPicker.py
├── CompositorConfig3.py
├── CompositorMasksOutputV3.py
├── CompositorTools3.py
├── CompositorTransformsOut3.py
├── ImageColorSampler.py
├── LICENSE
├── README.md
├── __init__.py
├── assets
│   ├── bear.jpg
│   ├── forest.jpg
│   ├── gallerySamples.jpg
│   ├── output examples
│   │   ├── ComfyUI_00286_.png
│   │   ├── ComfyUI_00356_.png
│   │   ├── ComfyUI_00360_.png
│   │   ├── ComfyUI_00369_.png
│   │   ├── ComfyUI_00370_.png
│   │   └── ComfyUI_00392_.png
│   ├── showreel1.jpg
│   ├── showreel1.png
│   ├── v3.0.2.PNG
│   ├── v3.1.PNG
│   ├── v3.1.json
│   ├── v3.PNG
│   ├── warrior.jpg
│   ├── weak.png
│   ├── wolf.jpg
│   └── workflows
│       ├── compositorMasksV3_sample.json
│       ├── compositorMasksV3_sample_assets
│       │   ├── ComfyUI_00159_.png
│       │   ├── ComfyUI_00160_.png
│       │   ├── ComfyUI_00161_.png
│       │   └── ComfyUI_00162_.png
│       ├── v3.1.0_multiple_instances_with_lettering.json
│       └── v3.json
├── pyproject.toml
└── web
    ├── colorPicker.js
    ├── compositor.js
    ├── compositor3.js
    ├── empty.png
    ├── fabric.js
    ├── fabricNew.js
    ├── imageSampler.js
    ├── test.png
    ├── test_2.png
    └── tools.js
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | name: Publish to Comfy registry
2 | on:
3 | workflow_dispatch:
4 | push:
5 | branches:
6 | - main
7 | - master
8 | paths:
9 | - "pyproject.toml"
10 |
11 | permissions:
12 | issues: write
13 |
14 | jobs:
15 | publish-node:
16 | name: Publish Custom Node to registry
17 | runs-on: ubuntu-latest
18 | if: ${{ github.repository_owner == 'erosDiffusion' }}
19 | steps:
20 | - name: Check out code
21 | uses: actions/checkout@v4
22 | - name: Publish Custom Node
23 | uses: Comfy-Org/publish-node-action@v1
24 | with:
25 | ## Add your personal access token to your GitHub repository secrets and reference it here.
26 | personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }}
27 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__/*
2 | *.iml
3 | .idea/**
4 | /.idea/
5 |
--------------------------------------------------------------------------------
/Compositor3.py:
--------------------------------------------------------------------------------
1 | import folder_paths
2 | from PIL import Image, ImageOps
3 | import numpy as np
4 | import torch
5 | from comfy_execution.graph import ExecutionBlocker
6 | import threading
7 | from server import PromptServer
8 | from aiohttp import web
9 | import json
10 |
11 | thread = None
12 | g_node_id = None
13 | g_filename = None
14 | threads = []
15 |
16 | # Helper functions: standard ComfyUI tensor/PIL conversions
17 | def tensor2pil(image: torch.Tensor) -> Image.Image:
18 | return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(0), 0, 255).astype(np.uint8))
19 |
20 | def pil2tensor(image: Image.Image) -> torch.Tensor:
21 | return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)
22 |
23 | # Function to create an empty mask tensor of specified dimensions
24 | def create_empty_mask(width, height, inverted=False):
25 | """
26 | Create an empty mask tensor with specified dimensions.
27 |
28 | Parameters:
29 | - width: Width of the mask
30 | - height: Height of the mask
31 | - inverted: If True, creates a white mask (all 255), otherwise black mask (all 0)
32 |
33 | Returns:
34 | - Tensor representing an empty mask
35 | """
36 | try:
37 | # Create a black image (all zeros) or white image (all 255) of the specified dimensions
38 | value = 255 if inverted else 0
39 | empty_mask = Image.new('L', (width, height), value)
40 | # Convert to tensor
41 | return pil2tensor(empty_mask)
42 | except Exception as e:
43 | print(f"Error creating empty mask: {e}")
44 | # As a fallback, create a 1x1 pixel mask
45 | value = 255 if inverted else 0
46 | fallback_mask = Image.new('L', (1, 1), value)
47 | return pil2tensor(fallback_mask)
48 |
49 | # Add a new helper function for placing images on a canvas with proper positioning
50 | def place_on_canvas(image_tensor, canvas_width, canvas_height, left, top, scale_x=1.0, scale_y=1.0, mask_tensor=None, invert_mask=True):
51 | """
52 | Place an image tensor on a canvas of specified dimensions at the given position.
53 | Images exceeding canvas boundaries will be truncated.
54 | Preserves transparency of original image and ensures areas not covered by image are transparent.
55 |
56 | Parameters:
57 | - image_tensor: Torch tensor image to place
58 | - canvas_width, canvas_height: Dimensions of the target canvas
59 | - left, top: Position to place the image (top-left corner)
60 | - scale_x, scale_y: Optional scaling factors
61 | - mask_tensor: Optional mask tensor to apply to the image
62 | - invert_mask: Whether to invert the final mask (True means white=masked, black=unmasked)
63 |
64 | Returns:
65 | - Tuple of (positioned image tensor, positioned mask tensor)
66 | """
67 | if image_tensor is None:
68 | return None, None
69 |
70 | try:
71 | # Convert tensor to PIL for manipulation
72 | pil_image = tensor2pil(image_tensor)
73 |
74 | # Convert to RGBA to preserve transparency
75 | if pil_image.mode != 'RGBA':
76 | pil_image = pil_image.convert('RGBA')
77 |
78 | # Create alpha channel if not already present
79 | if len(pil_image.split()) < 4:
80 | r, g, b = pil_image.split()
81 | alpha = Image.new('L', pil_image.size, 255) # Start with fully opaque
82 | pil_image = Image.merge('RGBA', (r, g, b, alpha))
83 |
84 | # Convert mask tensor to PIL if provided
85 | pil_mask = None
86 | if mask_tensor is not None:
87 | pil_mask = tensor2pil(mask_tensor)
88 | # Convert to grayscale if it's not already
89 | if pil_mask.mode != 'L':
90 | pil_mask = pil_mask.convert('L')
91 |
92 | # Apply scaling if needed (different from 1.0)
93 | original_width, original_height = pil_image.size
94 | if scale_x != 1.0 or scale_y != 1.0:
95 | new_width = max(1, int(original_width * scale_x))
96 | new_height = max(1, int(original_height * scale_y))
97 | if new_width > 0 and new_height > 0: # Ensure dimensions are valid
98 | pil_image = pil_image.resize((new_width, new_height), Image.Resampling.LANCZOS)
99 | if pil_mask is not None:
100 | pil_mask = pil_mask.resize((new_width, new_height), Image.Resampling.LANCZOS)
101 |
102 | # Create a transparent canvas for the image (RGBA with alpha=0)
103 | canvas = Image.new('RGBA', (canvas_width, canvas_height), (0, 0, 0, 0))
104 |
105 | # Create a mask canvas - start with fully masked (255 for inverted masks)
106 | # This ensures anything outside the bounding box is considered masked
107 | mask_canvas = Image.new('L', (canvas_width, canvas_height), 255 if invert_mask else 0)
108 |
109 | # Calculate position with integer precision
110 | pos_left = int(left)
111 | pos_top = int(top)
112 |
113 | # Paste the image onto the canvas with transparency
114 | # PIL will handle truncation automatically when the image extends beyond canvas boundaries
115 | canvas.paste(pil_image, (pos_left, pos_top), pil_image.split()[3]) # Use alpha channel as mask
116 |
117 | # Get the dimensions of the placed image
118 | placed_width = min(pil_image.width, canvas_width - pos_left) if pos_left < canvas_width else 0
119 | placed_height = min(pil_image.height, canvas_height - pos_top) if pos_top < canvas_height else 0
120 |
121 | # Create a bounding box mask (black inside bounding box, white outside)
122 | if placed_width > 0 and placed_height > 0:
123 | # For the area where the image is placed, we need to:
124 | # - If invert_mask=True (white=masked): set to 0 so the image area reads as unmasked
125 | # - If invert_mask=False (white=unmasked): set to 0 so the image area reads as masked
126 | bbox_value = 0
127 |
128 | # Create a temporary mask for the bounding box area
129 | bbox_rect = Image.new('L', (placed_width, placed_height), bbox_value)
130 |
131 | # Paste this rectangle onto our mask canvas at the image position
132 | # For inverted masks, this means the area where the image will be placed starts as unmasked (0)
133 | # and the rest of the canvas is masked (255)
134 | mask_canvas.paste(bbox_rect, (pos_left, pos_top))
135 |
136 | # Process the input mask if provided
137 | if pil_mask is not None:
138 | # Create a temporary transparent canvas for the input mask
139 | input_mask_canvas = Image.new('L', (canvas_width, canvas_height), 0)
140 |
141 | # Paste the input mask at the correct position
142 | input_mask_canvas.paste(pil_mask, (pos_left, pos_top))
143 |
144 | # If we're using inverted masks, we need to invert the input mask before combining
145 | if invert_mask:
146 | input_mask_canvas = ImageOps.invert(input_mask_canvas)
147 |
148 | # Now combine with our bounding box mask
149 | # For inverted masks, we take the maximum value (logical OR)
150 | # This ensures that:
151 | # - Areas outside bbox are always masked (255 for inverted)
152 | # - Areas inside bbox are masked according to input mask
153 | mask_array = np.array(mask_canvas)
154 | input_mask_array = np.array(input_mask_canvas)
155 |
156 | if invert_mask:
157 | # For inverted masks: black=unmasked (0), white=masked (255)
158 | # Take the maximum value at each point (logical OR)
159 | combined_array = np.maximum(mask_array, input_mask_array)
160 | else:
161 | # For normal masks: white=unmasked (255), black=masked (0)
162 | # Take the minimum value at each point (logical AND)
163 | combined_array = np.minimum(mask_array, input_mask_array)
164 |
165 | # Convert back to PIL
166 | mask_canvas = Image.fromarray(combined_array.astype(np.uint8))
167 |
168 | # Convert back to tensor - need to handle RGBA to RGB conversion for ComfyUI compatibility
169 | # First extract RGB channels and create an RGB image
170 | r, g, b, a = canvas.split()
171 | rgb_image = Image.merge('RGB', (r, g, b))
172 |
173 | # Convert back to tensors
174 | positioned_image_tensor = pil2tensor(rgb_image)
175 | positioned_mask_tensor = pil2tensor(mask_canvas)
176 |
177 | return positioned_image_tensor, positioned_mask_tensor
178 | except Exception as e:
179 | print(f"Error placing image on canvas: {e}")
180 | return image_tensor, mask_tensor # Return original on error
181 |
182 |
183 | routes = PromptServer.instance.routes
184 | @routes.post('/compositor/done')
185 | async def receivedDone(request):
186 | return web.json_response({})
187 |
188 | class Compositor3:
189 | file = "new.png"
190 | result = None
191 | configCache = None
192 |
193 | @classmethod
194 | def IS_CHANGED(cls, **kwargs):
195 | fabricData = kwargs.get("fabricData")
196 | # print(fabricData)
197 | return fabricData
198 |
199 | @classmethod
200 | def INPUT_TYPES(cls):
201 | return {
202 | "required": {
203 | "config": ("COMPOSITOR_CONFIG", {"forceInput": True}),
204 | "fabricData": ("STRING", {"default": "{}"}),
205 | "imageName": ("STRING", {"default": "new.png"}),
206 | },
207 | "optional": {
208 | "tools": ("BOOLEAN", {"forceInput": True, "default": True}),
209 | "extendedConfig": ("COMPOSITOR_CONFIG", {"default": None}), # Made extendedConfig optional
210 | },
211 | "hidden": {
212 | "extra_pnginfo": "EXTRA_PNGINFO",
213 | "node_id": "UNIQUE_ID",
214 | },
215 | }
216 |
217 | # Updated RETURN_TYPES to use new COMPOSITOR_OUTPUT_MASKS type
218 | RETURN_TYPES = ("STRING", "IMAGE", "COMPOSITOR_OUTPUT_MASKS")
219 | RETURN_NAMES = ("transforms", "image", "layer_outputs")
220 | FUNCTION = "composite"
221 | CATEGORY = "image"
222 |
223 | def composite(self, **kwargs):
224 | # https://blog.miguelgrinberg.com/post/how-to-make-python-wait
225 | node_id = kwargs.pop('node_id', None)
226 |
227 |
228 | imageName = kwargs.get('imageName', "new.png")
229 |
230 | config = kwargs.get('config', "default")
231 | extendedConfig = kwargs.get('extendedConfig', None) # Get extendedConfig, default to None if not provided
232 | padding = config["padding"]
233 | invertMask = config["invertMask"]
234 | width = config["width"]
235 | height = config["height"]
236 | config_node_id = config["node_id"]
237 | onConfigChanged = config["onConfigChanged"]
238 | names = config["names"]
239 | fabricData = kwargs.get("fabricData")
240 |
241 | configChanged = self.configCache != config
242 | # print(configChanged)
243 | # print(config)
244 | # print(self.configCache)
245 |
246 |
247 | self.configCache = config
248 | ui = {
249 | "test": ("value",),
250 | "padding": [padding],
251 | "width": [width],
252 | "height": [height],
253 | "config_node_id": [config_node_id],
254 | "node_id": [node_id],
255 | "names": names,
256 | "fabricData": [fabricData],
257 | "awaited": [self.result],
258 | "configChanged": [configChanged],
259 | "onConfigChanged": [onConfigChanged],
260 | }
261 |
262 | # break and send a message to the gui as if it was "executed" below
263 | detail = {"output": ui, "node": node_id}
264 | PromptServer.instance.send_sync("compositor_init", detail)
265 |
266 | imageExists = folder_paths.exists_annotated_filepath(imageName)
267 | # block when config changed
268 | if imageName == "new.png" or not imageExists or configChanged:
269 | # Return ExecutionBlocker for all outputs if blocked
270 | blocker_result = tuple([ExecutionBlocker(None)] * len(self.RETURN_TYPES))
271 | return {
272 | "ui": ui,
273 | "result": blocker_result
274 | }
275 | else: # Only process images if not blocked
276 | image_path = folder_paths.get_annotated_filepath(imageName)
277 | i = Image.open(image_path)
278 | i = ImageOps.exif_transpose(i)
279 | if i.mode == 'I':
280 | i = i.point(lambda i: i * (1 / 255))
281 | image = i.convert("RGB")
282 | image = np.array(image).astype(np.float32) / 255.0
283 | image = torch.from_numpy(image)[None, ]
284 |
285 | # --- Image Rotation Logic ---
286 | rotated_images = [None] * 8
287 | rotated_masks = [None] * 8 # Array to hold transformed masks
288 | canvas_width = 512 # Default canvas width
289 | canvas_height = 512 # Default canvas height
290 |
291 | try:
292 | fabric_data_parsed = json.loads(fabricData)
293 | # Get canvas dimensions from fabric data if available
294 | canvas_width = int(fabric_data_parsed.get("width", 512))
295 | canvas_height = int(fabric_data_parsed.get("height", 512))
296 | print(f"Canvas dimensions: {canvas_width}x{canvas_height}")
297 |
298 | # Get both transforms and bboxes arrays
299 | fabric_transforms = fabric_data_parsed.get('transforms', [])
300 | fabric_bboxes = fabric_data_parsed.get('bboxes', [])
301 |
302 | # Make sure we have valid arrays
303 | if not fabric_transforms:
304 | fabric_transforms = [{} for _ in range(8)]
305 | if not fabric_bboxes:
306 | fabric_bboxes = [{} for _ in range(8)]
307 |
308 | # Initialize empty dictionary if extendedConfig is None
309 | if extendedConfig is None:
310 | extendedConfig = {}
311 |
312 | for idx in range(8):
313 | image_key = f"image{idx + 1}"
314 | mask_key = f"mask{idx + 1}"
315 | # Get image and mask from extendedConfig, return None if not found
316 | original_image_tensor = extendedConfig.get(image_key) if extendedConfig else None
317 | original_mask_tensor = extendedConfig.get(mask_key) if extendedConfig else None
318 |
319 | if original_image_tensor is not None and idx < len(fabric_transforms):
320 | # Get transformation data for rotation and scaling
321 | transform = fabric_transforms[idx]
322 | angle = transform.get('angle', 0)
323 | scale_x = transform.get('scaleX', 1.0)
324 | scale_y = transform.get('scaleY', 1.0)
325 |
326 | # Get positioning data from bboxes (these are the actual coordinates to use)
327 | bbox = fabric_bboxes[idx] if idx < len(fabric_bboxes) else {'left': 0, 'top': 0}
328 | left = bbox.get('left', 0)
329 | top = bbox.get('top', 0)
330 |
331 | print(f"Processing image {idx+1}: angle={angle}, position=({left},{top}), scale=({scale_x},{scale_y})")
332 | if original_mask_tensor is not None:
333 | print(f" - Mask found for image {idx+1}")
334 |
335 | # First rotate if needed
336 | if angle != 0:
337 | try:
338 | pil_image = tensor2pil(original_image_tensor)
339 | rotated_pil = pil_image.rotate(-angle, expand=True, resample=Image.Resampling.BILINEAR)
340 | rotated_tensor = pil2tensor(rotated_pil)
341 |
342 | # Handle mask rotation if mask exists
343 | rotated_mask_tensor = None
344 | if original_mask_tensor is not None:
345 | pil_mask = tensor2pil(original_mask_tensor)
346 | rotated_pil_mask = pil_mask.rotate(-angle, expand=True, resample=Image.Resampling.BILINEAR)
347 | rotated_mask_tensor = pil2tensor(rotated_pil_mask)
348 |
349 | # Place the rotated image and mask on canvas using bbox position
350 | positioned_tensor, positioned_mask = place_on_canvas(
351 | rotated_tensor,
352 | canvas_width,
353 | canvas_height,
354 | left - padding, # Subtract padding from left position
355 | top - padding, # Subtract padding from top position
356 | scale_x,
357 | scale_y,
358 | rotated_mask_tensor
359 | )
360 | rotated_images[idx] = positioned_tensor
361 | rotated_masks[idx] = positioned_mask
362 | except Exception as e:
363 | print(f"Error processing image {idx+1}: {e}")
364 | # Fallback - place the original image using bbox position
365 | positioned_tensor, positioned_mask = place_on_canvas(
366 | original_image_tensor,
367 | canvas_width,
368 | canvas_height,
369 | left,
370 | top,
371 | scale_x,
372 | scale_y,
373 | original_mask_tensor
374 | )
375 | rotated_images[idx] = positioned_tensor
376 | rotated_masks[idx] = positioned_mask
377 | else:
378 | # No rotation needed, just position and scale using bbox position
379 | # Subtract padding from left and top coordinates to correctly position in output
380 | positioned_tensor, positioned_mask = place_on_canvas(
381 | original_image_tensor,
382 | canvas_width,
383 | canvas_height,
384 | left - padding, # Subtract padding from left position
385 | top - padding, # Subtract padding from top position
386 | scale_x,
387 | scale_y,
388 | original_mask_tensor
389 | )
390 | rotated_images[idx] = positioned_tensor
391 | rotated_masks[idx] = positioned_mask
392 | elif original_image_tensor is not None:
393 | # No transform data, just use the original
394 | rotated_images[idx] = original_image_tensor
395 | rotated_masks[idx] = original_mask_tensor # Use original mask if available
396 |
397 | # Before returning results, replace any None mask values with empty masks
398 | # to ensure the workflow doesn't break when connecting to mask inputs
399 | for idx in range(8):
400 | if rotated_masks[idx] is None:
401 | # Create empty mask with the same dimensions as canvas
402 | rotated_masks[idx] = create_empty_mask(canvas_width, canvas_height)
403 |
404 | # Create a dictionary to hold all images and masks
405 | compositor_output_masks = {
406 | "images": rotated_images,
407 | "masks": rotated_masks,
408 | "canvas_width": canvas_width,
409 | "canvas_height": canvas_height
410 | }
411 |
412 | return {
413 | "ui": ui,
414 | "result": (fabricData, image, compositor_output_masks)
415 | }
416 | except json.JSONDecodeError:
417 | print("Error parsing fabricData JSON. Skipping image positioning.")
418 | # Fallback in case of JSON parsing error
419 | empty_output = {
420 | "images": [None] * 8,
421 | "masks": [None] * 8,
422 | "canvas_width": 512,
423 | "canvas_height": 512
424 | }
425 | return {
426 | "ui": ui,
427 | "result": (fabricData, image, empty_output)
428 | }
429 | except Exception as e:
430 | print(f"An unexpected error occurred during image processing: {e}")
431 | # Fallback in case of other errors
432 | empty_output = {
433 | "images": [None] * 8,
434 | "masks": [None] * 8,
435 | "canvas_width": 512,
436 | "canvas_height": 512
437 | }
438 | return {
439 | "ui": ui,
440 | "result": (fabricData, image, empty_output)
441 | }
--------------------------------------------------------------------------------
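Note: a quick standalone sketch of `place_on_canvas` above, with hypothetical tensors in ComfyUI's [batch, height, width, channels] layout (this example is not part of the repository):

```python
# Hypothetical check: a 128x128 layer, scaled 2x, dropped at (100, 50) on a 512x512 canvas.
import torch

layer = torch.rand(1, 128, 128, 3)  # ComfyUI IMAGE tensor: [batch, H, W, C]
img, mask = place_on_canvas(layer, 512, 512, left=100, top=50, scale_x=2.0, scale_y=2.0)
print(img.shape)   # torch.Size([1, 512, 512, 3])
print(mask.shape)  # torch.Size([1, 512, 512]); white (255) outside the layer, since invert_mask defaults to True
```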
/CompositorColorPicker.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 | from PIL import Image
4 |
5 | class CompositorColorPicker:
6 | """
7 | This node converts RGB values (0-255) to various color formats:
8 | - Hex color string (#RRGGBB)
9 | - 16-bit color value (RGB565 or RGB555 format)
10 | - 24-bit color value (compatible with ComfyUI's emptyImage node)
11 | - Preview image of the color
12 | """
13 |
14 | @classmethod
15 | def INPUT_TYPES(cls):
16 | return {
17 | "required": {
18 | "red": ("INT", {"default": 0, "min": 0, "max": 255}),
19 | "green": ("INT", {"default": 0, "min": 0, "max": 255}),
20 | "blue": ("INT", {"default": 0, "min": 0, "max": 255}),
21 | "format": (["RGB565", "RGB555"], {"default": "RGB565"}),
22 | },
23 | }
24 |
25 | RETURN_TYPES = ("STRING", "INT", "INT", "IMAGE")
26 | RETURN_NAMES = ("color_string", "color_16bit", "color_24bit", "color_preview")
27 | FUNCTION = "convert_color"
28 | CATEGORY = "image/color"
29 |
30 | def rgb_to_16bit(self, r, g, b, format='RGB565'):
31 | """
32 | Convert RGB values (0-255) to 16-bit color value
33 |
34 | Args:
35 | r, g, b: 8-bit color values (0-255)
36 | format: 'RGB565' or 'RGB555'
37 |
38 | Returns:
39 | 16-bit color value
40 | """
41 | if format == 'RGB565':
42 | r5 = int(r * 31 / 255)
43 | g6 = int(g * 63 / 255)
44 | b5 = int(b * 31 / 255)
45 | return (r5 << 11) | (g6 << 5) | b5
46 |
47 | elif format == 'RGB555':
48 | r5 = int(r * 31 / 255)
49 | g5 = int(g * 31 / 255)
50 | b5 = int(b * 31 / 255)
51 | return (r5 << 10) | (g5 << 5) | b5
52 |
53 | else:
54 | raise ValueError("Format must be 'RGB565' or 'RGB555'")
55 |
56 | def rgb_to_24bit(self, r, g, b):
57 | """
58 | Convert RGB values (0-255) to 24-bit color value (0-16777215)
59 | This is compatible with ComfyUI's emptyImage node.
60 |
61 | Args:
62 | r, g, b: 8-bit color values (0-255)
63 |
64 | Returns:
65 | 24-bit color value
66 | """
67 | return (r << 16) | (g << 8) | b
68 |
69 | def create_color_preview(self, r, g, b, size=128):
70 | """
71 | Create a preview image of the color
72 |
73 | Args:
74 | r, g, b: 8-bit color values (0-255)
75 | size: Size of the preview image in pixels
76 |
77 | Returns:
78 | Tensor representing the color preview image
79 | """
80 | # Create a solid color image
81 | img = Image.new('RGB', (size, size), (r, g, b))
82 |
83 | # Convert to numpy array and normalize to 0-1 range
84 | img_np = np.array(img).astype(np.float32) / 255.0
85 |
86 | # Convert to tensor with batch dimension
87 | return torch.from_numpy(img_np)[None, ]
88 |
89 | def convert_color(self, red, green, blue, format="RGB565"):
90 | """
91 | Convert RGB values to multiple color formats and preview image.
92 |
93 | Args:
94 | red, green, blue: 8-bit color values (0-255)
95 | format: 16-bit color format ('RGB565' or 'RGB555')
96 |
97 | Returns:
98 | Tuple of (color_string, color_16bit, color_24bit, color_preview)
99 | """
100 | # Generate the hex color string (e.g., "#FF0000" for red)
101 | color_string = f"#{red:02X}{green:02X}{blue:02X}"
102 |
103 | # Calculate the 16-bit color value
104 | color_16bit = self.rgb_to_16bit(red, green, blue, format)
105 |
106 | # Calculate the 24-bit color value (compatible with ComfyUI's emptyImage)
107 | color_24bit = self.rgb_to_24bit(red, green, blue)
108 |
109 | # Create a preview image of the color
110 | color_preview = self.create_color_preview(red, green, blue)
111 |
112 | return (color_string, color_16bit, color_24bit, color_preview)
--------------------------------------------------------------------------------
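A worked example of the packing math above, with hand-checkable values (pure red):

```python
# Pure red (255, 0, 0) through the converters above; results verified by hand.
picker = CompositorColorPicker()
print(picker.rgb_to_16bit(255, 0, 0, 'RGB565'))  # 63488 == 0xF800: r5 = 31, shifted left by 11
print(picker.rgb_to_16bit(255, 0, 0, 'RGB555'))  # 31744 == 0x7C00: r5 = 31, shifted left by 10
print(picker.rgb_to_24bit(255, 0, 0))            # 16711680 == 0xFF0000, usable with emptyImage
```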
/CompositorConfig3.py:
--------------------------------------------------------------------------------
1 | import nodes
2 | import numpy as np
3 | import base64
4 | from io import BytesIO
5 | from PIL import Image
6 | import folder_paths
7 | import torch
8 | import torch.nn.functional as F
9 | import math
10 | from comfy.utils import common_upscale
11 |
12 | MAX_RESOLUTION = nodes.MAX_RESOLUTION
13 |
14 |
15 | # these probably exist elsewhere as utils
16 | def tensor2pil(image):
17 | return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))
18 |
19 |
20 | # these probably exist elsewhere as utils
21 | def toBase64ImgUrl(img):
22 | bytesIO = BytesIO()
23 | img.save(bytesIO, format="PNG")
24 | img_types = bytesIO.getvalue()
25 | img_base64 = base64.b64encode(img_types)
26 | return f"data:image/png;base64,{img_base64.decode('utf-8')}"
27 |
28 |
29 | class CompositorConfig3:
30 | NOT_IDEMPOTENT = True
31 |
32 | @classmethod
33 | def INPUT_TYPES(cls):
34 | return {
35 | "required": {
36 | "width": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 32}),
37 | "height": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 32}),
38 | "padding": ("INT", {"default": 100, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
39 | "normalizeHeight": ("BOOLEAN", {"default": False}),
40 | "onConfigChanged": ("BOOLEAN", {"label_off": "stop", "label_on": "Grab and Continue", "default": False}),
41 | "invertMask": ("BOOLEAN", {"default": False}),
42 | "initialized": ("STRING", {"default": ""}),
43 | },
44 | "optional": {
45 | "image1": ("IMAGE",),
46 | "mask1": ("MASK",),
47 | "image2": ("IMAGE",),
48 | "mask2": ("MASK",),
49 | "image3": ("IMAGE",),
50 | "mask3": ("MASK",),
51 | "image4": ("IMAGE",),
52 | "mask4": ("MASK",),
53 | "image5": ("IMAGE",),
54 | "mask5": ("MASK",),
55 | "image6": ("IMAGE",),
56 | "mask6": ("MASK",),
57 | "image7": ("IMAGE",),
58 | "mask7": ("MASK",),
59 | "image8": ("IMAGE",),
60 | "mask8": ("MASK",),
61 | },
62 | "hidden": {
63 | "prompt": "PROMPT",
64 | "extra_pnginfo": "EXTRA_PNGINFO",
65 | "node_id": "UNIQUE_ID",
66 | },
67 | }
68 |
69 | RETURN_TYPES = ("COMPOSITOR_CONFIG", "COMPOSITOR_CONFIG")
70 | RETURN_NAMES = ("config", "extendedConfig")
71 |
72 | FUNCTION = "configure"
73 |
74 | CATEGORY = "image"
75 | DESCRIPTION = """
76 | The compositor node
77 | - pass up to 8 images
78 | - optionally pass their masks (invert them)
79 | - masks are automatically applied and internally the compositor is passed an rgba
80 | - use the sizing controls to configure the compositor, it will be resized on run
81 | - set the flag to pause to allow yourself time to build your composition (pause acts on compositor, not the config node)
82 | """
83 |
84 | def configure(self, **kwargs):
85 | # capture all inputs for extendedConfig
86 | all_inputs = kwargs.copy()
87 | # extract the images
88 | # convert them from tensor to pil and then to base 64
89 | # send as custom to be able to be used by ui
90 | # finally return the resulting image (the composite "image" is seen as input but it's actually the output)
91 |
92 | image1 = kwargs.pop('image1', None)
93 | image2 = kwargs.pop('image2', None)
94 | image3 = kwargs.pop('image3', None)
95 | image4 = kwargs.pop('image4', None)
96 | image5 = kwargs.pop('image5', None)
97 | image6 = kwargs.pop('image6', None)
98 | image7 = kwargs.pop('image7', None)
99 | image8 = kwargs.pop('image8', None)
100 | mask1 = kwargs.pop('mask1', None)
101 | mask2 = kwargs.pop('mask2', None)
102 | mask3 = kwargs.pop('mask3', None)
103 | mask4 = kwargs.pop('mask4', None)
104 | mask5 = kwargs.pop('mask5', None)
105 | mask6 = kwargs.pop('mask6', None)
106 | mask7 = kwargs.pop('mask7', None)
107 | mask8 = kwargs.pop('mask8', None)
108 | # pause = kwargs.pop('pause', False)
109 | padding = kwargs.pop('padding', 100)
110 | width = kwargs.pop('width', 512)
111 | height = kwargs.pop('height', 512)
112 | invertMask = kwargs.pop('invertMask', False)
113 | normalizeHeight = kwargs.pop('normalizeHeight', False)
114 | # grabAndContinue, stop
115 | onConfigChanged = kwargs.pop('onConfigChanged', False)
116 | node_id = kwargs.pop('node_id', None)
117 |
118 | images = [image1, image2, image3, image4, image5, image6, image7, image8, ]
119 | masks = [mask1, mask2, mask3, mask4, mask5, mask6, mask7, mask8, ]
120 | input_images = []
121 |
122 | # apply the masks to the images if any so that we get a rgba
123 | # then pass the rgba in the return value
124 | counter = 0
125 | for (img, mask) in zip(images, masks):
126 | if img is not None:
127 |
128 | if normalizeHeight:
129 | # print(counter)
130 | counter = counter+1
131 | #img = self.upscale(img, "lanczos", height, "height", "disabled")
132 | processor = ImageProcessor()
133 | oldimg = img
134 | img = processor.scale_image(img, height)
135 | #print(oldimg == img)
136 | # tensor
137 |
138 | if mask is not None:
139 | # if normalizeHeight:
140 | # # print(mask)
141 | # #mask = self.upscale(img, "lanczos", height, "height", "disabled")
142 | # mask = prepare_mask(mask, foo_is_batch=True)
143 | # mask = processor.scale_image(mask, height)
144 |
145 | # apply the mask and return
146 |
147 | masked = self.apply_mask(img, mask, invertMask)
148 | # self.masked = masked[0]
149 |
150 | i = tensor2pil(masked[0])
151 | input_images.append(toBase64ImgUrl(i))
152 | else:
153 | # no need to apply the mask
154 | i = tensor2pil(img)
155 | input_images.append(toBase64ImgUrl(i))
156 | else:
157 | # input is None, forward
158 | input_images.append(img)
159 |
160 | self.ensureEmpty()
161 |
162 | res = {
163 | "node_id": node_id,
164 | "width": width,
165 | "height": height,
166 | "padding": padding,
167 | "names": input_images,
168 | "onConfigChanged": onConfigChanged,
169 | "normalizeHeight": normalizeHeight,
170 | "invertMask": invertMask,
171 | }
172 | return (res, all_inputs)
173 |
174 | def apply_mask(self, image: torch.Tensor, alpha: torch.Tensor, invertMask=False):
175 | batch_size = min(len(image), len(alpha))
176 | out_images = []
177 |
178 | if invertMask:
179 | alpha = 1.0 - resize_mask(alpha, image.shape[1:])
180 | else:
181 | alpha = resize_mask(alpha, image.shape[1:])
182 |
183 | for i in range(batch_size):
184 | out_images.append(torch.cat((image[i][:, :, :3], alpha[i].unsqueeze(2)), dim=2))
185 |
186 | result = (torch.stack(out_images),)
187 | return result
188 |
189 | # ensures the placeholder test_empty.png exists
190 | def ensureEmpty(self):
191 | image = "test_empty.png"
192 | if not folder_paths.exists_annotated_filepath(image):
193 | # print("it does not exist")
194 | img = Image.new('RGB', (512, 512), 'white')
195 | img.save(folder_paths.get_annotated_filepath(image))
196 |
197 | def upscale(self, image, upscale_method, side_length: int, side: str, crop):
198 | samples = image.movedim(-1, 1)
199 |
200 | size = get_image_size(image)
201 |
202 | width_B = int(size[0])
203 | height_B = int(size[1])
204 |
205 | width = width_B
206 | height = height_B
207 |
208 | def determineSide(_side: str) -> tuple[int, int]:
209 | width, height = 0, 0
210 | if _side == "Width":
211 | height_ratio = height_B / width_B
212 | width = side_length
213 | height = height_ratio * width
214 | elif _side == "Height":
215 | width_ratio = width_B / height_B
216 | height = side_length
217 | width = width_ratio * height
218 | return width, height
219 |
220 | if side == "Longest":
221 | if width > height:
222 | width, height = determineSide("Width")
223 | else:
224 | width, height = determineSide("Height")
225 | elif side == "Shortest":
226 | if width < height:
227 | width, height = determineSide("Width")
228 | else:
229 | width, height = determineSide("Height")
230 | else:
231 | width, height = determineSide(side)
232 |
233 | width = math.ceil(width)
234 | height = math.ceil(height)
235 |
236 | cls = common_upscale(samples, width, height, upscale_method, crop)
237 | cls = cls.movedim(1, -1)
238 | return (cls,)
239 |
240 |
241 | def get_image_size(IMAGE) -> tuple[int, int]:
242 | samples = IMAGE.movedim(-1, 1)
243 | size = samples.shape[3], samples.shape[2]
244 | # size = size.movedim(1, -1)
245 | return size
246 |
247 |
248 | def resize_mask(mask, shape):
249 | return torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])),
250 | size=(shape[0], shape[1]), mode="bilinear").squeeze(1)
251 |
252 | class ImageProcessor:
253 | def scale_image(self, image_tensor, new_height):
254 | # Ensure the input tensor is in the format [batch_size, height, width, channels]
255 | if image_tensor.ndim != 4:
256 | raise ValueError("Expected image tensor to have shape [batch_size, height, width, channels]")
257 |
258 | batch_size, original_height, original_width, channels = image_tensor.shape
259 |
260 | if channels not in (1, 3, 4):
261 | raise ValueError("Image tensor must have 1 (grayscale), 3 (RGB), or 4 (RGBA) channels")
262 |
263 | # Calculate the new width to maintain the aspect ratio
264 | aspect_ratio = original_width / original_height
265 | new_width = int(new_height * aspect_ratio)
266 |
267 | # Permute to match PyTorch's expected format [batch_size, channels, height, width]
268 | image_tensor = image_tensor.permute(0, 3, 1, 2) # [batch_size, channels, height, width]
269 |
270 | # Resize images to the new dimensions (new_height, new_width)
271 | resized_images = F.interpolate(image_tensor, size=(new_height, new_width), mode='bilinear', align_corners=False)
272 |
273 | # Permute back to the original format [batch_size, height, width, channels]
274 | resized_images = resized_images.permute(0, 2, 3, 1) # [batch_size, height, width, channels]
275 |
276 | return resized_images
277 |
278 |
279 | def prepare_mask(mask, foo_is_batch):
280 | """
281 | Prepares the mask tensor to have shape [batch_size, height, width, channels].
282 |
283 | Arguments:
284 | mask: Tensor of shape [foo, width, height]
285 | foo_is_batch: Bool, True if `foo` represents the batch size, False if it represents the channel.
286 | """
287 | if foo_is_batch:
288 | # Case where `foo` is the batch size, reshape to [batch_size, height, width, channels=1]
289 | mask = mask.unsqueeze(3) # Add a channel dimension [batch_size, width, height] -> [batch_size, width, height, 1]
290 | else:
291 | # Case where `foo` is the channel dimension, reshape to [1, height, width, channels]
292 | mask = mask.unsqueeze(0).permute(0, 2, 3, 1) # Add batch dim and permute to [1, height, width, channels]
293 |
294 | return mask
--------------------------------------------------------------------------------
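A minimal sketch of `apply_mask` above with dummy tensors (shapes follow ComfyUI's conventions; the example itself is hypothetical):

```python
# Dummy-tensor sketch: apply_mask merges a MASK into an IMAGE as its alpha channel.
import torch

cfg = CompositorConfig3()
img = torch.rand(1, 64, 64, 3)  # ComfyUI IMAGE: [batch, H, W, C]
msk = torch.ones(1, 64, 64)     # ComfyUI MASK:  [batch, H, W]
(rgba,) = cfg.apply_mask(img, msk, invertMask=False)
print(rgba.shape)  # torch.Size([1, 64, 64, 4]): RGB plus the resized mask as alpha
```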
/CompositorMasksOutputV3.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from PIL import Image
3 | import numpy as np
4 |
5 | class CompositorMasksOutputV3:
6 | """
7 | This node unpacks the COMPOSITOR_OUTPUT_MASKS from Compositor3 into individual image and mask outputs.
8 | Makes the Compositor's interface cleaner by separating the layer outputs into a dedicated node.
9 | """
10 |
11 | @classmethod
12 | def INPUT_TYPES(cls):
13 | return {
14 | "required": {
15 | "layer_outputs": ("COMPOSITOR_OUTPUT_MASKS",),
16 | },
17 | "hidden": {
18 | "subtract_masks": ("BOOLEAN", {"default": False}),
19 | }
20 | }
21 |
22 | RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "IMAGE", "IMAGE", "IMAGE", "IMAGE", "IMAGE",
23 | "MASK", "MASK", "MASK", "MASK", "MASK", "MASK", "MASK", "MASK")
24 | RETURN_NAMES = ("image_1", "image_2", "image_3", "image_4",
25 | "image_5", "image_6", "image_7", "image_8",
26 | "mask_1", "mask_2", "mask_3", "mask_4",
27 | "mask_5", "mask_6", "mask_7", "mask_8")
28 | FUNCTION = "unpack_outputs"
29 | CATEGORY = "image"
30 |
31 | def unpack_outputs(self, layer_outputs, subtract_masks=False):
32 | """
33 | Unpacks the layer_outputs dictionary into individual image and mask outputs.
34 |
35 | Args:
36 | layer_outputs: Dictionary containing 'images', 'masks', 'canvas_width', and 'canvas_height'
37 | subtract_masks: When True, each mask will have higher-numbered masks subtracted from it
38 | (e.g., mask 6 = mask 6 - mask 7, mask 5 = mask 5 - mask 6, etc.)
39 |
40 | Returns:
41 | Tuple of 16 tensors: 8 images and 8 masks in order
42 | """
43 | images = layer_outputs.get("images", [None] * 8)
44 | masks = layer_outputs.get("masks", [None] * 8)
45 |
46 | # Get canvas dimensions for creating empty images/masks if needed
47 | canvas_width = layer_outputs.get("canvas_width", 512)
48 | canvas_height = layer_outputs.get("canvas_height", 512)
49 |
50 | # Create a standard empty black image for missing values
51 | def create_empty_image(width, height):
52 | empty_img = Image.new('RGB', (width, height), (0, 0, 0))
53 | img_np = np.array(empty_img).astype(np.float32) / 255.0
54 | return torch.from_numpy(img_np)[None, ]
55 |
56 | # Create a standard empty mask (white) for missing values
57 | def create_empty_mask(width, height):
58 | empty_mask = Image.new('L', (width, height), 255) # White mask (completely transparent)
59 | mask_np = np.array(empty_mask).astype(np.float32) / 255.0
60 | return torch.from_numpy(mask_np)[None, ]
61 |
62 | # Ensure we have 8 images and masks
63 | result_images = []
64 | result_masks = []
65 |
66 | for i in range(8):
67 | # Handle images
68 | if i < len(images) and images[i] is not None:
69 | result_images.append(images[i])
70 | else:
71 | result_images.append(create_empty_image(canvas_width, canvas_height))
72 |
73 | # Handle masks
74 | if i < len(masks) and masks[i] is not None:
75 | result_masks.append(masks[i])
76 | else:
77 | result_masks.append(create_empty_mask(canvas_width, canvas_height))
78 |
79 | # Apply mask subtraction if enabled
80 | if subtract_masks:
81 | processed_masks = result_masks.copy()
82 |
83 | # We start from the second-highest mask (index 6, mask 7) and work down
84 | # mask 8 (index 7) remains unchanged
85 | for i in range(6, -1, -1):
86 | current_mask = processed_masks[i]
87 | higher_mask = processed_masks[i+1]
88 |
89 | # Where higher mask has black pixels (visible content), make current mask white (transparent)
90 | # In mask convention: black (0) = visible, white (1) = transparent
91 | black_pixels_in_higher = higher_mask < 0.5
92 |
93 | # Apply the subtraction - where higher mask has black pixels, make current mask white
94 | processed_masks[i] = torch.where(black_pixels_in_higher, torch.ones_like(current_mask), current_mask)
95 |
96 | result_masks = processed_masks
97 |
98 | # Return all images and masks as a flat tuple
99 | return (*result_images, *result_masks)
--------------------------------------------------------------------------------
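A tiny illustration of the subtraction rule in `unpack_outputs` above, using two hypothetical 1x2 masks (black = 0.0 = visible, white = 1.0 = transparent, per the comments in the code):

```python
# Two hypothetical masks showing the torch.where subtraction used above.
import torch

mask7 = torch.tensor([[[0.0, 1.0]]])  # layer 7 covers the left pixel
mask6 = torch.tensor([[[0.0, 0.0]]])  # layer 6 covers both pixels
mask6 = torch.where(mask7 < 0.5, torch.ones_like(mask6), mask6)
print(mask6)  # tensor([[[1., 0.]]]): the pixel hidden by layer 7 became transparent in mask 6
```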
/CompositorTools3.py:
--------------------------------------------------------------------------------
1 | class CompositorTools3:
2 | @classmethod
3 | def INPUT_TYPES(cls):
4 | return {
5 | "hidden": {
6 | "node_id": "UNIQUE_ID",
7 | },
8 | }
9 |
10 | RETURN_TYPES = ("BOOLEAN",)
11 | RETURN_NAMES = ("tools",)
12 | FUNCTION = "run"
13 | CATEGORY = "image"
14 |
15 | DESCRIPTION = """
16 | experimental node: frontend communication only with feature flag, needs page reload to fill controls
17 | """
18 |
19 | def run(self, **kwargs):
20 | use_alignment_controls = True
21 | ui = {
22 | "use_alignment_controls": [use_alignment_controls],
23 | }
24 |
25 | return {"ui": ui, "result": (use_alignment_controls,)}
26 |
27 |
--------------------------------------------------------------------------------
/CompositorTransformsOut3.py:
--------------------------------------------------------------------------------
1 | # import folder_paths
2 | # from PIL import Image, ImageOps
3 | # import numpy as np
4 | # import torch
5 | # from comfy_execution.graph import ExecutionBlocker
6 | # import threading
7 | # from server import PromptServer
8 | # from aiohttp import web
9 |
10 | import json
11 |
12 |
13 | class CompositorTransformsOutV3:
14 |
15 | @classmethod
16 | def INPUT_TYPES(cls):
17 | return {
18 | "required": {
19 | "transforms": ("STRING", {"forceInput": True}),
20 | "channel": ("INT", {"min": 1, "max": 8, "default": 1}),
21 | "forceInt": ("BOOLEAN", {"default": True}),
22 |
23 | },
24 | "hidden": {
25 | "extra_pnginfo": "EXTRA_PNGINFO",
26 | "node_id": "UNIQUE_ID",
27 | },
28 | }
29 |
30 | RETURN_TYPES = ("INT", "INT", "INT", "INT", "INT", "INT", "INT", "INT", "INT")
31 | RETURN_NAMES = ("x", "y", "width", "height", "angle", "bbox x", "bbox y", "bbox width", "bbox height")
32 |
33 | FUNCTION = "run"
34 | CATEGORY = "image"
35 |
36 | def run(self, **kwargs):
37 | node_id = kwargs.pop('node_id', None)
38 | channel = kwargs.pop('channel', 1)
39 | transforms = kwargs.pop('transforms', {})
40 | forceInt = kwargs.pop('forceInt', {})
41 | # print(transforms)
42 | data = json.loads(transforms)
43 | padding = data["padding"]
44 | # extract transforms
45 | t = data["transforms"]
46 | width = t[channel - 1]["xwidth"] * t[channel - 1]["scaleX"]
47 | height = t[channel - 1]["xheight"] * t[channel - 1]["scaleY"]
48 | # remove the padding as transforms are padding based
49 | x = t[channel - 1]["left"] - padding
50 | y = t[channel - 1]["top"] - padding
51 | #angle
52 | angle = t[channel - 1]["angle"]
53 |
54 | # bounding box out
55 | b = data["bboxes"]
56 | bwidth = b[channel - 1]["xwidth"]
57 | bheight = b[channel - 1]["xheight"]
58 | # remove the padding as transforms are padding based
59 | bx = b[channel - 1]["left"] - padding
60 | by = b[channel - 1]["top"] - padding
61 |
62 | if forceInt:
63 | return (int(x), int(y), int(width), int(height), int(angle), int(bx), int(by), int(bwidth), int(bheight))
64 | else:
65 | return (x, y, width, height, angle, bx, by, bwidth, bheight)
66 |
--------------------------------------------------------------------------------
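A minimal sketch of the transforms JSON this node parses (field names taken from `run()` above; the values are hypothetical):

```python
# Hypothetical single-channel fabricData string fed through CompositorTransformsOutV3.
sample = '''{
  "padding": 100,
  "transforms": [{"left": 150, "top": 120, "xwidth": 512, "xheight": 512,
                  "scaleX": 0.5, "scaleY": 0.5, "angle": 0}],
  "bboxes": [{"left": 150, "top": 120, "xwidth": 256, "xheight": 256}]
}'''
node = CompositorTransformsOutV3()
print(node.run(transforms=sample, channel=1, forceInt=True))
# (50, 20, 256, 256, 0, 50, 20, 256, 256): padding is subtracted from the x/y outputs
```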
/ImageColorSampler.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 | from PIL import Image
4 | import json
5 | import base64
6 | from io import BytesIO
7 | from server import PromptServer
8 | from comfy_execution.graph import ExecutionBlocker
9 |
10 | class ImageColorSampler:
11 | """
12 | This node allows clicking on an input image to sample colors,
13 | creating a color palette from the selected sample points.
14 | """
15 |
16 | @classmethod
17 | def INPUT_TYPES(cls):
18 | return {
19 | "required": {
20 | "image": ("IMAGE",),
21 | "sample_points": ("STRING", {"default": "[]", "multiline": True}),
22 | "palette_size": ("INT", {"default": 128, "min": 32, "max": 512}),
23 | "sample_size": ("INT", {"default": 1, "min": 1, "max": 30}),
24 | "wait_for_input": ("BOOLEAN", {"default": True})
25 | },
26 | "hidden": {
27 | "node_id": "UNIQUE_ID",
28 | },
29 | }
30 |
31 | RETURN_TYPES = ("IMAGE", "STRING", "STRING", "IMAGE", "INT", "INT", "STRING")
32 | RETURN_NAMES = ("palette", "sampled_colors", "hex_codes", "swatches", "rgb_24bit", "rgb_565", "rgb_values")
33 | FUNCTION = "create_palette"
34 | CATEGORY = "image/color"
35 |
36 | # Enable dynamic outputs for individual colors
37 | OUTPUT_NODE = True
38 |
39 | # Enable list output for swatches, hex_codes, rgb_24bit, rgb_565, and rgb_values
40 | OUTPUT_IS_LIST = [False, False, True, True, True, True, True]
41 |
42 | # Track which nodes are waiting for user input
43 | waiting_nodes = set()
44 |
45 | def tensor_to_base64_image(self, tensor):
46 | """Convert a torch tensor to a base64 encoded image string"""
47 | # Convert tensor to numpy and then to PIL image
48 | img_np = tensor.cpu().numpy().squeeze(0)
49 | img_np = (img_np * 255).astype(np.uint8)
50 | img_pil = Image.fromarray(img_np)
51 |
52 | # Save to a bytes buffer and convert to base64
53 | buffered = BytesIO()
54 | img_pil.save(buffered, format="PNG")
55 | img_str = base64.b64encode(buffered.getvalue()).decode('utf-8')
56 | return f"data:image/png;base64,{img_str}"
57 |
58 | def rgb_to_16bit(self, r, g, b, format='RGB565'):
59 | """
60 | Convert RGB values (0-255) to 16-bit color value in RGB565 format
61 |
62 | Args:
63 | r, g, b: 8-bit color values (0-255)
64 | format: Currently only 'RGB565' is supported
65 |
66 | Returns:
67 | 16-bit color value
68 | """
69 | # Convert to RGB565 format (5 bits R, 6 bits G, 5 bits B)
70 | r5 = int(r * 31 / 255)
71 | g6 = int(g * 63 / 255)
72 | b5 = int(b * 31 / 255)
73 | return (r5 << 11) | (g6 << 5) | b5
74 |
75 | def rgb_to_24bit(self, r, g, b):
76 | """
77 | Convert RGB values (0-255) to 24-bit color value (0-16777215)
78 | This is compatible with ComfyUI's emptyImage node.
79 |
80 | Args:
81 | r, g, b: 8-bit color values (0-255)
82 |
83 | Returns:
84 | 24-bit color value
85 | """
86 | return (r << 16) | (g << 8) | b
87 |
88 |
89 | def create_palette(self, image, sample_points, palette_size=128, sample_size=5, wait_for_input=True, node_id=None):
90 | """
91 | Creates a color palette from the sampled points on the image.
92 |
93 | Args:
94 | image: Input image tensor
95 | sample_points: JSON string of sample points coordinates and colors
96 | palette_size: Size of the palette image (height in pixels)
97 | sample_size: Size of sample area (radius) for color averaging
98 | wait_for_input: Whether to block execution waiting for user input
99 | node_id: Unique ID of this node instance
100 |
101 | Returns:
102 | Tuple of (palette_image, sampled_colors_json, hex_codes_list, swatches_list, rgb_24bit_list, rgb_565_list, rgb_values_list)
103 | """
104 | # Parse the sample points
105 | try:
106 | points = json.loads(sample_points)
107 | except json.JSONDecodeError:
108 | points = []
109 |
110 | # Check if this is the initial call or a resumption after user input
111 | is_initial_call = node_id not in self.waiting_nodes
112 |
113 | # For initial call, send image data to the UI for interactive editing
114 | if (is_initial_call and wait_for_input):
115 | # Convert image tensor to a base64 string for sending to UI
116 | img_base64 = self.tensor_to_base64_image(image)
117 |
118 | # Send image and current points to the UI
119 | ui_data = {
120 | "image": img_base64,
121 | "sample_points": points,
122 | "sample_size": sample_size,
123 | "node_id": node_id
124 | }
125 |
126 | # Send message to UI to display the image for interaction
127 | PromptServer.instance.send_sync("image_sampler_init", {"node": node_id, "data": ui_data})
128 |
129 | # Add to waiting nodes and block execution
130 | self.waiting_nodes.add(node_id)
131 |
132 | # Return ExecutionBlocker for all outputs
133 | return (ExecutionBlocker(None), ExecutionBlocker(None), ExecutionBlocker(None), ExecutionBlocker(None), ExecutionBlocker(None), ExecutionBlocker(None), ExecutionBlocker(None))
134 |
135 | # Remove from waiting list if resuming
136 | if node_id in self.waiting_nodes:
137 | self.waiting_nodes.remove(node_id)
138 |
139 | # Convert image tensor to numpy array
140 | img_np = image.cpu().numpy().squeeze(0)
141 |
142 | # Image dimensions
143 | height, width, _ = img_np.shape
144 |
145 | # If no points, return empty palette
146 | if not points:
147 | # Create empty palette
148 | palette_img = np.zeros((palette_size, palette_size, 3), dtype=np.float32)
149 | palette_tensor = torch.from_numpy(palette_img)[None, ]
150 | empty_swatch = torch.from_numpy(np.zeros((palette_size, palette_size, 3), dtype=np.float32))[None, ]
151 | return (palette_tensor, "[]", [], [empty_swatch], [], [], [])
152 |
153 | # Calculate colors for each sample point
154 | sampled_colors = []
155 | hex_codes = []
156 | swatches = []
157 | rgb_24bit = []
158 | rgb_565 = []
159 | rgb_values = []
160 |
161 | for point in points:
162 | x = int(point["x"] * width)
163 | y = int(point["y"] * height)
164 |
165 | # Use exact color from JavaScript when sample_size is 1, otherwise do averaging
166 | if sample_size == 1 and "color" in point and isinstance(point["color"], str) and point["color"].startswith("#"):
167 | # Use the hex color directly from JavaScript for exact values
168 | hex_color = point["color"]
169 |
170 | # Parse hex color to RGB
171 | r = int(hex_color[1:3], 16)
172 | g = int(hex_color[3:5], 16)
173 | b = int(hex_color[5:7], 16)
174 | else:
175 | # Use averaging for larger sample sizes or when color isn't specified
176 | # Ensure coordinates are within bounds
177 | x = max(sample_size, min(width - sample_size - 1, x))
178 | y = max(sample_size, min(height - sample_size - 1, y))
179 |
180 | # Sample area - take average color in the sample radius
181 | sample_area = img_np[y-sample_size:y+sample_size+1, x-sample_size:x+sample_size+1]
182 | avg_color = np.mean(sample_area, axis=(0, 1))
183 |
184 | # Convert to 8-bit RGB
185 | r, g, b = [int(c * 255) for c in avg_color]
186 |
187 | # Create hex code
188 | hex_color = f"#{r:02X}{g:02X}{b:02X}"
189 |
190 | hex_codes.append(hex_color)
191 |
192 | # Add to colors list with position info
193 | sampled_colors.append({
194 | "position": {"x": point["x"], "y": point["y"]},
195 | "color": {"r": r, "g": g, "b": b},
196 | "hex": hex_color
197 | })
198 |
199 | # Create a swatch image for this color
200 | swatch_img = np.zeros((palette_size, palette_size, 3), dtype=np.float32)
201 | swatch_img[:, :, 0] = r / 255.0
202 | swatch_img[:, :, 1] = g / 255.0
203 | swatch_img[:, :, 2] = b / 255.0
204 | swatch_tensor = torch.from_numpy(swatch_img)[None, ]
205 | swatches.append(swatch_tensor)
206 |
207 | # Add 24-bit RGB value to list using the dedicated method
208 | rgb_24bit.append(self.rgb_to_24bit(r, g, b))
209 |
210 | # Add 16-bit RGB565 value to list
211 | rgb_565.append(self.rgb_to_16bit(r, g, b, 'RGB565'))
212 |
213 | # Add RGB values to list
214 | rgb_values.append(f"({r}, {g}, {b})")
215 |
216 | # Create palette image
217 | num_colors = len(sampled_colors)
218 | if num_colors == 0:
219 | # Create empty palette
220 | palette_img = np.zeros((palette_size, palette_size, 3), dtype=np.float32)
221 | palette_tensor = torch.from_numpy(palette_img)[None, ]
222 | empty_swatch = torch.from_numpy(np.zeros((palette_size, palette_size, 3), dtype=np.float32))[None, ]
223 | return (palette_tensor, "[]", [], [empty_swatch], [], [], [])
224 |
225 | # Create palette image - a horizontal strip of colors
226 | stripe_height = palette_size
227 | stripe_width = palette_size // num_colors if num_colors > 0 else palette_size
228 |
229 | palette_img = np.zeros((stripe_height, palette_size, 3), dtype=np.float32)
230 |
231 | for i, color_data in enumerate(sampled_colors):
232 | color = color_data["color"]
233 | start_x = i * stripe_width
234 | end_x = (i + 1) * stripe_width if i < num_colors - 1 else palette_size
235 |
236 | # Fill the stripe with the color
237 | palette_img[:, start_x:end_x, 0] = color["r"] / 255.0
238 | palette_img[:, start_x:end_x, 1] = color["g"] / 255.0
239 | palette_img[:, start_x:end_x, 2] = color["b"] / 255.0
240 |
241 | # Convert to tensor
242 | palette_tensor = torch.from_numpy(palette_img)[None, ]
243 |
244 | # Return outputs based on format preference
245 | json_colors = json.dumps(sampled_colors)
246 |
247 | # Prepare dynamic outputs (individual hex codes)
248 | self.output_colors = hex_codes
249 |
250 | # Return all outputs, including the swatches list, 24-bit RGB values, RGB565 values, and RGB values
251 | return (palette_tensor, json_colors, hex_codes, swatches, rgb_24bit, rgb_565, rgb_values)
252 |
253 | # Method to provide dynamic outputs for individual colors
254 | def get_output_for_node_type(self, node):
255 | outputs = {"ui": {"text": ""}} # Default empty output
256 |
257 | # Check if we have output_colors
258 | if hasattr(self, "output_colors") and self.output_colors:
259 | # Create dynamic outputs for each color
260 | for i, hex_code in enumerate(self.output_colors):
261 | outputs[f"color_{i+1}"] = ("STRING", {"color": hex_code})
262 |
263 | # Add UI description showing how many colors are available
264 | outputs["ui"]["text"] = f"{len(self.output_colors)} colors sampled"
265 | else:
266 | outputs["ui"]["text"] = "No colors sampled yet"
267 |
268 | return outputs
269 |
270 | # Add routes for handling the continuation of workflow
271 | @PromptServer.instance.routes.post("/image_sampler/continue")
272 | async def image_sampler_continue(request):
273 | """Handle when user is done selecting color samples and wants to continue"""
274 | data = await request.json()
275 | node_id = data.get("node_id")
276 | sample_points = data.get("sample_points", "[]")
277 |
278 | # Update the sample_points widget value to continue with new points
279 | PromptServer.instance.send_sync("image_sampler_update", {
280 | "node": node_id,
281 | "widget_name": "sample_points",
282 | "value": json.dumps(sample_points)
283 | })
284 |
285 | from aiohttp import web  # an aiohttp response is required; a bare dict is not a valid handler return
286 | return web.json_response({"status": "success"})
--------------------------------------------------------------------------------
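The `/image_sampler/continue` route above can be driven directly; a hypothetical client call, assuming ComfyUI's default address of 127.0.0.1:8188:

```python
# Hypothetical client resuming a paused sampler node via the route defined above.
import json
import urllib.request

req = urllib.request.Request(
    "http://127.0.0.1:8188/image_sampler/continue",
    data=json.dumps({"node_id": "12", "sample_points": "[]"}).encode(),
    headers={"Content-Type": "application/json"},
)
urllib.request.urlopen(req)
```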
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 erosDiffusion
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## Create complex compositions the FAST and EASY way
2 |
3 | 
4 |
5 | How many times do you need to create something like this?
6 | 
7 |
8 | Well, this node was created to make the composition process quick and easy!
9 |
10 | ## The Compositor Node
11 | With the Compositor Node you can:
12 | - Pass up to 8 images and visually place, rotate and scale them to build the perfect composition.
13 | - Group-move and group-rescale the images. Choose your transform origin (a corner, or the center by holding Ctrl).
14 | - Remember the position, scaling values and z-index across generations, and easily swap images.
15 | - Use the buffer zone to park an asset you don't want to use, or to easily reach the transformation controls.
16 | - Clearly see the exported area through a green overlay.
17 | - Easily re-frame your shot via multiple-selection scaling, resizing and re-positioning.
18 | - Flip an image via negative scaling (drag a corner towards and past the inside of the image).
19 | - Mask your images quickly.
20 | - Output masks for each layer.
21 | - Precisely move selections with the keyboard.
22 | - Use the information about transforms in other nodes (like conditioning set area).
23 | - Use the color picker supporting node to select colors from any part of the browser.
24 |
25 |
26 |
27 | ## Changelog
28 | - head over to the [wiki](https://github.com/erosDiffusion/ComfyUI-enricos-nodes/wiki) tab for more workflows and information!
29 | - v **3.1.5** - 04.05.2025
30 | - _new feature_: **Mask outputs!** You asked for it, so there you go: a new node that outputs the layers and their masks! Search for the compositor masks output node and connect it to the new layer_outputs output socket. Note: mask stacking (subtracting top masks from the ones below) will come later.
31 | - _new node_: a new **color picker node** (specify RGB values or use the eyedropper, and connect the 24-bit output to color inputs).
32 | - _bugfix_: fixed a regression where the composition overlay was not superimposed on the composition while moving objects, and z-stacking was not preserved until save.
33 | - _sample workflow_: [a sample workflow with assets can be found in the /assets/workflows folder](/assets/workflows/compositorMasksV3_sample.json)
34 | - v **3.1.3 & 3.1.4** - fix a regression due to a Comfy change; not sure what else is broken, but ... re-instance the node. It may be that reloading a flow no longer preserves the configuration.
35 | - v **3.1.2** - merge pr for comfyui official registry
36 | - v **3.1.1** - 23.03.2025 fixed the GUI not showing up in the Comfy frontend on Comfy 1.1.18+
37 | - v **3.1.0** - 20.09.2024
38 | - _new configuration feature_: **onConfigChange action toggle**: when you change the configuration (or any of the attached nodes) you can now choose if:
39 | - you want to **stop** the flow to allow edits
40 | - or you want to **grab a capture and continue** the flow
41 | $${\color{red}Important}$$! This option stops, uploads the composition and re-enqueues your prompt (**there is no real pause**), so be careful with random seeds on the left of the node or it will loop!
42 |
43 | - _new configuration feature_: **normalize height** when this is activated your images will all be the same height of the canvas (this can lower image quality)
44 | - _transforms enhancement_: output transforms now give you back the angle and **bounding box coordinates**
45 | - _transforms enhancement_: you can **force transform outputput values to be integers** (as some nodes requires it)
46 |
47 | - _new feature_: (experimental and limited): **Tools** ! this is an experimental feature. it allows controlling some aspects of the compositor.
48 | - **precise selection toggle** ignores transparent pixels and selects the first image below the mouse
49 | - **center selected** puts the selected images in the center of canvas
50 | - **reset transforms** zeroes out the changes to images bringing them to their original size, angle and 0,0 location (top left)
51 | - limitations: as saving happens on mouse out and discards the selection,
52 | you might need to re-select to use centering and reset
53 |
54 | 
55 |
56 | ## Previous versions changelog
57 |
58 |
59 | <details>
60 | <summary>click here to expand the changelog...</summary>
61 |
62 | - v **3.0.8** - 18.09.2024
63 | - _new feature_: **invert mask** option. The mask implementation was not correct; it's now possible to invert the mask via a toggle.
64 | - _new feature_: **angle output** the angle of rotation is now accessible in the output (and soon the bounding box x, y, width and height).
65 | - _bugfix_: **fix cut images on swap due to wrongly preserved width and height**
66 | - _new feature_: **added force int** to allow the outputs to be used with set area conditioning (which requires int)
67 | - v **3.0.4** - 18.09.2024 - **bugfix**: the width and height stored in transforms were swapped and the output node would report them incorrectly. Thanks @sky958958 for spotting it
68 | - v **3.0.2** - 17.09.2024 - **friendly transforms** for area prompting!
69 | With the goal of being able to do regional area prompting,
70 | now you can easily output each input's x,y coordinates and its scaled width and height with the help of the new **Transform Output** node!
71 | Select the channel corresponding to the input and the node will output the values for you.
72 | - _enhancement_: a **new node** outputs the x, y, width and height of the input images; a convenient node to be attached to the transforms output
73 | - _enhancement_: save and restore skew from transform (now you can distort your images to help fake perspective)
74 | - v **3.0.0** - 16.09.2024 - this release is a full rewrite of the code. It fixes:
75 | - issues #45, #34, #18
76 | and adds **new features**:
77 | - _enhancement_: **simplified control panel** (capture on queue, save transform and pause are removed as they are not needed anymore)
78 | - _new feature_: **automatic upload** of the output **on mouse out** of the canvas area (no need to click capture)
79 | - _new feature_: **flash on save** (once the image is uploaded the composition area green border briefly flashes in orange)
80 | - _new feature_: **preliminary work for optional control panels** (they will contain alignment controls, and other tools)
81 | - _enhancement_: enqueue with **continue**: on the first run, if necessary information is missing (like the output) the flow will stop; make your composition, then click continue to re-enqueue once the flash finishes.
82 | - v **2.0.4** - 06.09.2024 - _enhancement_: You can now **scale the selected image via mouse wheel**!
83 | - v **2.0.1** - 05.09.2024 - **V2 is HERE!**
84 | - _enhancement_: An all **new widget layout** with maximized working area and less clutter
85 | - _new feature_: A **new companion configuration widget** to allow more control and easier maintenance
86 | - _enhancement_: More control! It's now possible to select an image or group and then "**alt+drag**" to **scale and rotate around the center**
87 | - _new feature_: More control! It's now possible to **nudge a selection** by one pixel using the keyboard arrows, and while holding shift the movement is 10px! Pixel-perfect alignments!
88 | - _new feature_: the node now **remembers the transforms** you have applied; on the next run it will re-apply the stored transforms (storing transforms is controlled in the config)
89 | - _new feature_: **masks are here**! You can now pass masks, and they will be applied automatically! (depending on the results you might still want to invert them)
90 | - _regression_: a bit annoying, but is_changed is not being observed, so flows are re-triggered even on fixed seeds
91 | - _regression_: the image in the saved workflow is not visible anymore
92 | - V **1.0.9** - 30.08.2024 - Huge refactoring!
93 | - _new feature_: **multiple instances** are now possible
94 | - _bugfix_: **zooming out does not hide the compositor images anymore**
95 | - _bugfix_: when **saving a png with the workflow** the **compositor content is now visible** (will not be restored...yet)
96 | - _enhancement_: the node **does not re-trigger** the execution of the flow if the image is not changed
97 | - _performance_: the node is **now more efficient** and correctly implements the is_changed check via **checksum**, avoiding re-triggering flows downstream if the composition has not changed
98 | - _maintainability_: the node is now refactored and better engineered, with a lot of comments. It could be a good use case for those learning to code Comfy extensions.
99 | - V **1.0.8** - 28.08.2024 - _new feature_: **safe area indication** - a green border is overlaid on top of the composition to indicate the exported area
100 | - V **1.0.7** - 28.08.2024 - _new feature_: **preserve stacking order**. When selecting a node, its z-order is preserved, image1 being the background/farthest and image8 the foreground/closest.
101 | - the first connected node will be the most distant from camera (background)
102 | - the last will be the closest to camera (subject/foreground)
103 | - V **1.0.4** - 27.08.2024 - _new feature_: now it's possible to **pause the flow** with a switch to avoid processing an unfinished composition
104 |
105 | </details>
106 |
107 |
108 | ## Setup
109 |
110 | **Method 1: git clone**
111 | open the custom nodes directory in a terminal and run:
112 |
113 | `git clone https://github.com/erosDiffusion/ComfyUI-enricos-nodes.git`
114 |
115 | like all other custom nodes (that are not integrated with the manager)
116 |
117 | **Method 2: ComfyUI Manager**
118 | In ComfyUI Manager search for "Compositor", select the node from erosDiffusion, and press install.
119 |
120 | **Method 3: via manager's button**
121 | open ComfyUI Manager, click on **Install via Git URL** and paste this URL
122 |
123 | `https://github.com/erosDiffusion/ComfyUI-enricos-nodes.git`
124 |
125 | if you get "This action is not allowed with this security level configuration", then check your manager config.ini
126 | as discussed [here](https://github.com/ltdrdata/ComfyUI-Manager?tab=readme-ov-file#security-policy)
127 | and set the security to weak (at your own risk)
128 |
129 | 
130 |
131 |
132 | ## Reasons and How To use
133 | ### Why this node?
134 |
135 | - I wanted to learn how to create custom nodes with a GUI in ComfyUI
136 | - be able to visually composite images in ComfyUI
137 | - be able to have image inputs that are generated on the fly in the composition
138 | - be able to remember sizing and position across usages/generations
139 | - have more room to manipulate objects around/outside the generated image
140 |
141 | ### Alternatives?
142 |
143 | - the painter node is great, does a million more things, and works well, but it misses some of these features.
144 | - continue compositing your images like a caveman, using pixel coordinates
145 | - well... Photoshop, **if you have it**, and import via a million clicks or with a plugin
146 | - finally, use **Krita**, which is good, powerful and free
147 | - oh, and Blender also has a great plugin, **but you need to know/learn Blender**
148 |
149 | ### How to use
150 |
151 | **Method 1**:
152 |
153 | - search "compositor" (v3) in the dropdown, connect with config (V3) by dragging from the node config slot.
154 | - configure width, height and padding around the node (it's used to be able to move beyond the generated image); the node will resize when you run
155 | - connect the inputs (suggested setup is to always have a fixed size via resize and rembg where needed)
156 | - important: connect the output (save image, preview image,...)
157 | - run once to get the inputs in the compositor (the flow will stop if there is no output)
158 | - **create your composition** (see below)
159 | - mouse out the composition area (green border flashes to orange as the image uploads)
160 | - click continue to enqueue again (or enqueue)
161 | - use the output! (a suggestion is to feed it to a depth anything v2 node and use it in a depth controlnet to guide your image)
162 |
163 | **Create your composition details:**
164 |
165 | - put your images in the dark gray area
166 | - you can connect any flow (generation with fixed, static rgba, full rgb)
167 | - anything in the dark gray area is rendered
168 | - use up to 8 images, and optionally pass masks (see the inversion sketch after this list)
169 | - the background goes in the first slot (image1, the topmost input)
170 | - in v1.0.9 and later the z-index is fixed; to change it, reconnect an input or move stuff around.
171 | this should make depth stacking simpler to handle
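
If a mask comes in with the opposite polarity to what you expect (see the **invert mask** toggle in the changelog), you can also flip it upstream; a minimal sketch in torch, assuming the usual ComfyUI convention of float masks in [0, 1]:

```python
import torch

def invert_mask(mask: torch.Tensor) -> torch.Tensor:
    # Flip the polarity of a float mask in [0, 1].
    return 1.0 - mask
```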
172 |
173 | ### Advanced
174 |
175 | - click to select
176 | - drag (from a clear area) to select multiple
177 | - use controls to rotate and scale
178 | - drag selected to move (can also rescale the group)
179 | - shift click to select multiple
180 | - shift click to unselect selected in a group select
181 | - if you choose to stop on config change, hit continue to re-enqueue; capture happens on mouse out from the composition
182 | - if you choose grabAndContinue, the capture will be automatic
183 | - scroll up or down to scale a single image selection
184 |
185 | ### Supporting nodes I use with this one
186 | - **Rembg(batch)** -> from https://github.com/Mamaaaamooooo/batchImg-rembg-ComfyUI-nodes.git -> extracts the subject and returns a rgba image
187 | - any other technique to create masks (grounding dino, sam, florence2...)
188 | - any **controlnet depth for your model** - works well with the depth anything v2 preprocessor for both 1.5 (regular controlnet) and xl (via union controlnet), or lineart (like anylineart); for flux you can try the x-labs controlnet (but it does not work well for me)
189 |
190 |
191 | ## Demo Workflow for v3.1
192 |
193 | Just throw the worst possible images you find on the internet or that you can generate...
194 | ...scale and align quickly, feed a depth controlnet, describe the full scene and style, render...
195 | and you will get:
196 |
197 | 
198 | with the [V3.1 workflow in json format](assets%2Fv3.1.json) you are in pixel-perfect positioning control of your scene and content!
199 | Images to replicate are in the assets folder.
200 |
201 | ### Final words and limitations
202 |
203 | - **limitation** you need to run the flow once for the compositor to show images
204 | - **limitation** be careful with random values on the left of the node: the node stops the execution on config change to be able to grab a capture and then re-enqueues the flow. If the cache is invalidated you will not be able to go next; see https://github.com/erosDiffusion/ComfyUI-enricos-nodes/issues/63
205 | When I tried implementing a threading pause it was not reliable, so I resorted to stop / restart. Another option would be a while loop... but that doesn't feel right.
206 | - **tools** new tools only show up on load, so if you add them, reload the page with a browser reload
207 | - **known issue**: the compositing canvas is not scaled, so if you want a 5k image, well... I hope you have a big enough monitor, but that's not (yet) the goal of this node...
208 |
209 | **Now go put a fairy in a forest!**
210 |
211 | yours, ErosDiffusion 💜
212 |
213 |
214 | 
215 |
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
1 | # author: erosdiffusionai@gmail.com
2 | from .Compositor3 import Compositor3
3 | from .CompositorConfig3 import CompositorConfig3
4 | from .CompositorTools3 import CompositorTools3
5 | from .CompositorTransformsOut3 import CompositorTransformsOutV3
6 | from .CompositorMasksOutputV3 import CompositorMasksOutputV3
7 | from .CompositorColorPicker import CompositorColorPicker
8 | from .ImageColorSampler import ImageColorSampler
9 |
10 | NODE_CLASS_MAPPINGS = {
11 | "Compositor3": Compositor3,
12 | "CompositorConfig3": CompositorConfig3,
13 | "CompositorTools3": CompositorTools3,
14 | "CompositorTransformsOutV3": CompositorTransformsOutV3,
15 | "CompositorMasksOutputV3": CompositorMasksOutputV3,
16 | "CompositorColorPicker": CompositorColorPicker,
17 | "ImageColorSampler": ImageColorSampler,
18 | }
19 |
20 | NODE_DISPLAY_NAME_MAPPINGS = {
21 | "Compositor3": "💜 Compositor (V3)",
22 | "CompositorConfig3": "💜 Compositor Config (V3)",
23 | "CompositorTools3": "💜 Compositor Tools (V3) Experimental",
24 | "CompositorTransformsOutV3": "💜 Compositor Transforms Output (V3)",
25 | "CompositorMasksOutputV3": "💜 Compositor Masks Output (V3)",
26 | "CompositorColorPicker": "💜 Compositor Color Picker",
27 | "ImageColorSampler": "💜 Image Color Sampler",
28 | }
29 |
30 | EXTENSION_NAME = "Enrico"
31 |
32 | WEB_DIRECTORY = "./web"
33 |
34 | # Export the node mappings and web directory so ComfyUI picks them up
35 | __all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS", "WEB_DIRECTORY"]
36 |
--------------------------------------------------------------------------------
/assets/bear.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erosDiffusion/ComfyUI-enricos-nodes/0ab56bd7611bf8153794a8838a195bdf83c8e213/assets/bear.jpg
--------------------------------------------------------------------------------
/assets/forest.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erosDiffusion/ComfyUI-enricos-nodes/0ab56bd7611bf8153794a8838a195bdf83c8e213/assets/forest.jpg
--------------------------------------------------------------------------------
/assets/gallerySamples.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erosDiffusion/ComfyUI-enricos-nodes/0ab56bd7611bf8153794a8838a195bdf83c8e213/assets/gallerySamples.jpg
--------------------------------------------------------------------------------
/assets/output examples/ComfyUI_00286_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erosDiffusion/ComfyUI-enricos-nodes/0ab56bd7611bf8153794a8838a195bdf83c8e213/assets/output examples/ComfyUI_00286_.png
--------------------------------------------------------------------------------
/assets/output examples/ComfyUI_00356_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erosDiffusion/ComfyUI-enricos-nodes/0ab56bd7611bf8153794a8838a195bdf83c8e213/assets/output examples/ComfyUI_00356_.png
--------------------------------------------------------------------------------
/assets/output examples/ComfyUI_00360_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erosDiffusion/ComfyUI-enricos-nodes/0ab56bd7611bf8153794a8838a195bdf83c8e213/assets/output examples/ComfyUI_00360_.png
--------------------------------------------------------------------------------
/assets/output examples/ComfyUI_00369_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erosDiffusion/ComfyUI-enricos-nodes/0ab56bd7611bf8153794a8838a195bdf83c8e213/assets/output examples/ComfyUI_00369_.png
--------------------------------------------------------------------------------
/assets/output examples/ComfyUI_00370_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erosDiffusion/ComfyUI-enricos-nodes/0ab56bd7611bf8153794a8838a195bdf83c8e213/assets/output examples/ComfyUI_00370_.png
--------------------------------------------------------------------------------
/assets/output examples/ComfyUI_00392_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erosDiffusion/ComfyUI-enricos-nodes/0ab56bd7611bf8153794a8838a195bdf83c8e213/assets/output examples/ComfyUI_00392_.png
--------------------------------------------------------------------------------
/assets/showreel1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erosDiffusion/ComfyUI-enricos-nodes/0ab56bd7611bf8153794a8838a195bdf83c8e213/assets/showreel1.jpg
--------------------------------------------------------------------------------
/assets/showreel1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erosDiffusion/ComfyUI-enricos-nodes/0ab56bd7611bf8153794a8838a195bdf83c8e213/assets/showreel1.png
--------------------------------------------------------------------------------
/assets/v3.0.2.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erosDiffusion/ComfyUI-enricos-nodes/0ab56bd7611bf8153794a8838a195bdf83c8e213/assets/v3.0.2.PNG
--------------------------------------------------------------------------------
/assets/v3.1.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erosDiffusion/ComfyUI-enricos-nodes/0ab56bd7611bf8153794a8838a195bdf83c8e213/assets/v3.1.PNG
--------------------------------------------------------------------------------
/assets/v3.1.json:
--------------------------------------------------------------------------------
1 | {
2 | "last_node_id": 352,
3 | "last_link_id": 823,
4 | "nodes": [
5 | {
6 | "id": 254,
7 | "type": "LoadImage",
8 | "pos": {
9 | "0": -50,
10 | "1": -850
11 | },
12 | "size": {
13 | "0": 315,
14 | "1": 314
15 | },
16 | "flags": {},
17 | "order": 0,
18 | "mode": 0,
19 | "inputs": [],
20 | "outputs": [
21 | {
22 | "name": "IMAGE",
23 | "type": "IMAGE",
24 | "links": [
25 | 819
26 | ],
27 | "slot_index": 0,
28 | "shape": 3
29 | },
30 | {
31 | "name": "MASK",
32 | "type": "MASK",
33 | "links": null,
34 | "shape": 3
35 | }
36 | ],
37 | "properties": {
38 | "Node name for S&R": "LoadImage"
39 | },
40 | "widgets_values": [
41 | "warrior.jpg",
42 | "image"
43 | ]
44 | },
45 | {
46 | "id": 350,
47 | "type": "LoadImage",
48 | "pos": {
49 | "0": -50,
50 | "1": -1200
51 | },
52 | "size": {
53 | "0": 315,
54 | "1": 314
55 | },
56 | "flags": {},
57 | "order": 1,
58 | "mode": 0,
59 | "inputs": [],
60 | "outputs": [
61 | {
62 | "name": "IMAGE",
63 | "type": "IMAGE",
64 | "links": [
65 | 821
66 | ],
67 | "slot_index": 0,
68 | "shape": 3
69 | },
70 | {
71 | "name": "MASK",
72 | "type": "MASK",
73 | "links": null,
74 | "shape": 3
75 | }
76 | ],
77 | "properties": {
78 | "Node name for S&R": "LoadImage"
79 | },
80 | "widgets_values": [
81 | "forest.jpg",
82 | "image"
83 | ]
84 | },
85 | {
86 | "id": 351,
87 | "type": "Image Remove Background (rembg)",
88 | "pos": {
89 | "0": -50,
90 | "1": -450
91 | },
92 | "size": [
93 | 260.3999938964844,
94 | 28.730128519557297
95 | ],
96 | "flags": {},
97 | "order": 7,
98 | "mode": 0,
99 | "inputs": [
100 | {
101 | "name": "image",
102 | "type": "IMAGE",
103 | "link": 819
104 | }
105 | ],
106 | "outputs": [
107 | {
108 | "name": "IMAGE",
109 | "type": "IMAGE",
110 | "links": [
111 | 820
112 | ],
113 | "shape": 3,
114 | "slot_index": 0
115 | }
116 | ],
117 | "properties": {
118 | "Node name for S&R": "Image Remove Background (rembg)"
119 | }
120 | },
121 | {
122 | "id": 120,
123 | "type": "CheckpointLoaderSimple",
124 | "pos": {
125 | "0": 300,
126 | "1": -1200
127 | },
128 | "size": {
129 | "0": 315,
130 | "1": 98
131 | },
132 | "flags": {},
133 | "order": 2,
134 | "mode": 0,
135 | "inputs": [],
136 | "outputs": [
137 | {
138 | "name": "MODEL",
139 | "type": "MODEL",
140 | "links": [
141 | 822
142 | ],
143 | "slot_index": 0
144 | },
145 | {
146 | "name": "CLIP",
147 | "type": "CLIP",
148 | "links": [
149 | 224,
150 | 225
151 | ],
152 | "slot_index": 1
153 | },
154 | {
155 | "name": "VAE",
156 | "type": "VAE",
157 | "links": [
158 | 227,
159 | 230
160 | ],
161 | "slot_index": 2
162 | }
163 | ],
164 | "properties": {
165 | "Node name for S&R": "CheckpointLoaderSimple"
166 | },
167 | "widgets_values": [
168 | "dreamshaper_8.safetensors"
169 | ]
170 | },
171 | {
172 | "id": 122,
173 | "type": "CLIPTextEncode",
174 | "pos": {
175 | "0": 700,
176 | "1": -1200
177 | },
178 | "size": {
179 | "0": 422.84503173828125,
180 | "1": 164.31304931640625
181 | },
182 | "flags": {},
183 | "order": 9,
184 | "mode": 0,
185 | "inputs": [
186 | {
187 | "name": "clip",
188 | "type": "CLIP",
189 | "link": 224
190 | }
191 | ],
192 | "outputs": [
193 | {
194 | "name": "CONDITIONING",
195 | "type": "CONDITIONING",
196 | "links": [
197 | 410
198 | ],
199 | "slot_index": 0
200 | }
201 | ],
202 | "properties": {
203 | "Node name for S&R": "CLIPTextEncode"
204 | },
205 | "widgets_values": [
206 | "cinematic photograph of a viking warrior in a forest and a black wolf AND color grading and film grain AND cinematic AND 4K AND HDR"
207 | ],
208 | "color": "#232",
209 | "bgcolor": "#353"
210 | },
211 | {
212 | "id": 345,
213 | "type": "CompositorConfig3",
214 | "pos": {
215 | "0": 300,
216 | "1": -800
217 | },
218 | "size": {
219 | "0": 315,
220 | "1": 502
221 | },
222 | "flags": {},
223 | "order": 12,
224 | "mode": 0,
225 | "inputs": [
226 | {
227 | "name": "image1",
228 | "type": "IMAGE",
229 | "link": 821
230 | },
231 | {
232 | "name": "mask1",
233 | "type": "MASK",
234 | "link": null
235 | },
236 | {
237 | "name": "image2",
238 | "type": "IMAGE",
239 | "link": 820
240 | },
241 | {
242 | "name": "mask2",
243 | "type": "MASK",
244 | "link": null
245 | },
246 | {
247 | "name": "image3",
248 | "type": "IMAGE",
249 | "link": 818
250 | },
251 | {
252 | "name": "mask3",
253 | "type": "MASK",
254 | "link": null
255 | },
256 | {
257 | "name": "image4",
258 | "type": "IMAGE",
259 | "link": null
260 | },
261 | {
262 | "name": "mask4",
263 | "type": "MASK",
264 | "link": null
265 | },
266 | {
267 | "name": "image5",
268 | "type": "IMAGE",
269 | "link": null
270 | },
271 | {
272 | "name": "mask5",
273 | "type": "MASK",
274 | "link": null
275 | },
276 | {
277 | "name": "image6",
278 | "type": "IMAGE",
279 | "link": null
280 | },
281 | {
282 | "name": "mask6",
283 | "type": "MASK",
284 | "link": null
285 | },
286 | {
287 | "name": "image7",
288 | "type": "IMAGE",
289 | "link": null
290 | },
291 | {
292 | "name": "mask7",
293 | "type": "MASK",
294 | "link": null
295 | },
296 | {
297 | "name": "image8",
298 | "type": "IMAGE",
299 | "link": null
300 | },
301 | {
302 | "name": "mask8",
303 | "type": "MASK",
304 | "link": null
305 | }
306 | ],
307 | "outputs": [
308 | {
309 | "name": "config",
310 | "type": "COMPOSITOR_CONFIG",
311 | "links": [
312 | 805
313 | ],
314 | "shape": 3
315 | }
316 | ],
317 | "properties": {
318 | "Node name for S&R": "CompositorConfig3"
319 | },
320 | "widgets_values": [
321 | 1280,
322 | 768,
323 | 110,
324 | true,
325 | false,
326 | false,
327 | 1726855452501
328 | ]
329 | },
330 | {
331 | "id": 286,
332 | "type": "EmptyImage",
333 | "pos": {
334 | "0": 300,
335 | "1": -1050
336 | },
337 | "size": {
338 | "0": 315,
339 | "1": 130
340 | },
341 | "flags": {},
342 | "order": 3,
343 | "mode": 0,
344 | "inputs": [],
345 | "outputs": [
346 | {
347 | "name": "IMAGE",
348 | "type": "IMAGE",
349 | "links": [],
350 | "slot_index": 0,
351 | "shape": 3
352 | }
353 | ],
354 | "properties": {
355 | "Node name for S&R": "EmptyImage"
356 | },
357 | "widgets_values": [
358 | 512,
359 | 512,
360 | 1,
361 | 1680
362 | ]
363 | },
364 | {
365 | "id": 304,
366 | "type": "LoadImage",
367 | "pos": {
368 | "0": -50,
369 | "1": -350
370 | },
371 | "size": {
372 | "0": 315,
373 | "1": 314
374 | },
375 | "flags": {},
376 | "order": 4,
377 | "mode": 0,
378 | "inputs": [],
379 | "outputs": [
380 | {
381 | "name": "IMAGE",
382 | "type": "IMAGE",
383 | "links": [
384 | 816
385 | ],
386 | "slot_index": 0,
387 | "shape": 3
388 | },
389 | {
390 | "name": "MASK",
391 | "type": "MASK",
392 | "links": [],
393 | "slot_index": 1,
394 | "shape": 3
395 | }
396 | ],
397 | "properties": {
398 | "Node name for S&R": "LoadImage"
399 | },
400 | "widgets_values": [
401 | "wolf.jpg",
402 | "image"
403 | ]
404 | },
405 | {
406 | "id": 197,
407 | "type": "AIO_Preprocessor",
408 | "pos": {
409 | "0": 700,
410 | "1": -950
411 | },
412 | "size": [
413 | 428.3140313976878,
414 | 82
415 | ],
416 | "flags": {},
417 | "order": 16,
418 | "mode": 0,
419 | "inputs": [
420 | {
421 | "name": "image",
422 | "type": "IMAGE",
423 | "link": 672
424 | }
425 | ],
426 | "outputs": [
427 | {
428 | "name": "IMAGE",
429 | "type": "IMAGE",
430 | "links": [
431 | 425,
432 | 426
433 | ],
434 | "slot_index": 0,
435 | "shape": 3
436 | }
437 | ],
438 | "properties": {
439 | "Node name for S&R": "AIO_Preprocessor"
440 | },
441 | "widgets_values": [
442 | "DepthAnythingV2Preprocessor",
443 | 512
444 | ]
445 | },
446 | {
447 | "id": 123,
448 | "type": "CLIPTextEncode",
449 | "pos": {
450 | "0": 1150,
451 | "1": -1200
452 | },
453 | "size": [
454 | 417.50327552934823,
455 | 154.69677890005437
456 | ],
457 | "flags": {
458 | "collapsed": false
459 | },
460 | "order": 10,
461 | "mode": 0,
462 | "inputs": [
463 | {
464 | "name": "clip",
465 | "type": "CLIP",
466 | "link": 225
467 | }
468 | ],
469 | "outputs": [
470 | {
471 | "name": "CONDITIONING",
472 | "type": "CONDITIONING",
473 | "links": [
474 | 411
475 | ],
476 | "slot_index": 0
477 | }
478 | ],
479 | "properties": {
480 | "Node name for S&R": "CLIPTextEncode"
481 | },
482 | "widgets_values": [
483 | "lifeless, horror, painting, cgi, illustration, low_quality, blurry, vampire, unrealistic, drawing, text, watermark, bad_quality"
484 | ],
485 | "color": "#322",
486 | "bgcolor": "#533"
487 | },
488 | {
489 | "id": 194,
490 | "type": "ControlNetLoader",
491 | "pos": {
492 | "0": 1150,
493 | "1": -950
494 | },
495 | "size": [
496 | 459.9102911854518,
497 | 58
498 | ],
499 | "flags": {},
500 | "order": 5,
501 | "mode": 0,
502 | "inputs": [],
503 | "outputs": [
504 | {
505 | "name": "CONTROL_NET",
506 | "type": "CONTROL_NET",
507 | "links": [
508 | 423
509 | ],
510 | "slot_index": 0,
511 | "shape": 3
512 | }
513 | ],
514 | "properties": {
515 | "Node name for S&R": "ControlNetLoader"
516 | },
517 | "widgets_values": [
518 | "control_v11f1p_sd15_depth_fp16.safetensors"
519 | ]
520 | },
521 | {
522 | "id": 298,
523 | "type": "ImageScale",
524 | "pos": {
525 | "0": 1600,
526 | "1": -1200
527 | },
528 | "size": {
529 | "0": 315,
530 | "1": 130
531 | },
532 | "flags": {},
533 | "order": 15,
534 | "mode": 4,
535 | "inputs": [
536 | {
537 | "name": "image",
538 | "type": "IMAGE",
539 | "link": 803
540 | }
541 | ],
542 | "outputs": [
543 | {
544 | "name": "IMAGE",
545 | "type": "IMAGE",
546 | "links": [
547 | 672,
548 | 673
549 | ],
550 | "slot_index": 0,
551 | "shape": 3
552 | }
553 | ],
554 | "properties": {
555 | "Node name for S&R": "ImageScale"
556 | },
557 | "widgets_values": [
558 | "nearest-exact",
559 | 1024,
560 | 1536,
561 | "disabled"
562 | ]
563 | },
564 | {
565 | "id": 198,
566 | "type": "PreviewImage",
567 | "pos": {
568 | "0": 1950,
569 | "1": -1200
570 | },
571 | "size": {
572 | "0": 210,
573 | "1": 246
574 | },
575 | "flags": {},
576 | "order": 19,
577 | "mode": 0,
578 | "inputs": [
579 | {
580 | "name": "images",
581 | "type": "IMAGE",
582 | "link": 426
583 | }
584 | ],
585 | "outputs": [],
586 | "properties": {
587 | "Node name for S&R": "PreviewImage"
588 | }
589 | },
590 | {
591 | "id": 126,
592 | "type": "VAEEncode",
593 | "pos": {
594 | "0": 2200,
595 | "1": -1200
596 | },
597 | "size": {
598 | "0": 210,
599 | "1": 46
600 | },
601 | "flags": {},
602 | "order": 17,
603 | "mode": 0,
604 | "inputs": [
605 | {
606 | "name": "pixels",
607 | "type": "IMAGE",
608 | "link": 673
609 | },
610 | {
611 | "name": "vae",
612 | "type": "VAE",
613 | "link": 230
614 | }
615 | ],
616 | "outputs": [
617 | {
618 | "name": "LATENT",
619 | "type": "LATENT",
620 | "links": [
621 | 231
622 | ],
623 | "slot_index": 0,
624 | "shape": 3
625 | }
626 | ],
627 | "properties": {
628 | "Node name for S&R": "VAEEncode"
629 | }
630 | },
631 | {
632 | "id": 124,
633 | "type": "VAEDecode",
634 | "pos": {
635 | "0": 2200,
636 | "1": -1100
637 | },
638 | "size": {
639 | "0": 140,
640 | "1": 46
641 | },
642 | "flags": {},
643 | "order": 21,
644 | "mode": 0,
645 | "inputs": [
646 | {
647 | "name": "samples",
648 | "type": "LATENT",
649 | "link": 226
650 | },
651 | {
652 | "name": "vae",
653 | "type": "VAE",
654 | "link": 227
655 | }
656 | ],
657 | "outputs": [
658 | {
659 | "name": "IMAGE",
660 | "type": "IMAGE",
661 | "links": [
662 | 670
663 | ],
664 | "slot_index": 0
665 | }
666 | ],
667 | "properties": {
668 | "Node name for S&R": "VAEDecode"
669 | }
670 | },
671 | {
672 | "id": 193,
673 | "type": "ControlNetApplyAdvanced",
674 | "pos": {
675 | "0": 2250,
676 | "1": -450
677 | },
678 | "size": [
679 | 240.27142998000954,
680 | 166
681 | ],
682 | "flags": {},
683 | "order": 18,
684 | "mode": 0,
685 | "inputs": [
686 | {
687 | "name": "positive",
688 | "type": "CONDITIONING",
689 | "link": 410
690 | },
691 | {
692 | "name": "negative",
693 | "type": "CONDITIONING",
694 | "link": 411
695 | },
696 | {
697 | "name": "control_net",
698 | "type": "CONTROL_NET",
699 | "link": 423
700 | },
701 | {
702 | "name": "image",
703 | "type": "IMAGE",
704 | "link": 425
705 | }
706 | ],
707 | "outputs": [
708 | {
709 | "name": "positive",
710 | "type": "CONDITIONING",
711 | "links": [
712 | 413
713 | ],
714 | "slot_index": 0,
715 | "shape": 3
716 | },
717 | {
718 | "name": "negative",
719 | "type": "CONDITIONING",
720 | "links": [
721 | 414
722 | ],
723 | "slot_index": 1,
724 | "shape": 3
725 | }
726 | ],
727 | "properties": {
728 | "Node name for S&R": "ControlNetApplyAdvanced"
729 | },
730 | "widgets_values": [
731 | 0.4,
732 | 0,
733 | 0.9
734 | ]
735 | },
736 | {
737 | "id": 344,
738 | "type": "Compositor3",
739 | "pos": {
740 | "0": 700,
741 | "1": -800
742 | },
743 | "size": [
744 | 1521,
745 | 1079
746 | ],
747 | "flags": {},
748 | "order": 13,
749 | "mode": 0,
750 | "inputs": [
751 | {
752 | "name": "config",
753 | "type": "COMPOSITOR_CONFIG",
754 | "link": 805
755 | },
756 | {
757 | "name": "tools",
758 | "type": "BOOLEAN",
759 | "link": 807,
760 | "widget": {
761 | "name": "tools"
762 | }
763 | }
764 | ],
765 | "outputs": [
766 | {
767 | "name": "transforms",
768 | "type": "STRING",
769 | "links": [
770 | 806
771 | ],
772 | "shape": 3
773 | },
774 | {
775 | "name": "image",
776 | "type": "IMAGE",
777 | "links": [
778 | 803
779 | ],
780 | "shape": 3
781 | }
782 | ],
783 | "properties": {
784 | "Node name for S&R": "Compositor3"
785 | },
786 | "widgets_values": [
787 | "{\"width\":1280,\"height\":768,\"padding\":110,\"transforms\":[{\"left\":67.29333589365456,\"top\":117.9468021136131,\"scaleX\":0.9909220644610665,\"scaleY\":0.9909220644610665,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"left\",\"originY\":\"top\",\"xwidth\":1365,\"xheight\":768,\"skewY\":0,\"skewX\":0},{\"left\":246.16651289095125,\"top\":152.3262279478957,\"scaleX\":0.9039852323783351,\"scaleY\":0.9039852323783351,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"left\",\"originY\":\"top\",\"xwidth\":576,\"xheight\":768,\"skewY\":0,\"skewX\":0},{\"left\":667.8645917323958,\"top\":442.16978259209634,\"scaleX\":0.6638480948156427,\"scaleY\":0.6270438236132133,\"angle\":0,\"flipX\":true,\"flipY\":false,\"originX\":\"left\",\"originY\":\"top\",\"xwidth\":1152,\"xheight\":768,\"skewY\":0,\"skewX\":0},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"left\",\"originY\":\"top\",\"xwidth\":0,\"xheight\":0,\"skewY\":0,\"skewX\":0},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"left\",\"originY\":\"top\",\"xwidth\":0,\"xheight\":0,\"skewY\":0,\"skewX\":0},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"left\",\"originY\":\"top\",\"xwidth\":0,\"xheight\":0,\"skewY\":0,\"skewX\":0},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"left\",\"originY\":\"top\",\"xwidth\":0,\"xheight\":0,\"skewY\":0,\"skewX\":0},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"left\",\"originY\":\"top\",\"xwidth\":0,\"xheight\":0,\"skewY\":0,\"skewX\":0}],\"bboxes\":[{\"left\":67.29333589365456,\"top\":117.9468021136131,\"xwidth\":761.028145506099,\"xheight\":1352.6086179893557},{\"left\":246.16651289095125,\"top\":152.3262279478957,\"xwidth\":694.2606584665614,\"xheight\":520.695493849921},{\"left\":667.8645917323958,\"top\":442.16978259209634,\"xwidth\":481.56965653494785,\"xheight\":764.7530052276205},{\"left\":0,\"top\":0,\"xwidth\":0,\"xheight\":0},{\"left\":0,\"top\":0,\"xwidth\":0,\"xheight\":0},{\"left\":0,\"top\":0,\"xwidth\":0,\"xheight\":0},{\"left\":0,\"top\":0,\"xwidth\":0,\"xheight\":0},{\"left\":0,\"top\":0,\"xwidth\":0,\"xheight\":0}]}",
788 | "compositor/1726856927860.png [temp]",
789 | true,
790 | null,
791 | "continue"
792 | ]
793 | },
794 | {
795 | "id": 347,
796 | "type": "CompositorTools3",
797 | "pos": {
798 | "0": 300,
799 | "1": -250
800 | },
801 | "size": {
802 | "0": 310.79998779296875,
803 | "1": 106
804 | },
805 | "flags": {},
806 | "order": 6,
807 | "mode": 0,
808 | "inputs": [],
809 | "outputs": [
810 | {
811 | "name": "tools",
812 | "type": "BOOLEAN",
813 | "links": [
814 | 807
815 | ],
816 | "shape": 3
817 | }
818 | ],
819 | "properties": {
820 | "Node name for S&R": "CompositorTools3"
821 | }
822 | },
823 | {
824 | "id": 349,
825 | "type": "Image Remove Background (rembg)",
826 | "pos": {
827 | "0": -50,
828 | "1": 50
829 | },
830 | "size": [
831 | 260.3999938964844,
832 | 26.64310432639809
833 | ],
834 | "flags": {},
835 | "order": 11,
836 | "mode": 0,
837 | "inputs": [
838 | {
839 | "name": "image",
840 | "type": "IMAGE",
841 | "link": 816
842 | }
843 | ],
844 | "outputs": [
845 | {
846 | "name": "IMAGE",
847 | "type": "IMAGE",
848 | "links": [
849 | 818
850 | ],
851 | "shape": 3,
852 | "slot_index": 0
853 | }
854 | ],
855 | "properties": {
856 | "Node name for S&R": "Image Remove Background (rembg)"
857 | }
858 | },
859 | {
860 | "id": 119,
861 | "type": "KSampler",
862 | "pos": {
863 | "0": 2250,
864 | "1": -800
865 | },
866 | "size": [
867 | 256.93020611648535,
868 | 262
869 | ],
870 | "flags": {},
871 | "order": 20,
872 | "mode": 0,
873 | "inputs": [
874 | {
875 | "name": "model",
876 | "type": "MODEL",
877 | "link": 823
878 | },
879 | {
880 | "name": "positive",
881 | "type": "CONDITIONING",
882 | "link": 413
883 | },
884 | {
885 | "name": "negative",
886 | "type": "CONDITIONING",
887 | "link": 414
888 | },
889 | {
890 | "name": "latent_image",
891 | "type": "LATENT",
892 | "link": 231
893 | }
894 | ],
895 | "outputs": [
896 | {
897 | "name": "LATENT",
898 | "type": "LATENT",
899 | "links": [
900 | 226
901 | ],
902 | "slot_index": 0
903 | }
904 | ],
905 | "properties": {
906 | "Node name for S&R": "KSampler"
907 | },
908 | "widgets_values": [
909 | 977862159117587,
910 | "fixed",
911 | 35,
912 | 4,
913 | "deis",
914 | "beta",
915 | 1
916 | ]
917 | },
918 | {
919 | "id": 352,
920 | "type": "PerturbedAttentionGuidance",
921 | "pos": {
922 | "0": 2250,
923 | "1": -200
924 | },
925 | "size": [
926 | 218.39999389648438,
927 | 58
928 | ],
929 | "flags": {},
930 | "order": 8,
931 | "mode": 0,
932 | "inputs": [
933 | {
934 | "name": "model",
935 | "type": "MODEL",
936 | "link": 822
937 | }
938 | ],
939 | "outputs": [
940 | {
941 | "name": "MODEL",
942 | "type": "MODEL",
943 | "links": [
944 | 823
945 | ],
946 | "shape": 3,
947 | "slot_index": 0
948 | }
949 | ],
950 | "properties": {
951 | "Node name for S&R": "PerturbedAttentionGuidance"
952 | },
953 | "widgets_values": [
954 | 3
955 | ]
956 | },
957 | {
958 | "id": 297,
959 | "type": "PreviewImage",
960 | "pos": {
961 | "0": 2550,
962 | "1": -700
963 | },
964 | "size": [
965 | 927.4722345560549,
966 | 825.2888724063173
967 | ],
968 | "flags": {},
969 | "order": 22,
970 | "mode": 0,
971 | "inputs": [
972 | {
973 | "name": "images",
974 | "type": "IMAGE",
975 | "link": 670
976 | }
977 | ],
978 | "outputs": [],
979 | "properties": {
980 | "Node name for S&R": "PreviewImage"
981 | }
982 | },
983 | {
984 | "id": 346,
985 | "type": "CompositorTransformsOutV3",
986 | "pos": {
987 | "0": 2550,
988 | "1": -1200
989 | },
990 | "size": {
991 | "0": 453.5999755859375,
992 | "1": 266
993 | },
994 | "flags": {},
995 | "order": 14,
996 | "mode": 0,
997 | "inputs": [
998 | {
999 | "name": "transforms",
1000 | "type": "STRING",
1001 | "link": 806,
1002 | "widget": {
1003 | "name": "transforms"
1004 | }
1005 | }
1006 | ],
1007 | "outputs": [
1008 | {
1009 | "name": "x",
1010 | "type": "INT",
1011 | "links": null,
1012 | "shape": 3
1013 | },
1014 | {
1015 | "name": "y",
1016 | "type": "INT",
1017 | "links": null,
1018 | "shape": 3
1019 | },
1020 | {
1021 | "name": "width",
1022 | "type": "INT",
1023 | "links": null,
1024 | "shape": 3
1025 | },
1026 | {
1027 | "name": "height",
1028 | "type": "INT",
1029 | "links": null,
1030 | "shape": 3
1031 | },
1032 | {
1033 | "name": "angle",
1034 | "type": "INT",
1035 | "links": null,
1036 | "shape": 3
1037 | },
1038 | {
1039 | "name": "bbox x",
1040 | "type": "INT",
1041 | "links": null,
1042 | "shape": 3
1043 | },
1044 | {
1045 | "name": "bbox y",
1046 | "type": "INT",
1047 | "links": null,
1048 | "shape": 3
1049 | },
1050 | {
1051 | "name": "bbox width",
1052 | "type": "INT",
1053 | "links": null,
1054 | "shape": 3
1055 | },
1056 | {
1057 | "name": "bbox height",
1058 | "type": "INT",
1059 | "links": null,
1060 | "shape": 3
1061 | }
1062 | ],
1063 | "properties": {
1064 | "Node name for S&R": "CompositorTransformsOutV3"
1065 | },
1066 | "widgets_values": [
1067 | "",
1068 | 1,
1069 | true
1070 | ]
1071 | }
1072 | ],
1073 | "links": [
1074 | [
1075 | 224,
1076 | 120,
1077 | 1,
1078 | 122,
1079 | 0,
1080 | "CLIP"
1081 | ],
1082 | [
1083 | 225,
1084 | 120,
1085 | 1,
1086 | 123,
1087 | 0,
1088 | "CLIP"
1089 | ],
1090 | [
1091 | 226,
1092 | 119,
1093 | 0,
1094 | 124,
1095 | 0,
1096 | "LATENT"
1097 | ],
1098 | [
1099 | 227,
1100 | 120,
1101 | 2,
1102 | 124,
1103 | 1,
1104 | "VAE"
1105 | ],
1106 | [
1107 | 230,
1108 | 120,
1109 | 2,
1110 | 126,
1111 | 1,
1112 | "VAE"
1113 | ],
1114 | [
1115 | 231,
1116 | 126,
1117 | 0,
1118 | 119,
1119 | 3,
1120 | "LATENT"
1121 | ],
1122 | [
1123 | 410,
1124 | 122,
1125 | 0,
1126 | 193,
1127 | 0,
1128 | "CONDITIONING"
1129 | ],
1130 | [
1131 | 411,
1132 | 123,
1133 | 0,
1134 | 193,
1135 | 1,
1136 | "CONDITIONING"
1137 | ],
1138 | [
1139 | 413,
1140 | 193,
1141 | 0,
1142 | 119,
1143 | 1,
1144 | "CONDITIONING"
1145 | ],
1146 | [
1147 | 414,
1148 | 193,
1149 | 1,
1150 | 119,
1151 | 2,
1152 | "CONDITIONING"
1153 | ],
1154 | [
1155 | 423,
1156 | 194,
1157 | 0,
1158 | 193,
1159 | 2,
1160 | "CONTROL_NET"
1161 | ],
1162 | [
1163 | 425,
1164 | 197,
1165 | 0,
1166 | 193,
1167 | 3,
1168 | "IMAGE"
1169 | ],
1170 | [
1171 | 426,
1172 | 197,
1173 | 0,
1174 | 198,
1175 | 0,
1176 | "IMAGE"
1177 | ],
1178 | [
1179 | 670,
1180 | 124,
1181 | 0,
1182 | 297,
1183 | 0,
1184 | "IMAGE"
1185 | ],
1186 | [
1187 | 672,
1188 | 298,
1189 | 0,
1190 | 197,
1191 | 0,
1192 | "IMAGE"
1193 | ],
1194 | [
1195 | 673,
1196 | 298,
1197 | 0,
1198 | 126,
1199 | 0,
1200 | "IMAGE"
1201 | ],
1202 | [
1203 | 803,
1204 | 344,
1205 | 1,
1206 | 298,
1207 | 0,
1208 | "IMAGE"
1209 | ],
1210 | [
1211 | 805,
1212 | 345,
1213 | 0,
1214 | 344,
1215 | 0,
1216 | "COMPOSITOR_CONFIG"
1217 | ],
1218 | [
1219 | 806,
1220 | 344,
1221 | 0,
1222 | 346,
1223 | 0,
1224 | "STRING"
1225 | ],
1226 | [
1227 | 807,
1228 | 347,
1229 | 0,
1230 | 344,
1231 | 1,
1232 | "BOOLEAN"
1233 | ],
1234 | [
1235 | 816,
1236 | 304,
1237 | 0,
1238 | 349,
1239 | 0,
1240 | "IMAGE"
1241 | ],
1242 | [
1243 | 818,
1244 | 349,
1245 | 0,
1246 | 345,
1247 | 4,
1248 | "IMAGE"
1249 | ],
1250 | [
1251 | 819,
1252 | 254,
1253 | 0,
1254 | 351,
1255 | 0,
1256 | "IMAGE"
1257 | ],
1258 | [
1259 | 820,
1260 | 351,
1261 | 0,
1262 | 345,
1263 | 2,
1264 | "IMAGE"
1265 | ],
1266 | [
1267 | 821,
1268 | 350,
1269 | 0,
1270 | 345,
1271 | 0,
1272 | "IMAGE"
1273 | ],
1274 | [
1275 | 822,
1276 | 120,
1277 | 0,
1278 | 352,
1279 | 0,
1280 | "MODEL"
1281 | ],
1282 | [
1283 | 823,
1284 | 352,
1285 | 0,
1286 | 119,
1287 | 0,
1288 | "MODEL"
1289 | ]
1290 | ],
1291 | "groups": [],
1292 | "config": {},
1293 | "extra": {
1294 | "ds": {
1295 | "scale": 0.6588450000000085,
1296 | "offset": [
1297 | 236.6341619559252,
1298 | 1311.8220381206384
1299 | ]
1300 | },
1301 | "groupNodes": {}
1302 | },
1303 | "version": 0.4
1304 | }
--------------------------------------------------------------------------------
/assets/v3.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erosDiffusion/ComfyUI-enricos-nodes/0ab56bd7611bf8153794a8838a195bdf83c8e213/assets/v3.PNG
--------------------------------------------------------------------------------
/assets/warrior.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erosDiffusion/ComfyUI-enricos-nodes/0ab56bd7611bf8153794a8838a195bdf83c8e213/assets/warrior.jpg
--------------------------------------------------------------------------------
/assets/weak.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erosDiffusion/ComfyUI-enricos-nodes/0ab56bd7611bf8153794a8838a195bdf83c8e213/assets/weak.png
--------------------------------------------------------------------------------
/assets/wolf.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erosDiffusion/ComfyUI-enricos-nodes/0ab56bd7611bf8153794a8838a195bdf83c8e213/assets/wolf.jpg
--------------------------------------------------------------------------------
/assets/workflows/compositorMasksV3_sample_assets/ComfyUI_00159_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erosDiffusion/ComfyUI-enricos-nodes/0ab56bd7611bf8153794a8838a195bdf83c8e213/assets/workflows/compositorMasksV3_sample_assets/ComfyUI_00159_.png
--------------------------------------------------------------------------------
/assets/workflows/compositorMasksV3_sample_assets/ComfyUI_00160_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erosDiffusion/ComfyUI-enricos-nodes/0ab56bd7611bf8153794a8838a195bdf83c8e213/assets/workflows/compositorMasksV3_sample_assets/ComfyUI_00160_.png
--------------------------------------------------------------------------------
/assets/workflows/compositorMasksV3_sample_assets/ComfyUI_00161_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erosDiffusion/ComfyUI-enricos-nodes/0ab56bd7611bf8153794a8838a195bdf83c8e213/assets/workflows/compositorMasksV3_sample_assets/ComfyUI_00161_.png
--------------------------------------------------------------------------------
/assets/workflows/compositorMasksV3_sample_assets/ComfyUI_00162_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erosDiffusion/ComfyUI-enricos-nodes/0ab56bd7611bf8153794a8838a195bdf83c8e213/assets/workflows/compositorMasksV3_sample_assets/ComfyUI_00162_.png
--------------------------------------------------------------------------------
/assets/workflows/v3.1.0_multiple_instances_with_lettering.json:
--------------------------------------------------------------------------------
1 | {
2 | "last_node_id": 360,
3 | "last_link_id": 831,
4 | "nodes": [
5 | {
6 | "id": 254,
7 | "type": "LoadImage",
8 | "pos": {
9 | "0": -50,
10 | "1": -850
11 | },
12 | "size": {
13 | "0": 315,
14 | "1": 314
15 | },
16 | "flags": {},
17 | "order": 0,
18 | "mode": 0,
19 | "inputs": [],
20 | "outputs": [
21 | {
22 | "name": "IMAGE",
23 | "type": "IMAGE",
24 | "links": [
25 | 819
26 | ],
27 | "slot_index": 0,
28 | "shape": 3
29 | },
30 | {
31 | "name": "MASK",
32 | "type": "MASK",
33 | "links": null,
34 | "shape": 3
35 | }
36 | ],
37 | "properties": {
38 | "Node name for S&R": "LoadImage"
39 | },
40 | "widgets_values": [
41 | "warrior.jpg",
42 | "image"
43 | ]
44 | },
45 | {
46 | "id": 350,
47 | "type": "LoadImage",
48 | "pos": {
49 | "0": -50,
50 | "1": -1200
51 | },
52 | "size": {
53 | "0": 315,
54 | "1": 314
55 | },
56 | "flags": {},
57 | "order": 1,
58 | "mode": 0,
59 | "inputs": [],
60 | "outputs": [
61 | {
62 | "name": "IMAGE",
63 | "type": "IMAGE",
64 | "links": [
65 | 821
66 | ],
67 | "slot_index": 0,
68 | "shape": 3
69 | },
70 | {
71 | "name": "MASK",
72 | "type": "MASK",
73 | "links": null,
74 | "shape": 3
75 | }
76 | ],
77 | "properties": {
78 | "Node name for S&R": "LoadImage"
79 | },
80 | "widgets_values": [
81 | "forest.jpg",
82 | "image"
83 | ]
84 | },
85 | {
86 | "id": 351,
87 | "type": "Image Remove Background (rembg)",
88 | "pos": {
89 | "0": -50,
90 | "1": -450
91 | },
92 | "size": {
93 | "0": 260.3999938964844,
94 | "1": 28.73012924194336
95 | },
96 | "flags": {},
97 | "order": 9,
98 | "mode": 0,
99 | "inputs": [
100 | {
101 | "name": "image",
102 | "type": "IMAGE",
103 | "link": 819
104 | }
105 | ],
106 | "outputs": [
107 | {
108 | "name": "IMAGE",
109 | "type": "IMAGE",
110 | "links": [
111 | 820
112 | ],
113 | "slot_index": 0,
114 | "shape": 3
115 | }
116 | ],
117 | "properties": {
118 | "Node name for S&R": "Image Remove Background (rembg)"
119 | }
120 | },
121 | {
122 | "id": 120,
123 | "type": "CheckpointLoaderSimple",
124 | "pos": {
125 | "0": 300,
126 | "1": -1200
127 | },
128 | "size": {
129 | "0": 315,
130 | "1": 98
131 | },
132 | "flags": {},
133 | "order": 2,
134 | "mode": 0,
135 | "inputs": [],
136 | "outputs": [
137 | {
138 | "name": "MODEL",
139 | "type": "MODEL",
140 | "links": [
141 | 822
142 | ],
143 | "slot_index": 0
144 | },
145 | {
146 | "name": "CLIP",
147 | "type": "CLIP",
148 | "links": [
149 | 224,
150 | 225
151 | ],
152 | "slot_index": 1
153 | },
154 | {
155 | "name": "VAE",
156 | "type": "VAE",
157 | "links": [
158 | 227,
159 | 230
160 | ],
161 | "slot_index": 2
162 | }
163 | ],
164 | "properties": {
165 | "Node name for S&R": "CheckpointLoaderSimple"
166 | },
167 | "widgets_values": [
168 | "dreamshaper_8.safetensors"
169 | ]
170 | },
171 | {
172 | "id": 286,
173 | "type": "EmptyImage",
174 | "pos": {
175 | "0": 300,
176 | "1": -1050
177 | },
178 | "size": {
179 | "0": 315,
180 | "1": 130
181 | },
182 | "flags": {},
183 | "order": 3,
184 | "mode": 0,
185 | "inputs": [],
186 | "outputs": [
187 | {
188 | "name": "IMAGE",
189 | "type": "IMAGE",
190 | "links": [],
191 | "slot_index": 0,
192 | "shape": 3
193 | }
194 | ],
195 | "properties": {
196 | "Node name for S&R": "EmptyImage"
197 | },
198 | "widgets_values": [
199 | 512,
200 | 512,
201 | 1,
202 | 1680
203 | ]
204 | },
205 | {
206 | "id": 304,
207 | "type": "LoadImage",
208 | "pos": {
209 | "0": -50,
210 | "1": -350
211 | },
212 | "size": {
213 | "0": 315,
214 | "1": 314
215 | },
216 | "flags": {},
217 | "order": 4,
218 | "mode": 0,
219 | "inputs": [],
220 | "outputs": [
221 | {
222 | "name": "IMAGE",
223 | "type": "IMAGE",
224 | "links": [
225 | 816
226 | ],
227 | "slot_index": 0,
228 | "shape": 3
229 | },
230 | {
231 | "name": "MASK",
232 | "type": "MASK",
233 | "links": [],
234 | "slot_index": 1,
235 | "shape": 3
236 | }
237 | ],
238 | "properties": {
239 | "Node name for S&R": "LoadImage"
240 | },
241 | "widgets_values": [
242 | "wolf.jpg",
243 | "image"
244 | ]
245 | },
246 | {
247 | "id": 197,
248 | "type": "AIO_Preprocessor",
249 | "pos": {
250 | "0": 700,
251 | "1": -950
252 | },
253 | "size": {
254 | "0": 428.31402587890625,
255 | "1": 82
256 | },
257 | "flags": {},
258 | "order": 18,
259 | "mode": 0,
260 | "inputs": [
261 | {
262 | "name": "image",
263 | "type": "IMAGE",
264 | "link": 672
265 | }
266 | ],
267 | "outputs": [
268 | {
269 | "name": "IMAGE",
270 | "type": "IMAGE",
271 | "links": [
272 | 425,
273 | 426
274 | ],
275 | "slot_index": 0,
276 | "shape": 3
277 | }
278 | ],
279 | "properties": {
280 | "Node name for S&R": "AIO_Preprocessor"
281 | },
282 | "widgets_values": [
283 | "DepthAnythingV2Preprocessor",
284 | 512
285 | ]
286 | },
287 | {
288 | "id": 123,
289 | "type": "CLIPTextEncode",
290 | "pos": {
291 | "0": 1150,
292 | "1": -1200
293 | },
294 | "size": {
295 | "0": 417.5032653808594,
296 | "1": 154.69677734375
297 | },
298 | "flags": {
299 | "collapsed": false
300 | },
301 | "order": 12,
302 | "mode": 0,
303 | "inputs": [
304 | {
305 | "name": "clip",
306 | "type": "CLIP",
307 | "link": 225
308 | }
309 | ],
310 | "outputs": [
311 | {
312 | "name": "CONDITIONING",
313 | "type": "CONDITIONING",
314 | "links": [
315 | 411
316 | ],
317 | "slot_index": 0
318 | }
319 | ],
320 | "properties": {
321 | "Node name for S&R": "CLIPTextEncode"
322 | },
323 | "widgets_values": [
324 | "lifeless, horror, painting, cgi, illustration, low_quality, blurry, vampire, unrealistic, drawing, text, watermark, bad_quality"
325 | ],
326 | "color": "#322",
327 | "bgcolor": "#533"
328 | },
329 | {
330 | "id": 194,
331 | "type": "ControlNetLoader",
332 | "pos": {
333 | "0": 1150,
334 | "1": -950
335 | },
336 | "size": {
337 | "0": 459.9102783203125,
338 | "1": 58
339 | },
340 | "flags": {},
341 | "order": 5,
342 | "mode": 0,
343 | "inputs": [],
344 | "outputs": [
345 | {
346 | "name": "CONTROL_NET",
347 | "type": "CONTROL_NET",
348 | "links": [
349 | 423
350 | ],
351 | "slot_index": 0,
352 | "shape": 3
353 | }
354 | ],
355 | "properties": {
356 | "Node name for S&R": "ControlNetLoader"
357 | },
358 | "widgets_values": [
359 | "control_v11f1p_sd15_depth_fp16.safetensors"
360 | ]
361 | },
362 | {
363 | "id": 298,
364 | "type": "ImageScale",
365 | "pos": {
366 | "0": 1600,
367 | "1": -1200
368 | },
369 | "size": {
370 | "0": 315,
371 | "1": 130
372 | },
373 | "flags": {},
374 | "order": 17,
375 | "mode": 4,
376 | "inputs": [
377 | {
378 | "name": "image",
379 | "type": "IMAGE",
380 | "link": 803
381 | }
382 | ],
383 | "outputs": [
384 | {
385 | "name": "IMAGE",
386 | "type": "IMAGE",
387 | "links": [
388 | 672,
389 | 673
390 | ],
391 | "slot_index": 0,
392 | "shape": 3
393 | }
394 | ],
395 | "properties": {
396 | "Node name for S&R": "ImageScale"
397 | },
398 | "widgets_values": [
399 | "nearest-exact",
400 | 1024,
401 | 1536,
402 | "disabled"
403 | ]
404 | },
405 | {
406 | "id": 198,
407 | "type": "PreviewImage",
408 | "pos": {
409 | "0": 1950,
410 | "1": -1200
411 | },
412 | "size": {
413 | "0": 210,
414 | "1": 246
415 | },
416 | "flags": {},
417 | "order": 21,
418 | "mode": 0,
419 | "inputs": [
420 | {
421 | "name": "images",
422 | "type": "IMAGE",
423 | "link": 426
424 | }
425 | ],
426 | "outputs": [],
427 | "properties": {
428 | "Node name for S&R": "PreviewImage"
429 | }
430 | },
431 | {
432 | "id": 126,
433 | "type": "VAEEncode",
434 | "pos": {
435 | "0": 2200,
436 | "1": -1200
437 | },
438 | "size": {
439 | "0": 210,
440 | "1": 46
441 | },
442 | "flags": {},
443 | "order": 19,
444 | "mode": 0,
445 | "inputs": [
446 | {
447 | "name": "pixels",
448 | "type": "IMAGE",
449 | "link": 673
450 | },
451 | {
452 | "name": "vae",
453 | "type": "VAE",
454 | "link": 230
455 | }
456 | ],
457 | "outputs": [
458 | {
459 | "name": "LATENT",
460 | "type": "LATENT",
461 | "links": [
462 | 231
463 | ],
464 | "slot_index": 0,
465 | "shape": 3
466 | }
467 | ],
468 | "properties": {
469 | "Node name for S&R": "VAEEncode"
470 | }
471 | },
472 | {
473 | "id": 349,
474 | "type": "Image Remove Background (rembg)",
475 | "pos": {
476 | "0": -50,
477 | "1": 50
478 | },
479 | "size": {
480 | "0": 260.3999938964844,
481 | "1": 26.643104553222656
482 | },
483 | "flags": {},
484 | "order": 13,
485 | "mode": 0,
486 | "inputs": [
487 | {
488 | "name": "image",
489 | "type": "IMAGE",
490 | "link": 816
491 | }
492 | ],
493 | "outputs": [
494 | {
495 | "name": "IMAGE",
496 | "type": "IMAGE",
497 | "links": [
498 | 818
499 | ],
500 | "slot_index": 0,
501 | "shape": 3
502 | }
503 | ],
504 | "properties": {
505 | "Node name for S&R": "Image Remove Background (rembg)"
506 | }
507 | },
508 | {
509 | "id": 352,
510 | "type": "PerturbedAttentionGuidance",
511 | "pos": {
512 | "0": 2250,
513 | "1": -200
514 | },
515 | "size": {
516 | "0": 218.39999389648438,
517 | "1": 58
518 | },
519 | "flags": {},
520 | "order": 10,
521 | "mode": 0,
522 | "inputs": [
523 | {
524 | "name": "model",
525 | "type": "MODEL",
526 | "link": 822
527 | }
528 | ],
529 | "outputs": [
530 | {
531 | "name": "MODEL",
532 | "type": "MODEL",
533 | "links": [
534 | 823
535 | ],
536 | "slot_index": 0,
537 | "shape": 3
538 | }
539 | ],
540 | "properties": {
541 | "Node name for S&R": "PerturbedAttentionGuidance"
542 | },
543 | "widgets_values": [
544 | 3
545 | ]
546 | },
547 | {
548 | "id": 346,
549 | "type": "CompositorTransformsOutV3",
550 | "pos": {
551 | "0": 2550,
552 | "1": -1200
553 | },
554 | "size": {
555 | "0": 453.5999755859375,
556 | "1": 266
557 | },
558 | "flags": {},
559 | "order": 16,
560 | "mode": 0,
561 | "inputs": [
562 | {
563 | "name": "transforms",
564 | "type": "STRING",
565 | "link": 806,
566 | "widget": {
567 | "name": "transforms"
568 | }
569 | }
570 | ],
571 | "outputs": [
572 | {
573 | "name": "x",
574 | "type": "INT",
575 | "links": null,
576 | "shape": 3
577 | },
578 | {
579 | "name": "y",
580 | "type": "INT",
581 | "links": null,
582 | "shape": 3
583 | },
584 | {
585 | "name": "width",
586 | "type": "INT",
587 | "links": null,
588 | "shape": 3
589 | },
590 | {
591 | "name": "height",
592 | "type": "INT",
593 | "links": null,
594 | "shape": 3
595 | },
596 | {
597 | "name": "angle",
598 | "type": "INT",
599 | "links": null,
600 | "shape": 3
601 | },
602 | {
603 | "name": "bbox x",
604 | "type": "INT",
605 | "links": null,
606 | "shape": 3
607 | },
608 | {
609 | "name": "bbox y",
610 | "type": "INT",
611 | "links": null,
612 | "shape": 3
613 | },
614 | {
615 | "name": "bbox width",
616 | "type": "INT",
617 | "links": null,
618 | "shape": 3
619 | },
620 | {
621 | "name": "bbox height",
622 | "type": "INT",
623 | "links": null,
624 | "shape": 3
625 | }
626 | ],
627 | "properties": {
628 | "Node name for S&R": "CompositorTransformsOutV3"
629 | },
630 | "widgets_values": [
631 | "",
632 | 1,
633 | true
634 | ]
635 | },
636 | {
637 | "id": 119,
638 | "type": "KSampler",
639 | "pos": {
640 | "0": 2250,
641 | "1": -800
642 | },
643 | "size": {
644 | "0": 256.9302062988281,
645 | "1": 262
646 | },
647 | "flags": {},
648 | "order": 22,
649 | "mode": 0,
650 | "inputs": [
651 | {
652 | "name": "model",
653 | "type": "MODEL",
654 | "link": 823
655 | },
656 | {
657 | "name": "positive",
658 | "type": "CONDITIONING",
659 | "link": 413
660 | },
661 | {
662 | "name": "negative",
663 | "type": "CONDITIONING",
664 | "link": 414
665 | },
666 | {
667 | "name": "latent_image",
668 | "type": "LATENT",
669 | "link": 231
670 | }
671 | ],
672 | "outputs": [
673 | {
674 | "name": "LATENT",
675 | "type": "LATENT",
676 | "links": [
677 | 226
678 | ],
679 | "slot_index": 0
680 | }
681 | ],
682 | "properties": {
683 | "Node name for S&R": "KSampler"
684 | },
685 | "widgets_values": [
686 | 977862159117587,
687 | "fixed",
688 | 35,
689 | 4.5,
690 | "deis",
691 | "beta",
692 | 1
693 | ]
694 | },
695 | {
696 | "id": 297,
697 | "type": "PreviewImage",
698 | "pos": {
699 | "0": 2550,
700 | "1": -750
701 | },
702 | "size": {
703 | "0": 1300,
704 | "1": 1050
705 | },
706 | "flags": {},
707 | "order": 24,
708 | "mode": 0,
709 | "inputs": [
710 | {
711 | "name": "images",
712 | "type": "IMAGE",
713 | "link": 670
714 | }
715 | ],
716 | "outputs": [],
717 | "properties": {
718 | "Node name for S&R": "PreviewImage"
719 | }
720 | },
721 | {
722 | "id": 122,
723 | "type": "CLIPTextEncode",
724 | "pos": {
725 | "0": 700,
726 | "1": -1200
727 | },
728 | "size": {
729 | "0": 422.84503173828125,
730 | "1": 164.31304931640625
731 | },
732 | "flags": {},
733 | "order": 11,
734 | "mode": 0,
735 | "inputs": [
736 | {
737 | "name": "clip",
738 | "type": "CLIP",
739 | "link": 224
740 | }
741 | ],
742 | "outputs": [
743 | {
744 | "name": "CONDITIONING",
745 | "type": "CONDITIONING",
746 | "links": [
747 | 410
748 | ],
749 | "slot_index": 0
750 | }
751 | ],
752 | "properties": {
753 | "Node name for S&R": "CLIPTextEncode"
754 | },
755 | "widgets_values": [
756 |         "cinematic photograph of a viking warrior in a forest AND a black wolf AND color grading and film grain AND cinematic AND 4K AND HDR"
757 | ],
758 | "color": "#232",
759 | "bgcolor": "#353"
760 | },
761 | {
762 | "id": 193,
763 | "type": "ControlNetApplyAdvanced",
764 | "pos": {
765 | "0": 2250,
766 | "1": -450
767 | },
768 | "size": {
769 | "0": 240.27142333984375,
770 | "1": 166
771 | },
772 | "flags": {},
773 | "order": 20,
774 | "mode": 0,
775 | "inputs": [
776 | {
777 | "name": "positive",
778 | "type": "CONDITIONING",
779 | "link": 410
780 | },
781 | {
782 | "name": "negative",
783 | "type": "CONDITIONING",
784 | "link": 411
785 | },
786 | {
787 | "name": "control_net",
788 | "type": "CONTROL_NET",
789 | "link": 423
790 | },
791 | {
792 | "name": "image",
793 | "type": "IMAGE",
794 | "link": 425
795 | }
796 | ],
797 | "outputs": [
798 | {
799 | "name": "positive",
800 | "type": "CONDITIONING",
801 | "links": [
802 | 413
803 | ],
804 | "slot_index": 0,
805 | "shape": 3
806 | },
807 | {
808 | "name": "negative",
809 | "type": "CONDITIONING",
810 | "links": [
811 | 414
812 | ],
813 | "slot_index": 1,
814 | "shape": 3
815 | }
816 | ],
817 | "properties": {
818 | "Node name for S&R": "ControlNetApplyAdvanced"
819 | },
820 | "widgets_values": [
821 | 0.5,
822 | 0,
823 | 0.8300000000000001
824 | ]
825 | },
826 | {
827 | "id": 347,
828 | "type": "CompositorTools3",
829 | "pos": {
830 | "0": 300,
831 | "1": -250
832 | },
833 | "size": {
834 | "0": 310.79998779296875,
835 | "1": 106
836 | },
837 | "flags": {},
838 | "order": 6,
839 | "mode": 0,
840 | "inputs": [],
841 | "outputs": [
842 | {
843 | "name": "tools",
844 | "type": "BOOLEAN",
845 | "links": [
846 | 807
847 | ],
848 | "shape": 3
849 | }
850 | ],
851 | "properties": {
852 | "Node name for S&R": "CompositorTools3"
853 | }
854 | },
855 | {
856 | "id": 344,
857 | "type": "Compositor3",
858 | "pos": {
859 | "0": 700,
860 | "1": -800
861 | },
862 | "size": [
863 | 1521,
864 | 1079
865 | ],
866 | "flags": {},
867 | "order": 15,
868 | "mode": 0,
869 | "inputs": [
870 | {
871 | "name": "config",
872 | "type": "COMPOSITOR_CONFIG",
873 | "link": 805
874 | },
875 | {
876 | "name": "tools",
877 | "type": "BOOLEAN",
878 | "link": 807,
879 | "widget": {
880 | "name": "tools"
881 | }
882 | }
883 | ],
884 | "outputs": [
885 | {
886 | "name": "transforms",
887 | "type": "STRING",
888 | "links": [
889 | 806
890 | ],
891 | "shape": 3
892 | },
893 | {
894 | "name": "image",
895 | "type": "IMAGE",
896 | "links": [
897 | 803
898 | ],
899 | "shape": 3
900 | }
901 | ],
902 | "properties": {
903 | "Node name for S&R": "Compositor3"
904 | },
905 | "widgets_values": [
906 | "{\"width\":1280,\"height\":768,\"padding\":110,\"transforms\":[{\"left\":764.4231481843307,\"top\":495.35725483911654,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"top\",\"originY\":\"left\",\"xwidth\":1365,\"xheight\":768,\"skewY\":0,\"skewX\":0},{\"left\":487.1252625219706,\"top\":504.49345571201695,\"scaleX\":0.9474614514269661,\"scaleY\":0.9474614514269661,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"top\",\"originY\":\"left\",\"xwidth\":576,\"xheight\":768,\"skewY\":0,\"skewX\":0},{\"left\":932.9173507499621,\"top\":679.8975765515524,\"scaleX\":0.7098708725895773,\"scaleY\":0.7098708725895773,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"top\",\"originY\":\"left\",\"xwidth\":1152,\"xheight\":768,\"skewY\":0,\"skewX\":0},{\"left\":1431.0722293068018,\"top\":1018.5045283243194,\"scaleX\":0.4964347777779789,\"scaleY\":0.4964347777779789,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"top\",\"originY\":\"left\",\"xwidth\":0,\"xheight\":0,\"skewY\":0,\"skewX\":0},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"top\",\"originY\":\"left\",\"xwidth\":0,\"xheight\":0,\"skewY\":0,\"skewX\":0},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"top\",\"originY\":\"left\",\"xwidth\":0,\"xheight\":0,\"skewY\":0,\"skewX\":0},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"top\",\"originY\":\"left\",\"xwidth\":0,\"xheight\":0,\"skewY\":0,\"skewX\":0},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"top\",\"originY\":\"left\",\"xwidth\":0,\"xheight\":0,\"skewY\":0,\"skewX\":0}],\"bboxes\":[{\"left\":81.92314818433067,\"top\":111.35725483911654,\"xwidth\":768,\"xheight\":1365},{\"left\":214.2563645110044,\"top\":140.66825836406196,\"xwidth\":727.6503946959099,\"xheight\":545.7377960219324},{\"left\":524.0317281383657,\"top\":407.30716147715475,\"xwidth\":545.1808301487954,\"xheight\":817.7712452231929},{\"left\":1431.0722293068018,\"top\":1018.5045283243194,\"xwidth\":0,\"xheight\":0},{\"left\":0,\"top\":0,\"xwidth\":0,\"xheight\":0},{\"left\":0,\"top\":0,\"xwidth\":0,\"xheight\":0},{\"left\":0,\"top\":0,\"xwidth\":0,\"xheight\":0},{\"left\":0,\"top\":0,\"xwidth\":0,\"xheight\":0}]}",
907 | "compositor/1726858015532.png [temp]",
908 | true,
909 | null,
910 | "continue"
911 | ]
912 | },
913 | {
914 | "id": 124,
915 | "type": "VAEDecode",
916 | "pos": {
917 | "0": 2200,
918 | "1": -1100
919 | },
920 | "size": {
921 | "0": 140,
922 | "1": 46
923 | },
924 | "flags": {},
925 | "order": 23,
926 | "mode": 0,
927 | "inputs": [
928 | {
929 | "name": "samples",
930 | "type": "LATENT",
931 | "link": 226
932 | },
933 | {
934 | "name": "vae",
935 | "type": "VAE",
936 | "link": 227
937 | }
938 | ],
939 | "outputs": [
940 | {
941 | "name": "IMAGE",
942 | "type": "IMAGE",
943 | "links": [
944 | 670,
945 | 826
946 | ],
947 | "slot_index": 0
948 | }
949 | ],
950 | "properties": {
951 | "Node name for S&R": "VAEDecode"
952 | }
953 | },
954 | {
955 | "id": 345,
956 | "type": "CompositorConfig3",
957 | "pos": {
958 | "0": 300,
959 | "1": -800
960 | },
961 | "size": {
962 | "0": 315,
963 | "1": 502
964 | },
965 | "flags": {},
966 | "order": 14,
967 | "mode": 0,
968 | "inputs": [
969 | {
970 | "name": "image1",
971 | "type": "IMAGE",
972 | "link": 821
973 | },
974 | {
975 | "name": "mask1",
976 | "type": "MASK",
977 | "link": null
978 | },
979 | {
980 | "name": "image2",
981 | "type": "IMAGE",
982 | "link": 820
983 | },
984 | {
985 | "name": "mask2",
986 | "type": "MASK",
987 | "link": null
988 | },
989 | {
990 | "name": "image3",
991 | "type": "IMAGE",
992 | "link": 818
993 | },
994 | {
995 | "name": "mask3",
996 | "type": "MASK",
997 | "link": null
998 | },
999 | {
1000 | "name": "image4",
1001 | "type": "IMAGE",
1002 | "link": null
1003 | },
1004 | {
1005 | "name": "mask4",
1006 | "type": "MASK",
1007 | "link": null
1008 | },
1009 | {
1010 | "name": "image5",
1011 | "type": "IMAGE",
1012 | "link": null
1013 | },
1014 | {
1015 | "name": "mask5",
1016 | "type": "MASK",
1017 | "link": null
1018 | },
1019 | {
1020 | "name": "image6",
1021 | "type": "IMAGE",
1022 | "link": null
1023 | },
1024 | {
1025 | "name": "mask6",
1026 | "type": "MASK",
1027 | "link": null
1028 | },
1029 | {
1030 | "name": "image7",
1031 | "type": "IMAGE",
1032 | "link": null
1033 | },
1034 | {
1035 | "name": "mask7",
1036 | "type": "MASK",
1037 | "link": null
1038 | },
1039 | {
1040 | "name": "image8",
1041 | "type": "IMAGE",
1042 | "link": null
1043 | },
1044 | {
1045 | "name": "mask8",
1046 | "type": "MASK",
1047 | "link": null
1048 | }
1049 | ],
1050 | "outputs": [
1051 | {
1052 | "name": "config",
1053 | "type": "COMPOSITOR_CONFIG",
1054 | "links": [
1055 | 805
1056 | ],
1057 | "slot_index": 0,
1058 | "shape": 3
1059 | }
1060 | ],
1061 | "properties": {
1062 | "Node name for S&R": "CompositorConfig3"
1063 | },
1064 | "widgets_values": [
1065 | 1280,
1066 | 768,
1067 | 110,
1068 | true,
1069 | true,
1070 | false,
1071 | 1726858146143
1072 | ]
1073 | },
1074 | {
1075 | "id": 359,
1076 | "type": "SaveImage",
1077 | "pos": {
1078 | "0": 2300,
1079 | "1": 350
1080 | },
1081 | "size": [
1082 | 1550,
1083 | 1050
1084 | ],
1085 | "flags": {},
1086 | "order": 27,
1087 | "mode": 0,
1088 | "inputs": [
1089 | {
1090 | "name": "images",
1091 | "type": "IMAGE",
1092 | "link": 828
1093 | }
1094 | ],
1095 | "outputs": [],
1096 | "properties": {},
1097 | "widgets_values": [
1098 | "ComfyUI"
1099 | ]
1100 | },
1101 | {
1102 | "id": 353,
1103 | "type": "Text_Image_Zho",
1104 | "pos": {
1105 | "0": 35,
1106 | "1": 499
1107 | },
1108 | "size": {
1109 | "0": 210,
1110 | "1": 466
1111 | },
1112 | "flags": {},
1113 | "order": 7,
1114 | "mode": 0,
1115 | "inputs": [],
1116 | "outputs": [
1117 | {
1118 | "name": "image",
1119 | "type": "IMAGE",
1120 | "links": [
1121 | 831
1122 | ],
1123 | "slot_index": 0,
1124 | "shape": 3
1125 | }
1126 | ],
1127 | "properties": {
1128 | "Node name for S&R": "Text_Image_Zho"
1129 | },
1130 | "widgets_values": [
1131 | "V 3.1.0",
1132 | "NotoSans-Regular",
1133 | "left",
1134 | 0,
1135 | 22,
1136 | "#000103",
1137 | 0,
1138 | "#ffffff",
1139 | 0,
1140 | 0,
1141 | 200,
1142 | 200,
1143 | false,
1144 | false,
1145 | 100,
1146 | 180,
1147 | 360
1148 | ]
1149 | },
1150 | {
1151 | "id": 357,
1152 | "type": "CompositorConfig3",
1153 | "pos": {
1154 | "0": 350,
1155 | "1": 350
1156 | },
1157 | "size": {
1158 | "0": 315,
1159 | "1": 502
1160 | },
1161 | "flags": {},
1162 | "order": 25,
1163 | "mode": 0,
1164 | "inputs": [
1165 | {
1166 | "name": "image1",
1167 | "type": "IMAGE",
1168 | "link": 826
1169 | },
1170 | {
1171 | "name": "mask1",
1172 | "type": "MASK",
1173 | "link": null
1174 | },
1175 | {
1176 | "name": "image2",
1177 | "type": "IMAGE",
1178 | "link": null
1179 | },
1180 | {
1181 | "name": "mask2",
1182 | "type": "MASK",
1183 | "link": null
1184 | },
1185 | {
1186 | "name": "image3",
1187 | "type": "IMAGE",
1188 | "link": null
1189 | },
1190 | {
1191 | "name": "mask3",
1192 | "type": "MASK",
1193 | "link": null
1194 | },
1195 | {
1196 | "name": "image4",
1197 | "type": "IMAGE",
1198 | "link": 831
1199 | },
1200 | {
1201 | "name": "mask4",
1202 | "type": "MASK",
1203 | "link": null
1204 | },
1205 | {
1206 | "name": "image5",
1207 | "type": "IMAGE",
1208 | "link": null
1209 | },
1210 | {
1211 | "name": "mask5",
1212 | "type": "MASK",
1213 | "link": null
1214 | },
1215 | {
1216 | "name": "image6",
1217 | "type": "IMAGE",
1218 | "link": null
1219 | },
1220 | {
1221 | "name": "mask6",
1222 | "type": "MASK",
1223 | "link": null
1224 | },
1225 | {
1226 | "name": "image7",
1227 | "type": "IMAGE",
1228 | "link": null
1229 | },
1230 | {
1231 | "name": "mask7",
1232 | "type": "MASK",
1233 | "link": null
1234 | },
1235 | {
1236 | "name": "image8",
1237 | "type": "IMAGE",
1238 | "link": null
1239 | },
1240 | {
1241 | "name": "mask8",
1242 | "type": "MASK",
1243 | "link": null
1244 | }
1245 | ],
1246 | "outputs": [
1247 | {
1248 | "name": "config",
1249 | "type": "COMPOSITOR_CONFIG",
1250 | "links": [
1251 | 825
1252 | ],
1253 | "slot_index": 0,
1254 | "shape": 3
1255 | }
1256 | ],
1257 | "properties": {
1258 | "Node name for S&R": "CompositorConfig3"
1259 | },
1260 | "widgets_values": [
1261 | 1280,
1262 | 768,
1263 | 110,
1264 | true,
1265 | true,
1266 | false,
1267 | 1726858146143
1268 | ]
1269 | },
1270 | {
1271 | "id": 360,
1272 | "type": "CompositorTools3",
1273 | "pos": {
1274 | "0": 350,
1275 | "1": 950
1276 | },
1277 | "size": {
1278 | "0": 310.79998779296875,
1279 | "1": 106
1280 | },
1281 | "flags": {},
1282 | "order": 8,
1283 | "mode": 0,
1284 | "inputs": [],
1285 | "outputs": [
1286 | {
1287 | "name": "tools",
1288 | "type": "BOOLEAN",
1289 | "links": [
1290 | 829
1291 | ],
1292 | "slot_index": 0,
1293 | "shape": 3
1294 | }
1295 | ],
1296 | "properties": {
1297 | "Node name for S&R": "CompositorTools3"
1298 | }
1299 | },
1300 | {
1301 | "id": 354,
1302 | "type": "Compositor3",
1303 | "pos": {
1304 | "0": 700,
1305 | "1": 350
1306 | },
1307 | "size": [
1308 | 1521,
1309 | 1079
1310 | ],
1311 | "flags": {},
1312 | "order": 26,
1313 | "mode": 0,
1314 | "inputs": [
1315 | {
1316 | "name": "config",
1317 | "type": "COMPOSITOR_CONFIG",
1318 | "link": 825
1319 | },
1320 | {
1321 | "name": "tools",
1322 | "type": "BOOLEAN",
1323 | "link": 829,
1324 | "widget": {
1325 | "name": "tools"
1326 | }
1327 | }
1328 | ],
1329 | "outputs": [
1330 | {
1331 | "name": "transforms",
1332 | "type": "STRING",
1333 | "links": [],
1334 | "slot_index": 0,
1335 | "shape": 3
1336 | },
1337 | {
1338 | "name": "image",
1339 | "type": "IMAGE",
1340 | "links": [
1341 | 828
1342 | ],
1343 | "slot_index": 1,
1344 | "shape": 3
1345 | }
1346 | ],
1347 | "properties": {
1348 | "Node name for S&R": "Compositor3"
1349 | },
1350 | "widgets_values": [
1351 | "{\"width\":1280,\"height\":768,\"padding\":110,\"transforms\":[{\"left\":750,\"top\":494,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"top\",\"originY\":\"left\",\"xwidth\":1280,\"xheight\":768,\"skewY\":0,\"skewX\":0},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"top\",\"originY\":\"left\",\"xwidth\":0,\"xheight\":0,\"skewY\":0,\"skewX\":0},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"top\",\"originY\":\"left\",\"xwidth\":0,\"xheight\":0,\"skewY\":0,\"skewX\":0},{\"left\":1415.4241902320905,\"top\":978.2955456708843,\"scaleX\":0.3496149970870165,\"scaleY\":0.3496149970870165,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"top\",\"originY\":\"left\",\"xwidth\":768,\"xheight\":768,\"skewY\":0,\"skewX\":0},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"top\",\"originY\":\"left\",\"xwidth\":0,\"xheight\":0,\"skewY\":0,\"skewX\":0},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"top\",\"originY\":\"left\",\"xwidth\":0,\"xheight\":0,\"skewY\":0,\"skewX\":0},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"top\",\"originY\":\"left\",\"xwidth\":0,\"xheight\":0,\"skewY\":0,\"skewX\":0},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"top\",\"originY\":\"left\",\"xwidth\":0,\"xheight\":0,\"skewY\":0,\"skewX\":0}],\"bboxes\":[{\"left\":110,\"top\":110,\"xwidth\":768,\"xheight\":1280},{\"left\":0,\"top\":0,\"xwidth\":0,\"xheight\":0},{\"left\":0,\"top\":0,\"xwidth\":0,\"xheight\":0},{\"left\":1281.1720313506762,\"top\":844.0433867894699,\"xwidth\":268.5043177628288,\"xheight\":268.5043177628286},{\"left\":0,\"top\":0,\"xwidth\":0,\"xheight\":0},{\"left\":0,\"top\":0,\"xwidth\":0,\"xheight\":0},{\"left\":0,\"top\":0,\"xwidth\":0,\"xheight\":0},{\"left\":0,\"top\":0,\"xwidth\":0,\"xheight\":0}]}",
1352 | "compositor/1726858264446.png [temp]",
1353 | true,
1354 | null,
1355 | "continue"
1356 | ]
1357 | }
1358 | ],
1359 | "links": [
1360 | [
1361 | 224,
1362 | 120,
1363 | 1,
1364 | 122,
1365 | 0,
1366 | "CLIP"
1367 | ],
1368 | [
1369 | 225,
1370 | 120,
1371 | 1,
1372 | 123,
1373 | 0,
1374 | "CLIP"
1375 | ],
1376 | [
1377 | 226,
1378 | 119,
1379 | 0,
1380 | 124,
1381 | 0,
1382 | "LATENT"
1383 | ],
1384 | [
1385 | 227,
1386 | 120,
1387 | 2,
1388 | 124,
1389 | 1,
1390 | "VAE"
1391 | ],
1392 | [
1393 | 230,
1394 | 120,
1395 | 2,
1396 | 126,
1397 | 1,
1398 | "VAE"
1399 | ],
1400 | [
1401 | 231,
1402 | 126,
1403 | 0,
1404 | 119,
1405 | 3,
1406 | "LATENT"
1407 | ],
1408 | [
1409 | 410,
1410 | 122,
1411 | 0,
1412 | 193,
1413 | 0,
1414 | "CONDITIONING"
1415 | ],
1416 | [
1417 | 411,
1418 | 123,
1419 | 0,
1420 | 193,
1421 | 1,
1422 | "CONDITIONING"
1423 | ],
1424 | [
1425 | 413,
1426 | 193,
1427 | 0,
1428 | 119,
1429 | 1,
1430 | "CONDITIONING"
1431 | ],
1432 | [
1433 | 414,
1434 | 193,
1435 | 1,
1436 | 119,
1437 | 2,
1438 | "CONDITIONING"
1439 | ],
1440 | [
1441 | 423,
1442 | 194,
1443 | 0,
1444 | 193,
1445 | 2,
1446 | "CONTROL_NET"
1447 | ],
1448 | [
1449 | 425,
1450 | 197,
1451 | 0,
1452 | 193,
1453 | 3,
1454 | "IMAGE"
1455 | ],
1456 | [
1457 | 426,
1458 | 197,
1459 | 0,
1460 | 198,
1461 | 0,
1462 | "IMAGE"
1463 | ],
1464 | [
1465 | 670,
1466 | 124,
1467 | 0,
1468 | 297,
1469 | 0,
1470 | "IMAGE"
1471 | ],
1472 | [
1473 | 672,
1474 | 298,
1475 | 0,
1476 | 197,
1477 | 0,
1478 | "IMAGE"
1479 | ],
1480 | [
1481 | 673,
1482 | 298,
1483 | 0,
1484 | 126,
1485 | 0,
1486 | "IMAGE"
1487 | ],
1488 | [
1489 | 803,
1490 | 344,
1491 | 1,
1492 | 298,
1493 | 0,
1494 | "IMAGE"
1495 | ],
1496 | [
1497 | 805,
1498 | 345,
1499 | 0,
1500 | 344,
1501 | 0,
1502 | "COMPOSITOR_CONFIG"
1503 | ],
1504 | [
1505 | 806,
1506 | 344,
1507 | 0,
1508 | 346,
1509 | 0,
1510 | "STRING"
1511 | ],
1512 | [
1513 | 807,
1514 | 347,
1515 | 0,
1516 | 344,
1517 | 1,
1518 | "BOOLEAN"
1519 | ],
1520 | [
1521 | 816,
1522 | 304,
1523 | 0,
1524 | 349,
1525 | 0,
1526 | "IMAGE"
1527 | ],
1528 | [
1529 | 818,
1530 | 349,
1531 | 0,
1532 | 345,
1533 | 4,
1534 | "IMAGE"
1535 | ],
1536 | [
1537 | 819,
1538 | 254,
1539 | 0,
1540 | 351,
1541 | 0,
1542 | "IMAGE"
1543 | ],
1544 | [
1545 | 820,
1546 | 351,
1547 | 0,
1548 | 345,
1549 | 2,
1550 | "IMAGE"
1551 | ],
1552 | [
1553 | 821,
1554 | 350,
1555 | 0,
1556 | 345,
1557 | 0,
1558 | "IMAGE"
1559 | ],
1560 | [
1561 | 822,
1562 | 120,
1563 | 0,
1564 | 352,
1565 | 0,
1566 | "MODEL"
1567 | ],
1568 | [
1569 | 823,
1570 | 352,
1571 | 0,
1572 | 119,
1573 | 0,
1574 | "MODEL"
1575 | ],
1576 | [
1577 | 825,
1578 | 357,
1579 | 0,
1580 | 354,
1581 | 0,
1582 | "COMPOSITOR_CONFIG"
1583 | ],
1584 | [
1585 | 826,
1586 | 124,
1587 | 0,
1588 | 357,
1589 | 0,
1590 | "IMAGE"
1591 | ],
1592 | [
1593 | 828,
1594 | 354,
1595 | 1,
1596 | 359,
1597 | 0,
1598 | "IMAGE"
1599 | ],
1600 | [
1601 | 829,
1602 | 360,
1603 | 0,
1604 | 354,
1605 | 1,
1606 | "BOOLEAN"
1607 | ],
1608 | [
1609 | 831,
1610 | 353,
1611 | 0,
1612 | 357,
1613 | 6,
1614 | "IMAGE"
1615 | ]
1616 | ],
1617 | "groups": [],
1618 | "config": {},
1619 | "extra": {
1620 | "ds": {
1621 | "scale": 0.544500000000007,
1622 | "offset": [
1623 | 381.334151528598,
1624 | 906.0090720330776
1625 | ]
1626 | },
1627 | "groupNodes": {}
1628 | },
1629 | "version": 0.4
1630 | }
--------------------------------------------------------------------------------
/assets/workflows/v3.json:
--------------------------------------------------------------------------------
1 | {
2 | "last_node_id": 198,
3 | "last_link_id": 426,
4 | "nodes": [
5 | {
6 | "id": 152,
7 | "type": "LoadImage",
8 | "pos": {
9 | "0": 625,
10 | "1": -850
11 | },
12 | "size": {
13 | "0": 315,
14 | "1": 314
15 | },
16 | "flags": {},
17 | "order": 0,
18 | "mode": 0,
19 | "inputs": [],
20 | "outputs": [
21 | {
22 | "name": "IMAGE",
23 | "type": "IMAGE",
24 | "links": [
25 | 400
26 | ],
27 | "slot_index": 0,
28 | "shape": 3
29 | },
30 | {
31 | "name": "MASK",
32 | "type": "MASK",
33 | "links": null,
34 | "shape": 3
35 | }
36 | ],
37 | "properties": {
38 | "Node name for S&R": "LoadImage"
39 | },
40 | "widgets_values": [
41 | "wolf.jpg",
42 | "image"
43 | ]
44 | },
45 | {
46 | "id": 189,
47 | "type": "LoadImage",
48 | "pos": {
49 | "0": 625,
50 | "1": -400
51 | },
52 | "size": {
53 | "0": 315,
54 | "1": 314
55 | },
56 | "flags": {},
57 | "order": 1,
58 | "mode": 0,
59 | "inputs": [],
60 | "outputs": [
61 | {
62 | "name": "IMAGE",
63 | "type": "IMAGE",
64 | "links": [
65 | 404
66 | ],
67 | "slot_index": 0,
68 | "shape": 3
69 | },
70 | {
71 | "name": "MASK",
72 | "type": "MASK",
73 | "links": null,
74 | "shape": 3
75 | }
76 | ],
77 | "properties": {
78 | "Node name for S&R": "LoadImage"
79 | },
80 | "widgets_values": [
81 | "warrior.jpg",
82 | "image"
83 | ]
84 | },
85 | {
86 | "id": 191,
87 | "type": "LoadImage",
88 | "pos": {
89 | "0": 625,
90 | "1": -1225
91 | },
92 | "size": {
93 | "0": 315,
94 | "1": 314
95 | },
96 | "flags": {},
97 | "order": 2,
98 | "mode": 0,
99 | "inputs": [],
100 | "outputs": [
101 | {
102 | "name": "IMAGE",
103 | "type": "IMAGE",
104 | "links": [
105 | 417
106 | ],
107 | "slot_index": 0,
108 | "shape": 3
109 | },
110 | {
111 | "name": "MASK",
112 | "type": "MASK",
113 | "links": null,
114 | "shape": 3
115 | }
116 | ],
117 | "properties": {
118 | "Node name for S&R": "LoadImage"
119 | },
120 | "widgets_values": [
121 | "forest.jpg",
122 | "image"
123 | ]
124 | },
125 | {
126 | "id": 188,
127 | "type": "Image Remove Background (rembg)",
128 | "pos": {
129 | "0": 625,
130 | "1": -475
131 | },
132 | "size": {
133 | "0": 295.0843505859375,
134 | "1": 26
135 | },
136 | "flags": {},
137 | "order": 5,
138 | "mode": 0,
139 | "inputs": [
140 | {
141 | "name": "image",
142 | "type": "IMAGE",
143 | "link": 400
144 | }
145 | ],
146 | "outputs": [
147 | {
148 | "name": "IMAGE",
149 | "type": "IMAGE",
150 | "links": [
151 | 418
152 | ],
153 | "slot_index": 0,
154 | "shape": 3
155 | }
156 | ],
157 | "properties": {
158 | "Node name for S&R": "Image Remove Background (rembg)"
159 | }
160 | },
161 | {
162 | "id": 120,
163 | "type": "CheckpointLoaderSimple",
164 | "pos": {
165 | "0": 990,
166 | "1": -1009
167 | },
168 | "size": {
169 | "0": 315,
170 | "1": 98
171 | },
172 | "flags": {},
173 | "order": 3,
174 | "mode": 0,
175 | "inputs": [],
176 | "outputs": [
177 | {
178 | "name": "MODEL",
179 | "type": "MODEL",
180 | "links": [
181 | 220
182 | ],
183 | "slot_index": 0
184 | },
185 | {
186 | "name": "CLIP",
187 | "type": "CLIP",
188 | "links": [
189 | 224,
190 | 225
191 | ],
192 | "slot_index": 1
193 | },
194 | {
195 | "name": "VAE",
196 | "type": "VAE",
197 | "links": [
198 | 227,
199 | 230
200 | ],
201 | "slot_index": 2
202 | }
203 | ],
204 | "properties": {
205 | "Node name for S&R": "CheckpointLoaderSimple"
206 | },
207 | "widgets_values": [
208 | "photon_v1.safetensors"
209 | ]
210 | },
211 | {
212 | "id": 184,
213 | "type": "PreviewImage",
214 | "pos": {
215 | "0": 2643,
216 | "1": -850
217 | },
218 | "size": [
219 | 1112.7326366378015,
220 | 803.9349193454689
221 | ],
222 | "flags": {},
223 | "order": 17,
224 | "mode": 0,
225 | "inputs": [
226 | {
227 | "name": "images",
228 | "type": "IMAGE",
229 | "link": 395
230 | }
231 | ],
232 | "outputs": [],
233 | "properties": {
234 | "Node name for S&R": "PreviewImage"
235 | }
236 | },
237 | {
238 | "id": 194,
239 | "type": "ControlNetLoader",
240 | "pos": {
241 | "0": 2312,
242 | "1": -1112
243 | },
244 | "size": {
245 | "0": 315,
246 | "1": 58
247 | },
248 | "flags": {},
249 | "order": 4,
250 | "mode": 0,
251 | "inputs": [],
252 | "outputs": [
253 | {
254 | "name": "CONTROL_NET",
255 | "type": "CONTROL_NET",
256 | "links": [
257 | 423
258 | ],
259 | "shape": 3,
260 | "slot_index": 0
261 | }
262 | ],
263 | "properties": {
264 | "Node name for S&R": "ControlNetLoader"
265 | },
266 | "widgets_values": [
267 | "control_v11f1p_sd15_depth_fp16.safetensors"
268 | ]
269 | },
270 | {
271 | "id": 124,
272 | "type": "VAEDecode",
273 | "pos": {
274 | "0": 2665,
275 | "1": -977
276 | },
277 | "size": {
278 | "0": 210,
279 | "1": 46
280 | },
281 | "flags": {},
282 | "order": 16,
283 | "mode": 0,
284 | "inputs": [
285 | {
286 | "name": "samples",
287 | "type": "LATENT",
288 | "link": 226
289 | },
290 | {
291 | "name": "vae",
292 | "type": "VAE",
293 | "link": 227
294 | }
295 | ],
296 | "outputs": [
297 | {
298 | "name": "IMAGE",
299 | "type": "IMAGE",
300 | "links": [
301 | 395
302 | ],
303 | "slot_index": 0
304 | }
305 | ],
306 | "properties": {
307 | "Node name for S&R": "VAEDecode"
308 | }
309 | },
310 | {
311 | "id": 126,
312 | "type": "VAEEncode",
313 | "pos": {
314 | "0": 2671,
315 | "1": -1102
316 | },
317 | "size": {
318 | "0": 210,
319 | "1": 46
320 | },
321 | "flags": {},
322 | "order": 11,
323 | "mode": 0,
324 | "inputs": [
325 | {
326 | "name": "pixels",
327 | "type": "IMAGE",
328 | "link": 422
329 | },
330 | {
331 | "name": "vae",
332 | "type": "VAE",
333 | "link": 230
334 | }
335 | ],
336 | "outputs": [
337 | {
338 | "name": "LATENT",
339 | "type": "LATENT",
340 | "links": [
341 | 231
342 | ],
343 | "slot_index": 0,
344 | "shape": 3
345 | }
346 | ],
347 | "properties": {
348 | "Node name for S&R": "VAEEncode"
349 | }
350 | },
351 | {
352 | "id": 193,
353 | "type": "ControlNetApplyAdvanced",
354 | "pos": {
355 | "0": 2300,
356 | "1": -1361
357 | },
358 | "size": {
359 | "0": 340.20001220703125,
360 | "1": 166
361 | },
362 | "flags": {},
363 | "order": 13,
364 | "mode": 0,
365 | "inputs": [
366 | {
367 | "name": "positive",
368 | "type": "CONDITIONING",
369 | "link": 410
370 | },
371 | {
372 | "name": "negative",
373 | "type": "CONDITIONING",
374 | "link": 411
375 | },
376 | {
377 | "name": "control_net",
378 | "type": "CONTROL_NET",
379 | "link": 423
380 | },
381 | {
382 | "name": "image",
383 | "type": "IMAGE",
384 | "link": 425
385 | }
386 | ],
387 | "outputs": [
388 | {
389 | "name": "positive",
390 | "type": "CONDITIONING",
391 | "links": [
392 | 413
393 | ],
394 | "slot_index": 0,
395 | "shape": 3
396 | },
397 | {
398 | "name": "negative",
399 | "type": "CONDITIONING",
400 | "links": [
401 | 414
402 | ],
403 | "slot_index": 1,
404 | "shape": 3
405 | }
406 | ],
407 | "properties": {
408 | "Node name for S&R": "ControlNetApplyAdvanced"
409 | },
410 | "widgets_values": [
411 | 1,
412 | 0,
413 | 1
414 | ]
415 | },
416 | {
417 | "id": 197,
418 | "type": "AIO_Preprocessor",
419 | "pos": {
420 | "0": 2306,
421 | "1": -990
422 | },
423 | "size": {
424 | "0": 315,
425 | "1": 82
426 | },
427 | "flags": {},
428 | "order": 12,
429 | "mode": 0,
430 | "inputs": [
431 | {
432 | "name": "image",
433 | "type": "IMAGE",
434 | "link": 424
435 | }
436 | ],
437 | "outputs": [
438 | {
439 | "name": "IMAGE",
440 | "type": "IMAGE",
441 | "links": [
442 | 425,
443 | 426
444 | ],
445 | "shape": 3,
446 | "slot_index": 0
447 | }
448 | ],
449 | "properties": {
450 | "Node name for S&R": "AIO_Preprocessor"
451 | },
452 | "widgets_values": [
453 | "DepthAnythingV2Preprocessor",
454 | 512
455 | ]
456 | },
457 | {
458 | "id": 198,
459 | "type": "PreviewImage",
460 | "pos": {
461 | "0": 2690,
462 | "1": -1366
463 | },
464 | "size": [
465 | 210,
466 | 246
467 | ],
468 | "flags": {},
469 | "order": 14,
470 | "mode": 0,
471 | "inputs": [
472 | {
473 | "name": "images",
474 | "type": "IMAGE",
475 | "link": 426
476 | }
477 | ],
478 | "outputs": [],
479 | "properties": {
480 | "Node name for S&R": "PreviewImage"
481 | }
482 | },
483 | {
484 | "id": 119,
485 | "type": "KSampler",
486 | "pos": {
487 | "0": 2965,
488 | "1": -1386
489 | },
490 | "size": {
491 | "0": 439.5302429199219,
492 | "1": 474
493 | },
494 | "flags": {},
495 | "order": 15,
496 | "mode": 0,
497 | "inputs": [
498 | {
499 | "name": "model",
500 | "type": "MODEL",
501 | "link": 220
502 | },
503 | {
504 | "name": "positive",
505 | "type": "CONDITIONING",
506 | "link": 413
507 | },
508 | {
509 | "name": "negative",
510 | "type": "CONDITIONING",
511 | "link": 414
512 | },
513 | {
514 | "name": "latent_image",
515 | "type": "LATENT",
516 | "link": 231
517 | }
518 | ],
519 | "outputs": [
520 | {
521 | "name": "LATENT",
522 | "type": "LATENT",
523 | "links": [
524 | 226
525 | ],
526 | "slot_index": 0
527 | }
528 | ],
529 | "properties": {
530 | "Node name for S&R": "KSampler"
531 | },
532 | "widgets_values": [
533 | 210611449412695,
534 | "fixed",
535 | 35,
536 | 3.54,
537 | "deis",
538 | "beta",
539 | 1
540 | ]
541 | },
542 | {
543 | "id": 190,
544 | "type": "Image Remove Background (rembg)",
545 | "pos": {
546 | "0": 625,
547 | "1": 0
548 | },
549 | "size": {
550 | "0": 306.39581298828125,
551 | "1": 26
552 | },
553 | "flags": {},
554 | "order": 6,
555 | "mode": 0,
556 | "inputs": [
557 | {
558 | "name": "image",
559 | "type": "IMAGE",
560 | "link": 404
561 | }
562 | ],
563 | "outputs": [
564 | {
565 | "name": "IMAGE",
566 | "type": "IMAGE",
567 | "links": [
568 | 419
569 | ],
570 | "slot_index": 0,
571 | "shape": 3
572 | }
573 | ],
574 | "properties": {
575 | "Node name for S&R": "Image Remove Background (rembg)"
576 | }
577 | },
578 | {
579 | "id": 196,
580 | "type": "CompositorConfig3",
581 | "pos": {
582 | "0": 982,
583 | "1": -843
584 | },
585 | "size": {
586 | "0": 315,
587 | "1": 430
588 | },
589 | "flags": {},
590 | "order": 9,
591 | "mode": 0,
592 | "inputs": [
593 | {
594 | "name": "image1",
595 | "type": "IMAGE",
596 | "link": 417
597 | },
598 | {
599 | "name": "mask1",
600 | "type": "MASK",
601 | "link": null
602 | },
603 | {
604 | "name": "image2",
605 | "type": "IMAGE",
606 | "link": 418
607 | },
608 | {
609 | "name": "mask2",
610 | "type": "MASK",
611 | "link": null
612 | },
613 | {
614 | "name": "image3",
615 | "type": "IMAGE",
616 | "link": 419
617 | },
618 | {
619 | "name": "mask3",
620 | "type": "MASK",
621 | "link": null
622 | },
623 | {
624 | "name": "image4",
625 | "type": "IMAGE",
626 | "link": null
627 | },
628 | {
629 | "name": "mask4",
630 | "type": "MASK",
631 | "link": null
632 | },
633 | {
634 | "name": "image5",
635 | "type": "IMAGE",
636 | "link": null
637 | },
638 | {
639 | "name": "mask5",
640 | "type": "MASK",
641 | "link": null
642 | },
643 | {
644 | "name": "image6",
645 | "type": "IMAGE",
646 | "link": null
647 | },
648 | {
649 | "name": "mask6",
650 | "type": "MASK",
651 | "link": null
652 | },
653 | {
654 | "name": "image7",
655 | "type": "IMAGE",
656 | "link": null
657 | },
658 | {
659 | "name": "mask7",
660 | "type": "MASK",
661 | "link": null
662 | },
663 | {
664 | "name": "image8",
665 | "type": "IMAGE",
666 | "link": null
667 | },
668 | {
669 | "name": "mask8",
670 | "type": "MASK",
671 | "link": null
672 | }
673 | ],
674 | "outputs": [
675 | {
676 | "name": "config",
677 | "type": "COMPOSITOR_CONFIG",
678 | "links": [
679 | 416
680 | ],
681 | "shape": 3
682 | }
683 | ],
684 | "properties": {
685 | "Node name for S&R": "CompositorConfig3"
686 | },
687 | "widgets_values": [
688 | 1024,
689 | 512,
690 | 100,
691 | ""
692 | ]
693 | },
694 | {
695 | "id": 122,
696 | "type": "CLIPTextEncode",
697 | "pos": {
698 | "0": 1363,
699 | "1": -1088
700 | },
701 | "size": {
702 | "0": 422.84503173828125,
703 | "1": 164.31304931640625
704 | },
705 | "flags": {},
706 | "order": 7,
707 | "mode": 0,
708 | "inputs": [
709 | {
710 | "name": "clip",
711 | "type": "CLIP",
712 | "link": 224
713 | }
714 | ],
715 | "outputs": [
716 | {
717 | "name": "CONDITIONING",
718 | "type": "CONDITIONING",
719 | "links": [
720 | 410
721 | ],
722 | "slot_index": 0
723 | }
724 | ],
725 | "properties": {
726 | "Node name for S&R": "CLIPTextEncode"
727 | },
728 | "widgets_values": [
729 | "masterpiece photo, a viking wearing intricate bone armor in a forest AND a black wolf, cinematic shot, warm light, autumn"
730 | ],
731 | "color": "#232",
732 | "bgcolor": "#353"
733 | },
734 | {
735 | "id": 123,
736 | "type": "CLIPTextEncode",
737 | "pos": {
738 | "0": 1826,
739 | "1": -1104
740 | },
741 | "size": {
742 | "0": 425.27801513671875,
743 | "1": 180.6060791015625
744 | },
745 | "flags": {
746 | "collapsed": false
747 | },
748 | "order": 8,
749 | "mode": 0,
750 | "inputs": [
751 | {
752 | "name": "clip",
753 | "type": "CLIP",
754 | "link": 225
755 | }
756 | ],
757 | "outputs": [
758 | {
759 | "name": "CONDITIONING",
760 | "type": "CONDITIONING",
761 | "links": [
762 | 411
763 | ],
764 | "slot_index": 0
765 | }
766 | ],
767 | "properties": {
768 | "Node name for S&R": "CLIPTextEncode"
769 | },
770 | "widgets_values": [
771 |         "horror, lifeless, text, watermark, black background, nsfw, anthropomorphic, (sweater:1.3)"
772 | ],
773 | "color": "#322",
774 | "bgcolor": "#533"
775 | },
776 | {
777 | "id": 195,
778 | "type": "Compositor3",
779 | "pos": {
780 | "0": 1338,
781 | "1": -852
782 | },
783 | "size": [
784 | 1245,
785 | 803
786 | ],
787 | "flags": {},
788 | "order": 10,
789 | "mode": 0,
790 | "inputs": [
791 | {
792 | "name": "config",
793 | "type": "COMPOSITOR_CONFIG",
794 | "link": 416
795 | }
796 | ],
797 | "outputs": [
798 | {
799 | "name": "transforms",
800 | "type": "STRING",
801 | "links": null,
802 | "shape": 3
803 | },
804 | {
805 | "name": "image",
806 | "type": "IMAGE",
807 | "links": [
808 | 422,
809 | 424
810 | ],
811 | "shape": 3,
812 | "slot_index": 1
813 | }
814 | ],
815 | "properties": {
816 | "Node name for S&R": "Compositor3"
817 | },
818 | "widgets_values": [
819 | "{\"width\":1024,\"height\":512,\"padding\":100,\"transforms\":[{\"left\":53.675599372608986,\"top\":-22.10669448342202,\"scaleX\":0.18999999999999928,\"scaleY\":0.18999999999999928,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"left\",\"originY\":\"top\"},{\"left\":163.57991387187252,\"top\":284.1685224899203,\"scaleX\":0.5699999999999996,\"scaleY\":0.5699999999999996,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"left\",\"originY\":\"top\"},{\"left\":455.80701988232624,\"top\":103.7938078264778,\"scaleX\":0.4299999999999995,\"scaleY\":0.4299999999999995,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"left\",\"originY\":\"top\"},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"left\",\"originY\":\"top\"},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"left\",\"originY\":\"top\"},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"left\",\"originY\":\"top\"},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"left\",\"originY\":\"top\"},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"left\",\"originY\":\"top\"}]}",
820 | "compositor/1726522183379.png [temp]",
821 | null,
822 | "continue"
823 | ]
824 | }
825 | ],
826 | "links": [
827 | [
828 | 220,
829 | 120,
830 | 0,
831 | 119,
832 | 0,
833 | "MODEL"
834 | ],
835 | [
836 | 224,
837 | 120,
838 | 1,
839 | 122,
840 | 0,
841 | "CLIP"
842 | ],
843 | [
844 | 225,
845 | 120,
846 | 1,
847 | 123,
848 | 0,
849 | "CLIP"
850 | ],
851 | [
852 | 226,
853 | 119,
854 | 0,
855 | 124,
856 | 0,
857 | "LATENT"
858 | ],
859 | [
860 | 227,
861 | 120,
862 | 2,
863 | 124,
864 | 1,
865 | "VAE"
866 | ],
867 | [
868 | 230,
869 | 120,
870 | 2,
871 | 126,
872 | 1,
873 | "VAE"
874 | ],
875 | [
876 | 231,
877 | 126,
878 | 0,
879 | 119,
880 | 3,
881 | "LATENT"
882 | ],
883 | [
884 | 395,
885 | 124,
886 | 0,
887 | 184,
888 | 0,
889 | "IMAGE"
890 | ],
891 | [
892 | 400,
893 | 152,
894 | 0,
895 | 188,
896 | 0,
897 | "IMAGE"
898 | ],
899 | [
900 | 404,
901 | 189,
902 | 0,
903 | 190,
904 | 0,
905 | "IMAGE"
906 | ],
907 | [
908 | 410,
909 | 122,
910 | 0,
911 | 193,
912 | 0,
913 | "CONDITIONING"
914 | ],
915 | [
916 | 411,
917 | 123,
918 | 0,
919 | 193,
920 | 1,
921 | "CONDITIONING"
922 | ],
923 | [
924 | 413,
925 | 193,
926 | 0,
927 | 119,
928 | 1,
929 | "CONDITIONING"
930 | ],
931 | [
932 | 414,
933 | 193,
934 | 1,
935 | 119,
936 | 2,
937 | "CONDITIONING"
938 | ],
939 | [
940 | 416,
941 | 196,
942 | 0,
943 | 195,
944 | 0,
945 | "COMPOSITOR_CONFIG"
946 | ],
947 | [
948 | 417,
949 | 191,
950 | 0,
951 | 196,
952 | 0,
953 | "IMAGE"
954 | ],
955 | [
956 | 418,
957 | 188,
958 | 0,
959 | 196,
960 | 2,
961 | "IMAGE"
962 | ],
963 | [
964 | 419,
965 | 190,
966 | 0,
967 | 196,
968 | 4,
969 | "IMAGE"
970 | ],
971 | [
972 | 422,
973 | 195,
974 | 1,
975 | 126,
976 | 0,
977 | "IMAGE"
978 | ],
979 | [
980 | 423,
981 | 194,
982 | 0,
983 | 193,
984 | 2,
985 | "CONTROL_NET"
986 | ],
987 | [
988 | 424,
989 | 195,
990 | 1,
991 | 197,
992 | 0,
993 | "IMAGE"
994 | ],
995 | [
996 | 425,
997 | 197,
998 | 0,
999 | 193,
1000 | 3,
1001 | "IMAGE"
1002 | ],
1003 | [
1004 | 426,
1005 | 197,
1006 | 0,
1007 | 198,
1008 | 0,
1009 | "IMAGE"
1010 | ]
1011 | ],
1012 | "groups": [],
1013 | "config": {},
1014 | "extra": {
1015 | "ds": {
1016 | "scale": 0.6830134553650705,
1017 | "offset": [
1018 | -445.07009118325834,
1019 | 1509.8065442908983
1020 | ]
1021 | }
1022 | },
1023 | "version": 0.4
1024 | }
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "comfyui-enricos-nodes"
3 | description = "Pass up to 8 images and visually place, rotate, and scale them to build the perfect composition. Group move and group rescale. Remember their position and scaling values across generations to easily swap images. Use the buffer zone to park an asset you don't want to use, or to easily reach the transformation controls."
4 | version = "3.1.6"
5 |
6 | license = {text = "MIT License"}
7 |
8 | [project.urls]
9 | Repository = "https://github.com/erosDiffusion/ComfyUI-enricos-nodes"
10 | # Used by Comfy Registry https://comfyregistry.org
11 |
12 | [tool.comfy]
13 | PublisherId = "erosdiffusion"
14 | DisplayName = "ComfyUI-enricos-nodes"
15 | Icon = "💜"
16 |
--------------------------------------------------------------------------------
/web/colorPicker.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../scripts/app.js";
2 |
3 | /**
4 | * Custom widget for CompositorColorPicker that allows using the browser's EyeDropper API
5 | */
6 | app.registerExtension({
7 | name: "ComfyUI.Enrico.ColorPicker",
8 |
9 | async beforeRegisterNodeDef(nodeType, nodeData, app) {
10 | if (nodeType.comfyClass === "CompositorColorPicker") {
11 | const onNodeCreated = nodeType.prototype.onNodeCreated;
12 |
13 | // Override the onNodeCreated method to add our custom widget
14 | nodeType.prototype.onNodeCreated = function() {
15 | const result = onNodeCreated?.apply(this, arguments);
16 |
17 | // Get references to the RGB input widgets
18 | const redWidget = this.widgets.find(w => w.name === "red");
19 | const greenWidget = this.widgets.find(w => w.name === "green");
20 | const blueWidget = this.widgets.find(w => w.name === "blue");
21 | const formatWidget = this.widgets.find(w => w.name === "format");
22 |
23 | // Create our custom HTML element
24 | const element = document.createElement("div");
25 | element.style.display = "flex";
26 | element.style.alignItems = "center";
27 | element.style.padding = "5px";
28 |
29 | // Create color input
30 | const colorInput = document.createElement("input");
31 | colorInput.type = "color";
32 | colorInput.style.width = "50px";
33 | colorInput.style.height = "30px";
34 | colorInput.style.marginRight = "10px";
35 | colorInput.style.cursor = "pointer";
36 | colorInput.style.borderRadius = "3px";
37 | colorInput.style.border = "none";
38 |
39 | // Set initial color from the RGB values
40 | const r = redWidget.value.toString(16).padStart(2, '0');
41 | const g = greenWidget.value.toString(16).padStart(2, '0');
42 | const b = blueWidget.value.toString(16).padStart(2, '0');
43 | colorInput.value = `#${r}${g}${b}`;
44 |
45 | // Create eyedropper button
46 | const eyedropperBtn = document.createElement("button");
47 | eyedropperBtn.textContent = "🔍";
48 | eyedropperBtn.title = "Pick color from screen";
49 | eyedropperBtn.style.cursor = "pointer";
50 | eyedropperBtn.style.marginRight = "10px";
51 | eyedropperBtn.style.fontSize = "16px";
52 | eyedropperBtn.style.padding = "4px 8px";
53 | eyedropperBtn.style.backgroundColor = "#666";
54 | eyedropperBtn.style.color = "white";
55 | eyedropperBtn.style.border = "none";
56 | eyedropperBtn.style.borderRadius = "3px";
57 |
58 | // Create hex color display
59 | const hexDisplay = document.createElement("span");
60 | hexDisplay.style.marginLeft = "5px";
61 | hexDisplay.style.fontFamily = "monospace";
62 | hexDisplay.style.backgroundColor = "#444";
63 | hexDisplay.style.padding = "3px 6px";
64 | hexDisplay.style.borderRadius = "3px";
65 | hexDisplay.textContent = colorInput.value;
66 |
67 | // Function to update RGB widgets when color changes
68 | const updateWidgetsFromHex = (hexColor) => {
69 | // Remove # and parse hex values to RGB
70 | const hex = hexColor.substring(1);
71 | const r = parseInt(hex.substring(0, 2), 16);
72 | const g = parseInt(hex.substring(2, 4), 16);
73 | const b = parseInt(hex.substring(4, 6), 16);
74 |
75 | // Update the widgets
76 | redWidget.value = r;
77 | greenWidget.value = g;
78 | blueWidget.value = b;
79 |
80 | // Trigger the widget callbacks
81 | redWidget.callback(r);
82 | greenWidget.callback(g);
83 | blueWidget.callback(b);
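                    // Note: these invoke the overridden callbacks installed below, which refresh the
                    // picker UI; there is no feedback loop, as updateColorFromWidgets only updates the DOM.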
84 |
85 | // Update the hex display
86 | hexDisplay.textContent = hexColor;
87 |
88 | // Mark the node as dirty to update visuals
89 | this.setDirtyCanvas(true);
90 | };
91 |
92 | // Listen for changes on the color input
93 | colorInput.addEventListener("input", function() {
94 | updateWidgetsFromHex(this.value);
95 | });
96 |
97 | // Eyedropper functionality if supported by the browser
98 | eyedropperBtn.addEventListener("click", async function() {
99 | if ('EyeDropper' in window) {
100 | try {
101 | const eyeDropper = new EyeDropper();
102 | const { sRGBHex } = await eyeDropper.open();
103 | colorInput.value = sRGBHex;
104 | updateWidgetsFromHex(sRGBHex);
105 | } catch (e) {
106 | console.log("Eye dropper was cancelled or errored:", e);
107 | }
108 | } else {
109 | alert("EyeDropper API is not supported in your browser");
110 | }
111 | });
112 |
113 | // Update color picker when RGB widgets change
114 | const updateColorFromWidgets = () => {
115 | const r = redWidget.value.toString(16).padStart(2, '0');
116 | const g = greenWidget.value.toString(16).padStart(2, '0');
117 | const b = blueWidget.value.toString(16).padStart(2, '0');
118 | const hexColor = `#${r}${g}${b}`;
119 | colorInput.value = hexColor;
120 | hexDisplay.textContent = hexColor;
121 | };
122 |
123 | // Override the RGB widget callbacks to update our color picker
124 | const originalRedCallback = redWidget.callback;
125 | redWidget.callback = function(value) {
126 | const result = originalRedCallback?.apply(this, [value]);
127 | updateColorFromWidgets();
128 | return result;
129 | };
130 |
131 | const originalGreenCallback = greenWidget.callback;
132 | greenWidget.callback = function(value) {
133 | const result = originalGreenCallback?.apply(this, [value]);
134 | updateColorFromWidgets();
135 | return result;
136 | };
137 |
138 | const originalBlueCallback = blueWidget.callback;
139 | blueWidget.callback = function(value) {
140 | const result = originalBlueCallback?.apply(this, [value]);
141 | updateColorFromWidgets();
142 | return result;
143 | };
144 |
145 | // Add elements to the container
146 | element.appendChild(colorInput);
147 | element.appendChild(eyedropperBtn);
148 | element.appendChild(hexDisplay);
149 |
150 | // Add the DOM widget to the node
151 | this.addDOMWidget("colorpicker_widget", "colorpicker", element, {
152 | serialize: false,
153 | hideOnZoom: false,
154 | });
155 |
156 | // Make the original input widgets smaller
157 | redWidget.computeSize = () => [60, 24];
158 | greenWidget.computeSize = () => [60, 24];
159 | blueWidget.computeSize = () => [60, 24];
160 |
161 | return result;
162 | };
163 | }
164 | },
165 | });
--------------------------------------------------------------------------------
/web/empty.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erosDiffusion/ComfyUI-enricos-nodes/0ab56bd7611bf8153794a8838a195bdf83c8e213/web/empty.png
--------------------------------------------------------------------------------
/web/imageSampler.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../scripts/app.js";
2 | import { api } from "../../scripts/api.js";
3 |
4 | /**
5 | * Custom widget for ImageColorSampler that allows clicking on an image to sample colors
6 | */
7 | app.registerExtension({
8 | name: "ComfyUI.Enrico.ImageSampler",
9 |
10 | async setup() {
11 | // Listen for image_sampler_init event from Python
12 | api.addEventListener("image_sampler_init", (event) => {
13 | const detail = event.detail;
14 | const node = app.graph.getNodeById(detail.node);
15 | if (!node) return;
16 |
17 | // Forward the data to the node instance
18 | if (node.onImageSamplerInit) {
19 | node.onImageSamplerInit(detail.data);
20 | }
21 | });
22 |
23 | // Listen for image_sampler_update event from Python
24 | api.addEventListener("image_sampler_update", (event) => {
25 | const detail = event.detail;
26 | const node = app.graph.getNodeById(detail.node);
27 | if (!node) return;
28 |
29 | // Update the widget with new value
30 | const widget = node.widgets.find(w => w.name === detail.widget_name);
31 | if (widget) {
32 | widget.value = detail.value;
33 | app.graph.setDirtyCanvas(true);
34 |
35 | // Run the workflow again to continue processing
36 | app.queuePrompt(0, 1); // Continue the workflow
37 | }
38 | });
39 | },
40 |
41 | async beforeRegisterNodeDef(nodeType, nodeData, app) {
42 | if (nodeType.comfyClass === "ImageColorSampler") {
43 | const onNodeCreated = nodeType.prototype.onNodeCreated;
44 |
45 | // Override the onNodeCreated method to add our custom widget
46 | nodeType.prototype.onNodeCreated = function() {
47 | const result = onNodeCreated?.apply(this, arguments);
48 |
49 | // Get references to the input widgets
50 | const samplePointsWidget = this.widgets.find(w => w.name === "sample_points");
51 | const paletteSizeWidget = this.widgets.find(w => w.name === "palette_size");
52 | const sampleSizeWidget = this.widgets.find(w => w.name === "sample_size");
53 | const waitForInputWidget = this.widgets.find(w => w.name === "wait_for_input");
54 |
55 | // Hide the sample_points widget as it's just for data storage
56 | if (samplePointsWidget) {
57 | samplePointsWidget.computeSize = () => [0, -4];
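                        // (Returning zero width and a negative height from computeSize is a common
                        // ComfyUI pattern to collapse a widget's row so it occupies no visible space.)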
58 | }
59 |
60 | // Create main container for our custom widget
61 | const container = document.createElement("div");
62 | container.style.width = "auto";
63 | container.style.height = "auto";
64 | container.style.display = "flex";
65 | container.style.flexDirection = "column";
66 | container.style.padding = "10px";
67 | container.style.resize = "none";
68 | container.style.overflow = "hidden";
69 |
70 | // Create image container
71 | const imageContainer = document.createElement("div");
72 | imageContainer.style.backgroundColor = "#333";
73 | imageContainer.style.border = "1px solid #666";
74 | imageContainer.style.borderRadius = "4px";
75 | imageContainer.style.cursor = "crosshair";
76 | imageContainer.style.display = "block";
77 | imageContainer.style.width = "auto";
78 | imageContainer.style.margin = "0 auto";
79 | imageContainer.style.resize = "none";
80 | imageContainer.style.overflow = "hidden";
81 |
82 | // Create canvas for image display and interaction
83 | const canvas = document.createElement("canvas");
84 | canvas.width = 512; // Initial size, will be updated when image loads
85 | canvas.height = 512;
86 | imageContainer.appendChild(canvas);
87 |
88 | // Create debug info element to show coordinates
89 | const debugInfo = document.createElement("div");
90 | debugInfo.style.backgroundColor = "rgba(0,0,0,0.5)";
91 | debugInfo.style.color = "#fff";
92 | debugInfo.style.padding = "5px";
93 | debugInfo.style.borderRadius = "3px";
94 | debugInfo.style.fontSize = "12px";
95 | debugInfo.style.margin = "5px";
96 | debugInfo.style.display = "block"; // Set to "none" to hide in production
97 | container.appendChild(debugInfo);
98 |
99 | // Create info panel
100 | const infoPanel = document.createElement("div");
101 | infoPanel.style.marginTop = "10px";
102 | infoPanel.style.padding = "8px";
103 | infoPanel.style.backgroundColor = "#333";
104 | infoPanel.style.borderRadius = "4px";
105 | infoPanel.style.fontSize = "12px";
106 | infoPanel.style.color = "#ccc";
107 |                 infoPanel.innerHTML = "Click on image to add color samples<br>Drag points to move<br>CTRL+Click to remove a point<br>Adjust sample size to average colors<br>Click 'Continue Workflow' to proceed";
108 |
109 | // Create buttons container
110 | const buttonsContainer = document.createElement("div");
111 | buttonsContainer.style.marginTop = "10px";
112 | buttonsContainer.style.display = "flex";
113 | buttonsContainer.style.gap = "10px";
114 |
115 | // Create clear button
116 | const clearButton = document.createElement("button");
117 | clearButton.textContent = "Clear Samples";
118 | clearButton.style.padding = "6px 12px";
119 | clearButton.style.backgroundColor = "#555";
120 | clearButton.style.color = "white";
121 | clearButton.style.border = "none";
122 | clearButton.style.borderRadius = "4px";
123 | clearButton.style.cursor = "pointer";
124 |
125 | // Create continue button
126 | const continueButton = document.createElement("button");
127 | continueButton.textContent = "Continue Workflow";
128 | continueButton.style.padding = "6px 12px";
129 | continueButton.style.backgroundColor = "#3a88fe";
130 | continueButton.style.color = "white";
131 | continueButton.style.border = "none";
132 | continueButton.style.borderRadius = "4px";
133 | continueButton.style.cursor = "pointer";
134 | continueButton.style.marginLeft = "auto";
135 |
136 | // Add hover effect for buttons
137 | [clearButton, continueButton].forEach(button => {
138 | button.addEventListener("mouseover", function() {
139 | this.style.opacity = "0.8";
140 | });
141 | button.addEventListener("mouseout", function() {
142 | this.style.opacity = "1";
143 | });
144 | });
145 |
146 | // Add buttons to container
147 | buttonsContainer.appendChild(clearButton);
148 | buttonsContainer.appendChild(continueButton);
149 |
150 | // Add elements to container in the correct order
151 | container.appendChild(imageContainer);
152 | container.appendChild(infoPanel);
153 | container.appendChild(buttonsContainer);
154 |
155 | // Sample points data
156 | let samplePoints = [];
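                    // Each point is stored as { x, y, color }, with x/y normalized to 0..1 of the
                    // canvas, so positions stay valid whatever the loaded image's dimensions are.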
157 | const pointSize = 5; // Radius of sample points
158 |
159 | // Canvas context and state
160 | const ctx = canvas.getContext("2d");
161 | let image = null;
162 | let imageBase64 = null;
163 | let selectedPoint = -1;
164 | let isDragging = false;
165 | let nodeId = null;
166 |
167 | // Store actual dimensions of the original image
168 | let originalImageWidth = 0;
169 | let originalImageHeight = 0;
170 |
171 | // Method to handle data from Python
172 | this.onImageSamplerInit = (data) => {
173 | if (!data) return;
174 |
175 | // Store node ID for API calls
176 | nodeId = data.node_id;
177 |
178 | // Load points if any
179 | if (data.sample_points && Array.isArray(data.sample_points)) {
180 | samplePoints = data.sample_points;
181 | }
182 |
183 | // Update sample size if provided
184 | if (data.sample_size && sampleSizeWidget) {
185 | sampleSizeWidget.value = data.sample_size;
186 | }
187 |
188 | // Load image if provided
189 | if (data.image) {
190 | imageBase64 = data.image;
191 | loadImageFromBase64(data.image);
192 | }
193 | };
194 |
195 | // Load and display image from base64
196 | const loadImageFromBase64 = (base64Data) => {
197 | const img = new Image();
198 | img.onload = () => {
199 | // Set canvas size to exactly match the image dimensions
200 | originalImageWidth = img.width;
201 | originalImageHeight = img.height;
202 |
203 | // Set canvas dimensions to match the image exactly
204 | canvas.width = img.width;
205 | canvas.height = img.height;
206 |
207 | // Adjust container size to fit the image exactly
208 | imageContainer.style.width = img.width + "px";
209 | imageContainer.style.height = img.height + "px";
210 |
211 | // Draw the image at 1:1 pixel ratio
212 | ctx.clearRect(0, 0, canvas.width, canvas.height);
213 | ctx.drawImage(img, 0, 0);
214 |
215 | // Store image reference
216 | image = img;
217 |
218 | // Draw sample points if any
219 | drawSamplePoints();
220 | };
221 |
222 | img.src = base64Data;
223 | };
224 |
225 | // Function to continue workflow
226 | const continueWorkflow = () => {
227 | if (!nodeId) return;
228 |
229 | // Send data back to server to continue the workflow
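                    // (Assumes the node's Python backend registers this /image_sampler/continue route
                    // and resumes the paused workflow once it receives the sampled points.)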
230 | api.fetchApi("/image_sampler/continue", {
231 | method: "POST",
232 | headers: { "Content-Type": "application/json" },
233 | body: JSON.stringify({
234 | node_id: nodeId,
235 | sample_points: samplePoints
236 | })
237 | }).catch(err => console.error("Error continuing workflow:", err));
238 | };
239 |
240 | // Draw sample points
241 | const drawSamplePoints = () => {
242 | if (!ctx || !canvas.width) return;
243 |
244 | // Redraw the image
245 | if (image) {
246 | ctx.clearRect(0, 0, canvas.width, canvas.height);
247 | ctx.drawImage(image, 0, 0, canvas.width, canvas.height);
248 | }
249 |
250 | // Draw each sample point
251 | samplePoints.forEach((point, index) => {
252 | // Convert normalized coordinates to canvas pixels
253 | const x = Math.round(point.x * canvas.width);
254 | const y = Math.round(point.y * canvas.height);
255 |
256 | // Draw outer circle
257 | ctx.beginPath();
258 | ctx.arc(x, y, pointSize + 2, 0, Math.PI * 2);
259 | ctx.fillStyle = "black";
260 | ctx.fill();
261 |
262 | // Draw inner circle with sampled color
263 | ctx.beginPath();
264 | ctx.arc(x, y, pointSize, 0, Math.PI * 2);
265 | ctx.fillStyle = point.color || "#ffffff";
266 | ctx.fill();
267 |
268 | // Highlight selected point
269 | if (index === selectedPoint) {
270 | ctx.beginPath();
271 | ctx.arc(x, y, pointSize + 4, 0, Math.PI * 2);
272 | ctx.strokeStyle = "yellow";
273 | ctx.lineWidth = 2;
274 | ctx.stroke();
275 | }
276 |
277 | // Display color information next to the point with a fixed-size background
278 | const hexColor = point.color || "#ffffff";
279 |
280 | // Parse hex to RGB values
281 | const r = parseInt(hexColor.substring(1, 3), 16);
282 | const g = parseInt(hexColor.substring(3, 5), 16);
283 | const b = parseInt(hexColor.substring(5, 7), 16);
284 | const rgbText = `(${r}, ${g}, ${b})`;
285 |
286 | // Fixed position with consistent offset from the point
287 | const labelX = x;
288 | // Increase separation between point and label
289 | const labelY = y + pointSize + 25;
290 | const padding = 8;
291 |
292 | // Use a more readable font family with fallbacks
293 | const fontFamily = "'Segoe UI', Roboto, 'Helvetica Neue', sans-serif";
294 |
295 | // Draw better looking label background with rounded corners
296 | const cornerRadius = 4;
297 | ctx.save(); // Save context state before modifications
298 |
299 | // Fixed width for the background based on max possible text size
300 | // "#FFFFFF" + "(255, 255, 255)" with proper padding
301 | const fixedLabelWidth = 100; // Fixed width that accommodates all possible values
302 | const fixedLabelHeight = 42; // Fixed height for consistent appearance
303 |
304 | // Create rounded rectangle background for color label
305 | ctx.fillStyle = "rgba(0,0,0,0.75)"; // Darker, more opaque background
306 | roundRect(
307 | ctx,
308 | labelX - fixedLabelWidth/2, // Center the fixed-width box
309 | labelY - 15,
310 | fixedLabelWidth,
311 | fixedLabelHeight,
312 | cornerRadius
313 | );
314 |
315 | // Apply text rendering optimizations
316 | ctx.textBaseline = "middle";
317 | ctx.shadowColor = "rgba(0,0,0,0.5)";
318 | ctx.shadowBlur = 3;
319 | ctx.shadowOffsetX = 0;
320 | ctx.shadowOffsetY = 1;
321 |
322 | // Draw hex text with improved visibility
323 | ctx.font = `bold 13px ${fontFamily}`;
324 | ctx.textAlign = "center";
325 | ctx.fillStyle = "#ffffff";
326 | ctx.fillText(hexColor, labelX, labelY);
327 |
328 | // Draw RGB text below hex text
329 | ctx.font = `11px ${fontFamily}`;
330 | ctx.fillStyle = "#cccccc"; // Slightly dimmer for secondary info
331 | ctx.fillText(rgbText, labelX, labelY + 16);
332 |
333 | ctx.restore(); // Restore context to previous state
334 | });
335 | };
336 |
337 | // Helper function to create rounded rectangle path
338 | const roundRect = (context, x, y, width, height, radius) => {
339 | if (width < 2 * radius) radius = width / 2;
340 | if (height < 2 * radius) radius = height / 2;
341 |
342 | context.beginPath();
343 | context.moveTo(x + radius, y);
344 | context.arcTo(x + width, y, x + width, y + height, radius);
345 | context.arcTo(x + width, y + height, x, y + height, radius);
346 | context.arcTo(x, y + height, x, y, radius);
347 | context.arcTo(x, y, x + width, y, radius);
348 | context.closePath();
349 | context.fill();
350 | };
351 |
352 | // Check if a point is under the cursor
353 | const getPointAtPosition = (x, y) => {
354 | for (let i = samplePoints.length - 1; i >= 0; i--) {
355 | const point = samplePoints[i];
356 | const canvasPos = { x: point.x * canvas.width, y: point.y * canvas.height };
357 |
358 | const distance = Math.sqrt(
359 | Math.pow(x - canvasPos.x, 2) + Math.pow(y - canvasPos.y, 2)
360 | );
361 |
362 | if (distance <= pointSize * 2) { // Slightly larger hit area for easier selection
363 | return i;
364 | }
365 | }
366 | return -1;
367 | };
368 |
369 | // Get pixel color at a specific position
370 | const getPixelColorAtPosition = (x, y) => {
371 | if (!ctx || !image) return "#FFFFFF";
372 |
373 | try {
374 | // Ensure coordinates are integers and within canvas bounds
375 | const pixelX = Math.max(0, Math.min(canvas.width - 1, Math.floor(x)));
376 | const pixelY = Math.max(0, Math.min(canvas.height - 1, Math.floor(y)));
377 |
378 | // Save context state
379 | ctx.save();
380 |
381 | // Clear the canvas and draw just the image without any overlays
382 | ctx.clearRect(0, 0, canvas.width, canvas.height);
383 | ctx.drawImage(image, 0, 0, canvas.width, canvas.height);
384 |
385 | // Get the exact pixel data - no averaging or interpolation
386 | const pixelData = ctx.getImageData(pixelX, pixelY, 1, 1).data;
387 |
388 | // Restore context to previous state
389 | ctx.restore();
390 |
391 | // Get exact RGB values (as integers)
392 | const r = pixelData[0];
393 | const g = pixelData[1];
394 | const b = pixelData[2];
395 |
396 | // Format for hex display
397 | const rHex = r.toString(16).padStart(2, '0');
398 | const gHex = g.toString(16).padStart(2, '0');
399 | const bHex = b.toString(16).padStart(2, '0');
400 |
401 | // Return the hex representation
402 | return `#${rHex}${gHex}${bHex}`;
403 | } catch (e) {
404 | console.error("Error getting pixel color:", e);
405 | return "#FFFFFF";
406 | }
407 | };
408 |
409 | // Update debug info display
410 | const updateDebugInfo = (info) => {
411 | if (debugInfo) {
412 | debugInfo.textContent = info;
413 | }
414 | };
415 |
416 | // Handle mouse events on canvas
417 | canvas.addEventListener("mousedown", (e) => {
418 | e.preventDefault(); // Prevent default browser behavior
419 |
420 | // Use offsetX and offsetY directly for mouse position
421 | const mouseX = e.offsetX;
422 | const mouseY = e.offsetY;
423 |
424 | updateDebugInfo(`Mouse: ${mouseX.toFixed(1)}, ${mouseY.toFixed(1)}`);
425 |
426 | // Check if click is on an existing point
427 | const pointIndex = getPointAtPosition(mouseX, mouseY);
428 |
429 | if (pointIndex >= 0) {
430 | if (e.ctrlKey) {
431 | // CTRL+click to delete point
432 | samplePoints.splice(pointIndex, 1);
433 | selectedPoint = -1;
434 | updateSamplePointsWidget();
435 | drawSamplePoints();
436 | } else {
437 | // Select point for dragging
438 | selectedPoint = pointIndex;
439 | isDragging = true;
440 | drawSamplePoints();
441 | }
442 | } else if (mouseX >= 0 && mouseX <= canvas.width &&
443 | mouseY >= 0 && mouseY <= canvas.height && image) {
444 | // Calculate normalized coordinates for the new point
445 | const normalized = { x: mouseX / canvas.width, y: mouseY / canvas.height };
446 |
447 | // Add new point
448 | const newPoint = {
449 | x: normalized.x,
450 | y: normalized.y,
451 | color: getPixelColorAtPosition(mouseX, mouseY) // Sample color immediately
452 | };
453 |
454 | samplePoints.push(newPoint);
455 | selectedPoint = samplePoints.length - 1;
456 | isDragging = true;
457 |
458 | updateSamplePointsWidget();
459 | drawSamplePoints();
460 | }
461 | });
462 |
463 | canvas.addEventListener("mousemove", (e) => {
464 | if (!isDragging || selectedPoint < 0) return;
465 |
466 | e.preventDefault();
467 |
468 | // Use offsetX and offsetY directly for mouse position
469 | const mouseX = e.offsetX;
470 | const mouseY = e.offsetY;
471 |
472 | // Calculate normalized coordinates
473 | const normalized = { x: mouseX / canvas.width, y: mouseY / canvas.height };
474 |
475 | // Update point position with normalized coordinates
476 | samplePoints[selectedPoint].x = normalized.x;
477 | samplePoints[selectedPoint].y = normalized.y;
478 |
479 | // Update debug info
480 | updateDebugInfo(`Point: (${normalized.x.toFixed(3)}, ${normalized.y.toFixed(3)})`);
481 |
482 | // First redraw the image to get accurate sampling
483 | if (image) {
484 |       // Redraw the full image so we sample original pixels rather than drawn overlays
485 | ctx.save();
486 | ctx.drawImage(image, 0, 0, canvas.width, canvas.height);
487 | ctx.restore();
488 | }
489 |
490 | // Update color at new position
491 | samplePoints[selectedPoint].color = getPixelColorAtPosition(mouseX, mouseY);
492 |
493 | // Then draw all sample points
494 | drawSamplePoints();
495 | });
496 |
497 | const handleMouseUp = (e) => {
498 | if (isDragging && selectedPoint >= 0) {
499 | e.preventDefault();
500 | isDragging = false;
501 | updateSamplePointsWidget();
502 | }
503 | };
504 |
505 | // Track mouse position for debugging
506 | canvas.addEventListener("mousemove", (e) => {
507 |       if (!debugInfo || debugInfo.style.display !== "block") return;
508 |
509 | // Use offsetX and offsetY directly
510 | const mouseX = e.offsetX;
511 | const mouseY = e.offsetY;
512 |
513 | const normalized = { x: mouseX / canvas.width, y: mouseY / canvas.height };
514 |       updateDebugInfo(
515 |         `Mouse: ${mouseX.toFixed(1)}, ${mouseY.toFixed(1)} | Norm: ${normalized.x.toFixed(3)}, ${normalized.y.toFixed(3)}`
516 |       );
517 | });
518 |
519 | canvas.addEventListener("mouseup", handleMouseUp);
520 | canvas.addEventListener("mouseleave", handleMouseUp);
521 |
522 | // Update the hidden widget with sample points data
523 | const updateSamplePointsWidget = () => {
524 | if (samplePointsWidget) {
525 | samplePointsWidget.value = JSON.stringify(samplePoints);
526 | this.setDirtyCanvas(true);
527 | }
528 | };
529 |
530 | // Extract hex codes as a list from sample points
531 | const getHexCodesList = () => {
532 | return samplePoints.map(point => point.color);
533 | };
534 |
535 | // Clear all sample points
536 | clearButton.addEventListener("click", () => {
537 | samplePoints = [];
538 | selectedPoint = -1;
539 | updateSamplePointsWidget();
540 | drawSamplePoints();
541 | });
542 |
543 | // Continue workflow button
544 | continueButton.addEventListener("click", () => {
545 | continueWorkflow();
546 | });
547 |
548 | // When the node receives the output from processing
549 | this.onExecuted = function(output) {
550 | if (!output || !output.hasResult) return;
551 |
552 | // If we have a sampled colors result, update point colors
553 | if (output.sampled_colors) {
554 | try {
555 | const colors = JSON.parse(output.sampled_colors);
556 | if (Array.isArray(colors) && colors.length > 0) {
557 | // Update point colors
558 | colors.forEach((colorData, index) => {
559 | if (index < samplePoints.length) {
560 | samplePoints[index].color = colorData.hex;
561 | }
562 | });
563 | drawSamplePoints();
564 | }
565 | } catch (e) {
566 | console.error("Error parsing sampled colors:", e);
567 | }
568 | }
569 | };
570 |
571 | // Handle image input changes
572 | this.onImageInput = function(inputData) {
573 | // Schedule image loading for next frame to ensure DOM is ready
574 | if (inputData && inputData.tensor) {
575 | setTimeout(() => loadImageToCanvas(inputData), 0);
576 | } else if (imageBase64) {
577 | // If we have a base64 image from Python, use that
578 | setTimeout(() => loadImageFromBase64(imageBase64), 0);
579 | }
580 | };
581 |
582 | // Load and display image from tensor data
583 | const loadImageToCanvas = (imgData) => {
584 | if (!imgData) return;
585 |
586 | // Store original image dimensions
587 | originalImageWidth = imgData.width;
588 | originalImageHeight = imgData.height;
589 |
590 | // Create image from tensor data
591 | const imgPixels = new Uint8ClampedArray(imgData.data);
592 | const imgDataObj = new ImageData(imgPixels, imgData.width, imgData.height);
593 |
594 | const offscreenCanvas = new OffscreenCanvas(imgData.width, imgData.height);
595 | const offCtx = offscreenCanvas.getContext("2d");
596 | offCtx.putImageData(imgDataObj, 0, 0);
597 |
598 | // Create image object from the offscreen canvas
599 | offscreenCanvas.convertToBlob().then(blob => {
600 | const img = new Image();
601 | img.onload = () => {
602 | // Set canvas size to exactly match the image dimensions
603 | canvas.width = img.width;
604 | canvas.height = img.height;
605 |
606 | // Adjust container size to fit the image exactly
607 | imageContainer.style.width = img.width + "px";
608 | imageContainer.style.height = img.height + "px";
609 |
610 | // Draw the image at 1:1 pixel ratio
611 | ctx.clearRect(0, 0, canvas.width, canvas.height);
612 | ctx.drawImage(img, 0, 0);
613 |
614 | // Store image reference
615 | image = img;
616 |
617 | // Draw sample points if any
618 |             drawSamplePoints(); URL.revokeObjectURL(img.src); // free the blob URL once the image is decoded and drawn
619 | };
620 | img.src = URL.createObjectURL(blob);
621 | });
622 | };
623 |
624 |   // Lock the canvas to the original image dimensions if the container gets resized
625 | const resizeObserver = new ResizeObserver(() => {
626 | if (image) {
627 | // Don't allow resizing - maintain original dimensions
628 | canvas.width = originalImageWidth;
629 | canvas.height = originalImageHeight;
630 | imageContainer.style.width = originalImageWidth + "px";
631 | imageContainer.style.height = originalImageHeight + "px";
632 |
633 | // Redraw everything at the original size
634 | ctx.clearRect(0, 0, canvas.width, canvas.height);
635 | ctx.drawImage(image, 0, 0, canvas.width, canvas.height);
636 | drawSamplePoints();
637 | }
638 | });
639 |
640 | // Start observing size changes
641 | resizeObserver.observe(imageContainer);
642 |
643 | // Add the DOM widget to the node
644 | this.addDOMWidget("image_sampler_widget", "image_sampler", container, {
645 | serialize: false,
646 | hideOnZoom: false,
647 | resizable: false
648 | });
649 |
650 | // Make the input widgets smaller
651 | if (paletteSizeWidget) paletteSizeWidget.computeSize = () => [100, 24];
652 |       if (sampleSizeWidget) sampleSizeWidget.computeSize = () => [100, 24];
653 | if (waitForInputWidget) waitForInputWidget.computeSize = () => [100, 24];
654 |
655 | return result;
656 | };
657 |
658 | // Add method to capture image input data
659 | const onConnectionsChange = nodeType.prototype.onConnectionsChange;
660 | nodeType.prototype.onConnectionsChange = function(type, slotIndex, isConnected, link_info, output) {
661 | const result = onConnectionsChange?.apply(this, arguments);
662 |
663 | // Process only when connecting an input and link_info is valid
664 | if (type === LiteGraph.INPUT && isConnected && link_info && link_info.origin_id) {
665 | // Get the linked node
666 | const linkedNode = this.graph.getNodeById(link_info.origin_id);
667 | if (linkedNode) {
668 |         const originSlot = link_info.origin_slot;
669 |         const outputData = linkedNode.outputs[originSlot];
670 | 
671 |         // Check that the upstream output feeding this input is an IMAGE
672 |         if (outputData && outputData.type === "IMAGE") {
673 |           // Access the tensor data if available
674 |           const tensorData = linkedNode.getOutputData ? linkedNode.getOutputData(originSlot) : null;
675 |
676 | if (tensorData && this.onImageInput) {
677 | this.onImageInput({ tensor: tensorData });
678 | }
679 | }
680 | }
681 | } else if (type === LiteGraph.INPUT && !isConnected) {
682 | // If image input is disconnected, clear the canvas
683 | const widget = this.widgets.find(w => w.name === "image_sampler_widget");
684 | if (widget && widget.value) {
685 | const canvas = widget.value.querySelector("canvas");
686 | if (canvas) {
687 | const ctx = canvas.getContext("2d");
688 | ctx.clearRect(0, 0, canvas.width, canvas.height);
689 | }
690 | }
691 | }
692 |
693 | return result;
694 | };
695 |
696 | // Ensure the node updates when new image data is available
697 | const onExecute = nodeType.prototype.onExecute;
698 | nodeType.prototype.onExecute = function() {
699 | const result = onExecute?.apply(this, arguments);
700 |
701 | // Check if we have image input
702 | const imageInput = this.getInputData(0);
703 | if (imageInput && this.onImageInput) {
704 | this.onImageInput({ tensor: imageInput });
705 | }
706 |
707 | return result;
708 | };
709 | }
710 | },
711 | });
--------------------------------------------------------------------------------
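Note on the data contract above: each sample point is stored with coordinates normalized to [0,1] (so points survive image size changes) plus the hex color captured on mousedown, and updateSamplePointsWidget() serializes the whole array to JSON in a hidden widget. A minimal sketch of that payload follows; the concrete values are illustrative, and the Python-side consumer is assumed to be ImageColorSampler.py:

// Sketch: the JSON written by updateSamplePointsWidget().
const samplePoints = [
  { x: 0.25, y: 0.50, color: "#a3b2c1" }, // x/y normalized to [0,1]
  { x: 0.75, y: 0.33, color: "#10ff00" }, // color sampled via getPixelColorAtPosition()
];
const payload = JSON.stringify(samplePoints);
// Assumed Python side: json.loads(payload) -> list of {x, y, color} dicts.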
/web/test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erosDiffusion/ComfyUI-enricos-nodes/0ab56bd7611bf8153794a8838a195bdf83c8e213/web/test.png
--------------------------------------------------------------------------------
/web/test_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erosDiffusion/ComfyUI-enricos-nodes/0ab56bd7611bf8153794a8838a195bdf83c8e213/web/test_2.png
--------------------------------------------------------------------------------
/web/tools.js:
--------------------------------------------------------------------------------
1 | import {app} from "../../scripts/app.js";
2 | import {api} from "../../scripts/api.js";
3 |
4 |
5 | function isType(comfyClass, node) {
6 |     return node.constructor.comfyClass === comfyClass;
7 | }
8 |
9 | function getWidget(node, widgetName) {
10 | return node.widgets.find((w) => w.name === widgetName);
11 | }
12 |
13 | app.registerExtension({
14 | name: "Comfy.CompositorTools3",
15 | async getCustomWidgets(app) {
16 | },
17 | async setup(app) {
18 | },
19 | async init(args) {
20 | },
21 | async beforeRegisterNodeDef(nodeType, nodeData, app) {
22 | },
23 |
24 | async loadedGraphNode(node, app) {
25 | },
26 | async afterConfigureGraph(args) {
27 |         // Doing this here alone is not enough; the same wiring must also happen in nodeCreated (for nodes added later or when connections change)
28 |         console.log("after configure graph")
29 |         // To react once a workflow has loaded, use afterConfigureGraph, not setup
30 | // console.log("afterConfigureGraph", args);
31 |
32 |
33 | const tools = app.graph.findNodesByType("CompositorTools3");
34 | tools.forEach((node) => {
35 | const CHANNELNAME = `Tools${node.id}`;
36 | console.log(CHANNELNAME)
37 | const channel = new BroadcastChannel(CHANNELNAME);
38 |
39 | node["togglePreciseSelection"] = () => {
40 | //console.log(arguments);
41 | channel.postMessage({action:"togglePreciseSelection",value: node.preciseSelection.value, nodeId: node.id});
42 | }
43 |
44 | node["centerSelected"] = () => {
45 | //console.log(arguments);
46 | channel.postMessage({action:"centerSelected",value: true, nodeId: node.id});
47 | }
48 |
49 | node["resetTransforms"] = () => {
50 | //console.log(arguments);
51 | channel.postMessage({action:"resetTransforms",value: true, nodeId: node.id});
52 | }
53 |
54 |             node.centerSelected = node.addWidget("button", "centerSelected", false, node.centerSelected); // the widget replaces the handler reference; addWidget already captured the callback
55 | node.preciseSelection = node.addWidget("toggle", "preciseSelection", false, node.togglePreciseSelection);
56 | node.resetTransforms = node.addWidget("button", "resetTransforms", false, node.resetTransforms);
57 | //node.preciseSelection.serialize = ()=>{}
58 | node.setDirtyCanvas(true, true);
59 | })
60 | },
61 | async nodeCreated(node) {
62 | if (!isType("CompositorTools3", node)) return;
63 | // console.log("better log it");
64 | node.serialize_widgets = false;
65 | node.isVirtualNode = true;
66 | },
67 | });
--------------------------------------------------------------------------------
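The tools node above only broadcasts; whoever renders the compositor canvas must subscribe to the same per-node channel. A minimal receiving-side sketch follows, assuming the listener lives in compositor3.js; only the channel name `Tools${node.id}` and the message shape { action, value, nodeId } are taken from the code above, and the handler names are placeholders:

// Sketch of a receiver for the per-node tools channel.
const listenToTools = (nodeId, handlers) => {
  const channel = new BroadcastChannel(`Tools${nodeId}`); // must match the sender's name
  channel.onmessage = ({ data }) => {
    const { action, value } = data; // shape posted by tools.js: { action, value, nodeId }
    if (handlers[action]) handlers[action](value);
  };
  return channel;
};

// Usage (hypothetical handlers):
listenToTools(42, {
  togglePreciseSelection: (on) => console.log("precise selection:", on),
  centerSelected: () => console.log("center selected"),
  resetTransforms: () => console.log("reset transforms"),
});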