├── .gitignore
├── README.md
├── __init__.py
├── assets
│   ├── CLIP_interrogator.png
│   ├── SaveImage.png
│   ├── VAEDecode_to_folder.png
│   ├── depthslicer.jpg
│   ├── eden.png
│   ├── eden_gpt4_node.jpg
│   ├── imagedescriptionnode.jpg
│   ├── loadrandomimage.jpg
│   ├── maskfromrgb_kmeans.jpg
│   ├── parallaxzoom.jpg
│   └── random_number.jpg
├── clip_utils
│   ├── __init__.py
│   ├── clip_interrogator.py
│   ├── clip_tools.py
│   └── data
│       ├── artists.txt
│       ├── flavors.txt
│       ├── mediums.txt
│       ├── movements.txt
│       └── negative.txt
├── example_workflows
│   ├── Thumbs.db
│   ├── animate_3D.jpg
│   ├── animate_3D.json
│   ├── audio_split_stems.jpg
│   ├── audio_split_stems.json
│   ├── background_removal.jpg
│   ├── background_removal.json
│   ├── background_removal_video.jpg
│   ├── background_removal_video.json
│   ├── face-expression-transfer_HelloMeme_image.jpg
│   ├── face-expression-transfer_HelloMeme_image.json
│   ├── face-expression-transfer_HelloMeme_video.jpg
│   ├── face-expression-transfer_HelloMeme_video.json
│   ├── face_styler.jpg
│   ├── face_styler.json
│   ├── flux_dev.jpg
│   ├── flux_dev.json
│   ├── flux_inpainting.jpg
│   ├── flux_inpainting.json
│   ├── flux_redux.jpg
│   ├── flux_redux.json
│   ├── flux_schnell_fp8.jpg
│   ├── flux_schnell_fp8.json
│   ├── frame-blend-time-remapping.jpg
│   ├── frame-blend-time-remapping.json
│   ├── img2vid.jpg
│   ├── img2vid.json
│   ├── layer_diffusion.jpg
│   ├── layer_diffusion.json
│   ├── mars-id.jpg
│   ├── mars-id.json
│   ├── mochi_preview.jpg
│   ├── mochi_preview.json
│   ├── ominicontrol.jpg
│   ├── ominicontrol.json
│   ├── outpaint.jpg
│   ├── outpaint.json
│   ├── remix_flux_schnell.jpg
│   ├── remix_flux_schnell.json
│   ├── seamless-texture-generator.json
│   ├── stable_audio.jpg
│   ├── stable_audio.json
│   ├── texture_flow.jpg
│   ├── texture_flow.json
│   ├── txt2img_SDXL.jpg
│   ├── txt2img_SDXL.json
│   ├── txt2vid.jpg
│   ├── txt2vid.json
│   ├── upscaler.jpg
│   ├── upscaler.json
│   ├── vid2vid_sdxl.jpg
│   ├── vid2vid_sdxl.json
│   ├── video_FX.jpg
│   ├── video_FX.json
│   ├── video_upscaler.jpg
│   ├── video_upscaler.json
│   └── workflow_test_audio_separator.json
├── general_utils.py
├── img_utils
│   ├── animation.py
│   ├── depth.png
│   ├── depth_nodes.py
│   ├── depth_segmentation.py
│   ├── gpt_nodes.py
│   ├── hist_matcher.py
│   ├── image.png
│   ├── img_nodes.py
│   ├── img_utils.py
│   └── test.py
├── ip_adapter_utils
│   ├── exploration_state.py
│   └── moodmix_utils.py
├── js
│   └── eden_number_display.js
├── logic
│   └── logic_nodes.py
├── pyproject.toml
├── requirements.txt
└── video_utils
    ├── fill_image_mask.py
    ├── fill_utils.py
    ├── gradient_mask_video.py
    ├── mask_workflow.json
    └── video_interpolation.py
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | *.pyc
3 | chatgpt/
4 | old_example_code/
5 | *.mp4
6 | .env
7 | ip_adapter_utils/img_embeds
8 | video_utils/test_output
9 |
10 | *CLAUDE.md
11 | video_utils/test_output
12 | video_utils/test_assets
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # 🌱 Eden ComfyUI Custom Node Suite
4 | A comprehensive collection of specialized ComfyUI nodes for advanced generative AI workflows
5 | Developed by [Eden.art](https://www.eden.art/)
6 |
7 |
8 | ---
9 |
10 | ## 📖 Overview
11 |
12 | This repository contains 70+ custom ComfyUI nodes designed to enhance creative AI workflows with advanced image processing, depth manipulation, AI-powered text generation, logical operations, and sophisticated video effects. These nodes power many of the creative tools available on [Eden.art](https://www.eden.art/).
13 |
14 | ## 🚀 Quick Start
15 |
16 | ### Installation
17 | ```bash
18 | cd ComfyUI/custom_nodes/
19 | git clone https://github.com/edenartlab/eden_comfy_pipelines.git
20 | cd eden_comfy_pipelines
21 | pip install -r requirements.txt
22 | ```
23 |
24 | ### OpenAI Setup (Optional)
25 | For GPT-powered nodes, create a `.env` file in your ComfyUI root directory:
26 | ```
27 | OPENAI_API_KEY=your_api_key_here
28 | ```
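
The GPT-powered nodes read this key from the environment. As a minimal sketch of how that typically works (assuming `python-dotenv`; the exact loading code inside the nodes may differ):

```python
import os
from dotenv import load_dotenv  # assumes the python-dotenv package is installed

load_dotenv()  # reads the .env file from the current working directory (ComfyUI root)
api_key = os.getenv("OPENAI_API_KEY")
if api_key is None:
    raise RuntimeError("OPENAI_API_KEY not set; GPT-powered nodes will be unavailable")
```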
29 |
30 | ---
31 |
32 | ## 🎯 Node Categories
33 |
34 | ### 🤖 AI & GPT Integration
35 |
36 | #### **GPT4 Text Completion**
37 |
38 |
39 | Advanced text generation with GPT-4 integration:
40 | - **Eden_gpt4_node**: Direct GPT-4 API integration with customizable models (gpt-4o, gpt-4-turbo)
41 | - **Eden_GPTPromptEnhancer**: Intelligently enhance basic prompts with detailed descriptions and artistic elements
42 | - **Eden_GPTStructuredOutput**: Generate structured JSON responses following custom schemas
43 |
44 | #### **GPT4 Vision & Image Analysis**
45 |
46 |
47 | Powerful image understanding capabilities:
48 | - **ImageDescriptionNode**: Generate detailed captions and descriptions from images
49 | - **CLIP_Interrogator**: Extract comprehensive text descriptions using CLIP + BLIP models
50 |
51 | ---
52 |
53 | ### 🖼️ Advanced Image Processing
54 |
55 | #### **Smart Image Loading & Management**
56 |
57 |
58 | Intelligent batch processing tools:
59 | - **LoadRandomImage**: Process multiple images with automatic aspect ratio correction
60 | - **ImageFolderIterator**: Sequential image loading with index-based selection
61 | - **LoadImagesByFilename**: Batch load images by filename patterns
62 |
63 | #### **Precision Image Manipulation**
64 |
65 |
66 | Professional image handling:
67 | - **SaveImageAdvanced**: Enhanced saving with timestamps and metadata export
68 | - **VAEDecode_to_folder**: Direct-to-disk VAE decoding for long sequences
69 | - **IMG_padder/IMG_unpadder**: Smart padding with edge-color matching
70 | - **IMG_scaler**: Mathematical operations on pixel values
71 | - **IMG_blender**: Advanced image blending with weight control
72 |
73 | #### **Color & Mask Generation**
74 |
75 |
76 | Advanced segmentation tools:
77 | - **MaskFromRGB_KMeans**: Generate precise masks using K-means color clustering (see the sketch below)
78 | - **Eden_MaskCombiner**: Combine multiple masks with sophisticated blending
79 | - **ConvertToGrayscale**: Professional grayscale conversion with alpha handling
80 | - **HistogramMatching**: Match color distributions between images
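
A minimal sketch of the K-means idea behind **MaskFromRGB_KMeans** (illustrative only, not the node's actual implementation; the function name and parameters below are made up for the example):

```python
import numpy as np
from sklearn.cluster import KMeans

def rgb_kmeans_masks(image_rgb: np.ndarray, n_clusters: int = 4) -> list[np.ndarray]:
    """Cluster pixel colors and return one boolean (H, W) mask per color cluster."""
    h, w, _ = image_rgb.shape
    pixels = image_rgb.reshape(-1, 3).astype(np.float32)
    labels = KMeans(n_clusters=n_clusters, n_init=10, random_state=0).fit_predict(pixels)
    labels = labels.reshape(h, w)
    return [labels == k for k in range(n_clusters)]
```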
81 |
82 | ---
83 |
84 | ### 🎭 Face Processing & Detection
85 |
86 | Comprehensive face manipulation toolkit:
87 | - **Eden_FaceToMask**: Automatic face detection and mask generation using MediaPipe (a simplified sketch follows below)
88 | - **Eden_Face_Crop**: Intelligent face cropping with padding and boundary handling
89 | - **Eden_ImageMaskComposite**: Advanced image compositing with mask support
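
A simplified sketch of the detection-to-mask idea (illustrative only; it uses MediaPipe's face-detection solution with a hypothetical helper name, not the node's actual code):

```python
import numpy as np
import mediapipe as mp

def face_bbox_mask(rgb_image: np.ndarray) -> np.ndarray:
    """Return an (H, W) float mask that is 1.0 inside the first detected face box, else 0.0."""
    h, w, _ = rgb_image.shape
    mask = np.zeros((h, w), dtype=np.float32)
    with mp.solutions.face_detection.FaceDetection(
        model_selection=1, min_detection_confidence=0.5
    ) as detector:
        results = detector.process(rgb_image)  # expects a uint8 RGB array
    if results.detections:
        box = results.detections[0].location_data.relative_bounding_box
        x0, y0 = int(box.xmin * w), int(box.ymin * h)
        x1, y1 = int((box.xmin + box.width) * w), int((box.ymin + box.height) * h)
        mask[max(y0, 0):y1, max(x0, 0):x1] = 1.0
    return mask
```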
90 |
91 | ---
92 |
93 | ### 📐 Depth & 3D Effects
94 |
95 | #### **Depth Analysis & Manipulation**
96 |
97 |
98 | Create stunning depth-based effects:
99 | - **DepthSlicer**: Generate targeted masks from depth maps using K-means clustering
100 | - **Eden_DepthSlice_MaskVideo**: Animated depth slicing for video sequences
101 | - **ParallaxZoom**: 3D parallax effects from depth maps
102 |
103 | #### **3D Parallax Animation**
104 |
105 |
106 | - **ParallaxZoom**: Create immersive Deforum-style 3D zoom effects
107 | - **AspectPadImageForOutpainting**: Intelligent padding for consistent aspect ratios
108 |
109 | ---
110 |
111 | ### 🎲 Logic & Control Flow
112 |
113 | #### **Data Types & Comparisons**
114 |
115 |
116 | Essential workflow control nodes:
117 | - **Eden_RandomNumberSampler**: Visual random number generation with live display
118 | - **Eden_Compare**: Advanced comparison operations for any data type
119 | - **Eden_IfExecute**: Conditional execution based on boolean logic
120 | - **Eden_BoolBinaryOperation**: Boolean algebra operations (AND, OR, XOR, etc.)
121 |
122 | #### **Utilities & Conversion**
123 | - **Eden_String/Int/Float/Bool**: Type conversion and value passing
124 | - **Eden_Math**: Mathematical operations on numeric values
125 | - **Eden_StringHash**: Generate deterministic hashes from strings
126 | - **Eden_Debug_Anything**: Comprehensive debugging with type analysis
127 |
128 | ---
129 |
130 | ### 📁 File & Data Management
131 |
132 | #### **File Operations**
133 | - **GetRandomFile**: Random file selection from directories
134 | - **FolderScanner**: Comprehensive folder analysis and file listing
135 | - **Eden_AllMediaLoader**: Load various media types with validation
136 | - **Eden_Save_Param_Dict**: Export workflow parameters as JSON
137 |
138 | #### **Random Sampling & Selection**
139 | - **Eden_RandomFilepathSampler**: Statistical file path sampling
140 | - **Eden_RandomPromptFromFile**: Load random prompts from text files
141 | - **Eden_randbool**: Random boolean generation with probability control
142 |
143 | ---
144 |
145 | ### 🎬 Video & Animation
146 |
147 | #### **Video Processing**
148 |
149 |
150 | Advanced video manipulation:
151 | - **VideoFrameSelector**: Intelligent frame selection with temporal optimization
152 | - **KeyframeBlender**: Smooth keyframe interpolation for animations
153 | - **MaskedRegionVideoExport**: Export specific regions from video sequences
154 | - **Extend_Sequence**: Loop and extend video sequences with various modes
155 |
156 | #### **Animation Tools**
157 | - **Animation_RGB_Mask**: Create animated masks from RGB data
158 | - **AnimatedShapeMaskNode**: Generate animated geometric masks
159 | - **OrganicFillNode**: Organic mask filling for seamless animations
160 |
161 | ---
162 |
163 | ### 🔄 IP Adapter & Embeddings
164 |
165 | Advanced conditioning and style transfer:
166 | - **Random_Style_Mixture**: Blend multiple style embeddings randomly
167 | - **Linear_Combine_IP_Embeds**: Linear combination of IP adapter embeddings
168 | - **SavePosEmbeds/Load_Embeddings_From_Folder**: Embedding management system
169 | - **IP_Adapter_Settings_Distribution**: Control IP adapter influence distribution
170 |
171 | ---
172 |
173 | ## 🎨 Featured Workflows
174 |
175 | The repository includes example workflows in `example_workflows/` showcasing:
176 | - **3D Animation**: Depth-based parallax effects
177 | - **Audio Processing**: Stem separation and audio manipulation
178 | - **Face Styling**: Expression transfer and face manipulation
179 | - **Video Effects**: Frame blending and time remapping
180 | - **AI Generation**: FLUX, SDXL, and other model workflows
181 |
182 | ---
183 |
184 | ## 🛠️ Technical Features
185 |
186 | ### **Memory Optimization**
187 | - **LatentTypeConversion**: Convert between float16/float32 for memory efficiency (see the sketch below)
188 | - **Eden_RepeatLatentBatch**: Efficient latent batch processing
189 | - Smart tensor management across GPU/CPU
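
As a rough illustration of the float16 idea (not the node's exact code; ComfyUI latents are dicts holding a `"samples"` tensor):

```python
import torch

latent = {"samples": torch.randn(4, 4, 128, 128, dtype=torch.float32)}
latent_fp16 = {**latent, "samples": latent["samples"].to(torch.float16)}

bytes_fp32 = latent["samples"].element_size() * latent["samples"].nelement()
bytes_fp16 = latent_fp16["samples"].element_size() * latent_fp16["samples"].nelement()
print(f"{bytes_fp32} bytes -> {bytes_fp16} bytes")  # casting halves the memory of the stored latent
```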
190 |
191 | ### **Resolution & Aspect Ratio**
192 | - **WidthHeightPicker**: Smart resolution selection with multiple constraints
193 | - **IMG_resolution_multiple_of**: Ensure dimensions are multiples of specific values
194 | - **AspectPadImageForOutpainting**: Professional aspect ratio handling
195 |
196 | ### **String & Text Processing**
197 | - **Eden_StringReplace**: Advanced string replacement with regex support
198 | - **Eden_Regex_Replace**: Powerful regex pattern matching
199 | - **Eden_RandomPromptFromFile**: Dynamic prompt loading from files
200 |
201 | ---
202 |
203 | ## 🎯 Use Cases
204 |
205 | - **Creative AI Workflows**: Professional image and video generation
206 | - **Batch Processing**: Automated processing of large image sets
207 | - **3D Effects**: Depth-based animations and parallax effects
208 | - **Face Processing**: Portrait enhancement and manipulation
209 | - **Content Creation**: Automated caption generation and description
210 | - **Video Production**: Advanced video effects and frame manipulation
211 | - **Research & Development**: Experimental AI workflows and testing
212 |
213 | ---
214 |
215 | ## 📚 Contributing
216 |
217 | We welcome contributions! To contribute workflows, check out our production workflows repository: [edenartlab/workflows](https://github.com/edenartlab/workflows)
218 |
219 | ## 📄 License
220 |
221 | This project is licensed under the MIT License - see the LICENSE file for details.
222 |
223 | ---
224 |
225 |
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
4 |
5 | from clip_utils.clip_tools import CLIP_Interrogator
6 | from img_utils.img_nodes import *
7 | from img_utils.depth_nodes import *
8 | from img_utils.gpt_nodes import *
9 | from img_utils.hist_matcher import HistogramMatching
10 | from logic.logic_nodes import *
11 | from img_utils.animation import Animation_RGB_Mask, AnimatedShapeMaskNode
12 | from video_utils.gradient_mask_video import KeyframeBlender, MaskedRegionVideoExport
13 | from ip_adapter_utils.moodmix_utils import *
14 | from video_utils.video_interpolation import VideoFrameSelector
15 | from video_utils.fill_image_mask import OrganicFillNode
16 | from general_utils import *
17 |
18 | WEB_DIRECTORY = "./js"
19 |
20 | NODE_CLASS_MAPPINGS = {
21 | "CLIP_Interrogator": CLIP_Interrogator,
22 | "Eden_IMG_padder": IMG_padder,
23 | "Eden_IMG_unpadder": IMG_unpadder,
24 | "IMG_scaler": IMG_scaler,
25 | "IMG_blender": IMG_blender,
26 | "ConvertToGrayscale": ConvertToGrayscale,
27 | "LoadRandomImage": LoadRandomImage,
28 | "VAEDecode_to_folder": VAEDecode_to_folder,
29 | "HistogramMatching": HistogramMatching,
30 | "LatentTypeConversion": LatentTypeConversion,
31 | "IMG_resolution_multiple_of": IMG_resolution_multiple_of,
32 | "Eden_Compare": Eden_Compare,
33 | "Eden_Int": Eden_Int,
34 | "Eden_Float": Eden_Float,
35 | "Eden_Bool": Eden_Bool,
36 | "Eden_BoolBinaryOperation": Eden_BoolBinaryOperation,
37 | "Eden_String": Eden_String,
38 | "If ANY execute A else B": Eden_IfExecute,
39 | "MaskFromRGB_KMeans": MaskFromRGB_KMeans,
40 | "GetRandomFile": GetRandomFile,
41 | "Animation_RGB_Mask": Animation_RGB_Mask,
42 | "AnimatedShapeMaskNode": AnimatedShapeMaskNode,
43 | "ImageDescriptionNode": ImageDescriptionNode,
44 | "Eden_gpt4_node": Eden_gpt4_node,
45 | "Eden_GPTPromptEnhancer": Eden_GPTPromptEnhancer,
46 | "Eden_GPTStructuredOutput": Eden_GPTStructuredOutput,
47 | "FolderScanner": FolderScanner,
48 | "SavePosEmbeds": SavePosEmbeds,
49 | "VideoFrameSelector": VideoFrameSelector,
50 | "LoadImagesByFilename": LoadImagesByFilename,
51 | "Random_Style_Mixture": Random_Style_Mixture,
52 | "Linear_Combine_IP_Embeds": Linear_Combine_IP_Embeds,
53 | "SaveImageAdvanced": SaveImageAdvanced,
54 | "Load_Embeddings_From_Folder": Load_Embeddings_From_Folder,
55 | "Get_Prefixed_Imgs": Get_Prefixed_Imgs,
56 | "WidthHeightPicker": WidthHeightPicker,
57 | "DepthSlicer": DepthSlicer,
58 | "ParallaxZoom": ParallaxZoom,
59 | "AspectPadImageForOutpainting": AspectPadImageForOutpainting,
60 | "Eden_MaskBoundingBox": Eden_MaskBoundingBox,
61 | "Eden_Seed": Eden_Seed,
62 | "Eden_RepeatLatentBatch": Eden_RepeatLatentBatch,
63 | "Extend_Sequence": Extend_Sequence,
64 | "Eden_DetermineFrameCount": Eden_DetermineFrameCount,
65 | "Eden_Math": Eden_Math,
66 | "Eden_IntToFloat": Eden_IntToFloat,
67 | "Eden_FloatToInt": Eden_FloatToInt,
68 | "Eden_Image_Math": Eden_Image_Math,
69 | "IP_Adapter_Settings_Distribution": IP_Adapter_Settings_Distribution,
70 | "Eden_StringHash": Eden_StringHash,
71 | "ImageFolderIterator": ImageFolderIterator,
72 | "Eden_MaskCombiner": Eden_MaskCombiner,
73 | "Eden_DepthSlice_MaskVideo": Eden_DepthSlice_MaskVideo,
74 | "KeyframeBlender": KeyframeBlender,
75 | "MaskedRegionVideoExport": MaskedRegionVideoExport,
76 | "Eden_RandomPromptFromFile": Eden_RandomPromptFromFile,
77 | "Eden_StringReplace": Eden_StringReplace,
78 | "Eden_randbool": Eden_randbool,
79 | "Eden_Face_Crop": Eden_Face_Crop,
80 | "SDTypeConverter": SDTypeConverter,
81 | "SDAnyConverter": SDAnyConverter,
82 | "Eden_FaceToMask": Eden_FaceToMask,
83 | "Eden_ImageMaskComposite": Eden_ImageMaskComposite,
84 | "Eden_Regex_Replace": Eden_Regex_Replace,
85 | "Eden_Debug_Anything": Eden_Debug_Anything,
86 | "Eden_RandomNumberSampler": Eden_RandomNumberSampler,
87 | "Eden_RandomFilepathSampler": Eden_RandomFilepathSampler,
88 | "Eden_AllMediaLoader": Eden_AllMediaLoader,
89 | "Eden_Save_Param_Dict": Eden_Save_Param_Dict,
90 | "OrganicFillNode": OrganicFillNode
91 | }
92 |
93 | NODE_DISPLAY_NAME_MAPPINGS = {
94 | "Eden_RandomNumberSampler": "Random Number Sampler 🎲",
95 | "Eden_RandomFilepathSampler": "Random Filepath Sampler 🎲",
96 | "Eden_AllMediaLoader": "All Media Loader 📁",
97 | "Eden_Save_Param_Dict": "Save Param Dict 📁",
98 | "OrganicFillNode": "Organic Fill Mask Animation",
99 | "AnimatedShapeMaskNode": "Animated Shape Mask"
100 | }
101 |
102 | try:
103 | from random_conditioning.random_c_utils import *
104 | # add keys:
105 | NODE_CLASS_MAPPINGS_ADD = {
106 | "SaveConditioning": Eden_SaveConditioning,
107 | "LoadConditioning": Eden_LoadConditioning,
108 | "Inspect_Conditioning": Eden_Inspect_Conditioning,
109 | "Eden_RandomConditioningSamplerNode": Eden_RandomConditioningSamplerNode,
110 | "Eden_Load_Legacy_Conditioning": Eden_Load_Legacy_Conditioning,
111 | "Eden_get_random_file_from_folder": Eden_get_random_file_from_folder
112 | }
113 | NODE_CLASS_MAPPINGS.update(NODE_CLASS_MAPPINGS_ADD)
114 | except Exception:
115 |     pass  # optional random_conditioning extra is unavailable; skip registering those nodes
116 |
117 |
118 | def print_eden_banner():
119 | """
120 | Prints a decorative banner for the Eden ComfyUI pack on load
121 | """
122 |
123 | green = "\033[32m"
124 | reset = "\033[0m"
125 | bold = "\033[1m"
126 |
127 | banner = f"""
128 | {green}🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱{reset}
129 | {bold}🌱 Eden ComfyUI Pack maintained by {green}https://eden.art/ 🌱{reset}
130 | {green}🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱🌱{reset}
131 | """
132 | print(banner)
133 |
134 | # Print the banner once when the package is imported
135 | print_eden_banner()
--------------------------------------------------------------------------------
/assets/CLIP_interrogator.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/assets/CLIP_interrogator.png
--------------------------------------------------------------------------------
/assets/SaveImage.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/assets/SaveImage.png
--------------------------------------------------------------------------------
/assets/VAEDecode_to_folder.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/assets/VAEDecode_to_folder.png
--------------------------------------------------------------------------------
/assets/depthslicer.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/assets/depthslicer.jpg
--------------------------------------------------------------------------------
/assets/eden.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/assets/eden.png
--------------------------------------------------------------------------------
/assets/eden_gpt4_node.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/assets/eden_gpt4_node.jpg
--------------------------------------------------------------------------------
/assets/imagedescriptionnode.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/assets/imagedescriptionnode.jpg
--------------------------------------------------------------------------------
/assets/loadrandomimage.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/assets/loadrandomimage.jpg
--------------------------------------------------------------------------------
/assets/maskfromrgb_kmeans.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/assets/maskfromrgb_kmeans.jpg
--------------------------------------------------------------------------------
/assets/parallaxzoom.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/assets/parallaxzoom.jpg
--------------------------------------------------------------------------------
/assets/random_number.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/assets/random_number.jpg
--------------------------------------------------------------------------------
/clip_utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .clip_interrogator import Config, Interrogator, LabelTable, list_caption_models, list_clip_models, load_list
2 |
3 | __version__ = '0.6.0'
4 | __author__ = 'pharmapsychotic'
--------------------------------------------------------------------------------
/clip_utils/clip_tools.py:
--------------------------------------------------------------------------------
1 | import os
2 | import re
3 | import sys
4 | import time
5 |
6 | import numpy as np
7 | import torch
8 | import PIL.Image
9 | from PIL import Image
10 |
11 | # custom version of clip_interrogator which downloads to the ComfyUI models dir:
12 | from .clip_interrogator import Interrogator, Config
13 |
14 |
15 | sys.path.append('..')
16 | import folder_paths
17 |
18 | def comfy_tensor_to_pil(tensor):
19 | # Clone the tensor and detach it from the computation graph
20 | tensor = tensor.clone().detach()
21 |
22 | # Normalize the tensor if it's not already in [0, 1]
23 | if torch.max(tensor) > 1:
24 | print("Normalizing tensor to [0, 1]")
25 | tensor = torch.div(tensor, 255)
26 |
27 | # Convert to PIL Image and return
28 | return Image.fromarray((tensor.cpu().numpy() * 255).astype(np.uint8))
29 |
30 | # Global variable to hold the model
31 | global_interrogator_model = None
32 |
33 | class CLIP_Interrogator:
34 | def __init__(self):
35 | self.ci = None
36 | self.keep_model_alive = False
37 |
38 | @classmethod
39 | def INPUT_TYPES(s):
40 | return {
41 | "required": {
42 | "image": ("IMAGE",),
43 | "mode": (["fast", "full"], ),
44 | "keep_model_alive": ("BOOLEAN", {"default": True}),
45 | "prepend_blip_caption": ("BOOLEAN", {"default": True}),
46 | "save_prompt_to_txt_file": ("STRING", {"default": "clip_interrogator_prompt.txt"}),
47 | }
48 | }
49 |
50 | RETURN_TYPES = ("STRING","STRING")
51 | RETURN_NAMES = ("full_prompt", "blip_caption")
52 | FUNCTION = "interrogate"
53 | CATEGORY = "Eden 🌱"
54 |
55 | def interrogate(self, image, mode="fast", keep_model_alive=True, prepend_blip_caption = True, save_prompt_to_txt_file=None):
56 |
57 | self.keep_model_alive = keep_model_alive
58 |
59 | print(f"Interrogating image with mode {mode}, keep_model_alive={keep_model_alive}")
60 |
61 | # ci expects a PIL image, but we get a torch tensor:
62 | if image.shape[0] > 1:
63 | print("Warning: CLIP_Interrogator expects a single image, but got a batch. Using first image in batch.")
64 |
65 | pil_image = comfy_tensor_to_pil(image[0])
66 |
67 | clip_model_dir = os.path.join(str(folder_paths.models_dir), "clip")
68 | os.makedirs(clip_model_dir, exist_ok=True)
69 |
70 | ci = self.load_ci(clip_model_path=clip_model_dir)
71 |
72 | if prepend_blip_caption:
73 | prepend_caption = None
74 | else:
75 | prepend_caption = " " # make sure there is a space so that the prompt is not joined with the caption
76 |
77 | if mode == "fast":
78 | prompt = ci.interrogate_fast(pil_image, caption = prepend_caption)
79 | else:
80 | prompt = ci.interrogate(pil_image, caption = prepend_caption)
81 |
82 | blip_caption = ci.generate_caption(pil_image)
83 |
84 | blip_caption = self.clean_prompt(blip_caption)
85 | prompt = self.clean_prompt(prompt)
86 |
87 | print(f"Interogated prompt: {prompt}")
88 |
89 | if save_prompt_to_txt_file:
90 | if not save_prompt_to_txt_file.endswith(".txt"):
91 | save_prompt_to_txt_file += ".txt"
92 |
93 | # Make sure the path is absolute:
94 | save_prompt_to_txt_file = os.path.abspath(save_prompt_to_txt_file)
95 |
96 | # Make sure the directory exists:
97 | os.makedirs(os.path.dirname(save_prompt_to_txt_file), exist_ok=True)
98 |
99 | with open(save_prompt_to_txt_file, "w", encoding="utf-8") as f:
100 | f.write(prompt)
101 | print(f"Saved interrogated prompt to {save_prompt_to_txt_file}")
102 |
103 | return (prompt, blip_caption)
104 |
105 | def load_ci(self, clip_model_path=None):
106 | global global_interrogator_model
107 |
108 | if self.ci is None:
109 | if global_interrogator_model:
110 | self.ci = global_interrogator_model
111 | else:
112 | BLIP_MODEL_DIR = os.path.abspath(os.path.join(str(folder_paths.models_dir), "blip"))
113 | self.ci = Interrogator(Config(clip_model_path=clip_model_path, clip_model_name="ViT-L-14/openai", cache_dir=BLIP_MODEL_DIR))
114 |
115 | if self.keep_model_alive:
116 | global_interrogator_model = self.ci
117 | else:
118 | global_interrogator_model = None
119 |
120 | return self.ci
121 |
122 | def clean_prompt(self, text):
123 | text = text.replace("arafed", "")
124 |
125 | # Replace double spaces with single space
126 | text = re.sub(r'\s+', ' ', text)
127 |
128 | # Replace double commas with single comma
129 | text = re.sub(r',+', ',', text)
130 |
131 | # Remove spaces before commas
132 | text = re.sub(r'\s+,', ',', text)
133 |
134 | # Ensure space after commas (if not followed by another punctuation or end of string)
135 | text = re.sub(r',([^\s\.,;?!])', r', \1', text)
136 |
137 | # Trim spaces around periods and ensure one space after
138 | text = re.sub(r'\s*\.\s*', '. ', text)
139 |
140 | # Remove leading commas
141 | text = re.sub(r'^,', '', text)
142 |
143 | # Capitalize the first letter of the sentence
144 | text = text[0].upper() + text[1:] if text else text
145 |
146 | # convert to utf-8:
147 | text = text.encode('utf-8', 'ignore').decode('utf-8')
148 |
149 | return text.strip()
--------------------------------------------------------------------------------
/clip_utils/data/mediums.txt:
--------------------------------------------------------------------------------
1 | a 3D render
2 | a black and white photo
3 | a bronze sculpture
4 | a cartoon
5 | a cave painting
6 | a character portrait
7 | a charcoal drawing
8 | a child's drawing
9 | a color pencil sketch
10 | a colorized photo
11 | a comic book panel
12 | a computer rendering
13 | a cross stitch
14 | a cubist painting
15 | a detailed drawing
16 | a detailed matte painting
17 | a detailed painting
18 | a diagram
19 | a digital painting
20 | a digital rendering
21 | a drawing
22 | a fine art painting
23 | a flemish Baroque
24 | a gouache
25 | a hologram
26 | a hyperrealistic painting
27 | a jigsaw puzzle
28 | a low poly render
29 | a macro photograph
30 | a manga drawing
31 | a marble sculpture
32 | a matte painting
33 | a microscopic photo
34 | a mid-nineteenth century engraving
35 | a minimalist painting
36 | a mosaic
37 | a painting
38 | a pastel
39 | a pencil sketch
40 | a photo
41 | a photocopy
42 | a photorealistic painting
43 | a picture
44 | a pointillism painting
45 | a polaroid photo
46 | a pop art painting
47 | a portrait
48 | a poster
49 | a raytraced image
50 | a renaissance painting
51 | a screenprint
52 | a screenshot
53 | a silk screen
54 | a sketch
55 | a statue
56 | a still life
57 | a stipple
58 | a stock photo
59 | a storybook illustration
60 | a surrealist painting
61 | a surrealist sculpture
62 | a tattoo
63 | a tilt shift photo
64 | a watercolor painting
65 | a wireframe diagram
66 | a woodcut
67 | an abstract drawing
68 | an abstract painting
69 | an abstract sculpture
70 | an acrylic painting
71 | an airbrush painting
72 | an album cover
73 | an ambient occlusion render
74 | an anime drawing
75 | an art deco painting
76 | an art deco sculpture
77 | an engraving
78 | an etching
79 | an illustration of
80 | an impressionist painting
81 | an ink drawing
82 | an oil on canvas painting
83 | an oil painting
84 | an ultrafine detailed painting
85 | chalk art
86 | computer graphics
87 | concept art
88 | cyberpunk art
89 | digital art
90 | egyptian art
91 | graffiti art
92 | lineart
93 | pixel art
94 | poster art
95 | vector art
96 |
--------------------------------------------------------------------------------
/clip_utils/data/movements.txt:
--------------------------------------------------------------------------------
1 | abstract art
2 | abstract expressionism
3 | abstract illusionism
4 | academic art
5 | action painting
6 | aestheticism
7 | afrofuturism
8 | altermodern
9 | american barbizon school
10 | american impressionism
11 | american realism
12 | american romanticism
13 | american scene painting
14 | analytical art
15 | antipodeans
16 | arabesque
17 | arbeitsrat für kunst
18 | art & language
19 | art brut
20 | art deco
21 | art informel
22 | art nouveau
23 | art photography
24 | arte povera
25 | arts and crafts movement
26 | ascii art
27 | ashcan school
28 | assemblage
29 | australian tonalism
30 | auto-destructive art
31 | barbizon school
32 | baroque
33 | bauhaus
34 | bengal school of art
35 | berlin secession
36 | black arts movement
37 | brutalism
38 | classical realism
39 | cloisonnism
40 | cobra
41 | color field
42 | computer art
43 | conceptual art
44 | concrete art
45 | constructivism
46 | context art
47 | crayon art
48 | crystal cubism
49 | cubism
50 | cubo-futurism
51 | cynical realism
52 | dada
53 | danube school
54 | dau-al-set
55 | de stijl
56 | deconstructivism
57 | digital art
58 | ecological art
59 | environmental art
60 | excessivism
61 | expressionism
62 | fantastic realism
63 | fantasy art
64 | fauvism
65 | feminist art
66 | figuration libre
67 | figurative art
68 | figurativism
69 | fine art
70 | fluxus
71 | folk art
72 | funk art
73 | furry art
74 | futurism
75 | generative art
76 | geometric abstract art
77 | german romanticism
78 | gothic art
79 | graffiti
80 | gutai group
81 | happening
82 | harlem renaissance
83 | heidelberg school
84 | holography
85 | hudson river school
86 | hurufiyya
87 | hypermodernism
88 | hyperrealism
89 | impressionism
90 | incoherents
91 | institutional critique
92 | interactive art
93 | international gothic
94 | international typographic style
95 | kinetic art
96 | kinetic pointillism
97 | kitsch movement
98 | land art
99 | les automatistes
100 | les nabis
101 | letterism
102 | light and space
103 | lowbrow
104 | lyco art
105 | lyrical abstraction
106 | magic realism
107 | magical realism
108 | mail art
109 | mannerism
110 | massurrealism
111 | maximalism
112 | metaphysical painting
113 | mingei
114 | minimalism
115 | modern european ink painting
116 | modernism
117 | modular constructivism
118 | naive art
119 | naturalism
120 | neo-dada
121 | neo-expressionism
122 | neo-fauvism
123 | neo-figurative
124 | neo-primitivism
125 | neo-romanticism
126 | neoclassicism
127 | neogeo
128 | neoism
129 | neoplasticism
130 | net art
131 | new objectivity
132 | new sculpture
133 | northwest school
134 | nuclear art
135 | objective abstraction
136 | op art
137 | optical illusion
138 | orphism
139 | panfuturism
140 | paris school
141 | photorealism
142 | pixel art
143 | plasticien
144 | plein air
145 | pointillism
146 | pop art
147 | pop surrealism
148 | post-impressionism
149 | postminimalism
150 | pre-raphaelitism
151 | precisionism
152 | primitivism
153 | private press
154 | process art
155 | psychedelic art
156 | purism
157 | qajar art
158 | quito school
159 | rasquache
160 | rayonism
161 | realism
162 | regionalism
163 | remodernism
164 | renaissance
165 | retrofuturism
166 | rococo
167 | romanesque
168 | romanticism
169 | samikshavad
170 | serial art
171 | shin hanga
172 | shock art
173 | socialist realism
174 | sots art
175 | space art
176 | street art
177 | stuckism
178 | sumatraism
179 | superflat
180 | suprematism
181 | surrealism
182 | symbolism
183 | synchromism
184 | synthetism
185 | sōsaku hanga
186 | tachisme
187 | temporary art
188 | tonalism
189 | toyism
190 | transgressive art
191 | ukiyo-e
192 | underground comix
193 | unilalianism
194 | vancouver school
195 | vanitas
196 | verdadism
197 | video art
198 | viennese actionism
199 | visual art
200 | vorticism
201 |
--------------------------------------------------------------------------------
/clip_utils/data/negative.txt:
--------------------------------------------------------------------------------
1 | 3d
2 | b&w
3 | bad anatomy
4 | bad art
5 | blur
6 | blurry
7 | cartoon
8 | childish
9 | close up
10 | deformed
11 | disconnected limbs
12 | disfigured
13 | disgusting
14 | extra limb
15 | extra limbs
16 | floating limbs
17 | grain
18 | illustration
19 | kitsch
20 | long body
21 | long neck
22 | low quality
23 | low-res
24 | malformed hands
25 | mangled
26 | missing limb
27 | mutated
28 | mutation
29 | mutilated
30 | noisy
31 | old
32 | out of focus
33 | over saturation
34 | oversaturated
35 | poorly drawn
36 | poorly drawn face
37 | poorly drawn hands
38 | render
39 | surreal
40 | ugly
41 | weird colors
--------------------------------------------------------------------------------
/example_workflows/Thumbs.db:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/example_workflows/Thumbs.db
--------------------------------------------------------------------------------
/example_workflows/animate_3D.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/example_workflows/animate_3D.jpg
--------------------------------------------------------------------------------
/example_workflows/audio_split_stems.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/example_workflows/audio_split_stems.jpg
--------------------------------------------------------------------------------
/example_workflows/audio_split_stems.json:
--------------------------------------------------------------------------------
1 | {
2 | "last_node_id": 31,
3 | "last_link_id": 34,
4 | "nodes": [
5 | {
6 | "id": 23,
7 | "type": "SaveAudio",
8 | "pos": [
9 | 470,
10 | 480
11 | ],
12 | "size": [
13 | 315,
14 | 100
15 | ],
16 | "flags": {},
17 | "order": 5,
18 | "mode": 0,
19 | "inputs": [
20 | {
21 | "name": "audio",
22 | "type": "AUDIO",
23 | "link": 24
24 | }
25 | ],
26 | "outputs": [],
27 | "properties": {
28 | "Node name for S&R": "SaveAudio"
29 | },
30 | "widgets_values": [
31 | "audio/audio-separation-drums",
32 | null
33 | ]
34 | },
35 | {
36 | "id": 24,
37 | "type": "SaveAudio",
38 | "pos": [
39 | 470,
40 | 640
41 | ],
42 | "size": [
43 | 315,
44 | 100
45 | ],
46 | "flags": {},
47 | "order": 7,
48 | "mode": 0,
49 | "inputs": [
50 | {
51 | "name": "audio",
52 | "type": "AUDIO",
53 | "link": 25
54 | }
55 | ],
56 | "outputs": [],
57 | "properties": {
58 | "Node name for S&R": "SaveAudio"
59 | },
60 | "widgets_values": [
61 | "audio/audio-separation-other",
62 | null
63 | ]
64 | },
65 | {
66 | "id": 25,
67 | "type": "SaveAudio",
68 | "pos": [
69 | 470,
70 | 800
71 | ],
72 | "size": [
73 | 315,
74 | 100
75 | ],
76 | "flags": {},
77 | "order": 8,
78 | "mode": 0,
79 | "inputs": [
80 | {
81 | "name": "audio",
82 | "type": "AUDIO",
83 | "link": 26
84 | }
85 | ],
86 | "outputs": [],
87 | "properties": {
88 | "Node name for S&R": "SaveAudio"
89 | },
90 | "widgets_values": [
91 | "audio/audio-separation-vocals",
92 | null
93 | ]
94 | },
95 | {
96 | "id": 11,
97 | "type": "AudioSeparation",
98 | "pos": [
99 | 45,
100 | 494
101 | ],
102 | "size": [
103 | 315,
104 | 166
105 | ],
106 | "flags": {},
107 | "order": 2,
108 | "mode": 0,
109 | "inputs": [
110 | {
111 | "name": "audio",
112 | "type": "AUDIO",
113 | "link": 19
114 | }
115 | ],
116 | "outputs": [
117 | {
118 | "name": "Bass",
119 | "type": "AUDIO",
120 | "links": [
121 | 23,
122 | 29
123 | ],
124 | "slot_index": 0,
125 | "shape": 3
126 | },
127 | {
128 | "name": "Drums",
129 | "type": "AUDIO",
130 | "links": [
131 | 24,
132 | 30
133 | ],
134 | "slot_index": 1,
135 | "shape": 3
136 | },
137 | {
138 | "name": "Other",
139 | "type": "AUDIO",
140 | "links": [
141 | 25,
142 | 31
143 | ],
144 | "slot_index": 2,
145 | "shape": 3
146 | },
147 | {
148 | "name": "Vocals",
149 | "type": "AUDIO",
150 | "links": [
151 | 26
152 | ],
153 | "slot_index": 3,
154 | "shape": 3
155 | }
156 | ],
157 | "properties": {
158 | "Node name for S&R": "AudioSeparation"
159 | },
160 | "widgets_values": [
161 | "half_sine",
162 | 16,
163 | 0.1
164 | ]
165 | },
166 | {
167 | "id": 27,
168 | "type": "AudioCombine",
169 | "pos": [
170 | 52,
171 | 976
172 | ],
173 | "size": [
174 | 315,
175 | 78
176 | ],
177 | "flags": {},
178 | "order": 9,
179 | "mode": 0,
180 | "inputs": [
181 | {
182 | "name": "audio_1",
183 | "type": "AUDIO",
184 | "link": 27
185 | },
186 | {
187 | "name": "audio_2",
188 | "type": "AUDIO",
189 | "link": 31
190 | }
191 | ],
192 | "outputs": [
193 | {
194 | "name": "AUDIO",
195 | "type": "AUDIO",
196 | "links": [
197 | 28
198 | ],
199 | "slot_index": 0,
200 | "shape": 3
201 | }
202 | ],
203 | "properties": {
204 | "Node name for S&R": "AudioCombine"
205 | },
206 | "widgets_values": [
207 | "add"
208 | ]
209 | },
210 | {
211 | "id": 26,
212 | "type": "AudioCombine",
213 | "pos": [
214 | 53,
215 | 828
216 | ],
217 | "size": [
218 | 315,
219 | 78
220 | ],
221 | "flags": {},
222 | "order": 6,
223 | "mode": 0,
224 | "inputs": [
225 | {
226 | "name": "audio_1",
227 | "type": "AUDIO",
228 | "link": 29
229 | },
230 | {
231 | "name": "audio_2",
232 | "type": "AUDIO",
233 | "link": 30
234 | }
235 | ],
236 | "outputs": [
237 | {
238 | "name": "AUDIO",
239 | "type": "AUDIO",
240 | "links": [
241 | 27
242 | ],
243 | "slot_index": 0,
244 | "shape": 3
245 | }
246 | ],
247 | "properties": {
248 | "Node name for S&R": "AudioCombine"
249 | },
250 | "widgets_values": [
251 | "add"
252 | ]
253 | },
254 | {
255 | "id": 28,
256 | "type": "SaveAudio",
257 | "pos": [
258 | 467,
259 | 968
260 | ],
261 | "size": [
262 | 315,
263 | 100
264 | ],
265 | "flags": {},
266 | "order": 10,
267 | "mode": 0,
268 | "inputs": [
269 | {
270 | "name": "audio",
271 | "type": "AUDIO",
272 | "link": 28
273 | }
274 | ],
275 | "outputs": [],
276 | "properties": {
277 | "Node name for S&R": "SaveAudio"
278 | },
279 | "widgets_values": [
280 | "audio/instrumentals/track",
281 | null
282 | ]
283 | },
284 | {
285 | "id": 18,
286 | "type": "AudioCrop",
287 | "pos": [
288 | -322,
289 | 508
290 | ],
291 | "size": [
292 | 315,
293 | 82
294 | ],
295 | "flags": {},
296 | "order": 1,
297 | "mode": 0,
298 | "inputs": [
299 | {
300 | "name": "audio",
301 | "type": "AUDIO",
302 | "link": 32
303 | }
304 | ],
305 | "outputs": [
306 | {
307 | "name": "AUDIO",
308 | "type": "AUDIO",
309 | "links": [
310 | 19,
311 | 34
312 | ],
313 | "slot_index": 0,
314 | "shape": 3
315 | }
316 | ],
317 | "properties": {
318 | "Node name for S&R": "AudioCrop"
319 | },
320 | "widgets_values": [
321 | "0:00",
322 | "10:00"
323 | ]
324 | },
325 | {
326 | "id": 22,
327 | "type": "SaveAudio",
328 | "pos": [
329 | 463,
330 | 320
331 | ],
332 | "size": [
333 | 315,
334 | 100
335 | ],
336 | "flags": {},
337 | "order": 4,
338 | "mode": 0,
339 | "inputs": [
340 | {
341 | "name": "audio",
342 | "type": "AUDIO",
343 | "link": 23
344 | }
345 | ],
346 | "outputs": [],
347 | "properties": {
348 | "Node name for S&R": "SaveAudio"
349 | },
350 | "widgets_values": [
351 | "audio/audio-separation-bass",
352 | null
353 | ]
354 | },
355 | {
356 | "id": 31,
357 | "type": "PreviewAudio",
358 | "pos": [
359 | -319.37188720703125,
360 | 362.6708068847656
361 | ],
362 | "size": [
363 | 315,
364 | 76
365 | ],
366 | "flags": {},
367 | "order": 3,
368 | "mode": 0,
369 | "inputs": [
370 | {
371 | "name": "audio",
372 | "type": "AUDIO",
373 | "link": 34
374 | }
375 | ],
376 | "outputs": [],
377 | "properties": {
378 | "Node name for S&R": "PreviewAudio"
379 | },
380 | "widgets_values": [
381 | null
382 | ]
383 | },
384 | {
385 | "id": 30,
386 | "type": "LoadAudioPlus",
387 | "pos": [
388 | -685.44384765625,
389 | 512.709716796875
390 | ],
391 | "size": [
392 | 315,
393 | 78
394 | ],
395 | "flags": {},
396 | "order": 0,
397 | "mode": 0,
398 | "inputs": [],
399 | "outputs": [
400 | {
401 | "name": "audio",
402 | "type": "AUDIO",
403 | "links": [
404 | 32
405 | ],
406 | "slot_index": 0
407 | },
408 | {
409 | "name": "audio_info",
410 | "type": "AUDIOINFO",
411 | "links": null
412 | }
413 | ],
414 | "properties": {
415 | "Node name for S&R": "LoadAudioPlus"
416 | },
417 | "widgets_values": [
418 | "promp+it+up.mp3"
419 | ]
420 | }
421 | ],
422 | "links": [
423 | [
424 | 19,
425 | 18,
426 | 0,
427 | 11,
428 | 0,
429 | "AUDIO"
430 | ],
431 | [
432 | 23,
433 | 11,
434 | 0,
435 | 22,
436 | 0,
437 | "AUDIO"
438 | ],
439 | [
440 | 24,
441 | 11,
442 | 1,
443 | 23,
444 | 0,
445 | "AUDIO"
446 | ],
447 | [
448 | 25,
449 | 11,
450 | 2,
451 | 24,
452 | 0,
453 | "AUDIO"
454 | ],
455 | [
456 | 26,
457 | 11,
458 | 3,
459 | 25,
460 | 0,
461 | "AUDIO"
462 | ],
463 | [
464 | 27,
465 | 26,
466 | 0,
467 | 27,
468 | 0,
469 | "AUDIO"
470 | ],
471 | [
472 | 28,
473 | 27,
474 | 0,
475 | 28,
476 | 0,
477 | "AUDIO"
478 | ],
479 | [
480 | 29,
481 | 11,
482 | 0,
483 | 26,
484 | 0,
485 | "AUDIO"
486 | ],
487 | [
488 | 30,
489 | 11,
490 | 1,
491 | 26,
492 | 1,
493 | "AUDIO"
494 | ],
495 | [
496 | 31,
497 | 11,
498 | 2,
499 | 27,
500 | 1,
501 | "AUDIO"
502 | ],
503 | [
504 | 32,
505 | 30,
506 | 0,
507 | 18,
508 | 0,
509 | "AUDIO"
510 | ],
511 | [
512 | 34,
513 | 18,
514 | 0,
515 | 31,
516 | 0,
517 | "AUDIO"
518 | ]
519 | ],
520 | "groups": [],
521 | "config": {},
522 | "extra": {
523 | "ds": {
524 | "scale": 0.8264462809917359,
525 | "offset": [
526 | 1059.8919086958717,
527 | -32.3409173010354
528 | ]
529 | },
530 | "node_versions": {
531 | "comfy-core": "0.3.13",
532 | "audio-separation-nodes-comfyui": "31a4567726e035097cc2d1f767767908a6fda2ea",
533 | "comfyui-dreambait-nodes": "c518b81ee4d2bff187f03fa7541de36d6a4c2008"
534 | },
535 | "VHS_latentpreview": false,
536 | "VHS_latentpreviewrate": 0
537 | },
538 | "version": 0.4
539 | }
--------------------------------------------------------------------------------
/example_workflows/background_removal.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/example_workflows/background_removal.jpg
--------------------------------------------------------------------------------
/example_workflows/background_removal.json:
--------------------------------------------------------------------------------
1 | {
2 | "last_node_id": 61,
3 | "last_link_id": 152,
4 | "nodes": [
5 | {
6 | "id": 42,
7 | "type": "SaveImage",
8 | "pos": [
9 | 1551,
10 | 415
11 | ],
12 | "size": [
13 | 493.2388916015625,
14 | 373.78704833984375
15 | ],
16 | "flags": {},
17 | "order": 5,
18 | "mode": 0,
19 | "inputs": [
20 | {
21 | "name": "images",
22 | "type": "IMAGE",
23 | "link": 98
24 | }
25 | ],
26 | "outputs": [],
27 | "properties": {
28 | "Node name for S&R": "SaveImage"
29 | },
30 | "widgets_values": [
31 | "mask"
32 | ]
33 | },
34 | {
35 | "id": 33,
36 | "type": "ImageResize+",
37 | "pos": [
38 | 272,
39 | 273
40 | ],
41 | "size": [
42 | 315,
43 | 218
44 | ],
45 | "flags": {},
46 | "order": 1,
47 | "mode": 0,
48 | "inputs": [
49 | {
50 | "name": "image",
51 | "type": "IMAGE",
52 | "link": 82
53 | }
54 | ],
55 | "outputs": [
56 | {
57 | "name": "IMAGE",
58 | "type": "IMAGE",
59 | "links": [
60 | 148
61 | ],
62 | "slot_index": 0,
63 | "shape": 3
64 | },
65 | {
66 | "name": "width",
67 | "type": "INT",
68 | "links": null,
69 | "shape": 3
70 | },
71 | {
72 | "name": "height",
73 | "type": "INT",
74 | "links": null,
75 | "shape": 3
76 | }
77 | ],
78 | "properties": {
79 | "Node name for S&R": "ImageResize+"
80 | },
81 | "widgets_values": [
82 | 2048,
83 | 2048,
84 | "lanczos",
85 | "keep proportion",
86 | "downscale if bigger",
87 | 8
88 | ]
89 | },
90 | {
91 | "id": 60,
92 | "type": "InspyrenetRembg",
93 | "pos": [
94 | 626.6607666015625,
95 | 275
96 | ],
97 | "size": [
98 | 210,
99 | 78.2226333618164
100 | ],
101 | "flags": {},
102 | "order": 2,
103 | "mode": 0,
104 | "inputs": [
105 | {
106 | "name": "image",
107 | "type": "IMAGE",
108 | "link": 148
109 | }
110 | ],
111 | "outputs": [
112 | {
113 | "name": "IMAGE",
114 | "type": "IMAGE",
115 | "links": [
116 | 149
117 | ],
118 | "slot_index": 0,
119 | "shape": 3
120 | },
121 | {
122 | "name": "MASK",
123 | "type": "MASK",
124 | "links": [
125 | 150,
126 | 151
127 | ],
128 | "slot_index": 1,
129 | "shape": 3
130 | }
131 | ],
132 | "properties": {
133 | "Node name for S&R": "InspyrenetRembg"
134 | },
135 | "widgets_values": [
136 | "default"
137 | ]
138 | },
139 | {
140 | "id": 38,
141 | "type": "SaveImage",
142 | "pos": [
143 | 1552,
144 | -33
145 | ],
146 | "size": [
147 | 493.2388916015625,
148 | 373.78704833984375
149 | ],
150 | "flags": {},
151 | "order": 8,
152 | "mode": 0,
153 | "inputs": [
154 | {
155 | "name": "images",
156 | "type": "IMAGE",
157 | "link": 141
158 | }
159 | ],
160 | "outputs": [],
161 | "properties": {
162 | "Node name for S&R": "SaveImage"
163 | },
164 | "widgets_values": [
165 | "transparent_foreground"
166 | ]
167 | },
168 | {
169 | "id": 61,
170 | "type": "SaveImage",
171 | "pos": [
172 | 2075,
173 | -33
174 | ],
175 | "size": [
176 | 493.2388916015625,
177 | 373.78704833984375
178 | ],
179 | "flags": {},
180 | "order": 10,
181 | "mode": 0,
182 | "inputs": [
183 | {
184 | "name": "images",
185 | "type": "IMAGE",
186 | "link": 152
187 | }
188 | ],
189 | "outputs": [],
190 | "properties": {
191 | "Node name for S&R": "SaveImage"
192 | },
193 | "widgets_values": [
194 | "rgb_black_background"
195 | ]
196 | },
197 | {
198 | "id": 3,
199 | "type": "LoadImage",
200 | "pos": [
201 | -340,
202 | 275
203 | ],
204 | "size": [
205 | 589.18896484375,
206 | 468.7266845703125
207 | ],
208 | "flags": {},
209 | "order": 0,
210 | "mode": 0,
211 | "inputs": [],
212 | "outputs": [
213 | {
214 | "name": "IMAGE",
215 | "type": "IMAGE",
216 | "links": [
217 | 82
218 | ],
219 | "slot_index": 0,
220 | "shape": 3
221 | },
222 | {
223 | "name": "MASK",
224 | "type": "MASK",
225 | "links": null,
226 | "shape": 3
227 | }
228 | ],
229 | "properties": {
230 | "Node name for S&R": "LoadImage"
231 | },
232 | "widgets_values": [
233 | "Eden-6687f4891873c23e3a53e55f-___-66955f55151fa193cb510948.png",
234 | "image"
235 | ]
236 | },
237 | {
238 | "id": 52,
239 | "type": "InvertMask",
240 | "pos": [
241 | 630,
242 | 410
243 | ],
244 | "size": [
245 | 210,
246 | 26
247 | ],
248 | "flags": {},
249 | "order": 4,
250 | "mode": 0,
251 | "inputs": [
252 | {
253 | "name": "mask",
254 | "type": "MASK",
255 | "link": 151
256 | }
257 | ],
258 | "outputs": [
259 | {
260 | "name": "MASK",
261 | "type": "MASK",
262 | "links": [
263 | 134
264 | ],
265 | "slot_index": 0,
266 | "shape": 3
267 | }
268 | ],
269 | "properties": {
270 | "Node name for S&R": "InvertMask"
271 | },
272 | "widgets_values": []
273 | },
274 | {
275 | "id": 41,
276 | "type": "MaskToImage",
277 | "pos": [
278 | 860,
279 | 275
280 | ],
281 | "size": [
282 | 176.39999389648438,
283 | 26
284 | ],
285 | "flags": {},
286 | "order": 3,
287 | "mode": 0,
288 | "inputs": [
289 | {
290 | "name": "mask",
291 | "type": "MASK",
292 | "link": 150
293 | }
294 | ],
295 | "outputs": [
296 | {
297 | "name": "IMAGE",
298 | "type": "IMAGE",
299 | "links": [
300 | 98,
301 | 130
302 | ],
303 | "slot_index": 0,
304 | "shape": 3
305 | }
306 | ],
307 | "properties": {
308 | "Node name for S&R": "MaskToImage"
309 | },
310 | "widgets_values": []
311 | },
312 | {
313 | "id": 53,
314 | "type": "ImageBlend",
315 | "pos": [
316 | 1090,
317 | 270
318 | ],
319 | "size": [
320 | 315,
321 | 102
322 | ],
323 | "flags": {},
324 | "order": 7,
325 | "mode": 0,
326 | "inputs": [
327 | {
328 | "name": "image1",
329 | "type": "IMAGE",
330 | "link": 149
331 | },
332 | {
333 | "name": "image2",
334 | "type": "IMAGE",
335 | "link": 132
336 | }
337 | ],
338 | "outputs": [
339 | {
340 | "name": "IMAGE",
341 | "type": "IMAGE",
342 | "links": [
343 | 141,
344 | 143
345 | ],
346 | "slot_index": 0,
347 | "shape": 3
348 | }
349 | ],
350 | "properties": {
351 | "Node name for S&R": "ImageBlend"
352 | },
353 | "widgets_values": [
354 | 1,
355 | "multiply"
356 | ]
357 | },
358 | {
359 | "id": 54,
360 | "type": "JoinImageWithAlpha",
361 | "pos": [
362 | 860,
363 | 390
364 | ],
365 | "size": [
366 | 176.39999389648438,
367 | 46
368 | ],
369 | "flags": {},
370 | "order": 6,
371 | "mode": 0,
372 | "inputs": [
373 | {
374 | "name": "image",
375 | "type": "IMAGE",
376 | "link": 130
377 | },
378 | {
379 | "name": "alpha",
380 | "type": "MASK",
381 | "link": 134
382 | }
383 | ],
384 | "outputs": [
385 | {
386 | "name": "IMAGE",
387 | "type": "IMAGE",
388 | "links": [
389 | 132
390 | ],
391 | "slot_index": 0,
392 | "shape": 3
393 | }
394 | ],
395 | "properties": {
396 | "Node name for S&R": "JoinImageWithAlpha"
397 | },
398 | "widgets_values": []
399 | },
400 | {
401 | "id": 57,
402 | "type": "Images to RGB",
403 | "pos": [
404 | 1191.251953125,
405 | -12.17456340789795
406 | ],
407 | "size": [
408 | 210,
409 | 26
410 | ],
411 | "flags": {},
412 | "order": 9,
413 | "mode": 0,
414 | "inputs": [
415 | {
416 | "name": "images",
417 | "type": "IMAGE",
418 | "link": 143
419 | }
420 | ],
421 | "outputs": [
422 | {
423 | "name": "IMAGE",
424 | "type": "IMAGE",
425 | "links": [
426 | 152
427 | ],
428 | "slot_index": 0,
429 | "shape": 3
430 | }
431 | ],
432 | "properties": {
433 | "Node name for S&R": "Images to RGB"
434 | },
435 | "widgets_values": []
436 | }
437 | ],
438 | "links": [
439 | [
440 | 82,
441 | 3,
442 | 0,
443 | 33,
444 | 0,
445 | "IMAGE"
446 | ],
447 | [
448 | 98,
449 | 41,
450 | 0,
451 | 42,
452 | 0,
453 | "IMAGE"
454 | ],
455 | [
456 | 130,
457 | 41,
458 | 0,
459 | 54,
460 | 0,
461 | "IMAGE"
462 | ],
463 | [
464 | 132,
465 | 54,
466 | 0,
467 | 53,
468 | 1,
469 | "IMAGE"
470 | ],
471 | [
472 | 134,
473 | 52,
474 | 0,
475 | 54,
476 | 1,
477 | "MASK"
478 | ],
479 | [
480 | 141,
481 | 53,
482 | 0,
483 | 38,
484 | 0,
485 | "IMAGE"
486 | ],
487 | [
488 | 143,
489 | 53,
490 | 0,
491 | 57,
492 | 0,
493 | "IMAGE"
494 | ],
495 | [
496 | 148,
497 | 33,
498 | 0,
499 | 60,
500 | 0,
501 | "IMAGE"
502 | ],
503 | [
504 | 149,
505 | 60,
506 | 0,
507 | 53,
508 | 0,
509 | "IMAGE"
510 | ],
511 | [
512 | 150,
513 | 60,
514 | 1,
515 | 41,
516 | 0,
517 | "MASK"
518 | ],
519 | [
520 | 151,
521 | 60,
522 | 1,
523 | 52,
524 | 0,
525 | "MASK"
526 | ],
527 | [
528 | 152,
529 | 57,
530 | 0,
531 | 61,
532 | 0,
533 | "IMAGE"
534 | ]
535 | ],
536 | "groups": [],
537 | "config": {},
538 | "extra": {
539 | "ds": {
540 | "scale": 0.6588450000000601,
541 | "offset": [
542 | 508.40587808805503,
543 | 417.73558576208325
544 | ]
545 | },
546 | "node_versions": {
547 | "comfy-core": "0.3.13",
548 | "ComfyUI_essentials": "33ff89fd354d8ec3ab6affb605a79a931b445d99",
549 | "comfyui-inspyrenet-rembg": "87ac452ef1182e8f35f59b04010158d74dcefd06",
550 | "pr-was-node-suite-comfyui-47064894": "056badacda52e88d29d6a65f9509cd3115ace0f2"
551 | },
552 | "VHS_latentpreview": false,
553 | "VHS_latentpreviewrate": 0
554 | },
555 | "version": 0.4
556 | }
--------------------------------------------------------------------------------
/example_workflows/background_removal_video.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/example_workflows/background_removal_video.jpg
--------------------------------------------------------------------------------
/example_workflows/background_removal_video.json:
--------------------------------------------------------------------------------
1 | {
2 | "last_node_id": 21,
3 | "last_link_id": 37,
4 | "nodes": [
5 | {
6 | "id": 8,
7 | "type": "ImageResize+",
8 | "pos": [
9 | 851,
10 | 874
11 | ],
12 | "size": [
13 | 286.8852233886719,
14 | 218
15 | ],
16 | "flags": {},
17 | "order": 1,
18 | "mode": 0,
19 | "inputs": [
20 | {
21 | "name": "image",
22 | "type": "IMAGE",
23 | "link": 11
24 | }
25 | ],
26 | "outputs": [
27 | {
28 | "name": "IMAGE",
29 | "type": "IMAGE",
30 | "links": [
31 | 33
32 | ],
33 | "slot_index": 0,
34 | "shape": 3
35 | },
36 | {
37 | "name": "width",
38 | "type": "INT",
39 | "links": null,
40 | "shape": 3
41 | },
42 | {
43 | "name": "height",
44 | "type": "INT",
45 | "links": null,
46 | "shape": 3
47 | }
48 | ],
49 | "properties": {
50 | "Node name for S&R": "ImageResize+"
51 | },
52 | "widgets_values": [
53 | 2048,
54 | 2048,
55 | "lanczos",
56 | "keep proportion",
57 | "downscale if bigger",
58 | 8
59 | ]
60 | },
61 | {
62 | "id": 21,
63 | "type": "InspyrenetRembg",
64 | "pos": [
65 | 1307,
66 | 872
67 | ],
68 | "size": [
69 | 315,
70 | 78
71 | ],
72 | "flags": {},
73 | "order": 3,
74 | "mode": 0,
75 | "inputs": [
76 | {
77 | "name": "image",
78 | "type": "IMAGE",
79 | "link": 33
80 | }
81 | ],
82 | "outputs": [
83 | {
84 | "name": "IMAGE",
85 | "type": "IMAGE",
86 | "links": [
87 | 37
88 | ],
89 | "slot_index": 0,
90 | "shape": 3
91 | },
92 | {
93 | "name": "MASK",
94 | "type": "MASK",
95 | "links": [],
96 | "slot_index": 1,
97 | "shape": 3
98 | }
99 | ],
100 | "properties": {
101 | "Node name for S&R": "InspyrenetRembg"
102 | },
103 | "widgets_values": [
104 | "on"
105 | ]
106 | },
107 | {
108 | "id": 5,
109 | "type": "VHS_VideoInfo",
110 | "pos": [
111 | 1313,
112 | 1008
113 | ],
114 | "size": [
115 | 311.90643310546875,
116 | 206
117 | ],
118 | "flags": {},
119 | "order": 2,
120 | "mode": 0,
121 | "inputs": [
122 | {
123 | "name": "video_info",
124 | "type": "VHS_VIDEOINFO",
125 | "link": 4
126 | }
127 | ],
128 | "outputs": [
129 | {
130 | "name": "source_fps🟨",
131 | "type": "FLOAT",
132 | "links": [
133 | 5
134 | ],
135 | "slot_index": 0,
136 | "shape": 3
137 | },
138 | {
139 | "name": "source_frame_count🟨",
140 | "type": "INT",
141 | "links": null,
142 | "shape": 3
143 | },
144 | {
145 | "name": "source_duration🟨",
146 | "type": "FLOAT",
147 | "links": null,
148 | "shape": 3
149 | },
150 | {
151 | "name": "source_width🟨",
152 | "type": "INT",
153 | "links": null,
154 | "shape": 3
155 | },
156 | {
157 | "name": "source_height🟨",
158 | "type": "INT",
159 | "links": null,
160 | "shape": 3
161 | },
162 | {
163 | "name": "loaded_fps🟦",
164 | "type": "FLOAT",
165 | "links": null,
166 | "shape": 3
167 | },
168 | {
169 | "name": "loaded_frame_count🟦",
170 | "type": "INT",
171 | "links": null,
172 | "shape": 3
173 | },
174 | {
175 | "name": "loaded_duration🟦",
176 | "type": "FLOAT",
177 | "links": null,
178 | "shape": 3
179 | },
180 | {
181 | "name": "loaded_width🟦",
182 | "type": "INT",
183 | "links": null,
184 | "shape": 3
185 | },
186 | {
187 | "name": "loaded_height🟦",
188 | "type": "INT",
189 | "links": null,
190 | "shape": 3
191 | }
192 | ],
193 | "properties": {
194 | "Node name for S&R": "VHS_VideoInfo"
195 | },
196 | "widgets_values": {}
197 | },
198 | {
199 | "id": 3,
200 | "type": "VHS_VideoCombine",
201 | "pos": [
202 | 1686,
203 | 872
204 | ],
205 | "size": [
206 | 307.333984375,
207 | 282
208 | ],
209 | "flags": {},
210 | "order": 4,
211 | "mode": 0,
212 | "inputs": [
213 | {
214 | "name": "images",
215 | "type": "IMAGE",
216 | "link": 37
217 | },
218 | {
219 | "name": "audio",
220 | "type": "AUDIO",
221 | "link": 6,
222 | "shape": 7
223 | },
224 | {
225 | "name": "meta_batch",
226 | "type": "VHS_BatchManager",
227 | "link": null,
228 | "shape": 7
229 | },
230 | {
231 | "name": "vae",
232 | "type": "VAE",
233 | "link": null,
234 | "shape": 7
235 | },
236 | {
237 | "name": "frame_rate",
238 | "type": "FLOAT",
239 | "link": 5,
240 | "widget": {
241 | "name": "frame_rate"
242 | }
243 | }
244 | ],
245 | "outputs": [
246 | {
247 | "name": "Filenames",
248 | "type": "VHS_FILENAMES",
249 | "links": null,
250 | "shape": 3
251 | }
252 | ],
253 | "properties": {
254 | "Node name for S&R": "VHS_VideoCombine"
255 | },
256 | "widgets_values": {
257 | "frame_rate": 8,
258 | "loop_count": 1,
259 | "filename_prefix": "background_removed",
260 | "format": "video/ProRes",
261 | "profile": "4",
262 | "pingpong": false,
263 | "save_output": true,
264 | "videopreview": {
265 | "hidden": false,
266 | "paused": false,
267 | "params": {
268 | "filename": "background_removed_00001.mov",
269 | "subfolder": "",
270 | "type": "output",
271 | "format": "video/ProRes",
272 | "frame_rate": 30
273 | },
274 | "muted": false
275 | }
276 | }
277 | },
278 | {
279 | "id": 1,
280 | "type": "VHS_LoadVideo",
281 | "pos": [
282 | 515,
283 | 875
284 | ],
285 | "size": [
286 | 306.6362609863281,
287 | 610.63623046875
288 | ],
289 | "flags": {},
290 | "order": 0,
291 | "mode": 0,
292 | "inputs": [
293 | {
294 | "name": "meta_batch",
295 | "type": "VHS_BatchManager",
296 | "link": null,
297 | "shape": 7
298 | },
299 | {
300 | "name": "vae",
301 | "type": "VAE",
302 | "link": null,
303 | "shape": 7
304 | }
305 | ],
306 | "outputs": [
307 | {
308 | "name": "IMAGE",
309 | "type": "IMAGE",
310 | "links": [
311 | 11
312 | ],
313 | "slot_index": 0,
314 | "shape": 3
315 | },
316 | {
317 | "name": "frame_count",
318 | "type": "INT",
319 | "links": null,
320 | "shape": 3
321 | },
322 | {
323 | "name": "audio",
324 | "type": "AUDIO",
325 | "links": [
326 | 6
327 | ],
328 | "slot_index": 2,
329 | "shape": 3
330 | },
331 | {
332 | "name": "video_info",
333 | "type": "VHS_VIDEOINFO",
334 | "links": [
335 | 4
336 | ],
337 | "slot_index": 3,
338 | "shape": 3
339 | }
340 | ],
341 | "properties": {
342 | "Node name for S&R": "VHS_LoadVideo"
343 | },
344 | "widgets_values": {
345 | "video": "fire-pulse.mp4",
346 | "force_rate": 0,
347 | "custom_width": 0,
348 | "custom_height": 0,
349 | "frame_load_cap": 0,
350 | "skip_first_frames": 0,
351 | "select_every_nth": 1,
352 | "format": "AnimateDiff",
353 | "choose video to upload": "image",
354 | "videopreview": {
355 | "hidden": false,
356 | "paused": false,
357 | "params": {
358 | "frame_load_cap": 0,
359 | "skip_first_frames": 0,
360 | "force_rate": 0,
361 | "select_every_nth": 1,
362 | "filename": "fire-pulse.mp4",
363 | "type": "input",
364 | "format": "video/mp4"
365 | },
366 | "muted": false
367 | }
368 | }
369 | }
370 | ],
371 | "links": [
372 | [
373 | 4,
374 | 1,
375 | 3,
376 | 5,
377 | 0,
378 | "VHS_VIDEOINFO"
379 | ],
380 | [
381 | 5,
382 | 5,
383 | 0,
384 | 3,
385 | 4,
386 | "FLOAT"
387 | ],
388 | [
389 | 6,
390 | 1,
391 | 2,
392 | 3,
393 | 1,
394 | "AUDIO"
395 | ],
396 | [
397 | 11,
398 | 1,
399 | 0,
400 | 8,
401 | 0,
402 | "IMAGE"
403 | ],
404 | [
405 | 33,
406 | 8,
407 | 0,
408 | 21,
409 | 0,
410 | "IMAGE"
411 | ],
412 | [
413 | 37,
414 | 21,
415 | 0,
416 | 3,
417 | 0,
418 | "IMAGE"
419 | ]
420 | ],
421 | "groups": [],
422 | "config": {},
423 | "extra": {
424 | "ds": {
425 | "scale": 0.8954302432553654,
426 | "offset": [
427 | -300.6409807077343,
428 | -642.3231546873209
429 | ]
430 | },
431 | "node_versions": {
432 | "ComfyUI_essentials": "33ff89fd354d8ec3ab6affb605a79a931b445d99",
433 | "comfyui-inspyrenet-rembg": "87ac452ef1182e8f35f59b04010158d74dcefd06",
434 | "comfyui-videohelpersuite": "1.5.0"
435 | },
436 | "VHS_latentpreview": false,
437 | "VHS_latentpreviewrate": 0
438 | },
439 | "version": 0.4
440 | }
--------------------------------------------------------------------------------
/example_workflows/face-expression-transfer_HelloMeme_image.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/example_workflows/face-expression-transfer_HelloMeme_image.jpg
--------------------------------------------------------------------------------
/example_workflows/face-expression-transfer_HelloMeme_image.json:
--------------------------------------------------------------------------------
1 | {
2 | "last_node_id": 65,
3 | "last_link_id": 133,
4 | "nodes": [
5 | {
6 | "id": 2,
7 | "type": "HMFaceToolkitsLoader",
8 | "pos": [
9 | 43,
10 | 699
11 | ],
12 | "size": [
13 | 315,
14 | 82
15 | ],
16 | "flags": {},
17 | "order": 0,
18 | "mode": 0,
19 | "inputs": [],
20 | "outputs": [
21 | {
22 | "name": "face_toolkits",
23 | "type": "FACE_TOOLKITS",
24 | "links": [
25 | 57,
26 | 62,
27 | 65,
28 | 119,
29 | 127
30 | ],
31 | "slot_index": 0
32 | }
33 | ],
34 | "properties": {
35 | "Node name for S&R": "HMFaceToolkitsLoader"
36 | },
37 | "widgets_values": [
38 | 0,
39 | "huggingface"
40 | ]
41 | },
42 | {
43 | "id": 39,
44 | "type": "CropPortrait",
45 | "pos": [
46 | 516,
47 | 537
48 | ],
49 | "size": [
50 | 241.79998779296875,
51 | 46
52 | ],
53 | "flags": {},
54 | "order": 6,
55 | "mode": 0,
56 | "inputs": [
57 | {
58 | "name": "image",
59 | "type": "IMAGE",
60 | "link": 128
61 | },
62 | {
63 | "name": "face_toolkits",
64 | "type": "FACE_TOOLKITS",
65 | "link": 57
66 | }
67 | ],
68 | "outputs": [
69 | {
70 | "name": "IMAGE",
71 | "type": "IMAGE",
72 | "links": [
73 | 118
74 | ],
75 | "slot_index": 0
76 | }
77 | ],
78 | "properties": {
79 | "Node name for S&R": "CropPortrait"
80 | },
81 | "widgets_values": []
82 | },
83 | {
84 | "id": 43,
85 | "type": "GetFaceLandmarks",
86 | "pos": [
87 | 465,
88 | 771
89 | ],
90 | "size": [
91 | 292.20001220703125,
92 | 46
93 | ],
94 | "flags": {},
95 | "order": 5,
96 | "mode": 0,
97 | "inputs": [
98 | {
99 | "name": "face_toolkits",
100 | "type": "FACE_TOOLKITS",
101 | "link": 62
102 | },
103 | {
104 | "name": "images",
105 | "type": "IMAGE",
106 | "link": 115
107 | }
108 | ],
109 | "outputs": [
110 | {
111 | "name": "landmarks",
112 | "type": "FACELANDMARKS222",
113 | "links": [
114 | 64,
115 | 126
116 | ],
117 | "slot_index": 0
118 | }
119 | ],
120 | "properties": {
121 | "Node name for S&R": "GetFaceLandmarks"
122 | },
123 | "widgets_values": []
124 | },
125 | {
126 | "id": 44,
127 | "type": "GetDrivePose",
128 | "pos": [
129 | 852,
130 | 661
131 | ],
132 | "size": [
133 | 304.79998779296875,
134 | 66
135 | ],
136 | "flags": {},
137 | "order": 7,
138 | "mode": 0,
139 | "inputs": [
140 | {
141 | "name": "face_toolkits",
142 | "type": "FACE_TOOLKITS",
143 | "link": 65
144 | },
145 | {
146 | "name": "images",
147 | "type": "IMAGE",
148 | "link": 116
149 | },
150 | {
151 | "name": "landmarks",
152 | "type": "FACELANDMARKS222",
153 | "link": 64
154 | }
155 | ],
156 | "outputs": [
157 | {
158 | "name": "drive_pose",
159 | "type": "DRIVE_POSE",
160 | "links": [
161 | 120
162 | ],
163 | "slot_index": 0
164 | }
165 | ],
166 | "properties": {
167 | "Node name for S&R": "GetDrivePose"
168 | },
169 | "widgets_values": []
170 | },
171 | {
172 | "id": 58,
173 | "type": "HMPipelineImage",
174 | "pos": [
175 | 1274,
176 | 584
177 | ],
178 | "size": [
179 | 315,
180 | 326
181 | ],
182 | "flags": {},
183 | "order": 9,
184 | "mode": 0,
185 | "inputs": [
186 | {
187 | "name": "hm_image_pipeline",
188 | "type": "HMIMAGEPIPELINE",
189 | "link": 129
190 | },
191 | {
192 | "name": "face_toolkits",
193 | "type": "FACE_TOOLKITS",
194 | "link": 119
195 | },
196 | {
197 | "name": "ref_image",
198 | "type": "IMAGE",
199 | "link": 118
200 | },
201 | {
202 | "name": "drive_pose",
203 | "type": "DRIVE_POSE",
204 | "link": 120
205 | },
206 | {
207 | "name": "drive_exp",
208 | "type": "DRIVE_EXPRESSION",
209 | "link": 124,
210 | "shape": 7
211 | },
212 | {
213 | "name": "drive_exp2",
214 | "type": "DRIVE_EXPRESSION2",
215 | "link": null,
216 | "shape": 7
217 | }
218 | ],
219 | "outputs": [
220 | {
221 | "name": "IMAGE",
222 | "type": "IMAGE",
223 | "links": [
224 | 131
225 | ],
226 | "slot_index": 0
227 | },
228 | {
229 | "name": "LATENT",
230 | "type": "LATENT",
231 | "links": null
232 | }
233 | ],
234 | "properties": {
235 | "Node name for S&R": "HMPipelineImage"
236 | },
237 | "widgets_values": [
238 | 0,
239 | "(best quality), highly detailed, ultra-detailed, headshot, person, well-placed five sense organs, looking at the viewer, centered composition, sharp focus, realistic skin texture",
240 | "",
241 | 25,
242 | 655833116581459,
243 | "randomize",
244 | 2,
245 | 0
246 | ]
247 | },
248 | {
249 | "id": 59,
250 | "type": "PreviewImage",
251 | "pos": [
252 | 2006,
253 | 588
254 | ],
255 | "size": [
256 | 210,
257 | 246
258 | ],
259 | "flags": {},
260 | "order": 11,
261 | "mode": 0,
262 | "inputs": [
263 | {
264 | "name": "images",
265 | "type": "IMAGE",
266 | "link": 133
267 | }
268 | ],
269 | "outputs": [],
270 | "properties": {
271 | "Node name for S&R": "PreviewImage"
272 | },
273 | "widgets_values": []
274 | },
275 | {
276 | "id": 61,
277 | "type": "GetDriveExpression",
278 | "pos": [
279 | 857,
280 | 823
281 | ],
282 | "size": [
283 | 292.20001220703125,
284 | 66
285 | ],
286 | "flags": {},
287 | "order": 8,
288 | "mode": 0,
289 | "inputs": [
290 | {
291 | "name": "face_toolkits",
292 | "type": "FACE_TOOLKITS",
293 | "link": 127
294 | },
295 | {
296 | "name": "images",
297 | "type": "IMAGE",
298 | "link": 125
299 | },
300 | {
301 | "name": "landmarks",
302 | "type": "FACELANDMARKS222",
303 | "link": 126
304 | }
305 | ],
306 | "outputs": [
307 | {
308 | "name": "drive_exp",
309 | "type": "DRIVE_EXPRESSION",
310 | "links": [
311 | 124
312 | ],
313 | "slot_index": 0
314 | }
315 | ],
316 | "properties": {
317 | "Node name for S&R": "GetDriveExpression"
318 | },
319 | "widgets_values": []
320 | },
321 | {
322 | "id": 63,
323 | "type": "HMImagePipelineLoader",
324 | "pos": [
325 | 817,
326 | 358
327 | ],
328 | "size": [
329 | 352.79998779296875,
330 | 178
331 | ],
332 | "flags": {},
333 | "order": 1,
334 | "mode": 0,
335 | "inputs": [],
336 | "outputs": [
337 | {
338 | "name": "hm_image_pipeline",
339 | "type": "HMIMAGEPIPELINE",
340 | "links": [
341 | 129
342 | ],
343 | "slot_index": 0
344 | }
345 | ],
346 | "properties": {
347 | "Node name for S&R": "HMImagePipelineLoader"
348 | },
349 | "widgets_values": [
350 | "SD1.5",
351 | "None",
352 | "same as checkpoint",
353 | "v2",
354 | "x1",
355 | "huggingface"
356 | ]
357 | },
358 | {
359 | "id": 65,
360 | "type": "ImageUpscaleWithModel",
361 | "pos": [
362 | 1630,
363 | 489
364 | ],
365 | "size": [
366 | 340.20001220703125,
367 | 46
368 | ],
369 | "flags": {},
370 | "order": 10,
371 | "mode": 0,
372 | "inputs": [
373 | {
374 | "name": "upscale_model",
375 | "type": "UPSCALE_MODEL",
376 | "link": 130
377 | },
378 | {
379 | "name": "image",
380 | "type": "IMAGE",
381 | "link": 131
382 | }
383 | ],
384 | "outputs": [
385 | {
386 | "name": "IMAGE",
387 | "type": "IMAGE",
388 | "links": [
389 | 133
390 | ],
391 | "slot_index": 0
392 | }
393 | ],
394 | "properties": {
395 | "Node name for S&R": "ImageUpscaleWithModel"
396 | },
397 | "widgets_values": []
398 | },
399 | {
400 | "id": 64,
401 | "type": "UpscaleModelLoader",
402 | "pos": [
403 | 1270,
404 | 397
405 | ],
406 | "size": [
407 | 315,
408 | 58
409 | ],
410 | "flags": {},
411 | "order": 2,
412 | "mode": 0,
413 | "inputs": [],
414 | "outputs": [
415 | {
416 | "name": "UPSCALE_MODEL",
417 | "type": "UPSCALE_MODEL",
418 | "links": [
419 | 130
420 | ],
421 | "slot_index": 0
422 | }
423 | ],
424 | "properties": {
425 | "Node name for S&R": "UpscaleModelLoader"
426 | },
427 | "widgets_values": [
428 | "RealESRGAN_x2plus.pth"
429 | ]
430 | },
431 | {
432 | "id": 57,
433 | "type": "LoadImage",
434 | "pos": [
435 | 36.06138229370117,
436 | 864.020751953125
437 | ],
438 | "size": [
439 | 315,
440 | 314
441 | ],
442 | "flags": {},
443 | "order": 3,
444 | "mode": 0,
445 | "inputs": [],
446 | "outputs": [
447 | {
448 | "name": "IMAGE",
449 | "type": "IMAGE",
450 | "links": [
451 | 115,
452 | 116,
453 | 125
454 | ],
455 | "slot_index": 0
456 | },
457 | {
458 | "name": "MASK",
459 | "type": "MASK",
460 | "links": null
461 | }
462 | ],
463 | "title": "expression",
464 | "properties": {
465 | "Node name for S&R": "LoadImage"
466 | },
467 | "widgets_values": [
468 | "eve-looking-back.png",
469 | "image"
470 | ]
471 | },
472 | {
473 | "id": 62,
474 | "type": "LoadImage",
475 | "pos": [
476 | 52,
477 | 289
478 | ],
479 | "size": [
480 | 315,
481 | 314
482 | ],
483 | "flags": {},
484 | "order": 4,
485 | "mode": 0,
486 | "inputs": [],
487 | "outputs": [
488 | {
489 | "name": "IMAGE",
490 | "type": "IMAGE",
491 | "links": [
492 | 128
493 | ],
494 | "slot_index": 0
495 | },
496 | {
497 | "name": "MASK",
498 | "type": "MASK",
499 | "links": null
500 | }
501 | ],
502 | "title": "face",
503 | "properties": {
504 | "Node name for S&R": "LoadImage"
505 | },
506 | "widgets_values": [
507 | "eve-poison-ivy.png",
508 | "image"
509 | ]
510 | }
511 | ],
512 | "links": [
513 | [
514 | 57,
515 | 2,
516 | 0,
517 | 39,
518 | 1,
519 | "FACE_TOOLKITS"
520 | ],
521 | [
522 | 62,
523 | 2,
524 | 0,
525 | 43,
526 | 0,
527 | "FACE_TOOLKITS"
528 | ],
529 | [
530 | 64,
531 | 43,
532 | 0,
533 | 44,
534 | 2,
535 | "FACELANDMARKS222"
536 | ],
537 | [
538 | 65,
539 | 2,
540 | 0,
541 | 44,
542 | 0,
543 | "FACE_TOOLKITS"
544 | ],
545 | [
546 | 115,
547 | 57,
548 | 0,
549 | 43,
550 | 1,
551 | "IMAGE"
552 | ],
553 | [
554 | 116,
555 | 57,
556 | 0,
557 | 44,
558 | 1,
559 | "IMAGE"
560 | ],
561 | [
562 | 118,
563 | 39,
564 | 0,
565 | 58,
566 | 2,
567 | "IMAGE"
568 | ],
569 | [
570 | 119,
571 | 2,
572 | 0,
573 | 58,
574 | 1,
575 | "FACE_TOOLKITS"
576 | ],
577 | [
578 | 120,
579 | 44,
580 | 0,
581 | 58,
582 | 3,
583 | "DRIVE_POSE"
584 | ],
585 | [
586 | 124,
587 | 61,
588 | 0,
589 | 58,
590 | 4,
591 | "DRIVE_EXPRESSION"
592 | ],
593 | [
594 | 125,
595 | 57,
596 | 0,
597 | 61,
598 | 1,
599 | "IMAGE"
600 | ],
601 | [
602 | 126,
603 | 43,
604 | 0,
605 | 61,
606 | 2,
607 | "FACELANDMARKS222"
608 | ],
609 | [
610 | 127,
611 | 2,
612 | 0,
613 | 61,
614 | 0,
615 | "FACE_TOOLKITS"
616 | ],
617 | [
618 | 128,
619 | 62,
620 | 0,
621 | 39,
622 | 0,
623 | "IMAGE"
624 | ],
625 | [
626 | 129,
627 | 63,
628 | 0,
629 | 58,
630 | 0,
631 | "HMIMAGEPIPELINE"
632 | ],
633 | [
634 | 130,
635 | 64,
636 | 0,
637 | 65,
638 | 0,
639 | "UPSCALE_MODEL"
640 | ],
641 | [
642 | 131,
643 | 58,
644 | 0,
645 | 65,
646 | 1,
647 | "IMAGE"
648 | ],
649 | [
650 | 133,
651 | 65,
652 | 0,
653 | 59,
654 | 0,
655 | "IMAGE"
656 | ]
657 | ],
658 | "groups": [],
659 | "config": {},
660 | "extra": {
661 | "ds": {
662 | "scale": 0.922959981770718,
663 | "offset": [
664 | 36.51902584314138,
665 | -106.24399222983358
666 | ]
667 | },
668 | "node_versions": {
669 | "comfyui_hellomeme": "1.0.2",
670 | "comfy-core": "0.3.13"
671 | },
672 | "VHS_latentpreview": true,
673 | "VHS_latentpreviewrate": 0
674 | },
675 | "version": 0.4
676 | }
--------------------------------------------------------------------------------
/example_workflows/face-expression-transfer_HelloMeme_video.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/example_workflows/face-expression-transfer_HelloMeme_video.jpg
--------------------------------------------------------------------------------
/example_workflows/face-expression-transfer_HelloMeme_video.json:
--------------------------------------------------------------------------------
1 | {
2 | "last_node_id": 59,
3 | "last_link_id": 118,
4 | "nodes": [
5 | {
6 | "id": 2,
7 | "type": "HMFaceToolkitsLoader",
8 | "pos": [
9 | 43,
10 | 699
11 | ],
12 | "size": [
13 | 315,
14 | 82
15 | ],
16 | "flags": {},
17 | "order": 0,
18 | "mode": 0,
19 | "inputs": [],
20 | "outputs": [
21 | {
22 | "name": "face_toolkits",
23 | "type": "FACE_TOOLKITS",
24 | "links": [
25 | 57,
26 | 62,
27 | 65,
28 | 97,
29 | 114
30 | ],
31 | "slot_index": 0
32 | }
33 | ],
34 | "properties": {
35 | "Node name for S&R": "HMFaceToolkitsLoader"
36 | },
37 | "widgets_values": [
38 | 0,
39 | "huggingface"
40 | ]
41 | },
42 | {
43 | "id": 39,
44 | "type": "CropPortrait",
45 | "pos": [
46 | 516,
47 | 537
48 | ],
49 | "size": [
50 | 241.79998779296875,
51 | 46
52 | ],
53 | "flags": {},
54 | "order": 5,
55 | "mode": 0,
56 | "inputs": [
57 | {
58 | "name": "image",
59 | "type": "IMAGE",
60 | "link": 116
61 | },
62 | {
63 | "name": "face_toolkits",
64 | "type": "FACE_TOOLKITS",
65 | "link": 57
66 | }
67 | ],
68 | "outputs": [
69 | {
70 | "name": "IMAGE",
71 | "type": "IMAGE",
72 | "links": [
73 | 99
74 | ],
75 | "slot_index": 0
76 | }
77 | ],
78 | "properties": {
79 | "Node name for S&R": "CropPortrait"
80 | },
81 | "widgets_values": []
82 | },
83 | {
84 | "id": 43,
85 | "type": "GetFaceLandmarks",
86 | "pos": [
87 | 465,
88 | 771
89 | ],
90 | "size": [
91 | 292.20001220703125,
92 | 46
93 | ],
94 | "flags": {},
95 | "order": 4,
96 | "mode": 0,
97 | "inputs": [
98 | {
99 | "name": "face_toolkits",
100 | "type": "FACE_TOOLKITS",
101 | "link": 62
102 | },
103 | {
104 | "name": "images",
105 | "type": "IMAGE",
106 | "link": 82
107 | }
108 | ],
109 | "outputs": [
110 | {
111 | "name": "landmarks",
112 | "type": "FACELANDMARKS222",
113 | "links": [
114 | 64,
115 | 113
116 | ],
117 | "slot_index": 0
118 | }
119 | ],
120 | "properties": {
121 | "Node name for S&R": "GetFaceLandmarks"
122 | },
123 | "widgets_values": []
124 | },
125 | {
126 | "id": 44,
127 | "type": "GetDrivePose",
128 | "pos": [
129 | 842,
130 | 677
131 | ],
132 | "size": [
133 | 304.79998779296875,
134 | 66
135 | ],
136 | "flags": {},
137 | "order": 6,
138 | "mode": 0,
139 | "inputs": [
140 | {
141 | "name": "face_toolkits",
142 | "type": "FACE_TOOLKITS",
143 | "link": 65
144 | },
145 | {
146 | "name": "images",
147 | "type": "IMAGE",
148 | "link": 83
149 | },
150 | {
151 | "name": "landmarks",
152 | "type": "FACELANDMARKS222",
153 | "link": 64
154 | }
155 | ],
156 | "outputs": [
157 | {
158 | "name": "drive_pose",
159 | "type": "DRIVE_POSE",
160 | "links": [
161 | 100
162 | ],
163 | "slot_index": 0
164 | }
165 | ],
166 | "properties": {
167 | "Node name for S&R": "GetDrivePose"
168 | },
169 | "widgets_values": []
170 | },
171 | {
172 | "id": 53,
173 | "type": "HMPipelineVideo",
174 | "pos": [
175 | 1258,
176 | 562
177 | ],
178 | "size": [
179 | 315,
180 | 350
181 | ],
182 | "flags": {},
183 | "order": 8,
184 | "mode": 0,
185 | "inputs": [
186 | {
187 | "name": "hm_video_pipeline",
188 | "type": "HMVIDEOPIPELINE",
189 | "link": 115
190 | },
191 | {
192 | "name": "face_toolkits",
193 | "type": "FACE_TOOLKITS",
194 | "link": 97
195 | },
196 | {
197 | "name": "ref_image",
198 | "type": "IMAGE",
199 | "link": 99
200 | },
201 | {
202 | "name": "drive_pose",
203 | "type": "DRIVE_POSE",
204 | "link": 100
205 | },
206 | {
207 | "name": "drive_exp",
208 | "type": "DRIVE_EXPRESSION",
209 | "link": null,
210 | "shape": 7
211 | },
212 | {
213 | "name": "drive_exp2",
214 | "type": "DRIVE_EXPRESSION2",
215 | "link": 111,
216 | "shape": 7
217 | }
218 | ],
219 | "outputs": [
220 | {
221 | "name": "IMAGE",
222 | "type": "IMAGE",
223 | "links": [
224 | 117
225 | ],
226 | "slot_index": 0
227 | },
228 | {
229 | "name": "LATENT",
230 | "type": "LATENT",
231 | "links": null
232 | }
233 | ],
234 | "properties": {
235 | "Node name for S&R": "HMPipelineVideo"
236 | },
237 | "widgets_values": [
238 | 0,
239 | 4,
240 | "(best quality), highly detailed, ultra-detailed, headshot, person, well-placed five sense organs, looking at the viewer, centered composition, sharp focus, realistic skin texture",
241 | "",
242 | 25,
243 | 803448379700888,
244 | "randomize",
245 | 2,
246 | 0
247 | ]
248 | },
249 | {
250 | "id": 56,
251 | "type": "GetDriveExpression2",
252 | "pos": [
253 | 844,
254 | 816
255 | ],
256 | "size": [
257 | 304.79998779296875,
258 | 66
259 | ],
260 | "flags": {},
261 | "order": 7,
262 | "mode": 0,
263 | "inputs": [
264 | {
265 | "name": "face_toolkits",
266 | "type": "FACE_TOOLKITS",
267 | "link": 114
268 | },
269 | {
270 | "name": "images",
271 | "type": "IMAGE",
272 | "link": 112
273 | },
274 | {
275 | "name": "landmarks",
276 | "type": "FACELANDMARKS222",
277 | "link": 113
278 | }
279 | ],
280 | "outputs": [
281 | {
282 | "name": "drive_exp2",
283 | "type": "DRIVE_EXPRESSION2",
284 | "links": [
285 | 111
286 | ],
287 | "slot_index": 0
288 | }
289 | ],
290 | "properties": {
291 | "Node name for S&R": "GetDriveExpression2"
292 | },
293 | "widgets_values": []
294 | },
295 | {
296 | "id": 57,
297 | "type": "HMVideoPipelineLoader",
298 | "pos": [
299 | 813,
300 | 343
301 | ],
302 | "size": [
303 | 352.79998779296875,
304 | 178
305 | ],
306 | "flags": {},
307 | "order": 1,
308 | "mode": 0,
309 | "inputs": [],
310 | "outputs": [
311 | {
312 | "name": "hm_video_pipeline",
313 | "type": "HMVIDEOPIPELINE",
314 | "links": [
315 | 115
316 | ],
317 | "slot_index": 0
318 | }
319 | ],
320 | "properties": {
321 | "Node name for S&R": "HMVideoPipelineLoader"
322 | },
323 | "widgets_values": [
324 | "SD1.5",
325 | "None",
326 | "same as checkpoint",
327 | "v2",
328 | "x1",
329 | "huggingface"
330 | ]
331 | },
332 | {
333 | "id": 59,
334 | "type": "VHS_VideoCombine",
335 | "pos": [
336 | 1619,
337 | 800
338 | ],
339 | "size": [
340 | 214.7587890625,
341 | 542.7587890625
342 | ],
343 | "flags": {},
344 | "order": 9,
345 | "mode": 0,
346 | "inputs": [
347 | {
348 | "name": "images",
349 | "type": "IMAGE",
350 | "link": 117
351 | },
352 | {
353 | "name": "audio",
354 | "type": "AUDIO",
355 | "link": 118,
356 | "shape": 7
357 | },
358 | {
359 | "name": "meta_batch",
360 | "type": "VHS_BatchManager",
361 | "link": null,
362 | "shape": 7
363 | },
364 | {
365 | "name": "vae",
366 | "type": "VAE",
367 | "link": null,
368 | "shape": 7
369 | }
370 | ],
371 | "outputs": [
372 | {
373 | "name": "Filenames",
374 | "type": "VHS_FILENAMES",
375 | "links": null
376 | }
377 | ],
378 | "properties": {
379 | "Node name for S&R": "VHS_VideoCombine"
380 | },
381 | "widgets_values": {
382 | "frame_rate": 15,
383 | "loop_count": 0,
384 | "filename_prefix": "AnimateDiff",
385 | "format": "video/h264-mp4",
386 | "pix_fmt": "yuv420p",
387 | "crf": 18,
388 | "save_metadata": true,
389 | "trim_to_audio": false,
390 | "pingpong": false,
391 | "save_output": true,
392 | "videopreview": {
393 | "hidden": false,
394 | "paused": false,
395 | "params": {
396 | "filename": "AnimateDiff_00003-audio.mp4",
397 | "subfolder": "",
398 | "type": "output",
399 | "format": "video/h264-mp4",
400 | "frame_rate": 15,
401 | "workflow": "AnimateDiff_00003.png",
402 | "fullpath": "C:\\Comfy\\ComfyUI\\output\\AnimateDiff_00003-audio.mp4"
403 | },
404 | "muted": false
405 | }
406 | }
407 | },
408 | {
409 | "id": 48,
410 | "type": "VHS_LoadVideo",
411 | "pos": [
412 | 106,
413 | 867
414 | ],
415 | "size": [
416 | 247.455078125,
417 | 551.455078125
418 | ],
419 | "flags": {},
420 | "order": 2,
421 | "mode": 0,
422 | "inputs": [
423 | {
424 | "name": "meta_batch",
425 | "type": "VHS_BatchManager",
426 | "link": null,
427 | "shape": 7
428 | },
429 | {
430 | "name": "vae",
431 | "type": "VAE",
432 | "link": null,
433 | "shape": 7
434 | }
435 | ],
436 | "outputs": [
437 | {
438 | "name": "IMAGE",
439 | "type": "IMAGE",
440 | "links": [
441 | 82,
442 | 83,
443 | 112
444 | ],
445 | "slot_index": 0
446 | },
447 | {
448 | "name": "frame_count",
449 | "type": "INT",
450 | "links": null
451 | },
452 | {
453 | "name": "audio",
454 | "type": "AUDIO",
455 | "links": [
456 | 118
457 | ],
458 | "slot_index": 2
459 | },
460 | {
461 | "name": "video_info",
462 | "type": "VHS_VIDEOINFO",
463 | "links": null
464 | }
465 | ],
466 | "properties": {
467 | "Node name for S&R": "VHS_LoadVideo"
468 | },
469 | "widgets_values": {
470 | "video": "d6_1_short.mp4",
471 | "force_rate": 15,
472 | "custom_width": 0,
473 | "custom_height": 0,
474 | "frame_load_cap": 0,
475 | "skip_first_frames": 0,
476 | "select_every_nth": 1,
477 | "format": "AnimateDiff",
478 | "choose video to upload": "image",
479 | "videopreview": {
480 | "hidden": false,
481 | "paused": false,
482 | "params": {
483 | "force_rate": 15,
484 | "frame_load_cap": 0,
485 | "skip_first_frames": 0,
486 | "select_every_nth": 1,
487 | "filename": "d6_1_short.mp4",
488 | "type": "input",
489 | "format": "video/mp4"
490 | },
491 | "muted": false
492 | }
493 | }
494 | },
495 | {
496 | "id": 58,
497 | "type": "LoadImage",
498 | "pos": [
499 | 42.38749313354492,
500 | 273
501 | ],
502 | "size": [
503 | 315,
504 | 314
505 | ],
506 | "flags": {},
507 | "order": 3,
508 | "mode": 0,
509 | "inputs": [],
510 | "outputs": [
511 | {
512 | "name": "IMAGE",
513 | "type": "IMAGE",
514 | "links": [
515 | 116
516 | ],
517 | "slot_index": 0
518 | },
519 | {
520 | "name": "MASK",
521 | "type": "MASK",
522 | "links": null
523 | }
524 | ],
525 | "properties": {
526 | "Node name for S&R": "LoadImage"
527 | },
528 | "widgets_values": [
529 | "eve-looking-back.png",
530 | "image"
531 | ]
532 | }
533 | ],
534 | "links": [
535 | [
536 | 57,
537 | 2,
538 | 0,
539 | 39,
540 | 1,
541 | "FACE_TOOLKITS"
542 | ],
543 | [
544 | 62,
545 | 2,
546 | 0,
547 | 43,
548 | 0,
549 | "FACE_TOOLKITS"
550 | ],
551 | [
552 | 64,
553 | 43,
554 | 0,
555 | 44,
556 | 2,
557 | "FACELANDMARKS222"
558 | ],
559 | [
560 | 65,
561 | 2,
562 | 0,
563 | 44,
564 | 0,
565 | "FACE_TOOLKITS"
566 | ],
567 | [
568 | 82,
569 | 48,
570 | 0,
571 | 43,
572 | 1,
573 | "IMAGE"
574 | ],
575 | [
576 | 83,
577 | 48,
578 | 0,
579 | 44,
580 | 1,
581 | "IMAGE"
582 | ],
583 | [
584 | 97,
585 | 2,
586 | 0,
587 | 53,
588 | 1,
589 | "FACE_TOOLKITS"
590 | ],
591 | [
592 | 99,
593 | 39,
594 | 0,
595 | 53,
596 | 2,
597 | "IMAGE"
598 | ],
599 | [
600 | 100,
601 | 44,
602 | 0,
603 | 53,
604 | 3,
605 | "DRIVE_POSE"
606 | ],
607 | [
608 | 111,
609 | 56,
610 | 0,
611 | 53,
612 | 5,
613 | "DRIVE_EXPRESSION2"
614 | ],
615 | [
616 | 112,
617 | 48,
618 | 0,
619 | 56,
620 | 1,
621 | "IMAGE"
622 | ],
623 | [
624 | 113,
625 | 43,
626 | 0,
627 | 56,
628 | 2,
629 | "FACELANDMARKS222"
630 | ],
631 | [
632 | 114,
633 | 2,
634 | 0,
635 | 56,
636 | 0,
637 | "FACE_TOOLKITS"
638 | ],
639 | [
640 | 115,
641 | 57,
642 | 0,
643 | 53,
644 | 0,
645 | "HMVIDEOPIPELINE"
646 | ],
647 | [
648 | 116,
649 | 58,
650 | 0,
651 | 39,
652 | 0,
653 | "IMAGE"
654 | ],
655 | [
656 | 117,
657 | 53,
658 | 0,
659 | 59,
660 | 0,
661 | "IMAGE"
662 | ],
663 | [
664 | 118,
665 | 48,
666 | 2,
667 | 59,
668 | 1,
669 | "AUDIO"
670 | ]
671 | ],
672 | "groups": [],
673 | "config": {},
674 | "extra": {
675 | "ds": {
676 | "scale": 0.8264462809917354,
677 | "offset": [
678 | 588.8217835430144,
679 | -135.8614279707282
680 | ]
681 | },
682 | "node_versions": {
683 | "comfyui_hellomeme": "1.0.2",
684 | "comfyui-videohelpersuite": "1.5.0",
685 | "comfy-core": "0.3.13"
686 | },
687 | "VHS_latentpreview": true,
688 | "VHS_latentpreviewrate": 0
689 | },
690 | "version": 0.4
691 | }
--------------------------------------------------------------------------------
/example_workflows/face_styler.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/example_workflows/face_styler.jpg
--------------------------------------------------------------------------------
/example_workflows/flux_dev.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/example_workflows/flux_dev.jpg
--------------------------------------------------------------------------------
/example_workflows/flux_inpainting.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/example_workflows/flux_inpainting.jpg
--------------------------------------------------------------------------------
/example_workflows/flux_redux.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/example_workflows/flux_redux.jpg
--------------------------------------------------------------------------------
/example_workflows/flux_schnell_fp8.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/example_workflows/flux_schnell_fp8.jpg
--------------------------------------------------------------------------------
/example_workflows/flux_schnell_fp8.json:
--------------------------------------------------------------------------------
1 | {
2 | "last_node_id": 28,
3 | "last_link_id": 44,
4 | "nodes": [
5 | {
6 | "id": 22,
7 | "type": "BasicGuider",
8 | "pos": [
9 | 559,
10 | 125
11 | ],
12 | "size": [
13 | 241.79998779296875,
14 | 46
15 | ],
16 | "flags": {},
17 | "order": 6,
18 | "mode": 0,
19 | "inputs": [
20 | {
21 | "name": "model",
22 | "type": "MODEL",
23 | "link": 41,
24 | "slot_index": 0
25 | },
26 | {
27 | "name": "conditioning",
28 | "type": "CONDITIONING",
29 | "link": 40,
30 | "slot_index": 1
31 | }
32 | ],
33 | "outputs": [
34 | {
35 | "name": "GUIDER",
36 | "type": "GUIDER",
37 | "links": [
38 | 30
39 | ],
40 | "slot_index": 0,
41 | "shape": 3
42 | }
43 | ],
44 | "properties": {
45 | "Node name for S&R": "BasicGuider"
46 | },
47 | "widgets_values": []
48 | },
49 | {
50 | "id": 5,
51 | "type": "EmptyLatentImage",
52 | "pos": [
53 | 480,
54 | 432
55 | ],
56 | "size": [
57 | 315,
58 | 106
59 | ],
60 | "flags": {},
61 | "order": 0,
62 | "mode": 0,
63 | "inputs": [],
64 | "outputs": [
65 | {
66 | "name": "LATENT",
67 | "type": "LATENT",
68 | "links": [
69 | 23
70 | ],
71 | "slot_index": 0
72 | }
73 | ],
74 | "properties": {
75 | "Node name for S&R": "EmptyLatentImage"
76 | },
77 | "widgets_values": [
78 | 1024,
79 | 1024,
80 | 1
81 | ],
82 | "color": "#323",
83 | "bgcolor": "#535"
84 | },
85 | {
86 | "id": 25,
87 | "type": "RandomNoise",
88 | "pos": [
89 | 480,
90 | 576
91 | ],
92 | "size": [
93 | 315,
94 | 82
95 | ],
96 | "flags": {},
97 | "order": 1,
98 | "mode": 0,
99 | "inputs": [],
100 | "outputs": [
101 | {
102 | "name": "NOISE",
103 | "type": "NOISE",
104 | "links": [
105 | 37
106 | ],
107 | "shape": 3
108 | }
109 | ],
110 | "properties": {
111 | "Node name for S&R": "RandomNoise"
112 | },
113 | "widgets_values": [
114 | 883118765641760,
115 | "randomize"
116 | ],
117 | "color": "#2a363b",
118 | "bgcolor": "#3f5159"
119 | },
120 | {
121 | "id": 13,
122 | "type": "SamplerCustomAdvanced",
123 | "pos": [
124 | 842,
125 | 215
126 | ],
127 | "size": [
128 | 251.69891357421875,
129 | 326
130 | ],
131 | "flags": {},
132 | "order": 7,
133 | "mode": 0,
134 | "inputs": [
135 | {
136 | "name": "noise",
137 | "type": "NOISE",
138 | "link": 37,
139 | "slot_index": 0
140 | },
141 | {
142 | "name": "guider",
143 | "type": "GUIDER",
144 | "link": 30,
145 | "slot_index": 1
146 | },
147 | {
148 | "name": "sampler",
149 | "type": "SAMPLER",
150 | "link": 19,
151 | "slot_index": 2
152 | },
153 | {
154 | "name": "sigmas",
155 | "type": "SIGMAS",
156 | "link": 20,
157 | "slot_index": 3
158 | },
159 | {
160 | "name": "latent_image",
161 | "type": "LATENT",
162 | "link": 23,
163 | "slot_index": 4
164 | }
165 | ],
166 | "outputs": [
167 | {
168 | "name": "output",
169 | "type": "LATENT",
170 | "links": [
171 | 24
172 | ],
173 | "slot_index": 0,
174 | "shape": 3
175 | },
176 | {
177 | "name": "denoised_output",
178 | "type": "LATENT",
179 | "links": null,
180 | "shape": 3
181 | }
182 | ],
183 | "properties": {
184 | "Node name for S&R": "SamplerCustomAdvanced"
185 | },
186 | "widgets_values": []
187 | },
188 | {
189 | "id": 8,
190 | "type": "VAEDecode",
191 | "pos": [
192 | 1124,
193 | 217
194 | ],
195 | "size": [
196 | 210,
197 | 46
198 | ],
199 | "flags": {},
200 | "order": 8,
201 | "mode": 0,
202 | "inputs": [
203 | {
204 | "name": "samples",
205 | "type": "LATENT",
206 | "link": 24
207 | },
208 | {
209 | "name": "vae",
210 | "type": "VAE",
211 | "link": 44
212 | }
213 | ],
214 | "outputs": [
215 | {
216 | "name": "IMAGE",
217 | "type": "IMAGE",
218 | "links": [
219 | 9
220 | ],
221 | "slot_index": 0
222 | }
223 | ],
224 | "properties": {
225 | "Node name for S&R": "VAEDecode"
226 | },
227 | "widgets_values": []
228 | },
229 | {
230 | "id": 9,
231 | "type": "SaveImage",
232 | "pos": [
233 | 1362,
234 | 216
235 | ],
236 | "size": [
237 | 985.3012084960938,
238 | 1060.3828125
239 | ],
240 | "flags": {},
241 | "order": 9,
242 | "mode": 0,
243 | "inputs": [
244 | {
245 | "name": "images",
246 | "type": "IMAGE",
247 | "link": 9
248 | }
249 | ],
250 | "outputs": [],
251 | "properties": {
252 | "Node name for S&R": "SaveImage"
253 | },
254 | "widgets_values": [
255 | "Flux"
256 | ]
257 | },
258 | {
259 | "id": 17,
260 | "type": "BasicScheduler",
261 | "pos": [
262 | 480,
263 | 820
264 | ],
265 | "size": [
266 | 315,
267 | 106
268 | ],
269 | "flags": {},
270 | "order": 4,
271 | "mode": 0,
272 | "inputs": [
273 | {
274 | "name": "model",
275 | "type": "MODEL",
276 | "link": 42,
277 | "slot_index": 0
278 | }
279 | ],
280 | "outputs": [
281 | {
282 | "name": "SIGMAS",
283 | "type": "SIGMAS",
284 | "links": [
285 | 20
286 | ],
287 | "shape": 3
288 | }
289 | ],
290 | "properties": {
291 | "Node name for S&R": "BasicScheduler"
292 | },
293 | "widgets_values": [
294 | "simple",
295 | 4,
296 | 1
297 | ]
298 | },
299 | {
300 | "id": 16,
301 | "type": "KSamplerSelect",
302 | "pos": [
303 | 480,
304 | 709
305 | ],
306 | "size": [
307 | 315,
308 | 58
309 | ],
310 | "flags": {},
311 | "order": 2,
312 | "mode": 0,
313 | "inputs": [],
314 | "outputs": [
315 | {
316 | "name": "SAMPLER",
317 | "type": "SAMPLER",
318 | "links": [
319 | 19
320 | ],
321 | "shape": 3
322 | }
323 | ],
324 | "properties": {
325 | "Node name for S&R": "KSamplerSelect"
326 | },
327 | "widgets_values": [
328 | "euler"
329 | ]
330 | },
331 | {
332 | "id": 28,
333 | "type": "CheckpointLoaderSimple",
334 | "pos": [
335 | -12.205482482910156,
336 | 210.79689025878906
337 | ],
338 | "size": [
339 | 315,
340 | 98
341 | ],
342 | "flags": {},
343 | "order": 3,
344 | "mode": 0,
345 | "inputs": [],
346 | "outputs": [
347 | {
348 | "name": "MODEL",
349 | "type": "MODEL",
350 | "links": [
351 | 41,
352 | 42
353 | ],
354 | "slot_index": 0
355 | },
356 | {
357 | "name": "CLIP",
358 | "type": "CLIP",
359 | "links": [
360 | 43
361 | ],
362 | "slot_index": 1
363 | },
364 | {
365 | "name": "VAE",
366 | "type": "VAE",
367 | "links": [
368 | 44
369 | ],
370 | "slot_index": 2
371 | }
372 | ],
373 | "properties": {
374 | "Node name for S&R": "CheckpointLoaderSimple"
375 | },
376 | "widgets_values": [
377 | "flux1-schnell-fp8.safetensors"
378 | ]
379 | },
380 | {
381 | "id": 6,
382 | "type": "CLIPTextEncode",
383 | "pos": [
384 | 375,
385 | 221
386 | ],
387 | "size": [
388 | 422.84503173828125,
389 | 164.31304931640625
390 | ],
391 | "flags": {},
392 | "order": 5,
393 | "mode": 0,
394 | "inputs": [
395 | {
396 | "name": "clip",
397 | "type": "CLIP",
398 | "link": 43
399 | }
400 | ],
401 | "outputs": [
402 | {
403 | "name": "CONDITIONING",
404 | "type": "CONDITIONING",
405 | "links": [
406 | 40
407 | ],
408 | "slot_index": 0
409 | }
410 | ],
411 | "properties": {
412 | "Node name for S&R": "CLIPTextEncode"
413 | },
414 | "widgets_values": [
415 | "a futuristic cyborg forest nymph made of leaves wires flesh cables and flowers in the form of a humanoid female presenting avatar"
416 | ],
417 | "color": "#232",
418 | "bgcolor": "#353"
419 | }
420 | ],
421 | "links": [
422 | [
423 | 9,
424 | 8,
425 | 0,
426 | 9,
427 | 0,
428 | "IMAGE"
429 | ],
430 | [
431 | 19,
432 | 16,
433 | 0,
434 | 13,
435 | 2,
436 | "SAMPLER"
437 | ],
438 | [
439 | 20,
440 | 17,
441 | 0,
442 | 13,
443 | 3,
444 | "SIGMAS"
445 | ],
446 | [
447 | 23,
448 | 5,
449 | 0,
450 | 13,
451 | 4,
452 | "LATENT"
453 | ],
454 | [
455 | 24,
456 | 13,
457 | 0,
458 | 8,
459 | 0,
460 | "LATENT"
461 | ],
462 | [
463 | 30,
464 | 22,
465 | 0,
466 | 13,
467 | 1,
468 | "GUIDER"
469 | ],
470 | [
471 | 37,
472 | 25,
473 | 0,
474 | 13,
475 | 0,
476 | "NOISE"
477 | ],
478 | [
479 | 40,
480 | 6,
481 | 0,
482 | 22,
483 | 1,
484 | "CONDITIONING"
485 | ],
486 | [
487 | 41,
488 | 28,
489 | 0,
490 | 22,
491 | 0,
492 | "MODEL"
493 | ],
494 | [
495 | 42,
496 | 28,
497 | 0,
498 | 17,
499 | 0,
500 | "MODEL"
501 | ],
502 | [
503 | 43,
504 | 28,
505 | 1,
506 | 6,
507 | 0,
508 | "CLIP"
509 | ],
510 | [
511 | 44,
512 | 28,
513 | 2,
514 | 8,
515 | 1,
516 | "VAE"
517 | ]
518 | ],
519 | "groups": [],
520 | "config": {},
521 | "extra": {
522 | "ds": {
523 | "scale": 0.740024994425886,
524 | "offset": [
525 | 1073.8353428802184,
526 | 161.4565981653887
527 | ]
528 | },
529 | "node_versions": {
530 | "comfy-core": "0.3.13"
531 | },
532 | "VHS_latentpreview": false,
533 | "VHS_latentpreviewrate": 0
534 | },
535 | "version": 0.4
536 | }
--------------------------------------------------------------------------------
/example_workflows/frame-blend-time-remapping.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/example_workflows/frame-blend-time-remapping.jpg
--------------------------------------------------------------------------------
/example_workflows/img2vid.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/example_workflows/img2vid.jpg
--------------------------------------------------------------------------------
/example_workflows/layer_diffusion.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/example_workflows/layer_diffusion.jpg
--------------------------------------------------------------------------------
/example_workflows/mars-id.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/example_workflows/mars-id.jpg
--------------------------------------------------------------------------------
/example_workflows/mochi_preview.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/example_workflows/mochi_preview.jpg
--------------------------------------------------------------------------------
/example_workflows/ominicontrol.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/example_workflows/ominicontrol.jpg
--------------------------------------------------------------------------------
/example_workflows/ominicontrol.json:
--------------------------------------------------------------------------------
1 | {
2 | "last_node_id": 9,
3 | "last_link_id": 15,
4 | "nodes": [
5 | {
6 | "id": 1,
7 | "type": "RunningHub_Omini_Subject",
8 | "pos": [
9 | 426,
10 | -109
11 | ],
12 | "size": [
13 | 400,
14 | 200
15 | ],
16 | "flags": {},
17 | "order": 2,
18 | "mode": 0,
19 | "inputs": [
20 | {
21 | "name": "subject_image",
22 | "type": "IMAGE",
23 | "link": 12,
24 | "slot_index": 0,
25 | "label": "subject_image"
26 | }
27 | ],
28 | "outputs": [
29 | {
30 | "name": "image",
31 | "type": "IMAGE",
32 | "links": [
33 | 10
34 | ],
35 | "slot_index": 0,
36 | "shape": 3,
37 | "label": "image"
38 | }
39 | ],
40 | "properties": {
41 | "Node name for S&R": "RunningHub_Omini_Subject"
42 | },
43 | "widgets_values": [
44 | "A coffee mug featuring this item, iridescent surface that shimmers between deep blues, purples, and electrified oranges, reminiscent of liquid metal or a heat map in motion. The mug appears semi-translucent, with light refracting through its glossy, prismatic material, creating an effect akin to phosphorescent energy flowing beneath its surface. ",
45 | 848471931153291,
46 | "randomize"
47 | ]
48 | },
49 | {
50 | "id": 2,
51 | "type": "LoadImage",
52 | "pos": [
53 | -334.69158935546875,
54 | -111.25019073486328
55 | ],
56 | "size": [
57 | 315,
58 | 314
59 | ],
60 | "flags": {},
61 | "order": 0,
62 | "mode": 0,
63 | "inputs": [],
64 | "outputs": [
65 | {
66 | "name": "IMAGE",
67 | "type": "IMAGE",
68 | "links": [
69 | 15
70 | ],
71 | "slot_index": 0,
72 | "shape": 3,
73 | "label": "IMAGE"
74 | },
75 | {
76 | "name": "MASK",
77 | "type": "MASK",
78 | "shape": 3,
79 | "label": "MASK"
80 | }
81 | ],
82 | "properties": {
83 | "Node name for S&R": "LoadImage"
84 | },
85 | "widgets_values": [
86 | "EDEN-ICON-COLOUR.png",
87 | "image"
88 | ]
89 | },
90 | {
91 | "id": 4,
92 | "type": "SaveImage",
93 | "pos": [
94 | 856.39892578125,
95 | -108.98165130615234
96 | ],
97 | "size": [
98 | 315,
99 | 270
100 | ],
101 | "flags": {},
102 | "order": 3,
103 | "mode": 0,
104 | "inputs": [
105 | {
106 | "name": "images",
107 | "type": "IMAGE",
108 | "link": 10,
109 | "label": "images"
110 | }
111 | ],
112 | "outputs": [],
113 | "properties": {
114 | "Node name for S&R": "SaveImage"
115 | },
116 | "widgets_values": [
117 | "ComfyUI"
118 | ]
119 | },
120 | {
121 | "id": 8,
122 | "type": "ImageResize+",
123 | "pos": [
124 | 32.47909164428711,
125 | -112.0680160522461
126 | ],
127 | "size": [
128 | 315,
129 | 218
130 | ],
131 | "flags": {},
132 | "order": 1,
133 | "mode": 0,
134 | "inputs": [
135 | {
136 | "name": "image",
137 | "type": "IMAGE",
138 | "link": 15
139 | }
140 | ],
141 | "outputs": [
142 | {
143 | "name": "IMAGE",
144 | "type": "IMAGE",
145 | "links": [
146 | 12
147 | ],
148 | "slot_index": 0
149 | },
150 | {
151 | "name": "width",
152 | "type": "INT",
153 | "links": null
154 | },
155 | {
156 | "name": "height",
157 | "type": "INT",
158 | "links": null
159 | }
160 | ],
161 | "properties": {
162 | "Node name for S&R": "ImageResize+"
163 | },
164 | "widgets_values": [
165 | 1024,
166 | 1024,
167 | "nearest",
168 | "fill / crop",
169 | "always",
170 | 0
171 | ]
172 | }
173 | ],
174 | "links": [
175 | [
176 | 10,
177 | 1,
178 | 0,
179 | 4,
180 | 0,
181 | "IMAGE"
182 | ],
183 | [
184 | 12,
185 | 8,
186 | 0,
187 | 1,
188 | 0,
189 | "IMAGE"
190 | ],
191 | [
192 | 15,
193 | 2,
194 | 0,
195 | 8,
196 | 0,
197 | "IMAGE"
198 | ]
199 | ],
200 | "groups": [],
201 | "config": {},
202 | "extra": {
203 | "ds": {
204 | "scale": 1.4420993610651298,
205 | "offset": [
206 | 454.98451047879644,
207 | 473.442971922833
208 | ]
209 | },
210 | "0246.VERSION": [
211 | 0,
212 | 0,
213 | 4
214 | ],
215 | "ue_links": [],
216 | "VHS_latentpreview": true,
217 | "VHS_latentpreviewrate": 0,
218 | "node_versions": {
219 | "comfy-core": "0.3.13",
220 | "ComfyUI_essentials": "33ff89fd354d8ec3ab6affb605a79a931b445d99",
221 | "ComfyUI_RH_OminiControl": "669646b5d235f4b0be4c2d6523032efe0f0df524"
222 | }
223 | },
224 | "version": 0.4
225 | }
--------------------------------------------------------------------------------
/example_workflows/outpaint.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/example_workflows/outpaint.jpg
--------------------------------------------------------------------------------
/example_workflows/remix_flux_schnell.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/example_workflows/remix_flux_schnell.jpg
--------------------------------------------------------------------------------
/example_workflows/stable_audio.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/example_workflows/stable_audio.jpg
--------------------------------------------------------------------------------
/example_workflows/stable_audio.json:
--------------------------------------------------------------------------------
1 | {
2 | "last_node_id": 18,
3 | "last_link_id": 27,
4 | "nodes": [
5 | {
6 | "id": 3,
7 | "type": "KSampler",
8 | "pos": [
9 | 864,
10 | 96
11 | ],
12 | "size": [
13 | 315,
14 | 262
15 | ],
16 | "flags": {},
17 | "order": 6,
18 | "mode": 0,
19 | "inputs": [
20 | {
21 | "name": "model",
22 | "type": "MODEL",
23 | "link": 18
24 | },
25 | {
26 | "name": "positive",
27 | "type": "CONDITIONING",
28 | "link": 4
29 | },
30 | {
31 | "name": "negative",
32 | "type": "CONDITIONING",
33 | "link": 6
34 | },
35 | {
36 | "name": "latent_image",
37 | "type": "LATENT",
38 | "link": 12,
39 | "slot_index": 3
40 | }
41 | ],
42 | "outputs": [
43 | {
44 | "name": "LATENT",
45 | "type": "LATENT",
46 | "links": [
47 | 13
48 | ],
49 | "slot_index": 0
50 | }
51 | ],
52 | "properties": {
53 | "Node name for S&R": "KSampler"
54 | },
55 | "widgets_values": [
56 | 1072174819573698,
57 | "randomize",
58 | 50,
59 | 5,
60 | "dpmpp_3m_sde_gpu",
61 | "exponential",
62 | 1
63 | ]
64 | },
65 | {
66 | "id": 4,
67 | "type": "CheckpointLoaderSimple",
68 | "pos": [
69 | 0,
70 | 240
71 | ],
72 | "size": [
73 | 336,
74 | 98
75 | ],
76 | "flags": {},
77 | "order": 0,
78 | "mode": 0,
79 | "inputs": [],
80 | "outputs": [
81 | {
82 | "name": "MODEL",
83 | "type": "MODEL",
84 | "links": [
85 | 18
86 | ],
87 | "slot_index": 0
88 | },
89 | {
90 | "name": "CLIP",
91 | "type": "CLIP",
92 | "links": [],
93 | "slot_index": 1
94 | },
95 | {
96 | "name": "VAE",
97 | "type": "VAE",
98 | "links": [
99 | 14
100 | ],
101 | "slot_index": 2
102 | }
103 | ],
104 | "properties": {
105 | "Node name for S&R": "CheckpointLoaderSimple"
106 | },
107 | "widgets_values": [
108 | "stable_audio_open_1.0.safetensors"
109 | ]
110 | },
111 | {
112 | "id": 6,
113 | "type": "CLIPTextEncode",
114 | "pos": [
115 | 384,
116 | 96
117 | ],
118 | "size": [
119 | 432,
120 | 144
121 | ],
122 | "flags": {},
123 | "order": 3,
124 | "mode": 0,
125 | "inputs": [
126 | {
127 | "name": "clip",
128 | "type": "CLIP",
129 | "link": 25
130 | }
131 | ],
132 | "outputs": [
133 | {
134 | "name": "CONDITIONING",
135 | "type": "CONDITIONING",
136 | "links": [
137 | 4
138 | ],
139 | "slot_index": 0
140 | }
141 | ],
142 | "properties": {
143 | "Node name for S&R": "CLIPTextEncode"
144 | },
145 | "widgets_values": [
146 | "303 acid bassline arpeggiated 128bpm Gmin7"
147 | ],
148 | "color": "#232",
149 | "bgcolor": "#353"
150 | },
151 | {
152 | "id": 7,
153 | "type": "CLIPTextEncode",
154 | "pos": [
155 | 384,
156 | 288
157 | ],
158 | "size": [
159 | 432,
160 | 144
161 | ],
162 | "flags": {},
163 | "order": 4,
164 | "mode": 0,
165 | "inputs": [
166 | {
167 | "name": "clip",
168 | "type": "CLIP",
169 | "link": 26
170 | }
171 | ],
172 | "outputs": [
173 | {
174 | "name": "CONDITIONING",
175 | "type": "CONDITIONING",
176 | "links": [
177 | 6
178 | ],
179 | "slot_index": 0
180 | }
181 | ],
182 | "properties": {
183 | "Node name for S&R": "CLIPTextEncode"
184 | },
185 | "widgets_values": [
186 | ""
187 | ],
188 | "color": "#322",
189 | "bgcolor": "#533"
190 | },
191 | {
192 | "id": 10,
193 | "type": "CLIPLoader",
194 | "pos": [
195 | 0,
196 | 96
197 | ],
198 | "size": [
199 | 335.6534118652344,
200 | 82
201 | ],
202 | "flags": {},
203 | "order": 1,
204 | "mode": 0,
205 | "inputs": [],
206 | "outputs": [
207 | {
208 | "name": "CLIP",
209 | "type": "CLIP",
210 | "links": [
211 | 25,
212 | 26
213 | ],
214 | "slot_index": 0,
215 | "shape": 3
216 | }
217 | ],
218 | "properties": {
219 | "Node name for S&R": "CLIPLoader"
220 | },
221 | "widgets_values": [
222 | "t5_base.safetensors",
223 | "stable_audio",
224 | "default"
225 | ]
226 | },
227 | {
228 | "id": 11,
229 | "type": "EmptyLatentAudio",
230 | "pos": [
231 | 613.6414184570312,
232 | 495.17822265625
233 | ],
234 | "size": [
235 | 210,
236 | 78.66081237792969
237 | ],
238 | "flags": {},
239 | "order": 5,
240 | "mode": 0,
241 | "inputs": [
242 | {
243 | "name": "seconds",
244 | "type": "FLOAT",
245 | "link": 27,
246 | "widget": {
247 | "name": "seconds"
248 | }
249 | }
250 | ],
251 | "outputs": [
252 | {
253 | "name": "LATENT",
254 | "type": "LATENT",
255 | "links": [
256 | 12
257 | ],
258 | "shape": 3
259 | }
260 | ],
261 | "properties": {
262 | "Node name for S&R": "EmptyLatentAudio"
263 | },
264 | "widgets_values": [
265 | 30,
266 | 1
267 | ]
268 | },
269 | {
270 | "id": 12,
271 | "type": "VAEDecodeAudio",
272 | "pos": [
273 | 1200,
274 | 96
275 | ],
276 | "size": [
277 | 210,
278 | 46
279 | ],
280 | "flags": {},
281 | "order": 7,
282 | "mode": 0,
283 | "inputs": [
284 | {
285 | "name": "samples",
286 | "type": "LATENT",
287 | "link": 13
288 | },
289 | {
290 | "name": "vae",
291 | "type": "VAE",
292 | "link": 14,
293 | "slot_index": 1
294 | }
295 | ],
296 | "outputs": [
297 | {
298 | "name": "AUDIO",
299 | "type": "AUDIO",
300 | "links": [
301 | 15
302 | ],
303 | "slot_index": 0,
304 | "shape": 3
305 | }
306 | ],
307 | "properties": {
308 | "Node name for S&R": "VAEDecodeAudio"
309 | },
310 | "widgets_values": []
311 | },
312 | {
313 | "id": 13,
314 | "type": "SaveAudio",
315 | "pos": [
316 | 1440,
317 | 96
318 | ],
319 | "size": [
320 | 355.22216796875,
321 | 100
322 | ],
323 | "flags": {},
324 | "order": 8,
325 | "mode": 0,
326 | "inputs": [
327 | {
328 | "name": "audio",
329 | "type": "AUDIO",
330 | "link": 15
331 | }
332 | ],
333 | "outputs": [],
334 | "properties": {
335 | "Node name for S&R": "SaveAudio"
336 | },
337 | "widgets_values": [
338 | "audio/Stable_Audio",
339 | null
340 | ]
341 | },
342 | {
343 | "id": 18,
344 | "type": "Eden_Float",
345 | "pos": [
346 | 389.1009521484375,
347 | 503.55010986328125
348 | ],
349 | "size": [
350 | 210,
351 | 63.84355545043945
352 | ],
353 | "flags": {},
354 | "order": 2,
355 | "mode": 0,
356 | "inputs": [],
357 | "outputs": [
358 | {
359 | "name": "FLOAT",
360 | "type": "FLOAT",
361 | "links": [
362 | 27
363 | ],
364 | "slot_index": 0
365 | }
366 | ],
367 | "title": "seconds",
368 | "properties": {
369 | "Node name for S&R": "Eden_Float"
370 | },
371 | "widgets_values": [
372 | 30
373 | ]
374 | }
375 | ],
376 | "links": [
377 | [
378 | 4,
379 | 6,
380 | 0,
381 | 3,
382 | 1,
383 | "CONDITIONING"
384 | ],
385 | [
386 | 6,
387 | 7,
388 | 0,
389 | 3,
390 | 2,
391 | "CONDITIONING"
392 | ],
393 | [
394 | 12,
395 | 11,
396 | 0,
397 | 3,
398 | 3,
399 | "LATENT"
400 | ],
401 | [
402 | 13,
403 | 3,
404 | 0,
405 | 12,
406 | 0,
407 | "LATENT"
408 | ],
409 | [
410 | 14,
411 | 4,
412 | 2,
413 | 12,
414 | 1,
415 | "VAE"
416 | ],
417 | [
418 | 15,
419 | 12,
420 | 0,
421 | 13,
422 | 0,
423 | "AUDIO"
424 | ],
425 | [
426 | 18,
427 | 4,
428 | 0,
429 | 3,
430 | 0,
431 | "MODEL"
432 | ],
433 | [
434 | 25,
435 | 10,
436 | 0,
437 | 6,
438 | 0,
439 | "CLIP"
440 | ],
441 | [
442 | 26,
443 | 10,
444 | 0,
445 | 7,
446 | 0,
447 | "CLIP"
448 | ],
449 | [
450 | 27,
451 | 18,
452 | 0,
453 | 11,
454 | 0,
455 | "FLOAT"
456 | ]
457 | ],
458 | "groups": [],
459 | "config": {},
460 | "extra": {
461 | "ds": {
462 | "scale": 1.1,
463 | "offset": [
464 | 423.617787753202,
465 | 201.80085002159075
466 | ]
467 | },
468 | "node_versions": {
469 | "comfy-core": "0.3.13",
470 | "eden_comfy_pipelines": "35af96dba9df83a53013a48cda08a9556a1c8fef"
471 | },
472 | "VHS_latentpreview": true,
473 | "VHS_latentpreviewrate": 0
474 | },
475 | "version": 0.4
476 | }
--------------------------------------------------------------------------------
/example_workflows/texture_flow.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/example_workflows/texture_flow.jpg
--------------------------------------------------------------------------------
/example_workflows/txt2img_SDXL.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/example_workflows/txt2img_SDXL.jpg
--------------------------------------------------------------------------------
/example_workflows/txt2vid.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/example_workflows/txt2vid.jpg
--------------------------------------------------------------------------------
/example_workflows/upscaler.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/example_workflows/upscaler.jpg
--------------------------------------------------------------------------------
/example_workflows/vid2vid_sdxl.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/example_workflows/vid2vid_sdxl.jpg
--------------------------------------------------------------------------------
/example_workflows/video_FX.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/example_workflows/video_FX.jpg
--------------------------------------------------------------------------------
/example_workflows/video_upscaler.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/example_workflows/video_upscaler.jpg
--------------------------------------------------------------------------------
/img_utils/depth.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/img_utils/depth.png
--------------------------------------------------------------------------------
/img_utils/depth_nodes.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn.functional as F
3 | from PIL import Image
4 |
5 | import cv2
6 | import numpy as np
7 | from sklearn.cluster import KMeans
8 | from sklearn.preprocessing import StandardScaler
9 | from scipy.spatial.distance import cdist
10 | import sys
11 |
12 | class WeightedKMeans(KMeans):
13 | def __init__(self, n_clusters=8, weights=None, **kwargs):
14 | super().__init__(n_clusters=n_clusters, **kwargs)
15 | self.weights = weights
16 |
17 | def fit(self, X):
18 | if self.weights is None:
19 | self.weights = np.ones(X.shape[1])
20 | return super().fit(X)
21 |
22 | def _weighted_euclidean_distance(self, X, Y):
23 |         return cdist(X, Y, metric='minkowski', p=2, w=self.weights ** 2)  # 'wminkowski' was removed in SciPy 1.8; squared weights give the same p=2 distance
24 |
25 | def fit_predict(self, X, y=None, sample_weight=None):
26 | return super().fit_predict(X, sample_weight=sample_weight)
27 |
28 | def fit_transform(self, X, y=None, sample_weight=None):
29 | return super().fit_transform(X, sample_weight=sample_weight)
30 |
31 | def transform(self, X):
32 | return self._weighted_euclidean_distance(X, self.cluster_centers_)
33 |
34 |
35 | def smart_depth_slicing(rgb_img, depth_img, n_slices, rgb_weight, standardize_features):
36 | depth_img = depth_img.numpy() if hasattr(depth_img, 'numpy') else depth_img
37 | rgb_img = rgb_img.numpy() if hasattr(rgb_img, 'numpy') else rgb_img
38 |
39 | # Reshape images
40 | depth_flat = depth_img.reshape(-1, 1)
41 | rgb_flat = rgb_img.reshape(-1, 3)
42 |
43 | if rgb_weight != 0.0:
44 | combined_features = np.hstack((depth_flat, rgb_flat))
45 | weights = np.array([1, rgb_weight, rgb_weight, rgb_weight])
46 | else:
47 |         combined_features = depth_flat  # depth only
48 | weights = np.array([1])
49 |
50 | if standardize_features:
51 | scaler = StandardScaler()
52 | combined_features = scaler.fit_transform(combined_features)
53 |
54 | # Apply weighted k-means clustering
55 | kmeans = WeightedKMeans(n_clusters=n_slices, weights=weights, random_state=42)
56 | kmeans.fit(combined_features)
57 |
58 | cluster_indices = kmeans.labels_
59 |
60 | # Extract depth values from cluster centers
61 | depth_centers = kmeans.cluster_centers_[:, 0]
62 |
63 | # Sort the cluster centers based on depth
64 | sorted_indices = np.argsort(depth_centers)
65 | #sorted_centers = depth_centers[sorted_indices]
66 |
67 | # Reshape cluster_indices back to original image shape
68 | cluster_indices = cluster_indices.reshape(depth_img.shape)
69 |
70 | # Create a mapping from old cluster indices to new sorted indices
71 | index_map = {old: new for new, old in enumerate(sorted_indices[::-1])}
72 |
73 | # Apply the mapping to get sorted cluster indices
74 | sorted_cluster_indices = np.vectorize(index_map.get)(cluster_indices)
75 |
76 | # Create mask_images tensor
77 | h, w = depth_img.shape
78 | mask_images = torch.zeros((n_slices, h, w, 3), dtype=torch.float32)
79 |
80 | # Fill mask_images with binary masks
81 | for i in range(n_slices):
82 | binary_mask = (sorted_cluster_indices == i)
83 | mask_images[i] = torch.from_numpy(np.repeat(binary_mask[:, :, np.newaxis], 3, axis=2))
84 |
85 | return mask_images
86 |
87 | import torch
88 |
89 | class Eden_DepthSlice_MaskVideo:
90 | @classmethod
91 | def INPUT_TYPES(s):
92 | return {"required":
93 | {"depth_map": ("IMAGE",),
94 | "slice_width": ("FLOAT", {"default": 0.1, "min": 0.01, "max": 0.99, "step": 0.01}),
95 | "min_depth": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
96 | "max_depth": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
97 | "gamma_correction": ("FLOAT", {"default": 1.0, "min": 0.1, "max": 10.0, "step": 0.1}),
98 | "n_frames": ("INT", {"default": 100, "min": 1, "max": sys.maxsize}),
99 | "reverse": ("BOOLEAN", {"default": False}),
100 | "bounce": ("BOOLEAN", {"default": False}),
101 | }
102 | }
103 |
104 | RETURN_TYPES = ("MASK","MASK")
105 | RETURN_NAMES = ("depth_slice_masks", "gamma_corrected_depth_map")
106 | FUNCTION = "generate_mask_video"
107 | CATEGORY = "Eden 🌱/Depth"
108 |
109 | def generate_mask_video(self, depth_map, slice_width, min_depth, max_depth, gamma_correction, n_frames, reverse, bounce):
110 | # Ensure depth_map is normalized between 0 and 1:
111 | depth_map = depth_map - depth_map.min()
112 | depth_map = depth_map / depth_map.max()
113 |
114 | # Apply gamma correction to the input depth map
115 | depth_map = depth_map ** gamma_correction
116 |
117 | # If depth_map has multiple channels, select the first one
118 | if depth_map.shape[-1] == 3:
119 | depth_map = depth_map[..., 0] # Retain only the first channel
120 |
121 |         # Adjust n_frames for the bounce effect (keep the requested total for padding)
122 |         total_frames = n_frames
123 |         if bounce:
124 |             n_frames = n_frames // 2
125 | # Set up video frames tensor on the same device as depth_map
126 | device = depth_map.device
127 | video_frames = torch.zeros((n_frames, *depth_map.shape[1:3]), dtype=torch.float32, device=device)
128 |
129 | # Calculate each frame's depth range based on slice width and min_depth/max_depth:
130 | for i in range(n_frames):
131 | lower_bound = min_depth + (max_depth - slice_width - min_depth) * i / n_frames
132 | upper_bound = lower_bound + slice_width
133 |
134 | # Generate mask for the current depth range:
135 | mask = (depth_map >= lower_bound) & (depth_map < upper_bound)
136 | video_frames[i] = mask.squeeze().float()
137 |
138 | # Reverse the video frames if the reverse flag is enabled
139 | if reverse:
140 | video_frames = video_frames.flip(0)
141 |
142 | # Handle bounce effect by concatenating the frames in reverse order
143 | if bounce:
144 | video_frames = torch.cat([video_frames, video_frames.flip(0)])
145 |             if video_frames.shape[0] < total_frames:
146 | video_frames = torch.cat([video_frames, video_frames[-1:]])
147 |
148 | return (video_frames, depth_map)
149 |
150 | class DepthSlicer:
151 | @classmethod
152 | def INPUT_TYPES(s):
153 | return {"required":
154 | {"image": ("IMAGE",),
155 | "depth_map": ("IMAGE",),
156 | "n_slices": ("INT", {"default": 2, "min": 1, "max": sys.maxsize}),
157 | "rgb_weight": ("FLOAT", {"default": 0.0, "step": 0.01}),
158 | "standardize_features": ("BOOLEAN", {"default": False}),
159 | }
160 | }
161 |
162 | RETURN_TYPES = ("IMAGE",)
163 | RETURN_NAMES = ("inpainting_masks",)
164 | FUNCTION = "slice"
165 | CATEGORY = "Eden 🌱/Depth"
166 |
167 | def slice(self, image, depth_map, n_slices, rgb_weight, standardize_features):
168 | # Use only one channel (they're identical)
169 | depth = depth_map[0, :, :, 0]
170 |
171 | # Calculate thresholds
172 | masks = smart_depth_slicing(image, depth, n_slices, rgb_weight, standardize_features)
173 |
174 | return (masks,)
175 |
176 |
177 | class ParallaxZoom:
178 | @classmethod
179 | def INPUT_TYPES(s):
180 | return {"required":
181 | {"masks": ("IMAGE",),
182 | "image_slices": ("IMAGE",),
183 | "foreground_zoom_factor": ("FLOAT", {"default": 1.1, "step": 0.001}),
184 | "background_zoom_factor": ("FLOAT", {"default": 1.05, "step": 0.001}),
185 | "pan_left": ("FLOAT", {"default": 0.1, "min": -1.0, "max": 1.0, "step": 0.001}),
186 | "n_frames": ("INT", {"default": 25, "min": 1, "max": sys.maxsize}),
187 | "loop": ("BOOLEAN", {"default": False}),
188 | }
189 | }
190 |
191 | RETURN_TYPES = ("IMAGE","IMAGE")
192 | RETURN_NAMES = ("frames","masks")
193 | FUNCTION = "zoom"
194 | CATEGORY = "Eden 🌱/Depth"
195 | DESCRIPTION = """
196 | Apply 3D depth parallax to the input image to create a 3D video effect.
197 | Foreground and Background zoom factors control the amount of zoom applied to the respective layers.
198 | Pan Left controls the amount of horizontal shift applied to the image.
199 | All these values are the total fraction (relative to resolution) applied to the image over the full animation.
200 | """
201 |
202 | @staticmethod
203 | def warp_affine(image, mask=None, zoom_factor=1.0, shift_factor=0.0):
204 | h, w = image.shape[:2]
205 | center = (w / 2, h / 2)
206 |
207 | # Create the affine transformation matrix
208 | M = cv2.getRotationMatrix2D(center, 0, zoom_factor)
209 | M[0, 2] += shift_factor * w # Add horizontal shift
210 |
211 | # Apply the affine transformation
212 | warped_image = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
213 |
214 | if mask is not None:
215 | warped_mask = cv2.warpAffine(mask, M, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
216 | return warped_image, warped_mask
217 |
218 | return warped_image
219 |
220 | def zoom(self, masks, image_slices, foreground_zoom_factor, background_zoom_factor, pan_left, n_frames, loop):
221 | masks = masks.numpy()
222 | image_slices = image_slices.numpy()
223 |
224 | # Extract the images and masks
225 | foreground_image = image_slices[0].copy()
226 | background_image = image_slices[1].copy()
227 | foreground_mask = masks[0,:,:,0].copy()
228 |
229 | frames, foreground_masks = [], []
230 |
231 | for i in range(n_frames):
232 |
233 | # Compute progress as a value between 0 and 1
234 | progress = i / (n_frames - 1)
235 |
236 | if loop:
237 | # Full sine wave cycle for looping
238 | angle = progress * np.pi * 2
239 | factor = (np.sin(angle) + 1) / 2
240 | else:
241 | # Linear progression for non-looping
242 | factor = progress
243 |
244 | # Compute zoom and shift factors
245 | fg_zoom = 1 + (foreground_zoom_factor - 1) * factor
246 |
247 | # Adjust background zoom behavior
248 | if background_zoom_factor >= 1:
249 | bg_zoom = 1 + (background_zoom_factor - 1) * factor
250 | else:
251 | bg_zoom = 1 / background_zoom_factor - (1 / background_zoom_factor - 1) * factor
252 |
253 | fg_shift = pan_left/2 - pan_left * factor
254 |
255 | print(f"Frame {i}, fg_shift: {fg_shift}")
256 |
257 | # Apply transformations
258 | warped_foreground, warped_mask = self.warp_affine(foreground_image, foreground_mask, fg_zoom, fg_shift)
259 | warped_background = self.warp_affine(background_image, zoom_factor=bg_zoom)
260 |
261 | # Ensure the mask has 3 channels to match the image
262 | warped_mask = np.stack([warped_mask] * 3, axis=-1)
263 | foreground_masks.append(warped_mask)
264 |
265 | # Combine foreground and background
266 | final_image = warped_foreground * warped_mask + warped_background * (1 - warped_mask)
267 | final_image = final_image[:, :, :3]
268 |
269 | frames.append(final_image)
270 |
271 | frames = torch.tensor(np.array(frames))
272 | foreground_masks = torch.tensor(np.array(foreground_masks))
273 |
274 | return (frames, foreground_masks)
275 |
276 |
277 |
278 | def load_and_prepare_data(depth_map_path, image_path):
279 | # Load depth map
280 | depth_map = Image.open(depth_map_path).convert('L') # Convert to grayscale
281 | depth_map = transforms.ToTensor()(depth_map).squeeze(0) # Remove channel dimension
282 |
283 | # Load image
284 | image = Image.open(image_path).convert('RGB')
285 | image = transforms.ToTensor()(image)
286 |
287 | # Ensure depth map and image have the same size
288 | if depth_map.shape != image.shape[1:]:
289 | depth_map = transforms.Resize(image.shape[1:])(depth_map.unsqueeze(0)).squeeze(0)
290 |
291 | # Normalize depth map to [0, 1] range if it's not already
292 | if depth_map.max() > 1:
293 | depth_map = depth_map / 255.0
294 |
295 | return depth_map, image
296 |
297 |
298 | def perspective_warp_torch(depth_map, image, affine_matrix):
299 | # Ensure inputs are PyTorch tensors and on the same device
300 | device = image.device
301 | depth_map = depth_map.to(device)
302 | affine_matrix = affine_matrix.to(device)
303 |
304 | # Get image dimensions
305 | channels, height, width = image.shape
306 |
307 | # Create meshgrid of pixel coordinates
308 | y, x = torch.meshgrid(torch.arange(height, device=device), torch.arange(width, device=device), indexing='ij')
309 |
310 | # Stack x, y, and depth values
311 | points = torch.stack([x.flatten(), y.flatten(), depth_map.flatten(), torch.ones_like(x.flatten())], dim=-1)
312 |
313 | # Apply affine transformation
314 | transformed_points = torch.matmul(affine_matrix, points.T).T
315 |
316 | # Normalize homogeneous coordinates
317 | transformed_points = transformed_points[:, :3] / transformed_points[:, 3:]
318 |
319 | # Reshape back to image dimensions
320 | new_x = transformed_points[:, 0].reshape(height, width)
321 | new_y = transformed_points[:, 1].reshape(height, width)
322 |
323 | # Scale coordinates to [-1, 1] range for grid_sample
324 | new_x = 2 * (new_x / (width - 1)) - 1
325 | new_y = 2 * (new_y / (height - 1)) - 1
326 |
327 | # Stack coordinates for grid_sample
328 | grid = torch.stack([new_x, new_y], dim=-1).unsqueeze(0)
329 |
330 | # Apply the transformation using grid_sample
331 | warped_image = F.grid_sample(image.unsqueeze(0), grid, mode='bicubic', padding_mode='reflection', align_corners=True)
332 |
333 | return warped_image.squeeze(0)
334 |
335 | if __name__ == "__main__":
336 |
337 | import torch
338 | from torchvision import transforms
339 | from PIL import Image
340 |
341 | # Example usage:
342 | depth_map_path = 'depth.png'
343 | image_path = 'img.png'
344 |
345 | depth_map, image = load_and_prepare_data(depth_map_path, image_path)
346 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
347 |
348 | # Create an example affine matrix (identity transform with some translation)
349 | zoom_factor = 0.1
350 | affine_matrix = torch.tensor([
351 | [1, 0, 0, 0],
352 | [0, 1, 0, 0],
353 | [0, 0, 1, 0],
354 | [0, 0, zoom_factor, 1]
355 | ], dtype=torch.float32)
356 |
357 | warped_image = perspective_warp_torch(
358 | depth_map,
359 | image,
360 | affine_matrix
361 | )
362 |
363 | # If you want to visualize or save the result:
364 | from torchvision.utils import save_image
365 | save_image(warped_image, 'warped_image.jpg')
--------------------------------------------------------------------------------
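The depth-slicing utilities above are easiest to see in isolation. Below is a minimal sketch, assuming the file can be imported as `img_utils.depth_nodes` (e.g. with the repository root on `sys.path`); it builds a synthetic depth/RGB pair and produces `n_slices` binary masks ordered from near to far:

```python
# Hypothetical standalone use of smart_depth_slicing (illustration only, not part of the repo).
import numpy as np
from img_utils.depth_nodes import smart_depth_slicing  # assumed import path

h, w, n_slices = 128, 128, 4
rng = np.random.default_rng(0)
depth = rng.random((h, w)).astype(np.float32)   # normalized depth map in [0, 1]
rgb = rng.random((h, w, 3)).astype(np.float32)  # RGB image in [0, 1]

# rgb_weight=0.0 clusters on depth alone; a value > 0 also feeds the RGB channels into k-means.
masks = smart_depth_slicing(rgb, depth, n_slices=n_slices, rgb_weight=0.0, standardize_features=False)
print(masks.shape)  # torch.Size([4, 128, 128, 3]): one binary mask per depth slice
```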
/img_utils/depth_segmentation.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import logging
3 | from typing import Tuple, Optional
4 |
5 | import numpy as np
6 | import cv2
7 | from sklearn.cluster import KMeans
8 | from skimage import segmentation, filters, morphology, measure
9 | from scipy import ndimage
10 |
11 |
12 | # Set up logging
13 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
14 | logger = logging.getLogger(__name__)
15 |
16 | def load_and_preprocess_images(rgb_path: str, depth_path: str) -> Tuple[np.ndarray, np.ndarray]:
17 | """
18 | Load and preprocess RGB and depth images.
19 |
20 | Args:
21 | rgb_path (str): Path to the RGB image file.
22 | depth_path (str): Path to the depth image file.
23 |
24 | Returns:
25 | Tuple[np.ndarray, np.ndarray]: Normalized RGB and depth images.
26 | """
27 | try:
28 |         rgb_image = cv2.imread(rgb_path)
29 |         depth_image = cv2.imread(depth_path, cv2.IMREAD_ANYDEPTH)
30 | 
31 |         if rgb_image is None or depth_image is None:
32 |             raise ValueError("Failed to load images")
33 | 
34 |         rgb_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2RGB)
35 |         if rgb_image.shape[:2] != depth_image.shape:
36 |             raise ValueError("RGB and depth images must have the same dimensions")
37 |
38 | normalized_rgb = normalize_image(rgb_image)
39 | normalized_depth = normalize_image(depth_image)
40 |
41 | return normalized_rgb, normalized_depth
42 | except Exception as e:
43 | logger.error(f"Error in load_and_preprocess_images: {str(e)}")
44 | raise
45 |
46 | def normalize_image(image: np.ndarray) -> np.ndarray:
47 | """
48 | Normalize image to 0-1 range.
49 |
50 | Args:
51 | image (np.ndarray): Input image.
52 |
53 | Returns:
54 | np.ndarray: Normalized image.
55 | """
56 | return (image - np.min(image)) / (np.max(image) - np.min(image))
57 |
58 | def initial_depth_segmentation(depth_map: np.ndarray, n_clusters: int = 3) -> np.ndarray:
59 | """
60 | Perform initial segmentation based on depth information.
61 |
62 | Args:
63 | depth_map (np.ndarray): Normalized depth map.
64 | n_clusters (int): Number of clusters for K-means.
65 |
66 | Returns:
67 | np.ndarray: Initial depth-based segmentation.
68 | """
69 | try:
70 | depth_flat = depth_map.reshape(-1, 1)
71 | kmeans = KMeans(n_clusters=n_clusters, random_state=42, n_init=10)
72 | depth_clusters = kmeans.fit_predict(depth_flat)
73 | return depth_clusters.reshape(depth_map.shape)
74 | except Exception as e:
75 | logger.error(f"Error in initial_depth_segmentation: {str(e)}")
76 | raise
77 |
78 |
79 | def visualize_intermediate(image, title):
80 | """
81 | Visualize and save an intermediate result.
82 |
83 | Args:
84 | image (np.ndarray): Image to visualize.
85 | title (str): Title for the plot.
86 | """
87 |
88 | n_unique_values = len(np.unique(image))
89 |
90 | plt.figure(figsize=(10, 8))
91 | plt.imshow(image) #, cmap='nipy_spectral')
92 | plt.title(title + f" ({n_unique_values} segments)")
93 | plt.axis('off')
94 | plt.savefig(f"{title.replace(' ', '_')}.png")
95 | plt.close()
96 |
97 | def auto_segment(rgb_image: np.ndarray, depth_map: np.ndarray, n_segments: int) -> np.ndarray:
98 | """
99 | Main function to perform automatic segmentation.
100 |
101 | Args:
102 | rgb_image (np.ndarray): Normalized RGB image.
103 | depth_map (np.ndarray): Normalized depth map.
104 | n_segments (int): Desired number of segments.
105 |
106 | Returns:
107 | np.ndarray: Final segmentation.
108 | """
109 | try:
110 | logger.info("Starting automatic segmentation")
111 | logger.info(f"RGB image shape: {rgb_image.shape}, dtype: {rgb_image.dtype}")
112 | logger.info(f"Depth map shape: {depth_map.shape}, dtype: {depth_map.dtype}")
113 | logger.info(f"Desired number of segments: {n_segments}")
114 |
115 | # Initial depth segmentation
116 | depth_segments = initial_depth_segmentation(depth_map, n_clusters=min(n_segments, 10))
117 | logger.info("Initial depth segmentation completed")
118 | visualize_intermediate(depth_segments, "01 - Initial Depth Segmentation")
119 |
120 | # Color refinement
121 | color_refined = refine_with_color(rgb_image, depth_segments, n_segments=n_segments)
122 | logger.info("Color refinement completed")
123 | visualize_intermediate(color_refined, "02 - Color Refined Segmentation")
124 |
125 | # Edge-based refinement
126 | edge_refined = edge_based_refinement(rgb_image, color_refined)
127 | logger.info("Edge-based refinement completed")
128 | visualize_intermediate(edge_refined, "03 - Edge Refined Segmentation")
129 |
130 | # Region growing
131 | region_grown = region_growing(rgb_image, depth_map, edge_refined)
132 | logger.info("Region growing completed")
133 | visualize_intermediate(region_grown, "04 - Region Grown Segmentation")
134 |
135 | # Post-processing
136 | final_segments = post_processing(region_grown, n_segments=n_segments)
137 | logger.info("Post-processing completed")
138 | visualize_intermediate(final_segments, "05 - Final Segmentation")
139 |
140 |         n_final_segments = len(np.unique(final_segments))
141 |         logger.info(f"Segmentation completed with {n_final_segments} segments")
142 |
143 | return final_segments
144 | except Exception as e:
145 | logger.error(f"Error in auto_segment: {str(e)}")
146 | raise
147 |
148 | def edge_based_refinement(rgb_image: np.ndarray, segments: np.ndarray) -> np.ndarray:
149 | """
150 | Refine segmentation using edge detection.
151 |
152 | Args:
153 | rgb_image (np.ndarray): Normalized RGB image.
154 | segments (np.ndarray): Input segmentation.
155 |
156 | Returns:
157 | np.ndarray: Edge-refined segmentation.
158 | """
159 | try:
160 | # Convert the image to 8-bit unsigned integer
161 | rgb_image_8bit = (rgb_image * 255).astype(np.uint8)
162 |
163 | # Convert to grayscale
164 | gray_image = cv2.cvtColor(rgb_image_8bit, cv2.COLOR_RGB2GRAY)
165 |
166 | # Compute edges
167 | edges = filters.sobel(gray_image)
168 |
169 | # Normalize edges to 0-1 range
170 | edges = (edges - edges.min()) / (edges.max() - edges.min())
171 |
172 | return segmentation.watershed(edges, markers=segments, mask=edges < 0.1)
173 | except Exception as e:
174 | logger.error(f"Error in edge_based_refinement: {str(e)}")
175 | raise
176 |
177 | def region_growing(rgb_image: np.ndarray, depth_map: np.ndarray, segments: np.ndarray) -> np.ndarray:
178 | """
179 | Perform region growing based on color and depth similarity.
180 |
181 | Args:
182 | rgb_image (np.ndarray): Normalized RGB image.
183 | depth_map (np.ndarray): Normalized depth map.
184 | segments (np.ndarray): Input segmentation.
185 |
186 | Returns:
187 | np.ndarray: Region-grown segmentation.
188 | """
189 | try:
190 | def color_distance(c1, c2):
191 | return np.sqrt(np.sum((c1 - c2) ** 2))
192 |
193 | def depth_distance(d1, d2):
194 | return abs(d1 - d2)
195 |
196 | grown_segments = segments.copy()
197 | height, width = segments.shape
198 | directions = [(-1, 0), (1, 0), (0, -1), (0, 1)]
199 |
200 | for y in range(height):
201 | for x in range(width):
202 | for dy, dx in directions:
203 | ny, nx = y + dy, x + dx
204 | if 0 <= ny < height and 0 <= nx < width:
205 | if segments[y, x] != segments[ny, nx]:
206 | color_dist = color_distance(rgb_image[y, x], rgb_image[ny, nx])
207 | depth_dist = depth_distance(depth_map[y, x], depth_map[ny, nx])
208 | if color_dist < 0.1 and depth_dist < 0.05:
209 | grown_segments[ny, nx] = grown_segments[y, x]
210 |
211 | return grown_segments
212 | except Exception as e:
213 | logger.error(f"Error in region_growing: {str(e)}")
214 | raise
215 |
216 | def refine_with_color(rgb_image: np.ndarray, depth_segments: np.ndarray, n_segments: int) -> np.ndarray:
217 | """
218 | Refine segmentation using color information.
219 |
220 | Args:
221 | rgb_image (np.ndarray): Normalized RGB image.
222 | depth_segments (np.ndarray): Initial depth-based segmentation.
223 | n_segments (int): Desired number of segments.
224 |
225 | Returns:
226 | np.ndarray: Refined segmentation.
227 | """
228 | try:
229 | refined_segments = segmentation.slic(rgb_image, n_segments=n_segments, compactness=10, start_label=1)
230 | return refined_segments
231 | except Exception as e:
232 | logger.error(f"Error in refine_with_color: {str(e)}")
233 | raise
234 |
235 | import traceback
236 |
237 | def segment_merging(segments: np.ndarray) -> np.ndarray:
238 | """
239 | Merge the two most similar adjacent segments based on size and centroid proximity.
240 | """
241 | try:
242 | unique_labels = np.unique(segments)
243 | logger.info(f"Unique labels: {unique_labels}")
244 |
245 | # Compute region properties
246 | props = measure.regionprops(segments)
247 | logger.info(f"Number of regions: {len(props)}")
248 |
249 | # Create dictionaries for centroids and areas
250 | centroids = {prop.label: prop.centroid for prop in props if prop.label != 0}
251 | areas = {prop.label: prop.area for prop in props if prop.label != 0}
252 |
253 | logger.info(f"Centroids: {centroids}")
254 | logger.info(f"Areas: {areas}")
255 |
256 | # Find adjacent segments
257 | def are_adjacent(label1, label2):
258 | region1 = segments == label1
259 | dilated = ndimage.binary_dilation(region1)
260 | return np.any(dilated & (segments == label2))
261 |
262 | # Find the pair of adjacent segments with the most similar size and closest centroids
263 | min_score = float('inf')
264 | merge_pair = None
265 | for i, label1 in enumerate(unique_labels):
266 | if label1 == 0:
267 | continue
268 | for label2 in unique_labels[i+1:]:
269 | if label2 == 0:
270 | continue
271 | if are_adjacent(label1, label2):
272 | size_diff = abs(areas[label1] - areas[label2]) / max(areas[label1], areas[label2])
273 | distance = np.linalg.norm(np.array(centroids[label1]) - np.array(centroids[label2]))
274 | score = size_diff + distance # You can adjust the weighting of these factors
275 | logger.debug(f"Labels {label1} and {label2}: score = {score}")
276 | if score < min_score:
277 | min_score = score
278 | merge_pair = (label1, label2)
279 |
280 | logger.info(f"Merge pair: {merge_pair}")
281 |
282 | # Merge the selected pair
283 | if merge_pair:
284 | label1, label2 = merge_pair
285 | segments[segments == label2] = label1
286 | logger.info(f"Merged label {label2} into {label1}")
287 | else:
288 | logger.warning("No suitable merge pair found")
289 |
290 | return segments
291 | except Exception as e:
292 | logger.error(f"Error in segment_merging: {str(e)}")
293 | logger.error(traceback.format_exc())
294 | raise
295 |
296 |
297 | def post_processing(segments: np.ndarray, n_segments: int) -> np.ndarray:
298 | """
299 | Apply post-processing to clean up the segmentation and ensure the desired number of segments.
300 |
301 | Args:
302 | segments (np.ndarray): Input segmentation.
303 | n_segments (int): Desired number of segments.
304 |
305 | Returns:
306 | np.ndarray: Post-processed segmentation.
307 | """
308 | try:
309 | # Remove small objects
310 | cleaned = morphology.remove_small_objects(segments, min_size=100)
311 |
312 | current_segments = len(np.unique(cleaned))
313 | logger.info(f"Number of segments after removing small objects: {current_segments}")
314 |
315 | # Merge segments if there are too many
316 | merge_count = 0
317 | while current_segments > n_segments:
318 | logger.info(f"Merging segments: current {current_segments}, target {n_segments}")
319 | cleaned = segment_merging(cleaned)
320 | new_current_segments = len(np.unique(cleaned))
321 | logger.info(f"After merging: {new_current_segments} segments")
322 | if new_current_segments == current_segments:
323 | logger.warning("No segments were merged in this iteration. Breaking loop.")
324 | break
325 | current_segments = new_current_segments
326 | merge_count += 1
327 | if merge_count > 100: # Safeguard against infinite loops
328 | logger.warning("Reached maximum number of merge iterations. Breaking loop.")
329 | break
330 |
331 | # Split segments if there are too few
332 | split_count = 0
333 | while current_segments < n_segments:
334 | logger.info(f"Splitting segments: current {current_segments}, target {n_segments}")
335 | cleaned = segment_splitting(cleaned)
336 | new_current_segments = len(np.unique(cleaned))
337 | logger.info(f"After splitting: {new_current_segments} segments")
338 | if new_current_segments == current_segments:
339 | logger.warning("No segments were split in this iteration. Breaking loop.")
340 | break
341 | current_segments = new_current_segments
342 | split_count += 1
343 | if split_count > 100: # Safeguard against infinite loops
344 | logger.warning("Reached maximum number of split iterations. Breaking loop.")
345 | break
346 |
347 | logger.info(f"Final number of segments: {current_segments}")
348 | return cleaned
349 | except Exception as e:
350 | logger.error(f"Error in post_processing: {str(e)}")
351 | logger.error(traceback.format_exc())
352 | raise
353 |
354 | import numpy as np
355 | from scipy import ndimage
356 | from skimage import measure
357 |
358 |
359 | def segment_splitting(segments: np.ndarray) -> np.ndarray:
360 | """
361 | Split the largest segment using K-means clustering.
362 | """
363 | from sklearn.cluster import KMeans
364 |
365 | unique_labels = np.unique(segments)
366 |
367 | # Find the largest segment
368 | largest_label = max(unique_labels, key=lambda l: np.sum(segments == l))
369 |
370 | # Get the coordinates of pixels in the largest segment
371 | y, x = np.where(segments == largest_label)
372 | coords = np.column_stack((y, x))
373 |
374 | # Apply K-means clustering to split the segment into two
375 | kmeans = KMeans(n_clusters=2, random_state=42)
376 | cluster_labels = kmeans.fit_predict(coords)
377 |
378 | # Assign new labels to the split segments
379 | new_label = segments.max() + 1
380 | mask = segments == largest_label
381 |     segments[mask] = np.where(cluster_labels == 0, largest_label, new_label)
382 |
383 | return segments
384 | if __name__ == "__main__":
385 | try:
386 | rgb_path = "image.png"
387 | depth_path = "depth.png"
388 | n_segments = 3 # Desired number of segments
389 |
390 | rgb_image, depth_map = load_and_preprocess_images(rgb_path, depth_path)
391 | segmentation_result = auto_segment(rgb_image, depth_map, n_segments)
392 |
393 | # Visualize the final result
394 | plt.figure(figsize=(12, 10))
395 | plt.imshow(segmentation_result, cmap='nipy_spectral')
396 | plt.title("Final Segmentation Result")
397 | plt.axis('off')
398 | plt.savefig("Final_Segmentation_Result.png")
399 | plt.savefig('plot.jpg')
400 |
401 | except Exception as e:
402 | logger.error(f"An error occurred: {str(e)}")
--------------------------------------------------------------------------------
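The `__main__` block above expects `image.png` and `depth.png` on disk. For a quicker smoke test of just the first stage, a small sketch (assuming the module is importable as `img_utils.depth_segmentation`) that clusters a synthetic depth map:

```python
# Hypothetical smoke test of the initial depth clustering stage (illustration only).
import numpy as np
from img_utils.depth_segmentation import initial_depth_segmentation, normalize_image  # assumed path

# Synthetic depth map: three horizontal bands at increasing depth.
depth = np.zeros((90, 120), dtype=np.float32)
depth[30:60] = 0.5
depth[60:] = 1.0

labels = initial_depth_segmentation(normalize_image(depth), n_clusters=3)
print(labels.shape, np.unique(labels))  # (90, 120) with labels {0, 1, 2}
```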
/img_utils/gpt_nodes.py:
--------------------------------------------------------------------------------
1 | import base64
2 | import io, os, sys
3 | from PIL import Image
4 | import numpy as np
5 | from openai import OpenAI
6 |
7 | from dotenv import load_dotenv
8 | load_dotenv()
9 |
10 | try:
11 | OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
12 | client = OpenAI(api_key=OPENAI_API_KEY)
13 | print("OpenAI API key loaded")
14 | except Exception:
15 | OPENAI_API_KEY = None
16 | client = None
17 | print("Eden_Comfy_Pipelines: WARNING: Could not find OPENAI_API_KEY in .env, disabling gpt prompt generation.")
18 |
19 |
20 | class Eden_gpt4_node:
21 | @classmethod
22 | def INPUT_TYPES(cls):
23 | return {
24 | "required": {
25 | "max_token": ("INT", {"default": 100, "min": 1, "max": sys.maxsize}),
26 | "model": (["gpt-4o", "gpt-4-turbo"], {"default": "gpt-4o"}),
27 | "prompt": ("STRING", {"multiline": True, "default": "Write a poem about ComfyUI"}),
28 | "seed": ("INT", {"default": 0, "min": 0, "max": sys.maxsize}),
29 | }
30 | }
31 |
32 | RETURN_TYPES = ("STRING",)
33 | FUNCTION = "gpt4_completion"
34 | CATEGORY = "Eden 🌱"
35 |
36 | def gpt4_completion(self, max_token, model, prompt, seed):
37 | try:
38 |
39 | if not client:
40 | print("An OpenAI API key is required for GPT node, put a .env file with your key in the comfyui root directory!")
41 | return ("An OpenAI API key is required for GPT-4 Vision. Make sure to place a .env file in the root directory of eden_comfy_pipelines with your secret API key. Make sure to never share your API key with anyone.", )
42 |
43 | response = client.chat.completions.create(
44 | model=model,
45 | seed=seed,
46 | messages=[
47 | {"role": "system", "content": "You are a helpful assistant."},
48 | {"role": "user", "content": prompt},
49 | ],
50 | max_tokens=max_token
51 | )
52 |
53 | gpt_completion = response.choices[0].message.content
54 | print(f"GPT4 completion:\n{gpt_completion}")
55 | return (gpt_completion,)
56 | except Exception as e:
57 | return (f"Error: {str(e)}",)
58 |
59 |
60 | class Eden_GPTPromptEnhancer:
61 | @classmethod
62 | def INPUT_TYPES(cls):
63 | return {
64 | "required": {
65 | "basic_prompt": ("STRING", {
66 | "multiline": True,
67 | "default": "A beautiful landscape"
68 | }),
69 | "enhancement_instructions": ("STRING", {
70 | "multiline": True,
71 | "default": "Augment this visual description by adding specific details about lighting, scene elements, composition, and artistic style. Make it more descriptive and specific. Be bold and creative! Limit the final prompt to 100 words."
72 | }),
73 | "max_token": ("INT", {"default": 500, "min": 1, "max": sys.maxsize}),
74 | "model": ([
75 | "gpt-4o",
76 | "gpt-4o-mini",
77 | "gpt-4-turbo",
78 | "gpt-3.5-turbo",
79 | ], {"default": "gpt-4o"}),
80 | "seed": ("INT", {"default": 0, "min": 0, "max": sys.maxsize}),
81 | },
82 | "optional": {
83 | "temperature": ("FLOAT", {
84 | "default": 0.7,
85 | "min": 0.0,
86 | "max": 2.0,
87 | "step": 0.1
88 | }),
89 | }
90 | }
91 |
92 | RETURN_TYPES = ("STRING",)
93 | FUNCTION = "enhance_prompt"
94 | CATEGORY = "Eden 🌱"
95 |
96 | def enhance_prompt(self, basic_prompt, enhancement_instructions, max_token, model, seed, temperature=0.7):
97 | try:
98 | if not client:
99 | return ("An OpenAI API key is required for GPT Prompt Enhancer. Make sure to place a .env file in the root directory with your OpenAI API key.",)
100 |
101 | # Construct the system message to guide GPT's behavior
102 | system_message = """You are a prompt engineering expert. Your task is to enhance and improve the given prompt according to the provided instructions.
103 | Keep the enhanced prompt focused and coherent. Maintain the original intent while adding valuable details and improvements."""
104 |
105 | # Construct the user message combining the prompt and instructions
106 | user_message = f"""Original prompt: {basic_prompt}
107 |
108 | Enhancement instructions: {enhancement_instructions}
109 |
110 | Please enhance this prompt according to the instructions. Provide only the enhanced prompt without any explanations or additional text."""
111 |
112 | response = client.chat.completions.create(
113 | model=model,
114 | seed=seed,
115 | temperature=temperature,
116 | messages=[
117 | {"role": "system", "content": system_message},
118 | {"role": "user", "content": user_message},
119 | ],
120 | max_tokens=max_token
121 | )
122 |
123 | enhanced_prompt = response.choices[0].message.content
124 | return (enhanced_prompt,)
125 |
126 | except Exception as e:
127 | return (f"Error in prompt enhancement: {str(e)}",)
128 |
129 | class ImageDescriptionNode:
130 | @classmethod
131 | def INPUT_TYPES(cls):
132 | return {
133 | "required": {
134 | "image": ("IMAGE",),
135 | "max_token": ("INT", {"default": 100, "min": 1, "max": sys.maxsize}),
136 | "endpoint": ("STRING", {"multiline": False, "default": "https://api.openai.com/v1"}),
137 | "model": (["gpt-4-vision Low", "gpt-4-vision High"], {"default": "gpt-4-vision Low"}),
138 |                 "prompt": ("STRING", {"multiline": True, "default": "Concisely describe the content of the images. Respond with a single description per line (ending with a period and a newline character)."}),
139 | }
140 | }
141 |
142 | RETURN_TYPES = ("STRING",)
143 | FUNCTION = "describe_image"
144 | CATEGORY = "Eden 🌱"
145 |
146 | def image_to_base64(self, image):
147 | buffered = io.BytesIO()
148 | image.save(buffered, format="PNG")
149 | img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
150 | return img_str
151 |
152 | def set_system_message(self, sysmsg):
153 | return [{
154 | "role": "system",
155 | "content": sysmsg
156 | }]
157 |
158 | def describe_image(self, image, max_token, endpoint, model, prompt):
159 | try:
160 | image = image[0]
161 | i = 255. * image.cpu().numpy()
162 | img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
163 |
164 | if not OPENAI_API_KEY:
165 |                 return ("An OpenAI API key is required for GPT-4 Vision. Make sure to place a .env file in the root directory of eden_comfy_pipelines with your secret API key. Make sure to never share your API key with anyone.",)
166 |
167 | client = OpenAI(api_key=OPENAI_API_KEY, base_url=endpoint)
168 | processed_image = self.image_to_base64(img)
169 | detail = "low" if model == "gpt-4-vision Low" else "high"
170 | system_message = self.set_system_message("You are a helpful assistant.")
171 | response = client.chat.completions.create(
172 | model="gpt-4o",
173 | messages=system_message + [
174 | {
175 | "role": "user",
176 | "content": [{
177 | "type": "image_url",
178 | "image_url": {"url": f"data:image/png;base64,{processed_image}", "detail": detail}
179 | }]
180 | },
181 | {
182 | "role": "user",
183 | "content": prompt
184 | }
185 | ],
186 | max_tokens=max_token
187 | )
188 | description = response.choices[0].message.content
189 | print(f"GPT4-v Description:\n{description}")
190 | return (description,)
191 | except Exception as e:
192 | return (f"Error: {str(e)}",)
193 |
194 | class Eden_GPTStructuredOutput:
195 | @classmethod
196 | def INPUT_TYPES(cls):
197 | return {
198 | "required": {
199 | "prompt": ("STRING", {
200 | "multiline": True,
201 | "default": "Generate a structured response about the given topic"
202 | }),
203 | "system_prompt": ("STRING", {
204 | "multiline": True,
205 | "default": "You are a helpful assistant that generates structured JSON responses."
206 | }),
207 | "json_schema": ("STRING", {
208 | "multiline": True,
209 | "default": '{\n "type": "object",\n "properties": {\n "title": {"type": "string"},\n "description": {"type": "string"},\n "key_points": {"type": "array", "items": {"type": "string"}}\n },\n "required": ["title", "description", "key_points"]\n}'
210 | }),
211 | "max_tokens": ("INT", {"default": 1000, "min": 1, "max": sys.maxsize}),
212 | "model": (["gpt-4o", "gpt-4-turbo"], {"default": "gpt-4o"}),
213 | "seed": ("INT", {"default": 0, "min": 0, "max": sys.maxsize}),
214 | },
215 | "optional": {
216 | "temperature": ("FLOAT", {
217 | "default": 0.7,
218 | "min": 0.0,
219 | "max": 2.0,
220 | "step": 0.1
221 | }),
222 | }
223 | }
224 |
225 | RETURN_TYPES = ("STRING",)
226 | FUNCTION = "generate_structured_output"
227 | CATEGORY = "Eden 🌱"
228 |
229 | def generate_structured_output(self, prompt, system_prompt, json_schema, max_tokens, model, seed, temperature=0.7):
230 | try:
231 | if not client:
232 | return ("An OpenAI API key is required for GPT Structured Output. Make sure to place a .env file in the root directory with your OpenAI API key.",)
233 |
234 | # Construct the system message to guide GPT's behavior
235 | system_message = f"""{system_prompt}
236 |
237 | You must respond with a valid JSON object that strictly follows this schema:
238 | {json_schema}
239 |
240 | Do not include any explanations or text outside the JSON object."""
241 |
242 | response = client.chat.completions.create(
243 | model=model,
244 | seed=seed,
245 | temperature=temperature,
246 | messages=[
247 | {"role": "system", "content": system_message},
248 | {"role": "user", "content": prompt},
249 | ],
250 | max_tokens=max_tokens,
251 | response_format={"type": "json_object"}
252 | )
253 |
254 | structured_output = response.choices[0].message.content
255 | return (structured_output,)
256 |
257 | except Exception as e:
258 | return (f"Error in structured output generation: {str(e)}",)
259 |
--------------------------------------------------------------------------------
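All of the GPT nodes above disable themselves when `OPENAI_API_KEY` cannot be found by `load_dotenv()`. A minimal sketch of the expected setup and a quick sanity check (the key value shown is a placeholder):

```python
# .env file placed where load_dotenv() can find it (e.g. the directory ComfyUI is launched from):
#   OPENAI_API_KEY=sk-...
import os
from dotenv import load_dotenv

load_dotenv()
print("key found" if os.getenv("OPENAI_API_KEY") else "key missing - GPT nodes will be disabled")
```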
/img_utils/hist_matcher.py:
--------------------------------------------------------------------------------
1 | """This module implements Histogram Matching operation"""
2 | import sys
3 | import numpy as np
4 | import os
5 | import cv2
6 | import torch
7 |
8 | """This module contains constants used in the command line interface"""
9 | # color spaces
10 | GRAY = 'gray'
11 | HSV = 'hsv'
12 | LAB = 'lab'
13 | RGB = 'rgb'
14 |
15 | # image channels
16 | IMAGE_CHANNELS = '0,1,2'
17 |
18 | # image matching operations
19 | MAX_VALUE_8_BIT = 255
20 |
21 | def read_image(path: str) -> np.ndarray:
22 | """ This function reads an image and transforms it to RGB color space """
23 | if os.path.exists(path):
24 | image = cv2.imread(path, cv2.IMREAD_UNCHANGED)
25 | image = image.astype(np.float32) / MAX_VALUE_8_BIT
26 | if image.ndim == 2:
27 | return image[:, :, np.newaxis]
28 | return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
29 |
30 | raise ValueError(f'Invalid image path {path}')
31 |
32 | def write_image(image: np.ndarray, path: str) -> None:
33 | """ This function transforms an image to BGR color space
34 | and writes it to disk """
35 | if image.dtype == np.float32:
36 | image = (image * MAX_VALUE_8_BIT).astype(np.uint8)
37 | if image.dtype == np.uint8:
38 | if image.shape[-1] == 1:
39 | output_image = image[:, :, 0]
40 | else:
41 | output_image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
42 | if not cv2.imwrite(path, output_image):
43 | raise ValueError(
44 | f'Output directory {os.path.dirname(path)} does not exist')
45 | else:
46 | raise TypeError(
47 | f'Cannot write image with type {image.dtype}')
48 |
49 |
50 |
51 |
52 | class HistogramMatching:
53 | """Histogram Matching operation class"""
54 |     """Histogram Matching operation class.
55 | 
56 |     Inspired by https://github.com/continental/image-statistics-matching/tree/master
57 |     """
58 | self.r_values = []
59 | self.r_counts = []
60 | self.r_quantiles = []
61 |
62 | @classmethod
63 | def INPUT_TYPES(s):
64 | return {
65 | "required": {
66 | "ref_image": ("IMAGE",),
67 | "target_images": ("IMAGE",),
68 | "matching_fraction": ("FLOAT", {"default": 0.75, "min": 0, "max": 1, "step": 0.01}),
69 | "channels": ("STRING", {"default": "0,1,2"})
70 | }
71 | }
72 |
73 | RETURN_TYPES = ("IMAGE",)
74 | FUNCTION = "hist_match"
75 | CATEGORY = "Eden 🌱"
76 |
77 | def hist_match(self, ref_image, target_images, matching_fraction, channels):
78 | """ This function matches the histogram of the source image to the reference image """
79 |
80 | print(f"Input shapes:")
81 | print(ref_image.shape)
82 | print(target_images.shape)
83 |
84 | # Convert the input torch tensors to numpy arrays:
85 |         ref_image = ref_image[0].cpu().numpy().astype(np.float32)  # drop batch dim (ComfyUI IMAGE is B,H,W,C)
86 | target_images = target_images.cpu().numpy().astype(np.float32)
87 |
88 |         # keep values in the 0-1 float range used by ComfyUI IMAGE tensors:
89 |         ref_image = np.clip(ref_image, 0, 1)
90 |         target_images = np.clip(target_images, 0, 1)
91 |
92 | ref_image = cv2.cvtColor(ref_image, cv2.COLOR_BGR2RGB)
93 | target_images = [cv2.cvtColor(image, cv2.COLOR_BGR2RGB) for image in target_images]
94 |
95 | # extract the channels list:
96 | channels = list(map(int, channels.split(',')))
97 |         # reset and set the reference image statistics (node instances can be reused between runs):
98 |         self.r_values, self.r_counts, self.r_quantiles = [], [], []
99 |         self.set_reference_img(ref_image, channels)
100 |
101 | # match the images to the reference:
102 | results = self.match_images_to_reference(target_images, matching_fraction, channels)
103 |
104 | # convert back to BGR:
105 | results = [cv2.cvtColor(result, cv2.COLOR_RGB2BGR) for result in results]
106 |
107 | # convert results back to torch tensors:
108 |         results = [torch.from_numpy(result) for result in results]  # values are already in the 0-1 range
109 | results = torch.stack(results)
110 |
111 | print(results.shape)
112 | return (results,)
113 |
114 |
115 | def set_reference_img(self, reference: np.ndarray, channels=[0, 1, 2]) -> None:
116 | """ This function sets the reference image used for histogram matching """
117 | for channel in channels:
118 | ref_channel = reference[:, :, channel].ravel()
119 | values, counts = np.unique(ref_channel, return_counts=True)
120 | quantiles = np.cumsum(counts).astype(float) / (ref_channel.size + sys.float_info.epsilon)
121 | self.r_values.append(values)
122 | self.r_counts.append(counts)
123 | self.r_quantiles.append(quantiles)
124 |
125 | def match_images_to_reference(self,
126 | sources: list,
127 | match_prop: float = 1.0,
128 | channels = [0,1,2],
129 | ) -> list:
130 |
131 | results = []
132 | for source in sources:
133 | result = np.copy(source)
134 | for channel in channels:
135 | result[:, :, channel] = self.match_channel(source[:, :, channel], channel, match_prop)
136 | results.append(result.astype(np.float32))
137 |
138 | return results
139 |
140 | def match_channel(self,
141 | source: np.ndarray,
142 | channel: int,
143 | match_prop: float = 1.0
144 | ) -> np.ndarray:
145 |
146 | source_shape = source.shape
147 | source = source.ravel()
148 |
149 | # get unique pixel values (sorted),
150 | # indices of the unique array and counts
151 | _, s_indices, s_counts = np.unique(source, return_counts=True, return_inverse=True)
152 |
153 | # compute the cumulative sum of the counts
154 | s_quantiles = np.cumsum(s_counts).astype(float) / (source.size + sys.float_info.epsilon)
155 |
156 | # interpolate linearly to find the pixel values in the reference
157 | # that correspond most closely to the quantiles in the source image
158 | interp_values = np.interp(s_quantiles, self.r_quantiles[channel], self.r_values[channel])
159 |
160 | # clip the interpolated values to the valid range
161 | interp_values = np.clip(interp_values, 0, 1)
162 |
163 | # pick the interpolated pixel values using the inverted source indices
164 | result = interp_values[s_indices]
165 |
166 | # apply matching proportion
167 | diff = source.astype(float) - result
168 | result = source.astype(float) - (diff * match_prop)
169 |
170 | return result.reshape(source_shape)
--------------------------------------------------------------------------------
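The heart of `match_channel` is quantile mapping: each source pixel is replaced by the reference value that sits at the same position in the cumulative histogram. A tiny self-contained numpy illustration of that mapping (equivalent to `match_prop = 1.0`):

```python
import sys
import numpy as np

source = np.array([0.0, 0.1, 0.1, 0.2], dtype=np.float32)
reference = np.array([0.5, 0.6, 0.6, 0.9], dtype=np.float32)

# Reference CDF: unique values and their cumulative quantiles.
r_values, r_counts = np.unique(reference, return_counts=True)
r_quantiles = np.cumsum(r_counts).astype(float) / (reference.size + sys.float_info.epsilon)

# Source CDF, keeping the inverse indices so every pixel can be mapped back.
_, s_indices, s_counts = np.unique(source, return_counts=True, return_inverse=True)
s_quantiles = np.cumsum(s_counts).astype(float) / (source.size + sys.float_info.epsilon)

matched = np.interp(s_quantiles, r_quantiles, r_values)[s_indices]
print(matched)  # [0.5 0.6 0.6 0.9] -- the source now follows the reference distribution
```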
/img_utils/image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edenartlab/eden_comfy_pipelines/82919a328169fa1863124f6ac2bc51c681eb6eec/img_utils/image.png
--------------------------------------------------------------------------------
/img_utils/img_utils.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import torch
3 | import numpy as np
4 | from PIL import Image
5 |
6 | def preprocess_lab(lab):
7 | L_chan, a_chan, b_chan = torch.unbind(lab, dim=2)
8 | # L_chan: black and white with input range [0, 100]
9 | # a_chan/b_chan: color channels with input range ~[-110, 110], not exact
10 | # [0, 100] => [-1, 1], ~[-110, 110] => [-1, 1]
11 | return [L_chan / 50.0 - 1.0, a_chan / 110.0, b_chan / 110.0]
12 |
13 |
14 | def deprocess_lab(L_chan, a_chan, b_chan):
15 | # TODO This is axis=3 instead of axis=2 when deprocessing batch of images
16 | # (we process individual images but deprocess batches)
17 | return torch.stack([(L_chan + 1) / 2.0 * 100.0, a_chan * 110.0, b_chan * 110.0], dim=2)
18 |
19 |
20 | def rgb_to_lab(srgb):
21 | # Get appropriate device
22 | if torch.cuda.is_available():
23 | device = torch.device('cuda')
24 | elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
25 | device = torch.device('mps')
26 | else:
27 | device = torch.device('cpu')
28 |
29 | srgb_pixels = torch.reshape(srgb, [-1, 3])
30 |
31 | # Convert to correct device
32 | srgb_pixels = srgb_pixels.to(device)
33 |
34 | linear_mask = (srgb_pixels <= 0.04045).type(torch.FloatTensor).to(device)
35 | exponential_mask = (srgb_pixels > 0.04045).type(torch.FloatTensor).to(device)
36 | rgb_pixels = (srgb_pixels / 12.92 * linear_mask) + (((srgb_pixels + 0.055) / 1.055) ** 2.4) * exponential_mask
37 |
38 | rgb_to_xyz = torch.tensor([
39 | # X Y Z
40 | [0.412453, 0.212671, 0.019334], # R
41 | [0.357580, 0.715160, 0.119193], # G
42 | [0.180423, 0.072169, 0.950227], # B
43 | ]).type(torch.FloatTensor).to(device)
44 |
45 | xyz_pixels = torch.mm(rgb_pixels, rgb_to_xyz)
46 |
47 | # XYZ to Lab
48 | xyz_normalized_pixels = torch.mul(xyz_pixels, torch.tensor([1/0.950456, 1.0, 1/1.088754]).type(torch.FloatTensor).to(device))
49 |
50 | epsilon = 6.0/29.0
51 |
52 | linear_mask = (xyz_normalized_pixels <= (epsilon**3)).type(torch.FloatTensor).to(device)
53 | exponential_mask = (xyz_normalized_pixels > (epsilon**3)).type(torch.FloatTensor).to(device)
54 |
55 | fxfyfz_pixels = (xyz_normalized_pixels / (3 * epsilon**2) + 4.0/29.0) * linear_mask + ((xyz_normalized_pixels+0.000001) ** (1.0/3.0)) * exponential_mask
56 |
57 | # Convert to lab
58 | fxfyfz_to_lab = torch.tensor([
59 | # l a b
60 | [ 0.0, 500.0, 0.0], # fx
61 | [116.0, -500.0, 200.0], # fy
62 | [ 0.0, 0.0, -200.0], # fz
63 | ]).type(torch.FloatTensor).to(device)
64 |
65 | lab_pixels = torch.mm(fxfyfz_pixels, fxfyfz_to_lab) + torch.tensor([-16.0, 0.0, 0.0]).type(torch.FloatTensor).to(device)
66 |
67 | return torch.reshape(lab_pixels, srgb.shape)
68 |
69 |
70 | def lab_to_rgb(lab):
71 | # Get appropriate device
72 | if torch.cuda.is_available():
73 | device = torch.device('cuda')
74 | elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
75 | device = torch.device('mps')
76 | else:
77 | device = torch.device('cpu')
78 |
79 | lab_pixels = torch.reshape(lab, [-1, 3])
80 |
81 | # Convert to device
82 | lab_pixels = lab_pixels.to(device)
83 |
84 | # Convert to fxfyfz
85 | lab_to_fxfyfz = torch.tensor([
86 | # fx fy fz
87 | [1/116.0, 1/116.0, 1/116.0], # l
88 | [1/500.0, 0.0, 0.0], # a
89 | [ 0.0, 0.0, -1/200.0], # b
90 | ]).type(torch.FloatTensor).to(device)
91 |
92 | fxfyfz_pixels = torch.mm(lab_pixels + torch.tensor([16.0, 0.0, 0.0]).type(torch.FloatTensor).to(device), lab_to_fxfyfz)
93 |
94 | # Convert to xyz
95 | epsilon = 6.0/29.0
96 | linear_mask = (fxfyfz_pixels <= epsilon).type(torch.FloatTensor).to(device)
97 | exponential_mask = (fxfyfz_pixels > epsilon).type(torch.FloatTensor).to(device)
98 |
99 | xyz_pixels = (3 * epsilon**2 * (fxfyfz_pixels - 4/29.0)) * linear_mask + ((fxfyfz_pixels+0.000001) ** 3) * exponential_mask
100 |
101 | # Denormalize for D65 white point
102 | xyz_pixels = torch.mul(xyz_pixels, torch.tensor([0.950456, 1.0, 1.088754]).type(torch.FloatTensor).to(device))
103 |
104 | xyz_to_rgb = torch.tensor([
105 | # r g b
106 | [ 3.2404542, -0.9692660, 0.0556434], # x
107 | [-1.5371385, 1.8760108, -0.2040259], # y
108 | [-0.4985314, 0.0415560, 1.0572252], # z
109 | ]).type(torch.FloatTensor).to(device)
110 |
111 | rgb_pixels = torch.mm(xyz_pixels, xyz_to_rgb)
112 |
113 | # Avoid a slightly negative number messing up the conversion
114 | # Clip
115 | rgb_pixels = torch.clamp(rgb_pixels, 0.0, 1.0)
116 |
117 | linear_mask = (rgb_pixels <= 0.0031308).type(torch.FloatTensor).to(device)
118 | exponential_mask = (rgb_pixels > 0.0031308).type(torch.FloatTensor).to(device)
119 | srgb_pixels = (rgb_pixels * 12.92 * linear_mask) + (((rgb_pixels+0.000001) ** (1/2.4) * 1.055) - 0.055) * exponential_mask
120 |
121 | # Move back to original device if needed
122 | if lab.device != device:
123 | srgb_pixels = srgb_pixels.to(lab.device)
124 |
125 | return torch.reshape(srgb_pixels, lab.shape)
--------------------------------------------------------------------------------
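A quick round-trip check is a useful way to sanity-test the Lab conversions above. A hedged sketch, assuming the module is importable as `img_utils.img_utils`:

```python
# Hypothetical round-trip test for rgb_to_lab / lab_to_rgb (illustration only).
import torch
from img_utils.img_utils import rgb_to_lab, lab_to_rgb  # assumed import path

rgb = torch.rand(64, 64, 3)   # sRGB values in [0, 1], shape H x W x C
lab = rgb_to_lab(rgb)         # L roughly in [0, 100], a/b roughly in [-110, 110]
rgb_back = lab_to_rgb(lab)

# The round trip should agree up to small numerical error from the epsilon terms and clamping.
print((rgb.to(rgb_back.device) - rgb_back).abs().max().item())
```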
/img_utils/test.py:
--------------------------------------------------------------------------------
1 | """This module implements Histogram Matching operation"""
2 | import sys
3 | import numpy as np
4 | import os
5 | import cv2
6 |
7 | """This module contains constants used in the command line interface"""
8 | # color spaces
9 | GRAY = 'gray'
10 | HSV = 'hsv'
11 | LAB = 'lab'
12 | RGB = 'rgb'
13 |
14 | # image channels
15 | IMAGE_CHANNELS = '0,1,2'
16 |
17 | # image matching operations
18 | MAX_VALUE_8_BIT = 255
19 |
20 | def read_image(path: str) -> np.ndarray:
21 | """ This function reads an image and transforms it to RGB color space """
22 | if os.path.exists(path):
23 | image = cv2.imread(path, cv2.IMREAD_UNCHANGED)
24 | image = image.astype(np.float32) / MAX_VALUE_8_BIT
25 | if image.ndim == 2:
26 | return image[:, :, np.newaxis]
27 | return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
28 |
29 | raise ValueError(f'Invalid image path {path}')
30 |
31 | def write_image(image: np.ndarray, path: str) -> None:
32 | """ This function transforms an image to BGR color space
33 | and writes it to disk """
34 | if image.dtype == np.float32:
35 | image = (image * MAX_VALUE_8_BIT).astype(np.uint8)
36 | if image.dtype == np.uint8:
37 | if image.shape[-1] == 1:
38 | output_image = image[:, :, 0]
39 | else:
40 | output_image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
41 | if not cv2.imwrite(path, output_image):
42 | raise ValueError(
43 | f'Output directory {os.path.dirname(path)} does not exist')
44 | else:
45 | raise TypeError(
46 | f'Cannot write image with type {image.dtype}')
47 | class HistogramMatching():
48 | """Histogram Matching operation class"""
49 |
50 | def __init__(self):
51 | self.r_values = []
52 | self.r_counts = []
53 | self.r_quantiles = []
54 |
55 | def set_reference_img(self, reference: np.ndarray, channels=[0, 1, 2]) -> None:
56 | """ This function sets the reference image used for histogram matching """
57 | for channel in channels:
58 | ref_channel = reference[:, :, channel].ravel()
59 | values, counts = np.unique(ref_channel, return_counts=True)
60 | quantiles = np.cumsum(counts).astype(float) / (ref_channel.size + sys.float_info.epsilon)
61 | self.r_values.append(values)
62 | self.r_counts.append(counts)
63 | self.r_quantiles.append(quantiles)
64 |
65 | def match_images_to_reference(self,
66 | sources: list,
67 | match_prop: float = 1.0,
68 | channels = [0,1,2],
69 | ) -> list:
70 |
71 | results = []
72 | for source in sources:
73 | result = np.copy(source)
74 | for channel in channels:
75 | result[:, :, channel] = self.match_channel(source[:, :, channel], channel, match_prop)
76 | results.append(result.astype(np.float32))
77 |
78 | return results
79 |
80 | def match_channel(self,
81 | source: np.ndarray,
82 | channel: int,
83 | match_prop: float = 1.0
84 | ) -> np.ndarray:
85 |
86 | source_shape = source.shape
87 | source = source.ravel()
88 |
89 | # get unique pixel values (sorted),
90 | # indices of the unique array and counts
91 | _, s_indices, s_counts = np.unique(source, return_counts=True, return_inverse=True)
92 |
93 | # compute the cumulative sum of the counts
94 | s_quantiles = np.cumsum(s_counts).astype(float) / (source.size + sys.float_info.epsilon)
95 |
96 | # interpolate linearly to find the pixel values in the reference
97 | # that correspond most closely to the quantiles in the source image
98 | interp_values = np.interp(s_quantiles, self.r_quantiles[channel], self.r_values[channel])
99 |
100 | # clip the interpolated values to the valid range
101 | interp_values = np.clip(interp_values, 0, 1)
102 |
103 | # pick the interpolated pixel values using the inverted source indices
104 | result = interp_values[s_indices]
105 |
106 | # apply matching proportion
107 | diff = source.astype(float) - result
108 | result = source.astype(float) - (diff * match_prop)
109 |
110 | return result.reshape(source_shape)
111 |
112 |
113 | if __name__ == "__main__":
114 |
115 | hist_matcher = HistogramMatching()
116 |
117 | src_dir = "frames"
118 | ref_img = "tree.jpg"
119 |
120 | sources = [read_image(os.path.join(src_dir, img)) for img in sorted(os.listdir(src_dir))]
121 | reference = read_image(ref_img)
122 |
123 | hist_matcher.set_reference_img(reference)
124 | results = hist_matcher.match_images_to_reference(sources, 0.5)
125 |
126 | for i, result in enumerate(results):
127 | write_image(result, f"hist_match_{i}.jpg")
128 |
--------------------------------------------------------------------------------
/ip_adapter_utils/exploration_state.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | class ExplorationState:
4 | def __init__(
5 | self,
6 |         sample_embed: torch.Tensor,
7 | ):
8 | self.sample_embed = sample_embed.cpu()
9 |
10 | def validate(self):
11 | pass ## TODO
12 |
13 | def save(self, filename):
14 | state_dict = {
15 | "sample_embed": self.sample_embed.half(),
16 | }
17 | torch.save(
18 | state_dict,
19 | filename
20 | )
21 | print(f"Saved ExplorationState: {filename}")
22 |
23 | @classmethod
24 | def from_file(cls, filename: str):
25 | state_dict = torch.load(
26 | filename
27 | )
28 | return cls(
29 | sample_embed = state_dict["sample_embed"].float(),
30 | )
--------------------------------------------------------------------------------
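A minimal save/load round trip for `ExplorationState`, assuming the module is importable as `ip_adapter_utils.exploration_state` (the embedding shape is just an example):

```python
import torch
from ip_adapter_utils.exploration_state import ExplorationState  # assumed import path

state = ExplorationState(sample_embed=torch.randn(1, 768))  # hypothetical embedding size
state.save("exploration_state.pth")

restored = ExplorationState.from_file("exploration_state.pth")
print(restored.sample_embed.shape, restored.sample_embed.dtype)  # torch.Size([1, 768]) torch.float32
```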
/js/eden_number_display.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../scripts/app.js";
2 |
3 | // For debugging - confirm the script is loaded
4 | console.log("Eden NumberDisplay extension loading...");
5 |
6 | app.registerExtension({
7 | name: "Eden.NumberDisplay",
8 |
9 | async beforeRegisterNodeDef(nodeType, nodeData, app) {
10 | // Only modify Eden_RandomNumberSampler nodes
11 | if (nodeData.name !== "Eden_RandomNumberSampler") {
12 | return;
13 | }
14 |
15 | console.log("Registering Eden_RandomNumberSampler node definition");
16 |
17 | // Store the original onDrawForeground function
18 | const onDrawForeground = nodeType.prototype.onDrawForeground;
19 |
20 | // Override the onDrawForeground function to display the sampled value
21 | nodeType.prototype.onDrawForeground = function(ctx) {
22 | // Call the original onDrawForeground function if it exists
23 | if (onDrawForeground) {
24 | onDrawForeground.apply(this, arguments);
25 | }
26 |
27 | // Check if we have a value to display
28 | if (this.sampledValue) {
29 | // Draw a background for the text
30 | const textHeight = 20;
31 | const margin = 10;
32 | const width = this.size[0];
33 | const y = this.size[1] - textHeight - margin/2;
34 |
35 | ctx.fillStyle = "rgba(0,0,0,0.2)";
36 | ctx.beginPath();
37 | ctx.roundRect(0, y, width, textHeight, [0, 0, 5, 5]);
38 | ctx.fill();
39 |
40 | // Draw the text
41 | ctx.fillStyle = "#FFF";
42 | ctx.font = "14px Arial";
43 | ctx.textAlign = "center";
44 | ctx.fillText("Sampled: " + this.sampledValue, width/2, y + 15);
45 | }
46 | };
47 |
48 | // Store the original onExecuted function
49 | const onExecuted = nodeType.prototype.onExecuted;
50 |
51 | // Override the onExecuted function
52 | nodeType.prototype.onExecuted = function(message) {
53 | // Call the original onExecuted function if it exists
54 | if (onExecuted) {
55 | onExecuted.apply(this, arguments);
56 | }
57 |
58 | console.log("Eden_RandomNumberSampler executed with message:", message);
59 |
60 | // Store the random number to display
61 | if (message && message.random_number !== undefined) {
62 | this.sampledValue = message.random_number[0];
63 | // Force the node to redraw
64 | this.setDirtyCanvas(true, true);
65 | }
66 | };
67 | }
68 | });
69 |
70 | console.log("Eden NumberDisplay extension loaded successfully");
--------------------------------------------------------------------------------
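The extension above reads `message.random_number` in `onExecuted`; that field is produced on the Python side by the `ui` dictionary returned from `Eden_RandomNumberSampler` in `logic/logic_nodes.py` (shown in the next file below). A minimal sketch of that return shape:

```python
# Sketch of the dict an OUTPUT_NODE returns so the JS extension can read message.random_number.
def sample_random_number_ui_contract():
    return {
        "ui": {"random_number": ["0.42"]},  # read in onExecuted as message.random_number[0]
        "result": (0, 0.42, "0.42"),        # regular node outputs: (INT, FLOAT, STRING)
    }
```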
/logic/logic_nodes.py:
--------------------------------------------------------------------------------
1 | """
2 |
3 | This code was heavily inspired by
4 | https://github.com/theUpsider/ComfyUI-Logic/tree/fb8897351f715ea75eebf52e74515b6d07c693b8
5 |
6 | """
7 |
8 | import sys
9 |
10 | class AnyType(str):
11 | def __ne__(self, __value: object) -> bool:
12 | return False
13 |
14 | any_typ = AnyType("*")
15 |
16 | class Eden_String:
17 | @classmethod
18 | def INPUT_TYPES(s):
19 | return {
20 | "required": {"value": ("STRING", {"default": "", "multiline": True})},
21 | }
22 |
23 | RETURN_TYPES = ("STRING",)
24 | RETURN_NAMES = ("STRING",)
25 | FUNCTION = "execute"
26 | CATEGORY = "Eden 🌱/Logic"
27 |
28 | def execute(self, value):
29 | return (value,)
30 |
31 |
32 | class Eden_Int:
33 | @classmethod
34 | def INPUT_TYPES(s):
35 | return {
36 | "required": {"value": ("INT", {"min": -sys.maxsize, "max": sys.maxsize, "default": 0})},
37 | }
38 |
39 | RETURN_TYPES = ("INT",)
40 | RETURN_NAMES = ("INT",)
41 | FUNCTION = "execute"
42 | CATEGORY = "Eden 🌱/Logic"
43 |
44 | def execute(self, value):
45 | return (value,)
46 |
47 |
48 | class Eden_Float:
49 | @classmethod
50 | def INPUT_TYPES(s):
51 | return {
52 | "required": {"value": ("FLOAT", {"default": 0, "step": 0.01})},
53 | }
54 |
55 | RETURN_TYPES = ("FLOAT",)
56 | RETURN_NAMES = ("FLOAT",)
57 | FUNCTION = "execute"
58 | CATEGORY = "Eden 🌱/Logic"
59 |
60 | def execute(self, value):
61 | return (value,)
62 |
63 |
64 | class Eden_Bool:
65 | @classmethod
66 | def INPUT_TYPES(s):
67 | return {
68 | "required": {"value": ("BOOLEAN", {"default": False})},
69 | }
70 |
71 | RETURN_TYPES = ("BOOLEAN",)
72 | RETURN_NAMES = ("BOOLEAN",)
73 | FUNCTION = "execute"
74 | CATEGORY = "Eden 🌱/Logic"
75 |
76 | def execute(self, value):
77 | return (value,)
78 |
79 |
80 | COMPARE_FUNCTIONS = {
81 | "a == b": lambda a, b: a == b,
82 | "a != b": lambda a, b: a != b,
83 | "a < b": lambda a, b: a < b,
84 | "a > b": lambda a, b: a > b,
85 | "a <= b": lambda a, b: a <= b,
86 | "a >= b": lambda a, b: a >= b,
87 | }
88 |
89 | class AlwaysEqualProxy(str):
90 | def __eq__(self, _):
91 | return True
92 |
93 | def __ne__(self, _):
94 | return False
95 |
96 |
97 | class Eden_Compare:
98 | @classmethod
99 | def INPUT_TYPES(s):
100 | compare_functions = list(COMPARE_FUNCTIONS.keys())
101 | return {
102 | "required": {
103 | "a": (AlwaysEqualProxy("*"), {"default": 0}),
104 | "b": (AlwaysEqualProxy("*"), {"default": 0}),
105 | "comparison": (compare_functions, {"default": "a == b"}),
106 | },
107 | }
108 |
109 | RETURN_TYPES = ("BOOLEAN",)
110 | RETURN_NAMES = ("boolean",)
111 | FUNCTION = "compare"
112 | CATEGORY = "Eden 🌱/Logic"
113 |
114 | def compare(self, a, b, comparison):
115 | return (COMPARE_FUNCTIONS[comparison](a, b),)
116 |
117 |
118 | from typing import Any, Callable, Mapping
119 | BOOL_BINARY_OPERATIONS: Mapping[str, Callable[[bool, bool], bool]] = {
120 | "Nor": lambda a, b: not (a or b),
121 | "Xor": lambda a, b: a ^ b,
122 | "Nand": lambda a, b: not (a and b),
123 | "And": lambda a, b: a and b,
124 | "Xnor": lambda a, b: not (a ^ b),
125 | "Or": lambda a, b: a or b,
126 | "Eq": lambda a, b: a == b,
127 | "Neq": lambda a, b: a != b,
128 | }
129 |
130 | class Eden_BoolBinaryOperation:
131 | @classmethod
132 | def INPUT_TYPES(cls):
133 | return {
134 | "required": {
135 | "op": (list(BOOL_BINARY_OPERATIONS.keys()),),
136 | "a": ("BOOLEAN", {"default": False}),
137 | "b": ("BOOLEAN", {"default": False}),
138 | }
139 | }
140 |
141 | RETURN_TYPES = ("BOOLEAN",)
142 | FUNCTION = "op"
143 | CATEGORY = "Eden 🌱/Logic"
144 |
145 | def op(self, op: str, a: bool, b: bool) -> tuple[bool]:
146 | return (BOOL_BINARY_OPERATIONS[op](a, b),)
147 |
148 |
149 | class Eden_IfExecute:
150 | """
151 | This node executes IF_TRUE if ANY is True, otherwise it executes IF_FALSE.
152 | ANY can be any input, IF_TRUE and IF_FALSE can be any output.
153 | """
154 |
155 | @classmethod
156 | def INPUT_TYPES(s):
157 | return {
158 | "required": {
159 | "ANY": (AlwaysEqualProxy("*"),),
160 | "IF_TRUE": (AlwaysEqualProxy("*"),),
161 | "IF_FALSE": (AlwaysEqualProxy("*"),),
162 | },
163 | }
164 |
165 | RETURN_TYPES = (AlwaysEqualProxy("*"),)
166 | RETURN_NAMES = ("?",)
167 | FUNCTION = "return_based_on_bool"
168 | CATEGORY = "Eden 🌱/Logic"
169 |
170 | def return_based_on_bool(self, ANY, IF_TRUE, IF_FALSE):
171 | result_str = "True" if ANY else "False"
172 | print(f"Evaluating {type(ANY)}, *** {ANY} *** as {result_str}")
173 | return (IF_TRUE if ANY else IF_FALSE,)
174 |
175 |
176 | import torch
177 | import random
178 |
179 | class Eden_RandomNumberSampler:
180 | """Node that generates a random number from a uniform distribution with configurable min/max values"""
181 |
182 | @classmethod
183 | def INPUT_TYPES(cls):
184 | return {
185 | "required": {
186 | "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
187 | "min_value": ("FLOAT", {"default": 0.00, "min": -1000.00, "max": 1000.00, "step": 0.01}),
188 | "max_value": ("FLOAT", {"default": 1.00, "min": -1000.00, "max": 1000.00, "step": 0.01}),
189 | }
190 | }
191 |
192 | FUNCTION = "sample_random_number"
193 | RETURN_TYPES = ("INT", "FLOAT", "STRING")
194 | RETURN_NAMES = ("sampled_int", "sampled_float", "sampled_string")
195 | CATEGORY = "Eden 🌱/Logic"
196 | DESCRIPTION = "Samples a random number from a uniform distribution between min_value and max_value"
197 | OUTPUT_NODE = True # Enable UI updates
198 |
199 | def sample_random_number(self, seed, min_value, max_value):
200 | # Set seeds for reproducibility
201 | torch.manual_seed(seed)
202 | random.seed(seed)
203 |
204 | # Ensure min_value is not greater than max_value
205 | if min_value > max_value:
206 | min_value, max_value = max_value, min_value
207 |
208 | # Generate random float between min and max
209 | sampled_float = min_value + (max_value - min_value) * torch.rand(1).item()
210 |
211 | # Round to 2 decimal places for consistency
212 | sampled_float = round(sampled_float, 2)
213 |
214 | # Convert to integer (rounded)
215 | sampled_int = int(round(sampled_float))
216 |
217 | # Create string representation with 2 decimal places
218 | sampled_string = f"{sampled_float:.2f}"
219 |
220 | # Return the sampled values along with special UI update
221 | return {
222 | "ui": {
223 | "random_number": [f"{sampled_float:.2f}"]
224 | },
225 | "result": (sampled_int, sampled_float, sampled_string)
226 | }
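227 | 
228 | 
229 | if __name__ == "__main__":
230 |     # Minimal smoke-test sketch (illustrative only): the nodes above are plain Python classes, so their
231 |     # execute/compare methods can be called directly outside of ComfyUI to sanity-check the logic.
232 |     assert Eden_Compare().compare(3, 5, "a < b") == (True,)
233 |     assert Eden_BoolBinaryOperation().op("Xor", True, False) == (True,)
234 |     assert Eden_IfExecute().return_based_on_bool(True, "yes", "no") == ("yes",)
235 | 
236 |     # Eden_RandomNumberSampler returns a dict separating the UI payload ("ui") from the node outputs ("result"):
237 |     out = Eden_RandomNumberSampler().sample_random_number(seed=42, min_value=0.0, max_value=10.0)
238 |     print(out["ui"]["random_number"], out["result"])  # UI string plus (sampled_int, sampled_float, sampled_string)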
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "eden_comfy_pipelines"
3 | description = "A collection of custom nodes and workflows for ComfyUI, developed by https://www.eden.art/"
4 | version = "1.3.0"
5 | license = {file = "LICENSE"}
6 | dependencies = [
7 | "clip-interrogator==0.6.0",
8 | "open_clip_torch==2.26.1",
9 | "openai",
10 | "python-dotenv",
11 | "scikit-learn",
12 | "transformers",
13 | "matplotlib",
14 | "scikit-image",
15 | "opencv-python",
16 | "piexif",
17 | "py7zr",
18 | "lpips",
19 | "tsp_solver2",
20 | "einops",
21 | ]
22 | 
23 |
24 | [project.urls]
25 | Repository = "https://github.com/edenartlab/eden_comfy_pipelines"
26 | # Used by Comfy Registry https://comfyregistry.org
27 |
28 | [tool.comfy]
29 | PublisherId = "edenartlab"
30 | DisplayName = "Eden Comfy Pack"
31 | Icon = "🌱🌱"
32 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | clip-interrogator==0.6.0
2 | open_clip_torch==2.26.1
3 | py7zr==0.22.0
4 | openai>=1.57.2
5 | python-dotenv
6 | scikit-learn
7 | transformers
8 | matplotlib
9 | scikit-image
10 | opencv-python
11 |
12 |
--------------------------------------------------------------------------------
/video_utils/fill_utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import torch
3 | import numpy as np
4 | from PIL import Image
5 | import matplotlib.pyplot as plt
6 | from torchvision.transforms import ToTensor, ToPILImage
7 | from typing import List, Tuple, Optional
8 |
9 | # ────────────────────────────────────────────────────────────────────────────────
10 | # UTILITIES
11 | # ────────────────────────────────────────────────────────────────────────────────
12 |
13 | def pil_to_comfy_tensor(pil_image: Image.Image) -> torch.Tensor:
14 | """Convert PIL Image to ComfyUI tensor format [B,H,W,C]."""
15 | # Convert PIL to tensor [C,H,W] with values in [0,1]
16 | tensor = ToTensor()(pil_image)
17 | # Permute to [H,W,C]
18 | tensor = tensor.permute(1, 2, 0)
19 | # Add batch dimension [B,H,W,C]
20 | tensor = tensor.unsqueeze(0)
21 | return tensor
22 |
23 | def comfy_tensor_to_pil(tensor: torch.Tensor) -> Image.Image:
24 | """Convert ComfyUI tensor format [B,H,W,C] to PIL Image."""
25 | # Remove batch dimension if present
26 | if tensor.dim() == 4:
27 | tensor = tensor.squeeze(0) # [H,W,C]
28 | # Ensure tensor is on CPU and in float format
29 | tensor = tensor.cpu().float()
30 | # Clamp to [0,1] range
31 | tensor = torch.clamp(tensor, 0, 1)
32 | # Permute to [C,H,W] for ToPILImage
33 | tensor = tensor.permute(2, 0, 1)
34 | # Convert to PIL
35 | return ToPILImage()(tensor)
36 |
37 | def tensor_to_np_mask(t: torch.Tensor) -> np.ndarray:
38 | """Convert boolean Torch mask [H,W] to uint8 0/255 numpy."""
39 | # Ensure input is on CPU before converting to numpy
40 | if t.device != torch.device('cpu'):
41 | t = t.cpu()
42 | return (t.numpy().astype(np.uint8) * 255)
43 |
44 | # Helper function for normalization
45 | def normalize_tensor(tensor: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
46 | """Normalize tensor to [0, 1] range.
47 |
48 | For tensors with dim < 3: Normalizes the entire tensor.
49 | For tensors with dim >= 3: Normalizes each batch element independently.
50 |
51 | Args:
52 | tensor: Input tensor to normalize
53 | eps: Small epsilon value to avoid division by zero
54 |
55 | Returns:
56 | Normalized tensor in [0, 1] range
57 | """
58 | if tensor is None:
59 | return None
60 |
61 | if tensor.numel() == 0:
62 | return tensor # Handle empty tensor
63 |
64 | if tensor.dim() < 3: # Handle cases like [H,W] or single values
65 | min_val = torch.min(tensor)
66 | max_val = torch.max(tensor)
67 | if max_val - min_val < eps:
68 | return torch.zeros_like(tensor)
69 | return (tensor - min_val) / (max_val - min_val + eps)
70 | else: # Handle batched tensors [B, H, W] or [B, C, H, W], etc.
71 | # Normalize each element in the batch independently
72 | tensor_norm = torch.zeros_like(tensor, dtype=torch.float32)
73 | for i in range(tensor.shape[0]):
74 | min_val = tensor[i].min()
75 | max_val = tensor[i].max()
76 | if max_val - min_val < eps:
77 | tensor_norm[i] = torch.zeros_like(tensor[i])
78 | else:
79 | tensor_norm[i] = (tensor[i] - min_val) / (max_val - min_val + eps)
80 | return tensor_norm
81 |
82 | def load_image(path: str, target_size: Optional[Tuple[int, int]] = None) -> Optional[torch.Tensor]:
83 | """Load image from path and convert to tensor [H,W,C] with optional resizing."""
84 | img = Image.open(path)
85 |
86 | # Ensure image is in RGB or L mode before processing
87 | if img.mode not in ['RGB', 'L', 'RGBA']:
88 | img = img.convert('RGB')
89 | elif img.mode == 'RGBA':
90 | background = Image.new("RGB", img.size, (255, 255, 255))
91 | background.paste(img, mask=img.split()[3]) # 3 is the alpha channel
92 | img = background
93 |
94 | if target_size is not None:
95 | img = img.resize((target_size[1], target_size[0]), Image.BICUBIC)
96 |
97 | tensor = ToTensor()(img) # [C,H,W]
98 |
99 | # Convert grayscale (L mode) or single-channel RGB to [1, H, W]
100 | if tensor.shape[0] == 1:
101 | pass # Already [1, H, W]
102 | elif tensor.shape[0] == 3 and torch.allclose(tensor[0], tensor[1], atol=1e-3) and torch.allclose(tensor[1], tensor[2], atol=1e-3):
103 | tensor = tensor[0:1] # Take one channel -> [1, H, W]
104 | elif tensor.shape[0] == 4: # RGBA converted by ToTensor()
105 | tensor = tensor[:3] # Drop alpha -> [3, H, W]
106 |
107 | return tensor.float().permute(1, 2, 0) # [H,W,C]
108 |
109 |
110 | def save_frames_as_gif(frames: torch.Tensor, output_path: str, duration: int = 100):
111 | """Save frames tensor [N,H,W], [N,1,H,W] or [N,H,W,1] as animated gif."""
112 | if frames.dim() == 4 and frames.shape[1] == 1:
113 | frames = frames.squeeze(1) # [N, H, W]
114 | elif frames.dim() == 4 and frames.shape[-1] == 1:
115 | frames = frames.squeeze(-1) # [N, H, W]
116 | elif frames.dim() != 3:
117 | raise ValueError(f"Expected frames tensor of shape [N,H,W], [N,1,H,W] or [N,H,W,1], got {frames.shape}")
118 |
119 | # Ensure tensor is on CPU and float type [0, 1] range for ToPILImage
120 | frames = frames.cpu().float()
121 |
122 | # Normalize each frame individually to [0, 1]
123 | normalized_frames = [normalize_tensor(f) for f in frames]
124 |
125 | frames_pil = [ToPILImage()(f) for f in normalized_frames]
126 | if frames_pil:
127 | try:
128 | frames_pil[0].save(
129 | output_path,
130 | save_all=True,
131 | append_images=frames_pil[1:],
132 | optimize=True,
133 | duration=duration,
134 | loop=0
135 | )
136 | print(f"- Fill process animation: {os.path.basename(output_path)}")
137 | except Exception as e:
138 | print(f"Error saving GIF {output_path}: {e}")
139 | else:
140 | print(f"Warning: No frames provided to save_frames_as_gif for {output_path}")
141 |
142 |
143 | # ────────────────────────────────────────────────────────────────────────────────
144 | # VISUALIZATION (for testing/debugging)
145 | # ────────────────────────────────────────────────────────────────────────────────
146 |
147 | def visualize_final_result(output_dir: str, test_name: str, input_img: Optional[torch.Tensor], final_mask: torch.Tensor):
148 | """Save a visualization comparing the original image, final mask, and blended result."""
149 | os.makedirs(output_dir, exist_ok=True)
150 | output_path = os.path.join(output_dir, f"{test_name}_result_visualization.png")
151 |
152 | # --- Prepare Tensors ---
153 | # Normalize first, then ensure CPU float
154 | final_mask_norm = normalize_tensor(final_mask).cpu().float()
155 | input_img_norm = None
156 | if input_img is not None:
157 | input_img_norm = normalize_tensor(input_img).cpu().float()
158 |
159 |
160 | # Standardize mask: [H, W]
161 | if final_mask_norm.dim() == 4 and final_mask_norm.shape[1] == 1: final_mask_norm = final_mask_norm.squeeze(1) #[B, H, W]
162 | if final_mask_norm.dim() == 4 and final_mask_norm.shape[-1] == 1: final_mask_norm = final_mask_norm.squeeze(-1) #[B, H, W]
163 | if final_mask_norm.dim() == 3 and final_mask_norm.shape[0] == 1: final_mask_norm = final_mask_norm.squeeze(0) # [H, W]
164 | if final_mask_norm.dim() != 2:
165 | print(f"Warning: Unexpected final_mask shape {final_mask_norm.shape}, skipping visualization.")
166 | return
167 |
168 | # Standardize input image: [H, W, C] or None
169 | if input_img_norm is not None:
170 | if input_img_norm.dim() == 4 and input_img_norm.shape[0] == 1: input_img_norm = input_img_norm.squeeze(0) # [C, H, W] or [H, W, C]
171 | if input_img_norm.dim() == 3 and input_img_norm.shape[0] in [1, 3]: # [C, H, W] -> [H, W, C]
172 | input_img_norm = input_img_norm.permute(1, 2, 0)
173 | if input_img_norm.dim() != 3 or input_img_norm.shape[-1] not in [1, 3]:
174 | print(f"Warning: Unexpected input_img shape {input_img_norm.shape}, cannot blend.")
175 | input_img_norm = None # Treat as unavailable for blending
176 |
177 |
178 | # --- Plotting ---
179 | num_plots = 2 + (input_img_norm is not None) # Original, Mask, Blend (if possible)
180 | fig, axes = plt.subplots(1, num_plots, figsize=(5 * num_plots, 5))
181 | axes = [axes] if num_plots == 1 else axes.flatten()
182 | plot_idx = 0
183 |
184 | # Plot Original Image (if available)
185 | if input_img_norm is not None:
186 | input_np = input_img_norm.numpy()
187 | axes[plot_idx].imshow(input_np.squeeze()) # Squeeze for grayscale [H,W,1]
188 | axes[plot_idx].set_title("Original Image")
189 | axes[plot_idx].axis('off')
190 | plot_idx += 1
191 | else:
192 | # If input image is missing, occupy the first slot with a message
193 | axes[plot_idx].text(0.5, 0.5, 'Original Image Not Available', ha='center', va='center')
194 | axes[plot_idx].axis('off')
195 | plot_idx += 1
196 |
197 |
198 | # Plot Final Mask
199 | mask_np = final_mask_norm.numpy()
200 | axes[plot_idx].imshow(mask_np, cmap='gray')
201 | axes[plot_idx].set_title("Final Mask")
202 | axes[plot_idx].axis('off')
203 | plot_idx += 1
204 |
205 | # Plot Blended Result (if original image available)
206 | if input_img_norm is not None:
207 | input_np = input_img_norm.numpy()
208 | # Create RGB mask broadcastable to image shape
209 | mask_rgb = np.stack([mask_np]*input_np.shape[-1], axis=-1) if input_np.ndim == 3 else mask_np # Handle grayscale input
210 |
211 | # Ensure input_np is compatible (e.g., grayscale input with RGB mask?)
212 | if input_np.shape[-1] == 1 and mask_rgb.shape[-1] == 3:
213 | input_np_rgb = np.repeat(input_np, 3, axis=-1)
214 | else:
215 | input_np_rgb = input_np
216 |
217 | # Blend if shapes match
218 | if input_np_rgb.shape == mask_rgb.shape:
219 | blended = 0.7 * input_np_rgb + 0.3 * mask_rgb
220 | axes[plot_idx].imshow(np.clip(blended, 0, 1))
221 | axes[plot_idx].set_title("Blended Result")
222 | else:
223 | axes[plot_idx].set_title("Blend Error (Shape Mismatch)")
224 | axes[plot_idx].axis('off')
225 | plot_idx += 1
226 |
227 |
228 | plt.tight_layout()
229 | try:
230 | plt.savefig(output_path)
231 | print(f"- Result visualization: {os.path.basename(output_path)}")
232 | except Exception as e:
233 | print(f"Error saving result visualization {output_path}: {e}")
234 | plt.close()
235 |
236 |
237 | def save_final_mask(output_dir: str, test_name: str, final_mask: torch.Tensor):
238 | """Save the final generated mask as a PNG image."""
239 | os.makedirs(output_dir, exist_ok=True)
240 | output_path = os.path.join(output_dir, f"{test_name}_final_mask.png")
241 |
242 | # Standardize mask: [H, W] before saving
243 | if final_mask.dim() == 4 and final_mask.shape[1] == 1: final_mask = final_mask.squeeze(1) #[B, H, W]
244 | if final_mask.dim() == 4 and final_mask.shape[-1] == 1: final_mask = final_mask.squeeze(-1) #[B, H, W]
245 | if final_mask.dim() == 3 and final_mask.shape[0] == 1: final_mask = final_mask.squeeze(0) # [H, W]
246 |
247 | if final_mask.dim() != 2:
248 | print(f"Warning: Cannot save final mask with shape {final_mask.shape}.")
249 | return
250 |
251 | try:
252 | # Ensure tensor is on CPU and in a suitable format for ToPILImage (e.g., float [0,1] or uint8)
253 | # Normalize before converting
254 | final_mask_norm = normalize_tensor(final_mask).cpu().float()
255 | final_pil = ToPILImage()(final_mask_norm)
256 | final_pil.save(output_path)
257 | print(f"- Final mask: {os.path.basename(output_path)}")
258 | except Exception as e:
259 | print(f"Error saving final mask {output_path}: {e}")
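260 | 
261 | 
262 | if __name__ == "__main__":
263 |     # Minimal round-trip sketch (illustrative): pil_to_comfy_tensor yields the ComfyUI [B,H,W,C] layout in [0,1]
264 |     # and comfy_tensor_to_pil inverts it; normalize_tensor rescales each batch element to [0,1] independently.
265 |     img = Image.new("RGB", (64, 32), (128, 64, 255))   # PIL size is (width, height)
266 |     t = pil_to_comfy_tensor(img)
267 |     assert tuple(t.shape) == (1, 32, 64, 3)
268 |     assert comfy_tensor_to_pil(t).size == img.size
269 | 
270 |     # Two batch elements with very different ranges both end up spanning roughly [0, 1] after normalization:
271 |     batch = torch.stack([torch.rand(8, 8) * 5.0, torch.rand(8, 8) + 10.0])
272 |     norm = normalize_tensor(batch)
273 |     print(norm[0].min().item(), norm[0].max().item(), norm[1].min().item(), norm[1].max().item())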
--------------------------------------------------------------------------------
/video_utils/gradient_mask_video.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import matplotlib.pyplot as plt
3 | import numpy as np
4 | from io import BytesIO
5 |
6 | class KeyframeBlender:
7 | @classmethod
8 | def INPUT_TYPES(s):
9 | return {"required":
10 | {"image_frames": ("IMAGE",),
11 | "keyframe_ip_adapter_features": ("EMBEDS",),
12 | "n_frames": ("INT", {"default": 50}),
13 | "denoise_gamma": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 5.0}),
14 | "ip_adapter_gamma": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 5.0}),
15 | }
16 | }
17 |
18 | RETURN_TYPES = ("IMAGE", "MASK", "MASK", "IMAGE", "EMBEDS")
19 | RETURN_NAMES = ("keyframe_blend", "denoising_masks", "ip_adapter_attention_masks", "denoising_mask_curve", "ip_adapter_trajectory")
20 | FUNCTION = "blend_keyframes"
21 | CATEGORY = "Video Effects"
22 |
23 | def plot_denoising_values(self, denoising_values):
24 | fig, ax = plt.subplots()
25 | ax.plot(denoising_values)
26 | ax.set(xlabel='Frame Number', ylabel='Denoising Value', title='Denoising Mask Curve')
27 | ax.grid()
28 | ax.set_ylim(0, 1)
29 | fig.canvas.draw()
30 | image_from_plot = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
31 | image_from_plot = image_from_plot.reshape(fig.canvas.get_width_height()[::-1] + (3,))
32 | tensor_image = torch.from_numpy(image_from_plot).float().unsqueeze(0) / 255.0
33 | plt.close(fig)
34 | return tensor_image
35 |
36 | def blend_keyframes(self, image_frames, keyframe_ip_adapter_features, n_frames, denoise_gamma, ip_adapter_gamma):
37 | num_keyframes, height, width, channels = image_frames.shape
38 | _, n_features, feature_dim = keyframe_ip_adapter_features.shape
39 | device = image_frames.device
40 |
41 | transition_frames = [n_frames // (num_keyframes - 1)] * (num_keyframes - 1)
42 | remainder = n_frames % (num_keyframes - 1)
43 | for i in range(remainder):
44 | transition_frames[i] += 1
45 |
46 | blended_video = torch.zeros(n_frames, height, width, 3, device=device)
47 | denoising_masks = torch.zeros(n_frames, height, width, device=device)
48 | ip_adapter_trajectory = torch.zeros(n_frames, n_features, feature_dim, device=device)
49 |
50 | start_frame = 0
51 | denoising_values = []
52 | for i in range(num_keyframes - 1):
53 | end_frame = start_frame + transition_frames[i]
54 | midpoint_frame = start_frame + (end_frame - start_frame) // 2
55 |
56 | for j in range(start_frame, end_frame):
57 | alpha = (j - start_frame) / (end_frame - start_frame)
58 | blended_video[j] = image_frames[i] * (1 - alpha) + image_frames[i + 1] * alpha
59 | ip_adapter_trajectory[j] = keyframe_ip_adapter_features[i] * (1 - alpha) + keyframe_ip_adapter_features[i + 1] * alpha
60 |
61 | if j < midpoint_frame:
62 | denoising_value = (j - start_frame) / (midpoint_frame - start_frame)
63 | else:
64 | denoising_value = (end_frame - j) / (end_frame - midpoint_frame)
65 |
66 | denoising_values.append(denoising_value)
67 | denoising_masks[j] = torch.tensor(denoising_value, device=device).float()
68 |
69 | start_frame = end_frame
70 |
71 | denoising_values = np.array(denoising_values)**denoise_gamma
72 | curve_image = self.plot_denoising_values(denoising_values)
73 |
74 | # apply gamma corrections:
75 | ip_adapter_attention_masks = denoising_masks.clone()
76 | denoising_masks = denoising_masks ** denoise_gamma
77 | ip_adapter_attention_masks = ip_adapter_attention_masks ** ip_adapter_gamma
78 |
79 | return blended_video, denoising_masks, ip_adapter_attention_masks, curve_image, ip_adapter_trajectory
80 |
81 |
82 | import os, re
83 | import subprocess
84 | import torch
85 | import numpy as np
86 | from folder_paths import get_output_directory
87 |
88 | class MaskedRegionVideoExport:
89 | @classmethod
90 | def INPUT_TYPES(cls):
91 | return {
92 | "required": {
93 | "images": ("IMAGE",),
94 | "masks": ("MASK",),
95 | "fps": ("INT", {"default": 16, "min": 1, "max": 120}),
96 | "filename_prefix": ("STRING", {"default": "masked_video"}),
97 | "flip_mask": ("BOOLEAN", {"default": False}),
98 | "format": (["webm", "prores_mov"],),
99 | }
100 | }
101 |
102 | RETURN_TYPES = ("STRING",)
103 | RETURN_NAMES = ("video_path",)
104 | OUTPUT_NODE = True
105 | CATEGORY = "Video"
106 | FUNCTION = "export"
107 |
108 | def export(self, images, masks, fps, filename_prefix, flip_mask, format):
109 | if images.shape[0] != masks.shape[0]:
110 | raise ValueError("Number of images and masks must match!")
111 |
112 | print(f"Masking images of shape: {images.shape} with masks of shape: {masks.shape}")
113 | print(f"Mask max value: {masks.max()}, min value: {masks.min()}")
114 |
115 | output_dir = get_output_directory()
116 | ext = "webm" if format == "webm" else "mov"
117 | base_name = f"{filename_prefix}"
118 | existing_files = os.listdir(output_dir)
119 | matcher = re.compile(re.escape(base_name) + r"_(\d+)\." + ext + r"$", re.IGNORECASE)
120 | max_index = -1
121 | for f in existing_files:
122 | match = matcher.fullmatch(f)
123 | if match:
124 | max_index = max(max_index, int(match.group(1)))
125 | new_index = max_index + 1
126 | video_filename = f"{base_name}_{new_index:03d}.{ext}"
127 | video_path = os.path.join(output_dir, video_filename)
128 |
129 | height, width = images.shape[1:3]
130 |
131 | if format == "webm":
132 | codec = "libvpx-vp9"
133 | pix_fmt = "yuva420p"
134 | ffmpeg_args = [
135 | "ffmpeg", "-y", "-f", "rawvideo", "-vcodec", "rawvideo",
136 | "-pix_fmt", "rgba", "-s", f"{width}x{height}", "-r", str(fps),
137 | "-i", "-", "-c:v", codec,
138 | "-crf", "19", "-b:v", "0",
139 | "-pix_fmt", pix_fmt,
140 | "-auto-alt-ref", "0",
141 | video_path
142 | ]
143 | else: # prores_mov
144 | codec = "prores_ks"
145 | pix_fmt = "yuva444p10le"
146 | ffmpeg_args = [
147 | "ffmpeg", "-y", "-f", "rawvideo", "-vcodec", "rawvideo",
148 | "-pix_fmt", "rgba", "-s", f"{width}x{height}", "-r", str(fps),
149 | "-i", "-", "-c:v", codec,
150 | "-profile:v", "4", # ProRes 4444
151 | "-pix_fmt", pix_fmt,
152 | video_path
153 | ]
154 |
155 | frames = []
156 | for img, mask in zip(images, masks):
157 | img = (img.cpu().numpy() * 255).clip(0, 255).astype(np.uint8)
158 | mask = mask.cpu().numpy()
159 |
160 | if flip_mask:
161 | mask = 1.0 - mask
162 |
163 | mask = np.clip(mask, 0, 1)
164 | alpha = (mask * 255).astype(np.uint8)
165 | img[alpha == 0] = 0
166 | rgba = np.dstack([img, alpha])
167 | frames.append(rgba)
168 |
169 | video_data = b''.join([frame.tobytes() for frame in frames])
170 |
171 | try:
172 | subprocess.run(ffmpeg_args, input=video_data, check=True)
173 | except subprocess.CalledProcessError as e:
174 |             raise RuntimeError(f"ffmpeg failed with exit code {e.returncode}") from e
175 |
176 | preview = {
177 | "filename": video_filename,
178 | "subfolder": "",
179 | "type": "output",
180 | "format": f"video/{ext}",
181 | "frame_rate": fps,
182 | "workflow": "",
183 | "fullpath": video_path,
184 | }
185 |
186 | return {"ui": {"gifs": [preview]}, "result": (video_path,)}
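187 | 
188 | 
189 | if __name__ == "__main__":
190 |     # Minimal sketch (illustrative; running this file directly needs a ComfyUI environment because of the
191 |     # folder_paths import above). KeyframeBlender spreads n_frames over the keyframe transitions and builds
192 |     # a triangular denoising curve that dips to 0 at each keyframe and peaks mid-transition.
193 |     keyframes = torch.rand(3, 16, 16, 3)   # 3 keyframes of 16x16 RGB
194 |     ip_feats = torch.rand(3, 4, 8)         # matching ip-adapter embeddings per keyframe
195 |     blend, denoise, ip_masks, curve, traj = KeyframeBlender().blend_keyframes(keyframes, ip_feats, n_frames=10, denoise_gamma=1.0, ip_adapter_gamma=2.0)
196 |     print(blend.shape, denoise.shape, traj.shape)   # torch.Size([10, 16, 16, 3]) torch.Size([10, 16, 16]) torch.Size([10, 4, 8])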
--------------------------------------------------------------------------------
/video_utils/video_interpolation.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 |
4 | def compute_sampling_indices(total_n_frames, target_n_frames, verbose=0):
5 | """
6 |     Computes a subset of frames to sample from a video by rounding evenly spaced indices. It also returns a
7 |     cost that measures the temporal (visual) distortion of the resampled video: the coefficient of variation
8 |     (std / mean) of the differences between consecutive rounded indices. More uneven spacing means a higher cost.
9 | """
10 |
11 | # Generate target_n_frames evenly spaced frame indices
12 | target_indices = np.linspace(0, total_n_frames - 1, target_n_frames)
13 | target_indices_rounded = target_indices.round().astype(int)
14 |
15 | # Calculate the differences between consecutive indices
16 | index_diffs = np.diff(target_indices_rounded)
17 |
18 | # Calculate standard deviation of index differences
19 | std_diff = np.std(index_diffs)
20 | mean_diff = np.mean(index_diffs)
21 |
22 | visual_cost = float(std_diff / mean_diff)
23 |
24 | if verbose:
25 | print("---------------------------")
26 | print("Target indices:")
27 | print(target_indices_rounded)
28 | print(f"Total frames: {total_n_frames}")
29 | print(f"Target frames: {target_n_frames}")
30 | print(f"Standard Deviation of Differences: {std_diff:.3f}")
31 | print(f"Visual Cost: {visual_cost:.3f}")
32 |
33 | # plot the index differences:
34 | plt.figure(figsize=(10, 5))
35 | plt.plot(index_diffs, marker='o')
36 | plt.title(f"diffs @{target_n_frames}, cost = {visual_cost:.3f}, std_diff = {std_diff:.3f}")
37 | plt.savefig(f"index_diffs_{target_n_frames}.png")
38 | plt.close()
39 |
40 | return list(target_indices_rounded), visual_cost
41 |
42 |
43 | def compute_frame_parameters(video_info, target_video_speedup_factor, output_fps, source_sampling_fps_range = [7,12], n_tests = 20):
44 | # Extract relevant data from video_info dictionary
45 | source_fps = video_info['source_fps']
46 | total_n_frames = video_info['loaded_frame_count']
47 |
48 | if source_fps < source_sampling_fps_range[0]:
49 | select_frame_indices = list(range(total_n_frames))
50 | best_source_sampling_fps = source_fps
51 | best_cost = 0
52 | else:
53 | # Step 1: Pick the optimal subset of frames to sample from the source video:
54 | best_cost, best_source_sampling_fps = np.inf, source_fps
55 | max_sampling_rate = min(source_fps, source_sampling_fps_range[1])
56 |
57 | for source_sampling_fps in list(np.linspace(source_sampling_fps_range[0], max_sampling_rate + 1, 100)):
58 | n_target_frames = round(total_n_frames * (source_sampling_fps / source_fps))
59 | target_indices, rounding_cost = compute_sampling_indices(total_n_frames, n_target_frames)
60 |
61 | if rounding_cost < best_cost:
62 | best_cost = rounding_cost
63 | select_frame_indices = target_indices
64 | best_source_sampling_fps = source_sampling_fps
65 |
66 | # Step 2: Compute the output frame multiplier such that the output video has the desired output_fps and appropriate speedup
67 | original_duration = total_n_frames / source_fps
68 | target_duration = original_duration / target_video_speedup_factor
69 | required_output_frames = target_duration * output_fps
70 |
71 |     # To achieve target_video_speedup_factor at output_fps, the selected frames need to be interpolated:
72 |     # frame_multiplier ensures the select_frame_indices frames are interpolated into frame_multiplier * len(select_frame_indices) frames,
73 |     # which are then played back at output_fps.
74 | frame_multiplier = int(round(required_output_frames / len(select_frame_indices)))
75 |
76 | # Compute how much the video will be sped up visually compared to the source:
77 | output_duration = len(select_frame_indices) * frame_multiplier / output_fps
78 | actual_video_speedup_factor = original_duration / output_duration
79 |
80 | print(f"Selected source_video sampling FPS: {best_source_sampling_fps:.3f} with visual cost {best_cost:.5f}")
81 | print(f"Selecting {len(select_frame_indices)} frames from the source video (originally {total_n_frames} frames).")
82 | print(f"Output frame multiplier: {frame_multiplier}")
83 | print(f"Actual achieved visual speedup: {actual_video_speedup_factor:.3f}")
84 |
85 | return select_frame_indices, output_fps, frame_multiplier
86 |
87 |
88 | class VideoFrameSelector:
89 | @classmethod
90 | def INPUT_TYPES(s):
91 |
92 | return {
93 | "required": {
94 | "input_frames": ("IMAGE",),
95 | "video_info": ("VHS_VIDEOINFO",),
96 | "output_fps": ("FLOAT", {"default": 24.0, "min": 1.0, "max": 60.0}),
97 | "target_video_speedup_factor": ("FLOAT", {"default": 1.0, "min": 0.1, "max": 10.0}),
98 | "min_source_sampling_fps": ("INT", {"default": 8, "min": 1, "max": 24}),
99 | "max_source_sampling_fps": ("INT", {"default": 12, "min": 1, "max": 24}),
100 | "frame_load_cap": ("INT", {"default": 0, "min": 0, "max": 1000}),
101 | }
102 | }
103 |
104 | CATEGORY = "Eden 🌱"
105 | RETURN_TYPES = ("IMAGE","INT","FLOAT",)
106 | RETURN_NAMES = ("Selected_frames","multiplier","frame_rate",)
107 | FUNCTION = "select_frames"
108 |
109 | def select_frames(self, input_frames, video_info, output_fps, target_video_speedup_factor, min_source_sampling_fps, max_source_sampling_fps, frame_load_cap):
110 |
111 | # Compute the optimal subset of frames to sample from the source video:
112 | select_frame_indices, output_fps, frame_multiplier = compute_frame_parameters(video_info, target_video_speedup_factor, output_fps, source_sampling_fps_range=[min_source_sampling_fps, max_source_sampling_fps])
113 |
114 | # Select the frames from the input_frames:
115 | selected_frames = input_frames[select_frame_indices]
116 |
117 | if frame_load_cap > 0:
118 | # Limit the number of frames to be loaded:
119 | selected_frames = selected_frames[:frame_load_cap]
120 |
121 | return (selected_frames, frame_multiplier, output_fps,)
122 |
123 |
124 | if __name__ == "__main__":
125 |
126 | video_info_dict = {"source_fps": 100.0, "source_frame_count": 150, "source_duration": 9.375, "source_width": 896, "source_height": 512, "loaded_fps": 16.0, "loaded_frame_count": 150, "loaded_duration": 9.375, "loaded_width": 896, "loaded_height": 512}
127 | target_video_speedup_factor = 0.75
128 | output_fps = 24
129 |
130 | compute_frame_parameters(video_info_dict, target_video_speedup_factor, output_fps)
131 |
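132 |     # Illustrative check (sketch): call compute_sampling_indices directly to see how uneven index spacing
133 |     # drives the visual cost. With 151 source frames, picking 76 frames gives perfectly even 2-frame steps
134 |     # (cost 0.0), while picking 100 frames forces a mix of 1- and 2-frame steps (cost > 0).
135 |     _, even_cost = compute_sampling_indices(151, 76)
136 |     _, uneven_cost = compute_sampling_indices(151, 100)
137 |     print(f"cost @76 frames: {even_cost:.3f}, cost @100 frames: {uneven_cost:.3f}")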
--------------------------------------------------------------------------------