├── .gitignore ├── examples ├── FluxPromptSaver.png └── FluxPromptSaver-v02.png ├── __init__.py ├── pyproject.toml ├── .github └── workflows │ └── publish.yml ├── README.md └── nodes.py /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ -------------------------------------------------------------------------------- /examples/FluxPromptSaver.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/markuryy/ComfyUI-Flux-Prompt-Saver/HEAD/examples/FluxPromptSaver.png -------------------------------------------------------------------------------- /examples/FluxPromptSaver-v02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/markuryy/ComfyUI-Flux-Prompt-Saver/HEAD/examples/FluxPromptSaver-v02.png -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | from .nodes import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS 2 | 3 | __all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS'] 4 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "comfyui-flux-prompt-saver" 3 | description = "The Flux Prompt Saver is set of simple nodes for saving images generated with Flux with A1111-style metadata." 4 | version = "2.0.0" 5 | license = {file = "LICENSE"} 6 | 7 | [project.urls] 8 | Repository = "https://github.com/markuryy/ComfyUI-Flux-Prompt-Saver" 9 | # Used by Comfy Registry https://comfyregistry.org 10 | 11 | [tool.comfy] 12 | PublisherId = "markury" 13 | DisplayName = "ComfyUI-Flux-Prompt-Saver" 14 | Icon = "" 15 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish to Comfy registry 2 | on: 3 | workflow_dispatch: 4 | push: 5 | branches: 6 | - main 7 | - master 8 | paths: 9 | - "pyproject.toml" 10 | 11 | jobs: 12 | publish-node: 13 | name: Publish Custom Node to registry 14 | runs-on: ubuntu-latest 15 | # if this is a forked repository. Skipping the workflow. 16 | if: github.event.repository.fork == false 17 | steps: 18 | - name: Check out code 19 | uses: actions/checkout@v4 20 | - name: Publish Custom Node 21 | uses: Comfy-Org/publish-node-action@main 22 | with: 23 | ## Add your own personal access token to your Github Repository secrets and reference it here. 24 | personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }} 25 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ComfyUI Flux Prompt Saver 2 | 3 | Custom nodes for ComfyUI to save images with standardized metadata that's compatible with common Stable Diffusion tools (Discord bots, prompt readers, image organization tools). Now includes its own sampling node copied from an earlier version of ComfyUI Essentials to maintain compatibility without requiring additional dependencies. 4 | 5 | ## Nodes 6 | 7 | ### 🐈‍⬛ Flux Text Sampler 8 | - Copy of the Flux Sampler Parameters node from ComfyUI Essentials (pre-LoRA version) 9 | - Works exactly like the original with ImpactWildcardEncode and other prompt nodes 10 | - Provides consistent parameter output format that many SD tools can read 11 | - Accepts integer seeds directly from primitive nodes 12 | 13 | ### 🐈‍⬛ Flux Prompt Saver 14 | - Enhanced control with separate path and filename inputs 15 | - Flexible date formatting with `%date:FORMAT%` syntax 16 | - Automatically finds models across multiple directories (checkpoints, models, unet, diffusion_models) 17 | - Creates output directories as needed 18 | - Embeds comprehensive parameter metadata in saved images 19 | 20 | ### 🐈‍⬛ Model Name 21 | - Provides model selection from all available model directories 22 | - Simple string output for use with other nodes 23 | - Helps handle different model types without modifying loaders 24 | 25 | ## Installation 26 | 27 | ```bash 28 | git clone https://github.com/markuryy/ComfyUI-Flux-Prompt-Saver 29 | ``` 30 | 31 | > Note: Also available through ComfyUI Manager 32 | 33 | ## Breaking Changes in 2.0.0 34 | 35 | - Split save location into separate path and filename inputs 36 | - Removed dependency on ComfyUI Essentials 37 | - Added built-in `🐈‍⬛ Flux Text Sampler` (copy of original Flux Sampler Parameters) 38 | - Added `🐈‍⬛ Model Name` node for simpler model selection 39 | - Changed to `%date:FORMAT%` syntax for date formatting 40 | 41 | ## Usage 42 | 43 | ### File Path Formatting 44 | The save_path and filename inputs support various placeholder patterns: 45 | - Date formats: `%date:FORMAT%` where FORMAT is any strftime format 46 | - Example: `%date:yyyy-MM-dd%` for folder path 47 | - Example: `FLUX_%date:HHmmss%` for filename 48 | - Image properties: `%width%`, `%height%`, `%seed%` 49 | 50 | ### Example Paths 51 | ``` 52 | save_path: %date:yyyy-MM-dd% 53 | filename: FLUX_%date:HHmmss% 54 | # Results in: output/2024-01-30/FLUX_143022_00000_.png 55 | ``` 56 | 57 | ## Example Workflow 58 | 59 | ![Example Workflow](examples/FluxPromptSaver-v02.png) 60 | 61 | Drag and drop the above image into ComfyUI to load the workflow (the workflow JSON is embedded in the image). 62 | 63 | ### Required Nodes for Example 64 | - [ComfyUI-Impact-Pack](https://github.com/ltdrdata/ComfyUI-Impact-Pack) 65 | - [ComfyUI-Custom-Scripts](https://github.com/pythongosssss/ComfyUI-Custom-Scripts) 66 | - [ComfyUI_Comfyroll_CustomNodes](https://github.com/RockOfFire/ComfyUI_Comfyroll_CustomNodes) 67 | 68 | While no longer required, [ComfyUI_essentials](https://github.com/cubiq/ComfyUI_essentials) is still recommended for its other useful nodes. 69 | 70 | ## Credits 71 | - Original Flux Sampler Parameters node by [cubiq](https://github.com/cubiq) 72 | - ComfyUI Essentials: https://github.com/cubiq/ComfyUI_essentials -------------------------------------------------------------------------------- /nodes.py: -------------------------------------------------------------------------------- 1 | import os 2 | import re 3 | import torch 4 | import numpy as np 5 | from PIL import Image 6 | from PIL.PngImagePlugin import PngInfo 7 | import folder_paths 8 | from datetime import datetime 9 | import comfy.samplers 10 | import random 11 | import time 12 | import logging 13 | from comfy.utils import ProgressBar 14 | from comfy_extras.nodes_custom_sampler import Noise_RandomNoise, BasicScheduler, BasicGuider, SamplerCustomAdvanced 15 | from comfy_extras.nodes_latent import LatentBatch 16 | from comfy_extras.nodes_model_advanced import ModelSamplingFlux, ModelSamplingAuraFlow 17 | 18 | def parse_string_to_list(input_string): 19 | try: 20 | if not input_string: 21 | return [] 22 | items = input_string.replace('\n', ',').split(',') 23 | result = [] 24 | for item in items: 25 | item = item.strip() 26 | if not item: 27 | continue 28 | try: 29 | num = float(item) 30 | if num.is_integer(): 31 | num = int(num) 32 | result.append(num) 33 | except ValueError: 34 | continue 35 | return result 36 | except: 37 | return [] 38 | 39 | def conditioning_set_values(conditioning, values): 40 | c = [] 41 | for t in conditioning: 42 | n = [t[0], t[1].copy()] 43 | for k, v in values.items(): 44 | if k == "guidance": 45 | n[1]['guidance_scale'] = v 46 | c.append(tuple(n)) 47 | return c 48 | 49 | class FluxTextSampler: 50 | @classmethod 51 | def INPUT_TYPES(s): 52 | return {"required": { 53 | "model": ("MODEL", ), 54 | "conditioning": ("CONDITIONING", ), 55 | "latent_image": ("LATENT", ), 56 | "seed": ("INT", { "default": 0, "min": 0, "max": 0xffffffffffffffff }), 57 | "sampler": ("STRING", { "multiline": False, "dynamicPrompts": False, "default": "euler" }), 58 | "scheduler": ("STRING", { "multiline": False, "dynamicPrompts": False, "default": "simple" }), 59 | "steps": ("STRING", { "multiline": False, "dynamicPrompts": False, "default": "20" }), 60 | "guidance": ("STRING", { "multiline": False, "dynamicPrompts": False, "default": "3.5" }), 61 | "max_shift": ("STRING", { "multiline": False, "dynamicPrompts": False, "default": "" }), 62 | "base_shift": ("STRING", { "multiline": False, "dynamicPrompts": False, "default": "" }), 63 | "denoise": ("STRING", { "multiline": False, "dynamicPrompts": False, "default": "1.0" }), 64 | }} 65 | 66 | RETURN_TYPES = ("LATENT","SAMPLER_PARAMS") 67 | RETURN_NAMES = ("latent", "params") 68 | FUNCTION = "execute" 69 | CATEGORY = "sampling" 70 | 71 | def execute(self, model, conditioning, latent_image, seed, sampler, scheduler, steps, guidance, max_shift, base_shift, denoise): 72 | is_schnell = model.model.model_type == comfy.model_base.ModelType.FLOW 73 | 74 | # Handle seed 75 | noise = [seed] 76 | 77 | if sampler == '*': 78 | sampler = comfy.samplers.KSampler.SAMPLERS 79 | elif sampler.startswith("!"): 80 | sampler = sampler.replace("\n", ",").split(",") 81 | sampler = [s.strip("! ") for s in sampler] 82 | sampler = [s for s in comfy.samplers.KSampler.SAMPLERS if s not in sampler] 83 | else: 84 | sampler = sampler.replace("\n", ",").split(",") 85 | sampler = [s.strip() for s in sampler if s.strip() in comfy.samplers.KSampler.SAMPLERS] 86 | if not sampler: 87 | sampler = ['euler'] 88 | 89 | if scheduler == '*': 90 | scheduler = comfy.samplers.KSampler.SCHEDULERS 91 | elif scheduler.startswith("!"): 92 | scheduler = scheduler.replace("\n", ",").split(",") 93 | scheduler = [s.strip("! ") for s in scheduler] 94 | scheduler = [s for s in comfy.samplers.KSampler.SCHEDULERS if s not in scheduler] 95 | else: 96 | scheduler = scheduler.replace("\n", ",").split(",") 97 | scheduler = [s.strip() for s in scheduler] 98 | scheduler = [s for s in scheduler if s in comfy.samplers.KSampler.SCHEDULERS] 99 | if not scheduler: 100 | scheduler = ['simple'] 101 | 102 | if steps == "": 103 | if is_schnell: 104 | steps = "4" 105 | else: 106 | steps = "20" 107 | steps = parse_string_to_list(steps) 108 | 109 | denoise = "1.0" if denoise == "" else denoise 110 | denoise = parse_string_to_list(denoise) 111 | 112 | guidance = "3.5" if guidance == "" else guidance 113 | guidance = parse_string_to_list(guidance) 114 | 115 | if not is_schnell: 116 | max_shift = "1.15" if max_shift == "" else max_shift 117 | base_shift = "0.5" if base_shift == "" else base_shift 118 | else: 119 | max_shift = "0" 120 | base_shift = "1.0" if base_shift == "" else base_shift 121 | 122 | max_shift = parse_string_to_list(max_shift) 123 | base_shift = parse_string_to_list(base_shift) 124 | 125 | cond_text = None 126 | if isinstance(conditioning, dict) and "encoded" in conditioning: 127 | cond_text = conditioning["text"] 128 | cond_encoded = conditioning["encoded"] 129 | else: 130 | cond_encoded = [conditioning] 131 | 132 | out_latent = None 133 | out_params = [] 134 | 135 | basicschedueler = BasicScheduler() 136 | basicguider = BasicGuider() 137 | samplercustomadvanced = SamplerCustomAdvanced() 138 | latentbatch = LatentBatch() 139 | modelsamplingflux = ModelSamplingFlux() if not is_schnell else ModelSamplingAuraFlow() 140 | width = latent_image["samples"].shape[3]*8 141 | height = latent_image["samples"].shape[2]*8 142 | 143 | total_samples = len(cond_encoded) * len(noise) * len(max_shift) * len(base_shift) * len(guidance) * len(sampler) * len(scheduler) * len(steps) * len(denoise) 144 | current_sample = 0 145 | if total_samples > 1: 146 | pbar = ProgressBar(total_samples) 147 | 148 | for i in range(len(cond_encoded)): 149 | conditioning = cond_encoded[i] 150 | ct = cond_text[i] if cond_text else None 151 | for n in noise: 152 | randnoise = Noise_RandomNoise(n) 153 | for ms in max_shift: 154 | for bs in base_shift: 155 | if is_schnell: 156 | work_model = modelsamplingflux.patch_aura(model, bs)[0] 157 | else: 158 | work_model = modelsamplingflux.patch(model, ms, bs, width, height)[0] 159 | for g in guidance: 160 | cond = conditioning_set_values(conditioning, {"guidance": g}) 161 | guider = basicguider.get_guider(work_model, cond)[0] 162 | for s in sampler: 163 | samplerobj = comfy.samplers.sampler_object(s) 164 | for sc in scheduler: 165 | for st in steps: 166 | for d in denoise: 167 | sigmas = basicschedueler.get_sigmas(work_model, sc, st, d)[0] 168 | current_sample += 1 169 | logging.info(f"Sampling {current_sample}/{total_samples} with seed {n}, sampler {s}, scheduler {sc}, steps {st}, guidance {g}, max_shift {ms}, base_shift {bs}, denoise {d}") 170 | start_time = time.time() 171 | latent = samplercustomadvanced.sample(randnoise, guider, samplerobj, sigmas, latent_image)[1] 172 | elapsed_time = time.time() - start_time 173 | out_params.append({ 174 | "time": elapsed_time, 175 | "seed": n, 176 | "width": width, 177 | "height": height, 178 | "sampler": s, 179 | "scheduler": sc, 180 | "steps": st, 181 | "guidance": g, 182 | "max_shift": ms, 183 | "base_shift": bs, 184 | "denoise": d, 185 | "prompt": ct 186 | }) 187 | 188 | if out_latent is None: 189 | out_latent = latent 190 | else: 191 | out_latent = latentbatch.batch(out_latent, latent)[0] 192 | if total_samples > 1: 193 | pbar.update(1) 194 | 195 | return (out_latent, out_params) 196 | 197 | class FluxPromptSaver: 198 | def __init__(self): 199 | self.output_dir = folder_paths.get_output_directory() 200 | self.type = "output" 201 | self.default_size = 1344 # Default image size 202 | 203 | @classmethod 204 | def INPUT_TYPES(s): 205 | return { 206 | "required": { 207 | "images": ("IMAGE",), 208 | "params": ("SAMPLER_PARAMS",), 209 | "positive": ("STRING", {"forceInput": True}), 210 | "model_name": ("STRING", {"forceInput": True}), 211 | "filename_prefix": ("STRING", { 212 | "default": "%date:yyyy-MM-dd%", 213 | "tooltip": "Subfolder to save the images in. Supports date formatting like %date:yyyy-MM-dd%" 214 | }), 215 | "filename": ("STRING", { 216 | "default": "FLUX_%date:HHmmss%", 217 | "tooltip": "Filename for the image. Supports date formatting like %date:HHmmss%" 218 | }), 219 | }, 220 | "optional": { 221 | "negative": ("STRING", {"forceInput": True}), 222 | } 223 | } 224 | 225 | 226 | RETURN_TYPES = () 227 | FUNCTION = "save_images" 228 | OUTPUT_NODE = True 229 | CATEGORY = "image" 230 | 231 | def save_images(self, images, params, positive, model_name, filename_prefix, filename, negative=""): 232 | # Replace date placeholders with actual date strings 233 | filename_prefix = self.replace_date_placeholders(filename_prefix) 234 | filename = self.replace_date_placeholders(filename) 235 | 236 | results = [] 237 | p = params[0] 238 | 239 | # Construct the full output folder path 240 | full_output_folder = os.path.join(self.output_dir, filename_prefix) 241 | os.makedirs(full_output_folder, exist_ok=True) 242 | 243 | for image in images: 244 | i = 255. * image.cpu().numpy() 245 | img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8)) 246 | 247 | metadata = PngInfo() 248 | metadata.add_text("parameters", self.create_metadata_string(p, positive, negative, model_name)) 249 | 250 | # Initial file path 251 | file_base = filename 252 | file_ext = ".png" 253 | file_name = f"{file_base}{file_ext}" 254 | file_path = os.path.join(full_output_folder, file_name) 255 | 256 | # Check if file exists and add iterator if necessary 257 | counter = 1 258 | while os.path.exists(file_path): 259 | file_name = f"{file_base}_{counter}{file_ext}" 260 | file_path = os.path.join(full_output_folder, file_name) 261 | counter += 1 262 | 263 | # Save the image 264 | img.save(file_path, pnginfo=metadata, optimize=True) 265 | results.append({ 266 | "filename": file_name, 267 | "subfolder": filename_prefix, 268 | "type": self.type 269 | }) 270 | 271 | return {"ui": {"images": results}} 272 | 273 | def replace_date_placeholders(self, s): 274 | # Regular expression to find all '%date:...%' placeholders 275 | date_placeholder_pattern = re.compile(r'%date:(.*?)%') 276 | 277 | def replace_match(match): 278 | # Extract the date format from the placeholder 279 | date_format = match.group(1) 280 | # Map custom date tokens to strftime tokens 281 | format_mappings = { 282 | 'yyyy': '%Y', 283 | 'MM': '%m', 284 | 'dd': '%d', 285 | 'HH': '%H', 286 | 'mm': '%M', 287 | 'ss': '%S', 288 | # Add more mappings if needed 289 | } 290 | # Replace custom tokens with strftime tokens 291 | for token, strftime_token in format_mappings.items(): 292 | date_format = date_format.replace(token, strftime_token) 293 | try: 294 | # Return the formatted date 295 | return datetime.now().strftime(date_format) 296 | except Exception as e: 297 | # If formatting fails, return the original placeholder 298 | return match.group(0) 299 | 300 | # Replace all date placeholders in the string 301 | return date_placeholder_pattern.sub(replace_match, s) 302 | 303 | def create_metadata_string(self, params, positive, negative, model_name): 304 | sampler_scheduler = f"{params['sampler']}_{params['scheduler']}" if params['scheduler'] != 'normal' else params['sampler'] 305 | 306 | negative_text = "(not used)" if not negative else negative 307 | 308 | guidance_val = params.get('guidance', 1.0) 309 | seed_val = params.get('seed', '?') 310 | 311 | return f"{positive}\nNegative prompt: {negative_text}\n" \ 312 | f"Steps: {params['steps']}, Sampler: {sampler_scheduler}, CFG scale: {guidance_val}, Seed: {seed_val}, " \ 313 | f"Size: {params['width']}x{params['height']}, Model hash: {params.get('model_hash', '')}, " \ 314 | f"Model: {model_name}, Version: ComfyUI" 315 | 316 | 317 | class ModelName: 318 | @classmethod 319 | def INPUT_TYPES(s): 320 | model_list = [] 321 | for model_folder in ["checkpoints", "models", "unet", "diffusion_models"]: 322 | try: 323 | model_list.extend(folder_paths.get_filename_list(model_folder)) 324 | except: 325 | pass 326 | model_list = list(set(model_list)) 327 | 328 | return {"required": {"model_name": (model_list,)}} 329 | 330 | RETURN_TYPES = ("STRING",) 331 | FUNCTION = "get_name" 332 | CATEGORY = "utils" 333 | 334 | def get_name(self, model_name): 335 | return (model_name,) 336 | 337 | NODE_CLASS_MAPPINGS = { 338 | "FluxPromptSaver": FluxPromptSaver, 339 | "FluxTextSampler": FluxTextSampler, 340 | "ModelName": ModelName 341 | } 342 | 343 | NODE_DISPLAY_NAME_MAPPINGS = { 344 | "FluxPromptSaver": "🐈‍⬛ Flux Prompt Saver", 345 | "FluxTextSampler": "🐈‍⬛ Flux Text Sampler", 346 | "ModelName": "🐈‍⬛ Model Name" 347 | } --------------------------------------------------------------------------------