├── requirements.txt
├── example.png
├── highlight.jpg
├── trash
│   ├── output.txt
│   ├── check_vq_scheduler.py
│   ├── check_vq_scheduler_sigmas.py
│   ├── verify_nodes.py
│   ├── Z-IMAGE-TURBO.md
│   ├── test_extraction_batch.py
│   └── batch_test_results.txt
├── G62578SWUAAfD2f.jpg
├── workflow_1.0.6.png
├── fist_gen_settings.jpg
├── .gitignore
├── requirements.md
├── .github
│   └── workflows
│       └── publish.yml
├── LICENSE.TXT
├── pyproject.toml
├── README.md
├── extract_metadata_node.py
├── flash_attention_node.py
├── nunchaku_compat.py
└── __init__.py
/requirements.txt:
--------------------------------------------------------------------------------
1 | diffusers
--------------------------------------------------------------------------------
/example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erosDiffusion/ComfyUI-EulerDiscreteScheduler/HEAD/example.png
--------------------------------------------------------------------------------
/highlight.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erosDiffusion/ComfyUI-EulerDiscreteScheduler/HEAD/highlight.jpg
--------------------------------------------------------------------------------
/trash/output.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erosDiffusion/ComfyUI-EulerDiscreteScheduler/HEAD/trash/output.txt
--------------------------------------------------------------------------------
/G62578SWUAAfD2f.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erosDiffusion/ComfyUI-EulerDiscreteScheduler/HEAD/G62578SWUAAfD2f.jpg
--------------------------------------------------------------------------------
/workflow_1.0.6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erosDiffusion/ComfyUI-EulerDiscreteScheduler/HEAD/workflow_1.0.6.png
--------------------------------------------------------------------------------
/fist_gen_settings.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erosDiffusion/ComfyUI-EulerDiscreteScheduler/HEAD/fist_gen_settings.jpg
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__
2 | node.zip
3 | .vs/
4 | ComfyUI-EulerDiscreteScheduler/.vs
5 | .idea
6 | .iml
7 | .vs
8 | EulerDiscrete.iml
9 |
--------------------------------------------------------------------------------
/trash/check_vq_scheduler.py:
--------------------------------------------------------------------------------
1 |
2 | try:
3 | from diffusers import VQDiffusionScheduler
4 | import inspect
5 |
6 | print("VQDiffusionScheduler found!")
7 | print("Init signature:")
8 | print(inspect.signature(VQDiffusionScheduler.__init__))
9 |
10 | # Also check config defaults if possible
11 | scheduler = VQDiffusionScheduler()
12 | print("\nDefault config:")
13 | print(scheduler.config)
14 |
15 | except ImportError:
16 | print("VQDiffusionScheduler not found in diffusers.")
17 | except Exception as e:
18 | print(f"Error: {e}")
19 |
--------------------------------------------------------------------------------
/requirements.md:
--------------------------------------------------------------------------------
1 | # Requirements
2 |
3 | ## Requirement 1
4 | **Date**: 2025-12-04
5 | **Description**: Create code to expose VQVAE scheduler (VQDiffusionScheduler) in the schedulers list and as a node to see if it can be used for sampling.
6 | **Branch**: feature/req1-vq-scheduler
7 | **Status**: In Progress
8 |
9 | ## Requirement 2
10 | **Date**: 2025-12-09
11 | **Description**: Investigate and fix a compatibility issue with the RES4LYF custom node package, where FlowMatchEulerDiscreteScheduler disappears from the KSampler scheduler list.
12 | **Branch**: feature/req2-res4lyf-compat
13 | **Status**: Completed
14 |
--------------------------------------------------------------------------------
/trash/check_vq_scheduler_sigmas.py:
--------------------------------------------------------------------------------
1 |
2 | try:
3 | from diffusers import VQDiffusionScheduler
4 | import torch
5 |
6 | scheduler = VQDiffusionScheduler(num_vec_classes=4096, num_train_timesteps=100)
7 | print("Scheduler created successfully.")
8 |
9 | print(f"Has sigmas attribute? {hasattr(scheduler, 'sigmas')}")
10 |
11 | scheduler.set_timesteps(10)
12 | print("Timesteps set to 10.")
13 | print(f"Timesteps: {scheduler.timesteps}")
14 |
15 | if hasattr(scheduler, 'sigmas'):
16 | print(f"Sigmas: {scheduler.sigmas}")
17 | else:
18 | print("No 'sigmas' attribute found. This scheduler might not be compatible with ComfyUI's standard sampler loop which expects sigmas.")
19 |
20 | except Exception as e:
21 | print(f"Error: {e}")
22 |
--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | name: Publish to Comfy registry
2 | on:
3 | workflow_dispatch:
4 | push:
5 | branches:
6 | - main
7 | - master
8 | paths:
9 | - "pyproject.toml"
10 |
11 | permissions:
12 | issues: write
13 |
14 | jobs:
15 | publish-node:
16 | name: Publish Custom Node to registry
17 | runs-on: ubuntu-latest
18 | if: ${{ github.repository_owner == 'erosDiffusion' }}
19 | steps:
20 | - name: Check out code
21 | uses: actions/checkout@v4
22 | with:
23 | submodules: true
24 | - name: Publish Custom Node
25 | uses: Comfy-Org/publish-node-action@v1
26 | with:
27 | ## Add your own personal access token to your GitHub repository secrets and reference it here.
28 | personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }}
29 |
--------------------------------------------------------------------------------
/LICENSE.TXT:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2025 erosDiffusion
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "erosdiffusion-eulerflowmatchingdiscretescheduler"
3 | description = "Noise-free images with the Euler Discrete Scheduler in ComfyUI, for Z-Image and other models"
4 | version = "1.0.8"
5 | license = {file = "LICENSE.TXT"}
6 | dependencies = ["diffusers"]
7 | # classifiers = [
8 | # # For OS-independent nodes (works on all operating systems)
9 | # "Operating System :: OS Independent",
10 | #
11 | # # OR for OS-specific nodes, specify the supported systems:
12 | # "Operating System :: Microsoft :: Windows", # Windows specific
13 | # "Operating System :: POSIX :: Linux", # Linux specific
14 | # "Operating System :: MacOS", # macOS specific
15 | #
16 | # # GPU Accelerator support. Pick the ones that are supported by your extension.
17 | # "Environment :: GPU :: NVIDIA CUDA", # NVIDIA CUDA support
18 | # ]
19 |
20 | [project.urls]
21 | Repository = "https://github.com/erosDiffusion/ComfyUI-EulerDiscreteScheduler"
22 | # Used by Comfy Registry https://registry.comfy.org
23 | Documentation = "https://github.com/erosDiffusion/ComfyUI-EulerDiscreteScheduler/wiki"
24 | "Bug Tracker" = "https://github.com/erosDiffusion/ComfyUI-EulerDiscreteScheduler/issues"
25 |
26 | [tool.comfy]
27 | PublisherId = "erosdiffusion"
28 | DisplayName = "ComfyUI-EulerFlowMatchingDiscreteScheduler"
29 | Icon = "💜"
30 | includes = []
31 | # "requires-comfyui" = ">=1.0.0" # ComfyUI version compatibility
32 |
--------------------------------------------------------------------------------
/trash/verify_nodes.py:
--------------------------------------------------------------------------------
1 |
2 | import sys
3 | import os
4 | import importlib.util
5 |
6 | # Add the parent directory to sys.path so we can import EulerDiscrete as a module
7 | current_dir = os.path.dirname(os.path.abspath(__file__))
8 | parent_dir = os.path.dirname(current_dir)
9 | sys.path.append(parent_dir)
10 |
11 | try:
12 | # Import EulerDiscrete as a module
13 | import EulerDiscrete
14 |
15 | print("Successfully imported EulerDiscrete package.")
16 |
17 | mappings = EulerDiscrete.NODE_CLASS_MAPPINGS
18 |
19 | print("\nChecking NODE_CLASS_MAPPINGS:")
20 |
21 | has_flow_match = "FlowMatchEulerDiscreteScheduler (Custom)" in mappings
22 | has_vq = "VQDiffusionScheduler" in mappings
23 |
24 | if has_flow_match:
25 | print("✅ FlowMatchEulerDiscreteScheduler (Custom) found.")
26 | else:
27 | print("❌ FlowMatchEulerDiscreteScheduler (Custom) NOT found!")
28 |
29 | if has_vq:
30 | print("✅ VQDiffusionScheduler found.")
31 | else:
32 | print("❌ VQDiffusionScheduler NOT found!")
33 |
34 | if has_flow_match and has_vq:
35 | print("\nSUCCESS: Both schedulers are present.")
36 | else:
37 | print("\nFAILURE: Missing schedulers.")
38 | sys.exit(1)
39 |
40 | except Exception as e:
41 | print(f"\nERROR: Import failed: {e}")
42 | # Print traceback for more details
43 | import traceback
44 | traceback.print_exc()
45 | sys.exit(1)
46 |
--------------------------------------------------------------------------------
/trash/Z-IMAGE-TURBO.md:
--------------------------------------------------------------------------------
1 | # Z-Image-Turbo Configuration
2 |
3 | This node is pre-configured with defaults optimized for the **Z-Image-Turbo** model.
4 |
5 | ## Default Configuration
6 |
7 | Based on [Tongyi-MAI/Z-Image-Turbo](https://huggingface.co/Tongyi-MAI/Z-Image-Turbo) scheduler config:
8 |
9 | ```json
10 | {
11 | "num_train_timesteps": 1000,
12 | "use_dynamic_shifting": false,
13 | "shift": 3.0,
14 | "steps": 9
15 | }
16 | ```
17 |
18 | ### Key Z-Image-Turbo Settings
19 |
20 | - **steps**: `9` (actually results in 8 DiT forward passes)
21 | - **shift**: `3.0` (optimized for Turbo model)
22 | - **use_dynamic_shifting**: `false` (disabled for consistent Turbo performance)
23 | - **base_shift**: `0.5` (default diffusers value)
24 | - **max_shift**: `1.15` (default diffusers value)
25 | - **num_train_timesteps**: `1000`
26 |
27 | ### Usage with Z-Image-Turbo
28 |
29 | ```
30 | Z-Image-Turbo Model -> SamplerCustom
31 | ↑
32 | |
33 | FlowMatch Euler Scheduler (use defaults)
34 | ```
35 |
36 | **Important**: For Z-Image-Turbo, use **guidance_scale=0.0** in your sampler/pipeline as Turbo models are guidance-free.
37 |
38 | ## Adjusting Parameters
39 |
40 | While the defaults are optimized for Z-Image-Turbo, you can experiment:
41 |
42 | - **More quality**: Increase `steps` to 15-20 (slower but potentially better)
43 | - **More speed**: Reduce `steps` to 4-6 (faster but lower quality)
44 | - **Different shift**: Adjust `shift` parameter (3.0 is optimal for Turbo)
45 |
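As a concrete reference, here is a minimal sketch of building this schedule directly with the diffusers API, using the defaults listed above (a sketch of the equivalent call, not this node's exact code):

```python
# Sketch: construct a FlowMatchEulerDiscreteScheduler with the
# Z-Image-Turbo defaults above and inspect the resulting schedule.
from diffusers import FlowMatchEulerDiscreteScheduler

scheduler = FlowMatchEulerDiscreteScheduler(
    num_train_timesteps=1000,
    shift=3.0,
    use_dynamic_shifting=False,
)
scheduler.set_timesteps(9)   # "steps": 9 -> 8 DiT forward passes
print(scheduler.timesteps)   # descending timesteps
print(scheduler.sigmas)      # steps + 1 sigmas, ending at 0.0
```
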
46 | ## Reference
47 |
48 | Official Z-Image-Turbo example:
49 | ```python
50 | image = pipe(
51 | prompt=prompt,
52 | height=1024,
53 | width=1024,
54 | num_inference_steps=9, # 9 steps = 8 DiT forwards
55 | guidance_scale=0.0, # No guidance for Turbo
56 | generator=torch.Generator("cuda").manual_seed(42),
57 | ).images[0]
58 | ```
59 |
60 | See full documentation: [https://huggingface.co/Tongyi-MAI/Z-Image-Turbo](https://huggingface.co/Tongyi-MAI/Z-Image-Turbo)
61 |
--------------------------------------------------------------------------------
/trash/test_extraction_batch.py:
--------------------------------------------------------------------------------
1 | import os
2 | import random
3 | import sys
4 | import types
5 | 
6 | # Add the parent directory to sys.path so we can import extract_metadata_node (it lives one level up)
7 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
8 | 
9 | # Register a mock folder_paths module before importing the node, so this script runs outside ComfyUI
10 | folder_paths = sys.modules["folder_paths"] = types.ModuleType("folder_paths")
11 | folder_paths.get_annotated_filepath = lambda x: x  # just return the path as-is
12 | from extract_metadata_node import ImageMetadataExtractor
13 |
14 | def main():
15 | output_dir = r"D:\ComfyUI7\ComfyUI\output"
16 | output_file = "batch_test_results.txt"
17 |
18 | # Get all image files
19 | all_files = [os.path.join(output_dir, f) for f in os.listdir(output_dir)
20 | if f.lower().endswith(('.png', '.jpg', '.jpeg', '.webp'))]
21 |
22 | if not all_files:
23 | print(f"No images found in {output_dir}")
24 | return
25 |
26 | # Select 20 random images
27 | num_samples = min(20, len(all_files))
28 | selected_files = random.sample(all_files, num_samples)
29 |
30 | extractor = ImageMetadataExtractor()
31 |
32 | print(f"Testing on {num_samples} images...")
33 |
34 | with open(output_file, "w", encoding="utf-8") as f:
35 | for i, file_path in enumerate(selected_files):
36 | try:
37 | # We need to bypass the folder_paths.get_annotated_filepath call inside the node
38 | # by mocking it, or just passing the absolute path if our mock above works.
39 | # The node calls folder_paths.get_annotated_filepath(image)
40 | # Our mock returns x, so we pass the full path.
41 |
42 | prompt, width, height = extractor.extract_metadata(file_path)
43 |
44 | separator = "=" * 80
45 | entry = f"{separator}\nImage: {os.path.basename(file_path)}\nDimensions: {width}x{height}\nPrompt:\n{prompt}\n"
46 |
47 | f.write(entry + "\n")
48 | print(f"Processed {i+1}/{num_samples}: {os.path.basename(file_path)}")
49 |
50 | except Exception as e:
51 | error_msg = f"Error processing {os.path.basename(file_path)}: {e}\n"
52 | f.write(error_msg)
53 | print(error_msg)
54 |
55 | print(f"Done. Results saved to {output_file}")
56 |
57 | if __name__ == "__main__":
58 | main()
59 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # FlowMatch Euler Discrete Scheduler for ComfyUI
2 |
3 | 9 steps, big res, zero noise.
4 |
5 | **FlowMatchEulerDiscreteScheduler** is not exposed in ComfyUI by default, but it is what the official Z-Image demo in diffusers uses.
6 |
7 | So:
8 |
9 | - I am exposing it in the scheduler section for you to use within KSampler.
10 | - On top of that, I provide an experimental node that configures the scheduler for use with SamplerCustom, so you can experiment.
11 |
12 | In short... if you want **sharper, noise-free images**, use this!
13 |
14 | ## Installation
15 |
16 | - use ComfyUI Manager (search for erosDiffusion or ComfyUI-EulerFlowMatchingDiscreteScheduler)
17 |
18 | or
19 |
20 | - run `git clone https://github.com/erosDiffusion/ComfyUI-EulerDiscreteScheduler.git` in your `custom_nodes` folder.
21 |
22 | Example output (more below)
23 |
24 |
25 | ## What you get
26 |
27 | - one new scheduler **FlowMatchEulerDiscreteScheduler** registered in the KSampler
28 | - a custom node that exposes all parameters of the FlowMatchEulerDiscreteScheduler and outputs **SIGMAS** for use with the **SamplerCustom** node.
29 |
30 | 
31 |
32 | ## Usage
33 |
34 | - **Simple**: select the FlowMatchEulerDiscreteScheduler in the default workflow from ComfyUI and run.
35 | - **Advanced/experimental**:
36 | 1. Add **FlowMatch Euler Discrete Scheduler (Custom)** node to your workflow
37 | 2. Connect its SIGMAS output to **SamplerCustom** node's sigmas input
38 | 3. Adjust parameters to control the sampling behavior; you have ALL the parameters to play with (see the sketch below).
39 |
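If you are curious what the node computes, here is a minimal sketch of the equivalent diffusers call (illustrative values; the node exposes all of these parameters):

```python
# Minimal sketch (assumes diffusers is installed): the SIGMAS output by the
# custom node come from a FlowMatchEulerDiscreteScheduler schedule like this.
from diffusers import FlowMatchEulerDiscreteScheduler

scheduler = FlowMatchEulerDiscreteScheduler(num_train_timesteps=1000, shift=3.0)
scheduler.set_timesteps(9)  # number of steps; the Z-Image-Turbo demo uses 9
print(scheduler.sigmas)     # tensor of steps + 1 sigmas, for SamplerCustom
```
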
40 | ## Troubleshooting
41 | - If the scheduler does not appear while the RES4LYF package is installed, try one of these:
42 |   - Workaround 1: add a SamplerCustom node and connect its sigmas input to a BasicScheduler node; the scheduler should then be available in the list.
43 |   - Workaround 2: disable RES4LYF if you don't need it.
44 |   - Workaround 3: use the FlowMatch Euler Discrete Scheduler (Custom) node and connect it to the sigmas input of SamplerCustom.
45 | - If your install fails, you might need a specific version of the peft package; some users reported this issue. Check the startup logs and install the proper version.
46 |
47 | ## Tech bits:
48 |
49 | - https://huggingface.co/docs/diffusers/api/schedulers/flow_match_euler_discrete
50 | - https://huggingface.co/Tongyi-MAI/Z-Image-Turbo/blob/main/scheduler/scheduler_config.json
51 |
52 | ## Find this useful and want to support?
53 |
54 | [Buy me a beer!](https://donate.stripe.com/cNi9ALaASf65clXahPcV201)
55 |
56 |
57 |
58 | More examples:
59 |
60 |
61 |
62 | ## Changelog
63 | **1.0.8**
64 |
65 | - attempted to fix the incompatibility with RES4LYF by explicitly adding the scheduler to KSampler's scheduler list.
66 |
67 | **1.0.7**
68 |
69 | - Nunchaku Qwen patch fix and Tiled Diffusion patch fix.
70 |   Users reported issues with dimensions not being handled correctly; this should fix it.
71 |
72 |
73 | **1.0.6**
74 |
75 | - updated example
76 | - updated pyproject deps (diffusers)
77 |
78 | **1.0.5**
79 |
80 | - removed the bad practice of forcing a diffusers install on error (dependencies now live in requirements.txt, and your existing diffusers install is no longer rolled back or replaced)
81 |
82 | **1.0.4**
83 |
84 | - added start and end step parameters (contributed by Etupa), with some fixes (can be used for image-to-image or restart sampling)
85 |
86 |
87 | **1.0.3**
88 |
89 | - node publish action
90 |
91 | **1.0.2**
92 |
93 | - changed the device management in the custom scheduler node to run on the GPU (CUDA)
94 | - removed the flash attention node dependency from the custom scheduler node
95 | - removed the flash attention node from init
96 | - added MIT license
97 |
--------------------------------------------------------------------------------
/extract_metadata_node.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import os
3 | import json
4 | from PIL import Image, ImageOps
5 | import folder_paths
6 | import numpy as np
7 |
8 | class ImageMetadataExtractor:
9 | @classmethod
10 | def INPUT_TYPES(s):
11 | input_dir = folder_paths.get_input_directory()
12 | files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
13 | return {"required":
14 | {"image": (sorted(files), {"image_upload": True})},
15 | }
16 |
17 | RETURN_TYPES = ("IMAGE", "STRING", "INT", "INT", "STRING")
18 | RETURN_NAMES = ("image", "positive_prompt", "width", "height", "filename")
19 | FUNCTION = "extract_metadata"
20 | CATEGORY = "utils"
21 |
22 | def extract_metadata(self, image):
23 | image_path = folder_paths.get_annotated_filepath(image)
24 | img = Image.open(image_path)
25 |
26 | output_image = ImageOps.exif_transpose(img)
27 | output_image = output_image.convert("RGB")
28 | output_image = np.array(output_image).astype(np.float32) / 255.0
29 | output_image = torch.from_numpy(output_image)[None,]
30 |
31 | positive_prompt = ""
32 | width = 0
33 | height = 0
34 |
35 | # Extract from 'prompt' (API format) which is what ComfyUI uses for execution
36 | if 'prompt' in img.info:
37 | try:
38 | prompt = json.loads(img.info['prompt'])
39 |
40 | # 1. Find Positive Prompt
41 | # Strategy: Find KSampler -> positive input -> CLIPTextEncode -> text
42 | ksampler_nodes = []
43 | for node_id, node in prompt.items():
44 | class_type = node.get('class_type', '')
45 | if 'KSampler' in class_type or 'SamplerCustom' in class_type:
46 | ksampler_nodes.append(node)
47 |
48 | for ksampler in ksampler_nodes:
49 | inputs = ksampler.get('inputs', {})
50 | if 'positive' in inputs:
51 | positive_link = inputs['positive']
52 | if isinstance(positive_link, list): # It's a link [node_id, slot_index]
53 | positive_node_id = str(positive_link[0])
54 | if positive_node_id in prompt:
55 | positive_node = prompt[positive_node_id]
56 | if positive_node.get('class_type') == 'CLIPTextEncode':
57 | positive_prompt = positive_node.get('inputs', {}).get('text', "")
58 | break # Found it
59 |
60 | # Fallback: Look for any CLIPTextEncode with "positive" in title/meta if not found via KSampler
61 | if not positive_prompt:
62 | candidates = []
63 | for node_id, node in prompt.items():
64 | if node.get('class_type') == 'CLIPTextEncode':
65 | title = node.get('_meta', {}).get('title', '').lower()
66 | text = node.get('inputs', {}).get('text', "")
67 | if 'positive' in title and 'negative' not in title:
68 | candidates.append(text)
69 | # Also consider just long text if no clear title match
70 | elif len(text) > 50:
71 | candidates.append(text)
72 |
73 | # Pick the longest candidate if any found
74 | if candidates:
75 | positive_prompt = max(candidates, key=len)
76 |
77 | # 2. Find Width/Height
78 | # Strategy: Find EmptyLatentImage
79 | for node_id, node in prompt.items():
80 | if node.get('class_type') == 'EmptyLatentImage':
81 | width = node.get('inputs', {}).get('width', 0)
82 | height = node.get('inputs', {}).get('height', 0)
83 | break
84 |
85 | # Fallback: Look for width/height in any node if still 0
86 | if width == 0 or height == 0:
87 | for node_id, node in prompt.items():
88 | inputs = node.get('inputs', {})
89 | if 'width' in inputs and 'height' in inputs:
90 | width = inputs['width']
91 | height = inputs['height']
92 | break
93 |
94 | except Exception as e:
95 | print(f"Error parsing metadata: {e}")
96 |
97 | return (output_image, positive_prompt, width, height, image)
98 |
99 | # Node registration
100 | NODE_CLASS_MAPPINGS = {
101 | "ImageMetadataExtractor": ImageMetadataExtractor
102 | }
103 |
104 | NODE_DISPLAY_NAME_MAPPINGS = {
105 | "ImageMetadataExtractor": "Load Image ErosDiffusion"
106 | }
107 |
--------------------------------------------------------------------------------
/trash/batch_test_results.txt:
--------------------------------------------------------------------------------
1 | ================================================================================
2 | Image: ComfyUI-zit_00006_.png
3 | Dimensions: 0x0
4 | Prompt:
5 | 胶片摄影,镜头语言,淡彩,暗调,氛围低光,个性视角,身穿朝鲜族民族服饰的少女,灵动俏皮,朦胧梦幻,高颜值,既视感,现场感,情绪氛围感拉满,透视感,暗朦,光朦,泛白,褪色,漏光,粒子高噪点,胶片颗粒质感,层次丰富,写意,朦胧美学,光的美学,lomo效果,超现实,高级感,杰作,
6 | pornmaster bukkake, white cum on her face, white cum on her hair, White cum covered her hair, white cum covered her face. White cum covered her clothes
7 | 濃稠半透明精液從她的臉滴落,液體拉絲滴落,
8 |
9 | ================================================================================
10 | Image: ComfyUI_00030_.png
11 | Dimensions: 1792x1120
12 | Prompt:
13 | A low-angle cinematic shot in a dense bamboo forest at golden hour, sunbeams filtering diagonally through the tall green bamboo canopy to cast sharp, dramatic shadows on the moss-covered ground. A young East Asian swordswoman in her mid-twenties, with long flowing black hair escaping her topknot, wears a traditional red silk martial arts robe edged with gold thread embroidery; her expression is intensely focused, muscles tensed in her arms and legs. She is captured mid-action during a horizontal sword slash: body twisted dynamically forward, left foot planted firmly on emerald moss, right arm fully extended holding a polished steel jian sword with a black lacquered hilt, the blade angled sharply downward. Bamboo stalks surround her, their smooth green surfaces textured with vertical grooves and dew drops catching the light; fallen bamboo leaves scatter the forest floor, partially covered in soft velvety moss. Mist drifts faintly near the ground, with background bamboo softly blurred to create depth. Color palette emphasizes vibrant jade greens of the bamboo, warm amber sunlight, and the rich crimson of the robe, accented by the metallic gleam of the sword. Highly detailed textures include the woven silk fabric rippling with movement, the sword's reflective edge, and the rough bark of nearby bamboo trunks.
14 |
15 | ================================================================================
16 | Image: Comfy Edit ZimageVs_00004_.png
17 | Dimensions: 0x0
18 | Prompt:
19 |
20 |
21 | ================================================================================
22 | Image: ComfyUI_00041_.png
23 | Dimensions: 0x0
24 | Prompt:
25 |
26 |
27 | ================================================================================
28 | Image: ComfyUI-zit_00060_.png
29 | Dimensions: 0x0
30 | Prompt:
31 | masterpiece, best quality, photo realistic, 8k, a cybernetic woman with flowing nanotech ink tattoos animating across her skin, glossy black fluid moving like circuitry, sleek tech-editorial mood , hyperdetailed, dramatic lighting, cinematic shot, ultra detailed, intricate details, cinematic, photorealistic, masterpiece
32 |
33 | ================================================================================
34 | Image: ComfyUI_00008_.png
35 | Dimensions: 512x512
36 | Prompt:
37 | ['7', 0]
38 |
39 | ================================================================================
40 | Image: ComfyUI-sqnd-multi-flux_00013_.png
41 | Dimensions: 0x0
42 | Prompt:
43 |
44 |
45 | ================================================================================
46 | Image: ComfyUI-sqnd-multi-zimage_00013_.png
47 | Dimensions: 0x0
48 | Prompt:
49 |
50 |
51 | ================================================================================
52 | Image: ComfyUI-zit_00028_.png
53 | Dimensions: 0x0
54 | Prompt:
55 | A close-up, explicit portrait of a 25-year-old beautiful woman in an ancient Egyptian royal sleep chamber at night. She has long black hair and a curvy figure with saggy, hanging breasts, visible nipples, and natural pubic hair. She wears elaborate golden body jewelry, including an underbra, armlets, thighlets, and a crotchless thong. The scene is captured from multiple angles - from behind, straight on, and from below - with dramatic foreshortening, focusing on her buttocks and visible labia and clitoral hood. The atmosphere is intimate, lit by candlelight with a warm, dim glow, casting soft shadows across her body and the silk bed she rests on. detailed labia, clitoral hood, visible pussy, from below, from behind
56 |
57 | ================================================================================
58 | Image: ComfyUI_00050_.png
59 | Dimensions: 1024x1024
60 | Prompt:
61 | tranin passing, anime style, 4k ultra resolution, flat shading.
62 |
63 | ================================================================================
64 | Image: ComfyUI-FlowmatchEuler-Simple_00004_.png
65 | Dimensions: 1280x960
66 | Prompt:
67 | A low-angle cinematic shot in a dense bamboo forest at golden hour, sunbeams filtering diagonally through the tall green bamboo canopy to cast sharp, dramatic shadows on the moss-covered ground. A young East Asian swordswoman in her mid-twenties, with long flowing black hair escaping her topknot, wears a traditional red silk martial arts robe edged with gold thread embroidery; her expression is intensely focused, muscles tensed in her arms and legs. She is captured mid-action during a horizontal sword slash: body twisted dynamically forward, left foot planted firmly on emerald moss, right arm fully extended holding a polished steel jian sword with a black lacquered hilt, the blade angled sharply downward. Bamboo stalks surround her, their smooth green surfaces textured with vertical grooves and dew drops catching the light; fallen bamboo leaves scatter the forest floor, partially covered in soft velvety moss. Mist drifts faintly near the ground, with background bamboo softly blurred to create depth. Color palette emphasizes vibrant jade greens of the bamboo, warm amber sunlight, and the rich crimson of the robe, accented by the metallic gleam of the sword. Highly detailed textures include the woven silk fabric rippling with movement, the sword's reflective edge, and the rough bark of nearby bamboo trunks.
68 |
69 | ================================================================================
70 | Image: ComfyUI-zit_00042_.png
71 | Dimensions: 0x0
72 | Prompt:
73 | 镜头从高处拍摄,在一片宁静花园的斑驳光线下,一位纤细的女子优雅地坐在一张磨损的石凳上,藤蔓和花朵环绕四周。她的身形纤细,但胸部巨大且圆润,胸部远远大于角色的头部,超巨乳,自然地向下垂。她微微前倾,双手叠放在膝上,嘴角带着淡淡的微笑,头微微倾向阳光。光线柔和地温暖地照在她裸露的肌肤上,勾勒出她身体的每一处曲线和轻柔的重力拉伸。镜头从女子侧面拍摄,
74 |
75 | ================================================================================
76 | Image: ComfyUI-zit_00032_.png
77 | Dimensions: 0x0
78 | Prompt:
79 | a lomo photograph of a striking portrait of a naked woman with a detailed dragon tattoo on her back standing in front of a window, with her hand on the window sill, facing away from the camera but looking back, enveloped in the shadows of a dark room with an ethereal red glow cast from a neon light outside at night. her long wavy dark purple hair is tied back in a pony tail. her back is arched accentuating her equisite hour glass figure. the neon-lit sign and night time cityscape outside the window casts a red hue over the inside of the dimly lit apartment illuminating her back to reveal the dragon tattoo. the photograph has large dark vignetting and was shot on 35mm film with visible film grain and color splashing throughout the frame. while the woman is sharply in focus, the edges of the composition are soft, with a shallow depth of field, excellent bokeh. the film frame border can be seen in the image. the photograph was shot with a canon f1 using 800 iso film. dramatic cinematic lighting. the neon sign has chinese characters. light leaks and film borders visible. 4ft3rd4rk
80 |
81 | ================================================================================
82 | Image: ComfyUI_00049_.png
83 | Dimensions: 768x1280
84 | Prompt:
85 | tranin passing, anime style, 4k ultra resolution, flat shading.
86 |
87 | ================================================================================
88 | Image: ComfyUI-zit_00077_.png
89 | Dimensions: 0x0
90 | Prompt:
91 | masterpiece, best quality, photo realistic, 8k, a woman wearing a sculptural translucent mask carved from pure lightbeams, refracting prismatic colors across her face, futuristic beauty campaign energy , hyperdetailed, dramatic lighting, cinematic shot, ultra detailed, intricate details, cinematic, photorealistic, masterpiece
92 |
93 | ================================================================================
94 | Image: ComfyUI_00046_.png
95 | Dimensions: 0x0
96 | Prompt:
97 | Gothic Glamour. "Back to the Future" Delorean car rushes through the mysterious night forest through the fog glowing in the moonlight.High detail, 10-bit color rendering, large-scale image.Half-turned to the viewer,action pose.Cinematic realism, high contrast, surround light, exceptional detail, 8k,. on the plate the text "F.M.E.D.S". a partly visible indication on a wooden signe reads "Eros Diffusion" with an arrow pointing backwards to wards the car. the driver is just a shadow inside the car and not well lit.
98 |
99 | ================================================================================
100 | Image: ComfyUI-zit_00056_.png
101 | Dimensions: 0x0
102 | Prompt:
103 | masterpiece, best quality, photo realistic, 8k, a serene Japanese onsen scene with an otaku-styled woman relaxing in steaming mineral water, soft lantern light reflecting off wooden bath walls, subtle anime-inspired accessories, gentle mist rising around her, cherry blossoms drifting in the air, tranquil mountain backdrop, elegant editorial composition , hyperdetailed, dramatic lighting, cinematic shot, ultra detailed, intricate details, cinematic, photorealistic, masterpiece
104 |
105 | ================================================================================
106 | Image: ComfyUI-zit_00026_.png
107 | Dimensions: 0x0
108 | Prompt:
109 | You are an assistant... Hyper-realistic cinematic shot of a nude bio-mechanical Asian woman m3tsumi1, her silicon skin is completely revealed. She sits leaning against a crumbling stucco wall, its texture rough and weathered. The wall is overtaken by nature, with creeping vines, vibrant wildflowers, and dense foliage bursting through cracks. The scene is set centuries after a devastating post-apocalyptic battle. The woman's exposed silicon parts show signs of wounds, and battle damage, with subtle LED lights flickering weakly in her circuitry. Dirt and grime coat her form, emphasizing the passage of time. Shafts of golden sunlight filter through the overgrown canopy above, casting dappled shadows across the scene. The atmosphere is one of eerie beauty and abandoned technology reclaimed by nature. Ultra-detailed textures, dramatic lighting, and a muted color palette dominated by earth tones and metallic hues. 8K resolution, photorealistic rendering, cinematic composition.
110 |
111 | ================================================================================
112 | Image: \
113 | Dimensions: 768x1280
114 | Prompt:
115 |
116 |
117 | ================================================================================
118 | Image: ComfyUI_00052_.png
119 | Dimensions: 1024x1024
120 | Prompt:
121 | tranin passing, anime style, 4k ultra resolution, flat shading.
122 |
123 |
--------------------------------------------------------------------------------
/flash_attention_node.py:
--------------------------------------------------------------------------------
1 | """
2 | ComfyUI Custom Node: Patch Flash Attention 2
3 |
4 | This node patches ComfyUI models to use Flash Attention 2 as the attention backend.
5 | Flash Attention 2 provides optimized attention computation for compatible GPUs.
6 |
7 | Based on the pattern from ComfyUI-KJNodes model optimization nodes.
8 | """
9 |
10 | import torch
11 | import logging
12 |
13 | logger = logging.getLogger(__name__)
14 |
15 | class PatchFlashAttention:
16 | """
17 | Patches a model to use Flash Attention 2 as the attention backend.
18 |
19 | Flash Attention 2 provides memory-efficient and faster attention computation
20 | for NVIDIA GPUs with Ampere, Ada, or Hopper architectures (RTX 30xx, 40xx, etc.).
21 | """
22 |
23 | @classmethod
24 | def INPUT_TYPES(cls):
25 | return {
26 | "required": {
27 | "model": ("MODEL",),
28 | "enabled": ("BOOLEAN", {
29 | "default": True,
30 | "tooltip": "Enable or disable Flash Attention 2. When enabled, uses optimized attention kernels."
31 | }),
32 | },
33 | "optional": {
34 | "softmax_scale": ("FLOAT", {
35 | "default": 0.0,
36 | "min": 0.0,
37 | "max": 10.0,
38 | "step": 0.01,
39 | "tooltip": "Softmax scale factor. Set to 0 for automatic scaling (1/sqrt(d)). Higher values increase attention sharpness."
40 | }),
41 | "causal": ("BOOLEAN", {
42 | "default": False,
43 | "tooltip": "Use causal masking (for autoregressive models). Usually False for diffusion models."
44 | }),
45 | "window_size": ("INT", {
46 | "default": -1,
47 | "min": -1,
48 | "max": 8192,
49 | "tooltip": "Local attention window size. -1 for full attention. Positive values enable sliding window attention."
50 | }),
51 | "deterministic": ("BOOLEAN", {
52 | "default": False,
53 | "tooltip": "Use deterministic implementation. May be slower but gives reproducible results."
54 | }),
55 | "debug": (["disabled", "enabled"], {
56 | "default": "disabled",
57 | "tooltip": "Enable verbose debug logging to console. Use 'enabled' to see detailed Flash Attention status messages."
58 | }),
59 | }
60 | }
61 |
62 | RETURN_TYPES = ("MODEL", "STRING",)
63 | RETURN_NAMES = ("model", "status",)
64 | FUNCTION = "patch"
65 | CATEGORY = "model_patches/attention"
66 | DESCRIPTION = "Patches model to use Flash Attention 2 for optimized attention computation. Requires flash_attn library installed and compatible GPU."
67 |
68 | def patch(self, model, enabled, softmax_scale=0.0, causal=False, window_size=-1, deterministic=False, debug="disabled"):
69 | # Clone the model to avoid modifying the original
70 | model_clone = model.clone()
71 | debug_enabled = (debug == "enabled")
72 |
73 | if not enabled:
74 | status_msg = "⚠️ Flash Attention 2 DISABLED - Using standard attention"
75 | if debug_enabled:
76 | print(f"\n{'='*60}")
77 | print(f"[Flash Attention 2] {status_msg}")
78 | print(f"{'='*60}\n")
79 | logger.info("Flash Attention 2 is disabled, returning original model")
80 | return (model_clone, status_msg)
81 |
82 | # Check if flash_attn is available
83 | try:
84 | from flash_attn import flash_attn_func, flash_attn_varlen_func
85 | flash_attn_available = True
86 | if debug_enabled:
87 | print(f"\n{'='*60}")
88 | print(f"[Flash Attention 2] ✅ Library found and loaded successfully!")
89 | print(f"{'='*60}")
90 | except ImportError as e:
91 | status_msg = "❌ FAILED: flash_attn library not installed"
92 | # Always show critical errors even without debug
93 | print(f"\n{'='*60}")
94 | print(f"[Flash Attention 2] {status_msg}")
95 | print(f"[Flash Attention 2] Install with: pip install flash-attn")
96 | if debug_enabled:
97 | print(f"[Flash Attention 2] Error: {e}")
98 | print(f"{'='*60}\n")
99 | logger.warning(
100 | "flash_attn library not found. Flash Attention 2 cannot be enabled. "
101 | "Install with: pip install flash-attn"
102 | )
103 | return (model_clone, status_msg)
104 |
105 | # Prepare configuration for flash attention
106 | flash_config = {
107 | "softmax_scale": softmax_scale if softmax_scale > 0 else None,
108 | "causal": causal,
109 | "window_size": (-1, -1) if window_size == -1 else (window_size, window_size),
110 | "deterministic": deterministic,
111 | }
112 |
113 | # Create the patching function
114 | def flash_attention_forward(q, k, v, extra_options=None):
115 | """
116 | Flash Attention 2 forward function.
117 |
118 | Args:
119 | q: Query tensor [batch, seq_len, num_heads, head_dim]
120 | k: Key tensor [batch, seq_len, num_heads, head_dim]
121 | v: Value tensor [batch, seq_len, num_heads, head_dim]
122 | extra_options: Additional options (mask, etc.)
123 |
124 | Returns:
125 | Attention output tensor
126 | """
127 | # Log first few calls to verify Flash Attention is being used (only in debug mode)
128 | if flash_attention_forward._debug_enabled:
129 | flash_attention_forward._call_count += 1
130 | if flash_attention_forward._call_count <= 2:
131 | print(f"[Flash Attention 2] 🔄 Attention call #{flash_attention_forward._call_count} - Using Flash Attention kernel")
132 |
133 | # Flash attention expects input shape: [batch, seq_len, num_heads, head_dim]
134 | # Ensure tensors are contiguous and in the right format
135 | q = q.contiguous()
136 | k = k.contiguous()
137 | v = v.contiguous()
138 |
139 | # Get softmax scale
140 | scale = flash_config["softmax_scale"]
141 | if scale is None:
142 | head_dim = q.shape[-1]
143 | scale = 1.0 / (head_dim ** 0.5)
144 |
145 | # Apply flash attention
146 | try:
147 | output = flash_attn_func(
148 | q, k, v,
149 | softmax_scale=scale,
150 | causal=flash_config["causal"],
151 | window_size=flash_config["window_size"],
152 | deterministic=flash_config["deterministic"],
153 | )
154 | return output
155 | except Exception as e:
156 | error_msg = f"Flash Attention 2 runtime error: {e}. Falling back to standard attention."
157 | # Always show runtime errors even without debug
158 | print(f"\n[Flash Attention 2] ⚠️ {error_msg}\n")
159 | logger.error(error_msg)
160 | # Fallback to standard attention
161 | return torch.nn.functional.scaled_dot_product_attention(
162 | q.transpose(1, 2),
163 | k.transpose(1, 2),
164 | v.transpose(1, 2),
165 | scale=scale
166 | ).transpose(1, 2)
167 |
168 | # Initialize function attributes after definition
169 | flash_attention_forward._call_count = 0
170 | flash_attention_forward._debug_enabled = debug_enabled
171 |
172 | # Apply the patch to the model
173 | # We need to patch all attention blocks in the model
174 | try:
175 | # Get the model's attention block configuration
176 | # We'll patch both input and output blocks
177 | patched_count = 0
178 |
179 | # Patch input blocks
180 | if hasattr(model_clone.model, 'diffusion_model') and hasattr(model_clone.model.diffusion_model, 'input_blocks'):
181 | num_input_blocks = len(model_clone.model.diffusion_model.input_blocks)
182 | for i in range(num_input_blocks):
183 | model_clone.set_model_attn2_replace(flash_attention_forward, "input", i)
184 | patched_count += 1
185 |
186 | # Patch middle block
187 | if hasattr(model_clone.model, 'diffusion_model') and hasattr(model_clone.model.diffusion_model, 'middle_block'):
188 | model_clone.set_model_attn2_replace(flash_attention_forward, "middle", 0)
189 | patched_count += 1
190 |
191 | # Patch output blocks
192 | if hasattr(model_clone.model, 'diffusion_model') and hasattr(model_clone.model.diffusion_model, 'output_blocks'):
193 | num_output_blocks = len(model_clone.model.diffusion_model.output_blocks)
194 | for i in range(num_output_blocks):
195 | model_clone.set_model_attn2_replace(flash_attention_forward, "output", i)
196 | patched_count += 1
197 |
198 | status_msg = (
199 | f"✅ Flash Attention 2 ENABLED\n"
200 | f" • Patched blocks: {patched_count}\n"
201 | f" • Softmax scale: {flash_config['softmax_scale'] or 'auto (1/√d)'}\n"
202 | f" • Causal: {flash_config['causal']}\n"
203 | f" • Window size: {flash_config['window_size']}\n"
204 | f" • Deterministic: {flash_config['deterministic']}"
205 | )
206 |
207 | if debug_enabled:
208 | print(f"\n{'='*60}")
209 | print(f"[Flash Attention 2] ✅ SUCCESSFULLY PATCHED MODEL")
210 | print(f"{'='*60}")
211 | print(f" Patched {patched_count} attention blocks")
212 | print(f" Configuration:")
213 | print(f" Softmax scale: {flash_config['softmax_scale'] or 'auto (1/√d)'}")
214 | print(f" Causal: {flash_config['causal']}")
215 | print(f" Window size: {flash_config['window_size']}")
216 | print(f" Deterministic: {flash_config['deterministic']}")
217 | print(f"{'='*60}")
218 | print(f" 💡 Watch for any attention-related errors during inference.")
219 | print(f" 💡 If errors occur, the node will auto-fallback to standard attention.")
220 | print(f" 💡 Compatible GPUs: RTX 30xx/40xx, A100, H100")
221 | print(f"{'='*60}\n")
222 |
223 | logger.info(
224 | f"Flash Attention 2 patched successfully: {patched_count} blocks with config: "
225 | f"softmax_scale={flash_config['softmax_scale']}, "
226 | f"causal={flash_config['causal']}, "
227 | f"window_size={flash_config['window_size']}, "
228 | f"deterministic={flash_config['deterministic']}"
229 | )
230 | except Exception as e:
231 | status_msg = f"❌ FAILED: Could not patch model - {str(e)}"
232 | # Always show critical patching errors
233 | print(f"\n{'='*60}")
234 | print(f"[Flash Attention 2] {status_msg}")
235 | print(f"{'='*60}\n")
236 | logger.error(f"Failed to patch model with Flash Attention 2: {e}")
237 | logger.info("Returning unpatched model")
238 |
239 | return (model_clone, status_msg)
240 |
241 |
242 | # Node registration for ComfyUI
243 | NODE_CLASS_MAPPINGS = {
244 | "PatchFlashAttention": PatchFlashAttention
245 | }
246 |
247 | NODE_DISPLAY_NAME_MAPPINGS = {
248 | "PatchFlashAttention": "Patch Flash Attention 2"
249 | }
250 |
--------------------------------------------------------------------------------
/nunchaku_compat.py:
--------------------------------------------------------------------------------
1 | # Nunchaku Qwen Direct Model Patcher
2 | #
3 | # This module monkey-patches Nunchaku models at runtime to fix dimension mismatches.
4 | # It installs a wrapper around model.apply_model that detects and fixes tensor shapes.
5 |
6 | import torch
7 | import logging
8 |
9 | logger = logging.getLogger(__name__)
10 |
11 | _original_module_call = None
12 | _original_tiled_call = None
13 | _patch_applied = False
14 |
15 | def is_nunchaku_qwen_model(model):
16 | """Detect if the model is a Nunchaku Qwen model"""
17 | try:
18 | if hasattr(model, 'diffusion_model'):
19 | dm = model.diffusion_model
20 | if hasattr(dm, 'txt_norm') and hasattr(dm.txt_norm, 'normalized_shape'):
21 | return True
22 | return False
23 | except Exception:
24 | return False
25 |
26 |
27 | def get_expected_txt_dim(model):
28 | """Get the expected text encoder dimension from txt_norm"""
29 | try:
30 | if hasattr(model, 'diffusion_model'):
31 | dm = model.diffusion_model
32 | if hasattr(dm, 'txt_norm') and hasattr(dm.txt_norm, 'normalized_shape'):
33 | return dm.txt_norm.normalized_shape[0]
34 | return None
35 | except Exception:
36 | return None
37 |
38 |
39 | def patched_apply_model(original_func):
40 | """Wrapper for model.apply_model that fixes dimension mismatches"""
41 | projection_cache = {}
42 |
43 | def wrapper(self, *args, **kwargs):
44 | if not is_nunchaku_qwen_model(self):
45 | return original_func(self, *args, **kwargs)
46 |
47 | expected_dim = get_expected_txt_dim(self)
48 | if expected_dim is None:
49 | return original_func(self, *args, **kwargs)
50 |
51 | context = kwargs.get('context', None)
52 |
53 | if context is not None and context.shape[-1] != expected_dim:
54 | actual_dim = context.shape[-1]
55 | print(f"[Nunchaku Compat] Fixing dimension: {actual_dim} -> {expected_dim}")
56 |
57 | cache_key = (actual_dim, expected_dim, context.device, context.dtype)
58 |
59 | if cache_key not in projection_cache:
60 | projection = torch.nn.Linear(actual_dim, expected_dim, bias=False,
61 | device=context.device, dtype=context.dtype)
62 | with torch.no_grad():
63 | if actual_dim > expected_dim:
64 | projection.weight.data = torch.eye(expected_dim, actual_dim,
65 | device=context.device, dtype=context.dtype)
66 | else:
67 | projection.weight.data = torch.zeros(expected_dim, actual_dim,
68 | device=context.device, dtype=context.dtype)
69 | projection.weight.data[:actual_dim, :] = torch.eye(actual_dim,
70 | device=context.device, dtype=context.dtype)
71 | projection_cache[cache_key] = projection
72 |
73 | context = projection_cache[cache_key](context)
74 | kwargs['context'] = context
75 |
76 | return original_func(self, *args, **kwargs)
77 |
78 | return wrapper
79 |
80 |
81 | def patch_diffusion_model_forward(original_forward):
82 | """Wrap diffusion_model's forward/__call__ to fix encoder_hidden_states"""
83 | projection_cache = {}
84 |
85 | def wrapper(self, *args, **kwargs):
86 | # Check if this is a Nunchaku model by looking for txt_norm
87 | if not (hasattr(self, 'txt_norm') and hasattr(self.txt_norm, 'normalized_shape')):
88 | return original_forward(self, *args, **kwargs)
89 |
90 | expected_dim = self.txt_norm.normalized_shape[0]
91 |
92 | # Extract encoder_hidden_states from kwargs
93 | # The diffusion model is called with: diffusion_model(xc, t, context=context, ...)
94 | # which maps to: forward(hidden_states, encoder_hidden_states, ...)
95 | # So 'context' kwarg becomes encoder_hidden_states parameter
96 |
97 | encoder_hidden_states = None
98 | param_name = None
99 |
100 | # Try common parameter names
101 | for name in ['context', 'encoder_hidden_states', 'text_embeds']:
102 | if name in kwargs:
103 | encoder_hidden_states = kwargs[name]
104 | param_name = name
105 | break
106 |
107 | # If not in kwargs, it might be in args (positional)
108 | # Typical signature: forward(self, hidden_states, encoder_hidden_states, timestep, ...)
109 | if encoder_hidden_states is None and len(args) > 1:
110 | # args[0] = hidden_states, args[1] = encoder_hidden_states
111 | if isinstance(args[1], torch.Tensor) and len(args[1].shape) >= 2:
112 | encoder_hidden_states = args[1]
113 | param_name = 'args[1]'
114 |
115 | # Check and fix dimension mismatch
116 | if encoder_hidden_states is not None and isinstance(encoder_hidden_states, torch.Tensor):
117 | # encoder_hidden_states should be shape [batch, seq_len, dim]
118 | if len(encoder_hidden_states.shape) >= 2:
119 | actual_dim = encoder_hidden_states.shape[-1]
120 |
121 | if actual_dim != expected_dim:
122 | print(f"[Nunchaku Compat] Fixing {param_name}: shape={list(encoder_hidden_states.shape)}, {actual_dim} -> {expected_dim}")
123 |
124 | cache_key = (actual_dim, expected_dim, encoder_hidden_states.device, encoder_hidden_states.dtype)
125 |
126 | if cache_key not in projection_cache:
127 | projection = torch.nn.Linear(actual_dim, expected_dim, bias=False,
128 | device=encoder_hidden_states.device,
129 | dtype=encoder_hidden_states.dtype)
130 | with torch.no_grad():
131 | if actual_dim > expected_dim:
132 | projection.weight.data = torch.eye(expected_dim, actual_dim,
133 | device=encoder_hidden_states.device,
134 | dtype=encoder_hidden_states.dtype)
135 | projection_cache[cache_key] = projection
136 | print(f"[Nunchaku Compat] Created projection layer {actual_dim}->{expected_dim}")
137 |
138 | encoder_hidden_states = projection_cache[cache_key](encoder_hidden_states)
139 |
140 | # Update the parameter
141 | if param_name in kwargs:
142 | kwargs[param_name] = encoder_hidden_states
143 | elif param_name == 'args[1]':
144 | args = list(args)
145 | args[1] = encoder_hidden_states
146 | args = tuple(args)
147 |
148 | return original_forward(self, *args, **kwargs)
149 |
150 | return wrapper
151 |
152 |
153 | # Global variables to store original functions
154 | _original_module_call = None
155 | _original_tiled_call = None
156 |
157 | def apply_nunchaku_patches():
158 | """Apply monkey patches to fix Nunchaku compatibility issues"""
159 | global _patch_applied, _original_module_call, _original_tiled_call
160 |
161 | if _patch_applied:
162 | print("[Nunchaku Compat] Patches already applied")
163 | return
164 |
165 | try:
166 | # We need to patch at the diffusion_model level, not model.apply_model
167 | # The patch will be applied when models are loaded
168 | import torch.nn as nn
169 |
170 | # Store original if not already stored
171 | if _original_module_call is None:
172 | _original_module_call = nn.Module.__call__
173 |
174 | # Patch torch.nn.Module's __call__ for modules that have txt_norm
175 | # This is tricky - we'll patch specific Nunchaku model classes when we detect them
176 |
177 | def patched_module_call(self, *args, **kwargs):
178 | # ONLY patch Nunchaku diffusion models - very specific detection
179 | # Must have all three: txt_norm, txt_in, img_in (unique to Nunchaku Qwen)
180 | is_nunchaku_diffusion = (
181 | hasattr(self, 'txt_norm') and
182 | hasattr(self, 'txt_in') and
183 | hasattr(self, 'img_in') and
184 | hasattr(self, 'transformer_blocks') # Extra check to ensure it's the diffusion model
185 | )
186 |
187 | if is_nunchaku_diffusion:
188 | # This is a NunchakuQwenImageTransformer2DModel
189 | if not hasattr(self, '_nunchaku_patched'):
190 | print(f"[Nunchaku Compat] Detected and patching Nunchaku diffusion model: {type(self).__name__}")
191 | self._nunchaku_patched = True
192 |
193 | return patch_diffusion_model_forward(_original_module_call)(self, *args, **kwargs)
194 |
195 | # For all other modules (VAE, etc.), use original __call__ without modification
196 | return _original_module_call(self, *args, **kwargs)
197 |
198 | nn.Module.__call__ = patched_module_call
199 |
200 | # Also patch TiledDiffusion if present
201 | try:
202 | import sys
203 | if 'ComfyUI-TiledDiffusion.tiled_diffusion' in sys.modules:
204 | tiled_diff = sys.modules['ComfyUI-TiledDiffusion.tiled_diffusion']
205 | if hasattr(tiled_diff, 'TiledDiffusion'):
206 | if _original_tiled_call is None:
207 | _original_tiled_call = tiled_diff.TiledDiffusion.__call__
208 |
209 | def patched_tiled_call(self, model_function, kwargs):
210 | """Wrap TiledDiffusion to handle 5D tensors from Qwen Image models"""
211 | x_in = kwargs.get('input', None)
212 |
213 | # Check if we have a 5D tensor
214 | if x_in is not None and len(x_in.shape) == 5:
215 | # Shape is [N, C, F, H, W], squeeze F dimension if it's 1
216 | N, C, F, H, W = x_in.shape
217 |
218 | if F == 1:
219 | print(f"[Nunchaku Compat] TiledDiffusion: Squeezing 5D tensor {list(x_in.shape)} -> 4D")
220 | kwargs['input'] = x_in.squeeze(2) # Remove F dimension
221 |
222 | # Call original with 4D tensor
223 | result = _original_tiled_call(self, model_function, kwargs)
224 |
225 | # Restore 5D shape if result is 4D
226 | if isinstance(result, torch.Tensor) and len(result.shape) == 4:
227 | result = result.unsqueeze(2) # Add F dimension back
228 | print(f"[Nunchaku Compat] TiledDiffusion: Restored to 5D shape {list(result.shape)}")
229 |
230 | return result
231 | else:
232 | print(f"[Nunchaku Compat] TiledDiffusion: Warning - 5D tensor with F={F} (not 1), cannot safely squeeze")
233 |
234 | return _original_tiled_call(self, model_function, kwargs)
235 |
236 | tiled_diff.TiledDiffusion.__call__ = patched_tiled_call
237 | print("[Nunchaku Compat] Successfully patched TiledDiffusion for 5D tensor support")
238 | except Exception as e:
239 | print(f"[Nunchaku Compat] Could not patch TiledDiffusion (not installed or incompatible): {e}")
240 |
241 | print("[Nunchaku Compat] Successfully installed Nunchaku compatibility patches")
242 | _patch_applied = True
243 |
244 | except Exception as e:
245 | print(f"[Nunchaku Compat] Error applying patches: {e}")
246 | import traceback
247 | traceback.print_exc()
248 |
249 |
250 | def remove_nunchaku_patches():
251 | """Remove Nunchaku compatibility patches"""
252 | global _patch_applied, _original_module_call, _original_tiled_call
253 |
254 | if not _patch_applied:
255 | print("[Nunchaku Compat] Patches not applied, nothing to remove")
256 | return
257 |
258 | try:
259 | import torch.nn as nn
260 |
261 | # Restore nn.Module.__call__
262 | if _original_module_call is not None:
263 | nn.Module.__call__ = _original_module_call
264 | print("[Nunchaku Compat] Restored original nn.Module.__call__")
265 |
266 | # Restore TiledDiffusion.__call__
267 | try:
268 | import sys
269 | if 'ComfyUI-TiledDiffusion.tiled_diffusion' in sys.modules:
270 | tiled_diff = sys.modules['ComfyUI-TiledDiffusion.tiled_diffusion']
271 | if hasattr(tiled_diff, 'TiledDiffusion') and _original_tiled_call is not None:
272 | tiled_diff.TiledDiffusion.__call__ = _original_tiled_call
273 | print("[Nunchaku Compat] Restored original TiledDiffusion.__call__")
274 | except Exception as e:
275 | print(f"[Nunchaku Compat] Error restoring TiledDiffusion: {e}")
276 |
277 | _patch_applied = False
278 | print("[Nunchaku Compat] Successfully removed Nunchaku compatibility patches")
279 |
280 | except Exception as e:
281 | print(f"[Nunchaku Compat] Error removing patches: {e}")
282 | import traceback
283 | traceback.print_exc()
284 |
285 |
286 | class NunchakuQwenPatches:
287 | @classmethod
288 | def INPUT_TYPES(s):
289 | return {
290 | "required": {
291 | "mode": (["enable", "disable"], {"default": "enable"}),
292 | },
293 | "optional": {
294 | "model": ("MODEL",),
295 | "image": ("IMAGE",),
296 | }
297 | }
298 |
299 | RETURN_TYPES = ("MODEL", "IMAGE",)
300 | RETURN_NAMES = ("model", "image",)
301 | FUNCTION = "execute"
302 | CATEGORY = "utils"
303 |
304 | def execute(self, mode, model=None, image=None):
305 | if mode == "enable":
306 | apply_nunchaku_patches()
307 | else:
308 | remove_nunchaku_patches()
309 | return (model, image)
310 |
311 | NODE_CLASS_MAPPINGS = {
312 | "NunchakuQwenPatches": NunchakuQwenPatches
313 | }
314 |
315 | NODE_DISPLAY_NAME_MAPPINGS = {
316 | "NunchakuQwenPatches": "Nunchaku Qwen Patches"
317 | }
318 |
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
1 | # ComfyUI custom node for FlowMatch Euler Scheduler
2 | #
3 | # This node creates a FlowMatchEulerDiscreteScheduler with configurable parameters
4 | # so it can be used with compatible sampler nodes.
5 | #
6 | # Also registers the scheduler in ComfyUI's scheduler list with default config.
7 | #
8 | # Place this file into: ComfyUI/custom_nodes/
9 | # Then restart ComfyUI. It will show up as "FlowMatch Euler Discrete Scheduler (Custom)"
10 |
11 | import math
12 | import torch
13 | import numpy as np # <-- Required for robust slicing of PyTorch tensors
14 |
15 | try:
16 | from diffusers.schedulers.scheduling_flow_match_euler_discrete import FlowMatchEulerDiscreteScheduler
17 | except ImportError as e:
18 | print("=" * 80)
19 | print("ERROR: Failed to import FlowMatchEulerDiscreteScheduler from diffusers")
20 | print("Please ensure dependencies are installed by running:")
21 | print(" pip install -r requirements.txt")
22 | print("=" * 80)
23 | raise ImportError(
24 | "FlowMatchEulerDiscreteScheduler not found. "
25 | "Please install dependencies from requirements.txt"
26 | ) from e
27 |
28 | try:
29 | from diffusers import VQDiffusionScheduler
30 | except ImportError:
31 | VQDiffusionScheduler = None
32 | print("[FlowMatch Scheduler] Warning: VQDiffusionScheduler not found in diffusers.")
33 |
34 | from comfy.samplers import SchedulerHandler, SCHEDULER_HANDLERS, SCHEDULER_NAMES
35 |
36 | # Import Nunchaku compatibility patches (auto-applies on import)
37 | try:
38 | from . import nunchaku_compat
39 | except Exception as e:
40 | print(f"[FlowMatch Scheduler] Warning: Could not load Nunchaku compatibility: {e}")
41 |
42 |
43 | # Default config for registering in ComfyUI
44 | default_config = {
45 | "base_image_seq_len": 256,
46 | "base_shift": math.log(3),
47 | "invert_sigmas": False,
48 | "max_image_seq_len": 8192,
49 | "max_shift": math.log(3),
50 | "num_train_timesteps": 1000,
51 | "shift": 1.0,
52 | "shift_terminal": None,
53 | "stochastic_sampling": False,
54 | "time_shift_type": "exponential",
55 | "use_beta_sigmas": False,
56 | "use_dynamic_shifting": True,
57 | "use_exponential_sigmas": False,
58 | "use_karras_sigmas": False,
59 | }
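# Note: these registration defaults differ from the node widget defaults further
# below (e.g. base_shift/max_shift are math.log(3) here but 0.5/1.15 in the node);
# this config only drives the scheduler-dropdown entry registered with ComfyUI.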
60 |
61 | def flow_match_euler_scheduler_handler(model_sampling, steps):
62 | scheduler = FlowMatchEulerDiscreteScheduler.from_config(default_config)
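    # mu=0.0 leaves the exponential dynamic shift at identity (exp(0) = 1),
    # so the registered default schedule is effectively unshifted.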
63 | scheduler.set_timesteps(steps, device=model_sampling.device if hasattr(model_sampling, 'device') else 'cpu', mu=0.0)
64 | sigmas = scheduler.sigmas
65 | return sigmas
66 |
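# Handler contract (as consumed by comfy.samplers via SchedulerHandler): given
# the model sampling object and a step count, return a 1-D tensor of sigmas
# that ends at 0.0; the handler above delegates this to the diffusers scheduler.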
67 | def vq_diffusion_scheduler_handler(model_sampling, steps):
68 | if VQDiffusionScheduler is None:
69 | raise ImportError("VQDiffusionScheduler is not available.")
70 |
71 | # VQDiffusionScheduler requires num_vec_classes.
72 | print("[FlowMatch Scheduler] WARNING: VQDiffusionScheduler is for discrete models (VQ-Diffusion).")
73 | print("It does not produce 'sigmas' for continuous diffusion.")
74 | print("Returning dummy linear sigmas to prevent crash, but sampling will likely fail with standard models.")
75 |
76 | # Dummy initialization
77 | # scheduler = VQDiffusionScheduler(num_vec_classes=4096, num_train_timesteps=1000)
78 |
79 | # Return dummy sigmas
80 | sigmas = torch.linspace(1.0, 0.0, steps + 1)
81 | if hasattr(model_sampling, 'device'):
82 | sigmas = sigmas.to(model_sampling.device)
83 | return sigmas
84 |
85 | # Register the schedulers in ComfyUI
86 | if "FlowMatchEulerDiscreteScheduler" not in SCHEDULER_HANDLERS:
87 | handler = SchedulerHandler(handler=flow_match_euler_scheduler_handler, use_ms=True)
88 | SCHEDULER_HANDLERS["FlowMatchEulerDiscreteScheduler"] = handler
89 | SCHEDULER_NAMES.append("FlowMatchEulerDiscreteScheduler")
90 |
91 | # Explicitly add to KSampler.SCHEDULERS to ensure compatibility with nodes
92 | # that might replace the list object (like RES4LYF)
93 | try:
94 | from comfy.samplers import KSampler
95 | if "FlowMatchEulerDiscreteScheduler" not in KSampler.SCHEDULERS:
96 | KSampler.SCHEDULERS.append("FlowMatchEulerDiscreteScheduler")
97 | except ImportError:
98 | pass
99 |
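# Quick sanity check (a debugging sketch, safe to run at import time):
#
#   from comfy.samplers import SCHEDULER_NAMES
#   assert "FlowMatchEulerDiscreteScheduler" in SCHEDULER_NAMES
#
# If the name is missing at runtime, another custom node has likely replaced
# the scheduler list object after this module was imported.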
100 | # if "VQDiffusionScheduler" not in SCHEDULER_HANDLERS:
101 | # SCHEDULER_HANDLERS["VQDiffusionScheduler"] = SchedulerHandler(handler=vq_diffusion_scheduler_handler, use_ms=True)
102 | # SCHEDULER_NAMES.append("VQDiffusionScheduler")
103 |
104 | # try:
105 | # from comfy.samplers import KSampler
106 | # if "VQDiffusionScheduler" not in KSampler.SCHEDULERS:
107 | # KSampler.SCHEDULERS.append("VQDiffusionScheduler")
108 | # except ImportError:
109 | # pass
110 |
111 | class FlowMatchEulerSchedulerNode:
112 | @classmethod
113 | def INPUT_TYPES(cls):
114 | return {
115 | "required": {
116 | "steps": ("INT", {
117 | "default": 9,
118 | "min": 1,
119 | "max": 10000,
120 | "tooltip": "Total number of diffusion steps to generate the full sigma schedule."
121 | }),
122 | "start_at_step": ("INT", {
123 | "default": 0,
124 | "min": 0,
125 | "max": 10000,
126 | "tooltip": "The starting step (index) of the sigma schedule to use. Set to 0 to start at the beginning (first step)."
127 | }),
128 | "end_at_step": ("INT", {
129 | "default": 9999,
130 | "min": 0,
131 | "max": 10000,
132 | "tooltip": "The ending step (index) of the sigma schedule to use. Set higher than 'steps' to use all steps."
133 | }),
134 | "base_image_seq_len": ("INT", {
135 | "default": 256,
136 | "tooltip": "Base sequence length for dynamic shifting. Should match model's training resolution (e.g., 256 for 512x512 images)."
137 | }),
138 | "base_shift": ("FLOAT", {
139 | "default": 0.5,
140 | "step": 0.01,
141 | "tooltip": "Stabilizes generation. Higher values = more consistent/predictable outputs. Z-Image-Turbo uses default 0.5."
142 | }),
143 | "invert_sigmas": (["disable", "enable"], {
144 | "default": "disable",
145 | "tooltip": "Reverses the sigma schedule. Keep disabled unless experimenting with advanced techniques."
146 | }),
147 | "max_image_seq_len": ("INT", {
148 | "default": 8192,
149 | "tooltip": "Maximum sequence length for dynamic shifting. Affects how the scheduler adapts to large images."
150 | }),
151 | "max_shift": ("FLOAT", {
152 | "default": 1.15,
153 | "step": 0.01,
154 | "tooltip": "Maximum variation allowed. Higher = more exaggerated/stylized results. Z-Image-Turbo uses default 1.15."
155 | }),
156 | "num_train_timesteps": ("INT", {
157 | "default": 1000,
158 | "tooltip": "Timesteps the model was trained with. Should match your model's config (typically 1000)."
159 | }),
160 | "shift": ("FLOAT", {
161 | "default": 3.0,
162 | "step": 0.01,
163 | "tooltip": "Global timestep schedule shift. Z-Image-Turbo uses 3.0 for optimal performance with the Turbo model."
164 | }),
165 | "shift_terminal": ("FLOAT", {
166 | "default": 0.0,
167 | "step": 0.01,
168 | "tooltip": "End value for shifted schedule. Set to 0.0 to disable. Advanced parameter for timestep schedule control."
169 | }),
170 | "stochastic_sampling": (["disable", "enable"], {
171 | "default": "disable",
172 | "tooltip": "Adds controlled randomness to each step. Enable for more varied outputs (similar to ancestral samplers)."
173 | }),
174 | "time_shift_type": (["exponential", "linear"], {
175 | "default": "exponential",
176 | "tooltip": "Method for resolution-dependent shifting. Use 'exponential' for most cases, 'linear' for experiments."
177 | }),
178 | "use_beta_sigmas": (["disable", "enable"], {
179 | "default": "disable",
180 | "tooltip": "Uses beta distribution for sigmas. Experimental alternative noise schedule."
181 | }),
182 | "use_dynamic_shifting": (["disable", "enable"], {
183 | "default": "disable",
184 | "tooltip": "Auto-adjusts timesteps based on image resolution. Z-Image-Turbo disables this for consistent Turbo performance."
185 | }),
186 | "use_exponential_sigmas": (["disable", "enable"], {
187 | "default": "disable",
188 | "tooltip": "Uses exponential sigma spacing. Try enabling for different noise distribution characteristics."
189 | }),
190 | "use_karras_sigmas": (["disable", "enable"], {
191 | "default": "disable",
192 | "tooltip": "Uses Karras noise schedule for smoother results. Similar to DPM++ samplers, often improves quality."
193 | }),
194 | "device": (["auto", "cuda", "cpu"], {
195 | "default": "auto",
196 | "tooltip": "Device for sigma computation. 'auto' detects GPU if available, otherwise CPU. Using GPU avoids CPU->GPU transfers."
197 | }),
198 | }
199 | }
200 |
201 | RETURN_TYPES = ("SIGMAS",)
202 | RETURN_NAMES = ("sigmas",)
203 | FUNCTION = "create"
204 | CATEGORY = "sampling/schedulers"
205 | DESCRIPTION = "FlowMatch Euler Discrete Scheduler with full parameter control and ability to trim the schedule (start_at_step/end_at_step)."
206 |
207 | def create(
208 | self,
209 | steps,
210 | start_at_step,
211 | end_at_step,
212 | base_image_seq_len,
213 | base_shift,
214 | invert_sigmas,
215 | max_image_seq_len,
216 | max_shift,
217 | num_train_timesteps,
218 | shift,
219 | shift_terminal,
220 | stochastic_sampling,
221 | time_shift_type,
222 | use_beta_sigmas,
223 | use_dynamic_shifting,
224 | use_exponential_sigmas,
225 | use_karras_sigmas,
226 | device="auto",
227 | ):
228 |         # Build the scheduler config, converting "enable"/"disable" combo values to booleans
229 | config = {
230 | "base_image_seq_len": base_image_seq_len,
231 | "base_shift": base_shift,
232 | "invert_sigmas": invert_sigmas == "enable",
233 | "max_image_seq_len": max_image_seq_len,
234 | "max_shift": max_shift,
235 | "num_train_timesteps": num_train_timesteps,
236 | "shift": shift,
237 | "shift_terminal": shift_terminal if shift_terminal != 0.0 else None,
238 | "stochastic_sampling": stochastic_sampling == "enable",
239 | "time_shift_type": time_shift_type,
240 | "use_beta_sigmas": use_beta_sigmas == "enable",
241 | "use_dynamic_shifting": use_dynamic_shifting == "enable",
242 | "use_exponential_sigmas": use_exponential_sigmas == "enable",
243 | "use_karras_sigmas": use_karras_sigmas == "enable",
244 | }
245 |
246 | scheduler = FlowMatchEulerDiscreteScheduler.from_config(config)
247 |
248 | # 1. Generate the full sigma schedule
249 |
250 | # Determine device to use for sigma computation
251 | if device == "auto":
252 | # Auto-detect: use CUDA if available, otherwise CPU
253 | target_device = "cuda" if torch.cuda.is_available() else "cpu"
254 | print(f"[FlowMatch Scheduler] Auto-detected device: {target_device.upper()}")
255 | else:
256 | target_device = device
257 | print(f"[FlowMatch Scheduler] Using manually specified device: {target_device.upper()}")
258 |
259 |         # Set timesteps and get sigmas for the specified number of steps.
260 |         # Computing sigmas directly on the target device avoids CPU->GPU transfers during sampling.
261 | scheduler.set_timesteps(steps, device=target_device, mu=0.0)
262 | sigmas = scheduler.sigmas
263 |
264 | # 2. Apply start_at_step and end_at_step (Slicing the sigmas tensor)
265 | # Determine the exclusive end index for the slice
266 | # end_at_step is the step index (e.g., 5). We use 5+1=6 for the slice end index.
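        # Worked example: steps=9 yields 10 sigma values (indices 0..9, ending at 0.0);
        #   start_at_step=2, end_at_step=5    -> sigmas[2:6], four values
        #   start_at_step=0, end_at_step=9999 -> end clamped to len(sigmas), full schedule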
267 | end_index = min(end_at_step + 1, len(sigmas))
268 |
269 | # Slice the tensor: [start:end]
270 | sigmas_sliced = sigmas[start_at_step:end_index]
271 |
272 | # Check for empty schedule resulting from slicing
273 | if sigmas_sliced.numel() == 0:
274 | print("Warning: start_at_step/end_at_step resulted in an empty sigma schedule. Using full schedule as fallback.")
275 | sigmas_sliced = sigmas
276 |
277 | return (sigmas_sliced,)
278 |
279 | class VQDiffusionSchedulerNode:
280 | @classmethod
281 | def INPUT_TYPES(cls):
282 | return {
283 | "required": {
284 | "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
285 | "num_vec_classes": ("INT", {"default": 4096, "min": 1, "max": 65536, "tooltip": "Number of vector classes for VQ model."}),
286 | "num_train_timesteps": ("INT", {"default": 1000}),
287 | }
288 | }
289 |
290 | RETURN_TYPES = ("SIGMAS",)
291 | RETURN_NAMES = ("sigmas",)
292 | FUNCTION = "create"
293 | CATEGORY = "sampling/schedulers"
294 | DESCRIPTION = "VQ Diffusion Scheduler (Experimental). For VQ-Diffusion models. Returns dummy sigmas for compatibility."
295 |
296 | def create(self, steps, num_vec_classes, num_train_timesteps):
297 | if VQDiffusionScheduler is None:
298 | raise ImportError("VQDiffusionScheduler not found.")
299 |
300 | print("[FlowMatch Scheduler] Creating VQDiffusionScheduler (Experimental)")
301 | print("[FlowMatch Scheduler] WARNING: Returning dummy sigmas. This scheduler is for discrete latent models.")
302 |
303 |         # VQDiffusionScheduler operates on discrete latents and cannot produce
304 |         # continuous sigmas; num_vec_classes/num_train_timesteps are accepted but unused.
305 |         sigmas = torch.linspace(1.0, 0.0, steps + 1)
306 |         # Default to CPU: there is no model context here to detect a device,
307 |         # and downstream sampler nodes will move the tensor if needed.
308 | return (sigmas,)
309 |
310 | NODE_CLASS_MAPPINGS = {
311 | "FlowMatchEulerDiscreteScheduler (Custom)": FlowMatchEulerSchedulerNode,
312 | # "VQDiffusionScheduler": VQDiffusionSchedulerNode,
313 | }
314 |
315 | NODE_DISPLAY_NAME_MAPPINGS = {
316 | "FlowMatchEulerDiscreteScheduler (Custom)": "FlowMatch Euler Discrete Scheduler (Custom)",
317 | # "VQDiffusionScheduler": "VQ Diffusion Scheduler (Experimental)",
318 | }
319 |
320 | from .extract_metadata_node import NODE_CLASS_MAPPINGS as METADATA_NODE_MAPPINGS
321 | from .extract_metadata_node import NODE_DISPLAY_NAME_MAPPINGS as METADATA_DISPLAY_MAPPINGS
322 |
323 | NODE_CLASS_MAPPINGS.update(METADATA_NODE_MAPPINGS)
324 | NODE_DISPLAY_NAME_MAPPINGS.update(METADATA_DISPLAY_MAPPINGS)
325 |
326 | # Import Nunchaku nodes
327 | try:
328 | from .nunchaku_compat import NODE_CLASS_MAPPINGS as NUNCHAKU_NODES
329 | from .nunchaku_compat import NODE_DISPLAY_NAME_MAPPINGS as NUNCHAKU_NAMES
330 | NODE_CLASS_MAPPINGS.update(NUNCHAKU_NODES)
331 | NODE_DISPLAY_NAME_MAPPINGS.update(NUNCHAKU_NAMES)
332 | except Exception as e:
333 | print(f"[FlowMatch Scheduler] Could not load Nunchaku nodes: {e}")
--------------------------------------------------------------------------------