├── .DS_Store
├── .github
│   └── workflows
│       └── publish.yml
├── .gitignore
├── PERFORMANCE_OPTIMIZATION.md
├── README.md
├── __init__.py
├── api
│   ├── __init__.py
│   ├── anthropic_api.py
│   ├── bfl_api.py
│   ├── deepseek_api.py
│   ├── gemini_api.py
│   ├── gemini_image_api.py
│   ├── groq_api.py
│   ├── ollama_api.py
│   ├── openai_api.py
│   ├── openrouter_api.py
│   ├── suno_api.py
│   ├── transformers_api.py
│   └── wavespeed_image_api.py
├── comfy-nodes
│   ├── api_key_input.py
│   ├── api_provider_selector.py
│   ├── audio_duration_frames.py
│   ├── banana_task_generator.py
│   ├── blank_image.py
│   ├── check_image_empty.py
│   ├── config_generate_image.py
│   ├── config_generate_image_bfl.py
│   ├── config_generate_image_flux_dev.py
│   ├── config_generate_image_gemini.py
│   ├── config_generate_image_openai.py
│   ├── config_generate_image_openrouter.py
│   ├── config_generate_image_portrait.py
│   ├── config_generate_image_seedream.py
│   ├── config_generate_image_split.py
│   ├── config_generate_image_unified.py
│   ├── config_generate_music.py
│   ├── config_generate_speech.py
│   ├── config_generate_video.py
│   ├── config_generate_video_hailuo_i2v_pro.py
│   ├── config_generate_video_hailuo_i2v_standard.py
│   ├── config_generate_video_hailuo_t2v_pro.py
│   ├── config_generate_video_hailuo_t2v_standard.py
│   ├── config_generate_video_kling_i2v_master.py
│   ├── config_generate_video_kling_i2v_pro.py
│   ├── config_generate_video_kling_i2v_standard.py
│   ├── config_generate_video_seedance_pro_i2v_720p.py
│   ├── config_generate_video_seedance_pro_t2v_720p.py
│   ├── config_generate_video_veo2_i2v.py
│   ├── config_generate_video_veo2_t2v.py
│   ├── config_generate_video_veo3.py
│   ├── config_generate_video_veo3_fast.py
│   ├── display_text.py
│   ├── frames_to_seconds.py
│   ├── generate_image.py
│   ├── generate_music.py
│   ├── generate_speech.py
│   ├── generate_text.py
│   ├── generate_video.py
│   ├── high_low_snr.py
│   ├── image_comparer.py
│   ├── image_generation_capabilities.py
│   ├── llmtoolkit_providers.py
│   ├── load_audio_from_path.py
│   ├── load_video_from_path.py
│   ├── logic_preview_image.py
│   ├── model_list_fetcher.py
│   ├── play_random_sound.py
│   ├── preview_outputs.py
│   ├── preview_video.py
│   ├── prompt_manager.py
│   ├── resolution_selector.py
│   ├── string_utils.py
│   ├── style_prompt_generator.py
│   ├── switch_any.py
│   ├── system_prompt_task_generator.py
│   ├── test_api_key_context.py
│   ├── transformers_provider.py
│   ├── upscale_video.py
│   └── video_generation_capabilities.py
├── context_payload.py
├── example_workflows
│   ├── LLMToolkit_banana-Gemini-api.jpg
│   ├── LLMToolkit_banana-Gemini-api.json
│   ├── LLMToolkit_banana-OR-api.jpg
│   ├── LLMToolkit_banana-OR-api.json
│   ├── RESTYLE_KONTEXT.jpg
│   ├── RESTYLE_KONTEXT.json
│   ├── kontext_edit_images_with_guided_auto_prompts.jpg
│   ├── kontext_edit_images_with_guided_auto_prompts.json
│   ├── kontext_simple_single_image_edit.jpg
│   ├── kontext_simple_single_image_edit.json
│   ├── llm_toolkit_dall-e-2_variations.jpg
│   ├── llm_toolkit_dall-e-2_variations.json
│   ├── llm_toolkit_gpt-image-1_combine_images.jpg
│   ├── llm_toolkit_gpt-image-1_combine_images.json
│   ├── llm_toolkit_gpt-image-1_generate_image.jpg
│   ├── llm_toolkit_gpt-image-1_generate_image.json
│   ├── llm_toolkit_gpt-image1_inpainting.jpg
│   ├── llm_toolkit_ollama_generate-stream.json
│   ├── llm_toolkit_openai_provider_generate-stream.jpg
│   ├── llm_toolkit_openai_provider_generate-stream.json
│   ├── llmtoolki-BFL-Kontext-image-api.jpg
│   ├── llmtoolki-BFL-Kontext-image-api.json
│   ├── llmtoolkit_Hailuo_I2VPro_api.jpg
│   ├── llmtoolkit_Hailuo_I2VPro_api.json
│   ├── llmtoolkit_Seedance_api.jpg
│   ├── llmtoolkit_Seedance_api.json
│   ├── llmtoolkit_VEO2_api.jpg
│   ├── llmtoolkit_VEO2_api.json
│   ├── llmtoolkit_VEO3_api.json
│   ├── llmtoolkit_VEO3_fast_api.json
│   ├── llmtoolkit_gemini_speech_api.jpg
│   ├── llmtoolkit_gemini_speech_api.json
│   ├── radial_attn.jpg
│   └── radial_attn.json
├── llmtoolkit_utils.py
├── model_lists_output
│   ├── anthropic_models.json
│   ├── anthropic_models.py
│   ├── deepseek_models.json
│   ├── deepseek_models.py
│   ├── gemini_models.json
│   ├── gemini_models.py
│   ├── groq_models.json
│   ├── groq_models.py
│   ├── mistral_models.json
│   ├── mistral_models.py
│   ├── openai_models.json
│   ├── openai_models.py
│   ├── openrouter_models.json
│   └── openrouter_models.py
├── package-lock.json
├── package.json
├── presets
│   ├── banana-tasks.json
│   ├── styles.json
│   └── system_prompt_tasks.json
├── provider_models_hardcoded.py
├── pyproject.toml
├── requirements.txt
├── send_request.py
├── sounds
│   ├── LetsBegin.mp3
│   ├── faile_run.mp3
│   ├── follow_IF_YT.mp3
│   ├── oh_smth_wrong.mp3
│   ├── wellcometocomfydeploy.mp3
│   └── workflow_staretd.mp3
├── test_import.py
├── test_js_integration.py
└── web
    ├── .DS_Store
    └── js
        ├── APIProviderSelectorNode.js
        ├── APIProviderSelectorNode.js.bak
        ├── DisplayTextNode.js
        ├── GenerateTextNode.js
        ├── GenerateTextStreamNode.js
        ├── ImageComparerNode.js
        ├── LLMToolkitProviderSelectorNode.js
        ├── LLMToolkitProviderSelectorNode.js.disabled
        ├── ResolutionSelectorNode.js
        ├── prompt_manager_dynamic.js
        └── prompt_manager_dynamic.js.disabled
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comfy-deploy/comfyui-llm-toolkit/d0a526e850a90d1bac0025ddd977b96a09989144/.DS_Store
--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | name: Publish to Comfy registry
2 | on:
3 | workflow_dispatch:
4 | push:
5 | branches:
6 | - main
7 | - master
8 | paths:
9 | - "pyproject.toml"
10 |
11 | permissions:
12 | issues: write
13 |
14 | jobs:
15 | publish-node:
16 | name: Publish Custom Node to registry
17 | runs-on: ubuntu-latest
18 | if: ${{ github.repository_owner == 'comfy-deploy' }}
19 | steps:
20 | - name: Check out code
21 | uses: actions/checkout@v4
22 | with:
23 | submodules: true
24 | - name: Publish Custom Node
25 | uses: Comfy-Org/publish-node-action@v1
26 | with:
27 |           ## Add your own personal access token to your GitHub repository secrets and reference it here.
28 | personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }}
29 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Created by https://www.toptal.com/developers/gitignore/api/python
2 | # Edit at https://www.toptal.com/developers/gitignore?templates=python
3 |
4 | ### Python ###
5 | # Byte-compiled / optimized / DLL files
6 | __pycache__/
7 | *.py[cod]
8 | *$py.class
9 |
10 | # C extensions
11 | *.so
12 |
13 | # Distribution / packaging
14 | .Python
15 | build/
16 | develop-eggs/
17 | dist/
18 | downloads/
19 | eggs/
20 | .eggs/
21 | lib/
22 | lib64/
23 | parts/
24 | sdist/
25 | var/
26 | wheels/
27 | share/python-wheels/
28 | *.egg-info/
29 | .installed.cfg
30 | *.egg
31 | MANIFEST
32 |
33 | # PyInstaller
34 | # Usually these files are written by a python script from a template
35 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
36 | *.manifest
37 | *.spec
38 |
39 | # Installer logs
40 | pip-log.txt
41 | pip-delete-this-directory.txt
42 |
43 | # Unit test / coverage reports
44 | htmlcov/
45 | .tox/
46 | .nox/
47 | .coverage
48 | .coverage.*
49 | .cache
50 | nosetests.xml
51 | coverage.xml
52 | *.cover
53 | *.py,cover
54 | .hypothesis/
55 | .pytest_cache/
56 | cover/
57 |
58 | # Translations
59 | *.mo
60 | *.pot
61 |
62 | # Django stuff:
63 | *.log
64 | local_settings.py
65 | db.sqlite3
66 | db.sqlite3-journal
67 |
68 | # Flask stuff:
69 | instance/
70 | .webassets-cache
71 |
72 | # Scrapy stuff:
73 | .scrapy
74 |
75 | # Sphinx documentation
76 | docs/_build/
77 |
78 | # PyBuilder
79 | .pybuilder/
80 | target/
81 |
82 | # Jupyter Notebook
83 | .ipynb_checkpoints
84 |
85 | # IPython
86 | profile_default/
87 | ipython_config.py
88 |
89 | # Local test scripts and model list updater
90 | test_import.py
91 | test_js_integration.py
92 | test_lazy_loading.py
93 | update_model_lists.py
94 |
95 | # pyenv
96 | # For a library or package, you might want to ignore these files since the code is
97 | # intended to run in multiple environments; otherwise, check them in:
98 | # .python-version
99 |
100 | # pipenv
101 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
102 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
103 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
104 | # install all needed dependencies.
105 | #Pipfile.lock
106 |
107 | # poetry
108 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
109 | # This is especially recommended for binary packages to ensure reproducibility, and is more
110 | # commonly ignored for libraries.
111 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
112 | #poetry.lock
113 |
114 | # pdm
115 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
116 | #pdm.lock
117 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
118 | # in version control.
119 | # https://pdm.fming.dev/#use-with-ide
120 | .pdm.toml
121 |
122 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
123 | __pypackages__/
124 |
125 | # Celery stuff
126 | celerybeat-schedule
127 | celerybeat.pid
128 |
129 | # SageMath parsed files
130 | *.sage.py
131 |
132 | # Environments
133 | .env
134 | .venv
135 | env/
136 | venv/
137 | ENV/
138 | env.bak/
139 | venv.bak/
140 |
141 | # Spyder project settings
142 | .spyderproject
143 | .spyproject
144 |
145 | # Rope project settings
146 | .ropeproject
147 |
148 | # mkdocs documentation
149 | /site
150 |
151 | # mypy
152 | .mypy_cache/
153 | .dmypy.json
154 | dmypy.json
155 |
156 | # Pyre type checker
157 | .pyre/
158 |
159 | # pytype static type analyzer
160 | .pytype/
161 |
162 | # Cython debug symbols
163 | cython_debug/
164 |
165 | # PyCharm
166 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
167 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
168 | # and can be added to the global gitignore or merged into this file. For a more nuclear
169 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
170 | #.idea/
171 |
172 | ### Python Patch ###
173 | # Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
174 | poetry.toml
175 |
176 | # ruff
177 | .ruff_cache/
178 |
179 | # LSP config files
180 | pyrightconfig.json
181 |
182 | # End of https://www.toptal.com/developers/gitignore/api/python
183 |
--------------------------------------------------------------------------------
/PERFORMANCE_OPTIMIZATION.md:
--------------------------------------------------------------------------------
1 | # LLM Toolkit Performance Optimization Guide
2 |
3 | ## Current Load Time Issue
4 | The LLM Toolkit takes ~1.0 seconds to load compared to 0.1-0.2 seconds for other nodes.
5 |
6 | ## Root Causes
7 | 1. **Heavy library imports at module level** (torch, numpy, torchaudio)
8 | 2. **Loading all 50+ node files during initialization**
9 | 3. **Cascading imports between modules**
10 | 4. **Unintended TensorFlow loading**
11 |
12 | ## Recommended Optimizations
13 |
14 | ### 1. Lazy Import Pattern
15 | Convert heavy imports to lazy loading:
16 |
17 | ```python
18 | # Instead of:
19 | import torch
20 | import numpy as np
21 |
22 | # Use:
23 | def _get_torch():
24 | import torch
25 | return torch
26 |
27 | # Or use lazy imports inside functions:
28 | def process_image(self, image):
29 | import torch # Import only when needed
30 | return torch.tensor(image)
31 | ```
32 |
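33 | An alternative (a sketch, assuming Python 3.7+) is PEP 562's module-level `__getattr__`, which defers an import until the name is first accessed:
34 |
35 | ```python
36 | # Lazy module attributes via PEP 562 (Python 3.7+).
37 | # The first access to e.g. `torch` triggers the real import; the result is
38 | # cached in the module dict, so later accesses cost nothing extra.
39 | import importlib
40 |
41 | _LAZY = {"torch": "torch", "np": "numpy", "torchaudio": "torchaudio"}
42 |
43 | def __getattr__(name):
44 |     if name in _LAZY:
45 |         module = importlib.import_module(_LAZY[name])
46 |         globals()[name] = module  # cache for subsequent lookups
47 |         return module
48 |     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
49 | ```
50 |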
33 | ### 2. Selective Node Loading
34 | Modify `__init__.py` to load nodes on-demand:
35 |
36 | ```python
37 | # Load only essential nodes at startup
38 | ESSENTIAL_NODES = [
39 | 'api_key_input',
40 | 'generate_text',
41 | 'prompt_manager',
42 | # ... core nodes only
43 | ]
44 |
45 | # Load others when first accessed
46 | def lazy_load_node(node_name):
47 | if node_name not in NODE_CLASS_MAPPINGS:
48 | module = importlib.import_module(node_name)
49 | # ... register the node
50 | ```
51 |
52 | ### 3. Import Optimization in transformers_api.py
53 | The transformers library already uses a lazy-loading pattern - this is good!
54 | Ensure other heavy modules follow the same pattern.
55 |
56 | ### 4. Defer torch/numpy imports
57 | Move imports inside class methods:
58 |
59 | ```python
60 | class GenerateMusic:
61 | def run(self, ...):
62 | import torch # Import when actually used
63 | import torchaudio
64 | # ... rest of the code
65 | ```
66 |
67 | ### 5. Profile-guided optimization
68 | Use Python's `-X importtime` flag to identify slow imports:
69 | ```bash
70 | python -X importtime main.py 2>&1 | grep "llm-toolkit"
71 | ```
72 |
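73 | The output is easiest to scan when sorted by cumulative time. A small helper (a sketch that parses the `import time: self | cumulative | name` lines the flag writes to stderr):
74 |
75 | ```python
76 | # sort_importtime.py - pipe `python -X importtime main.py 2>&1` into this
77 | # script to list the slowest imports by cumulative time.
78 | import re
79 | import sys
80 |
81 | rows = []
82 | for line in sys.stdin:
83 |     m = re.match(r"import time:\s+(\d+)\s*\|\s+(\d+)\s*\|\s*(.+)", line)
84 |     if m:
85 |         rows.append((int(m.group(2)), m.group(3).strip()))
86 |
87 | for cumulative_us, module in sorted(rows, reverse=True)[:20]:
88 |     print(f"{cumulative_us / 1000:8.1f} ms  {module}")
89 | ```
90 |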
73 | ## Quick Wins (Immediate improvements)
74 |
75 | 1. **Move torch imports to function level** in these files:
76 | - `blank_image.py`
77 | - `check_image_empty.py`
78 | - `logic_preview_image.py`
79 | - `prompt_manager.py`
80 | - `generate_music.py`
81 | - `preview_outputs.py`
82 |
83 | 2. **Use conditional imports** for optional dependencies:
84 | ```python
85 | try:
86 | import torchaudio
87 | AUDIO_SUPPORT = True
88 | except ImportError:
89 | AUDIO_SUPPORT = False
90 | ```
91 |
92 | 3. **Cache module imports** in `__init__.py` (a usage sketch follows this list):
93 | ```python
94 | _module_cache = {}
95 |
96 | def get_module(name):
97 | if name not in _module_cache:
98 | _module_cache[name] = importlib.import_module(name)
99 | return _module_cache[name]
100 | ```
101 |
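102 | For example, node classes can then be resolved through the cached helper on demand (a sketch; `get_module` is the helper from point 3, and the module/class names are only illustrative):
103 |
104 | ```python
105 | # Hypothetical use of get_module(): resolve a node class the first time it
106 | # is needed instead of importing every node file at startup.
107 | def get_node_class(module_name, class_name):
108 |     module = get_module(module_name)
109 |     return getattr(module, class_name)
110 |
111 | GenerateText = get_node_class("generate_text", "GenerateText")
112 | ```
113 |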
102 | ## Expected Impact
103 | These optimizations should reduce load time from 1.0s to approximately 0.2-0.3s, bringing it in line with other ComfyUI nodes.
104 |
105 | ## Implementation Priority
106 | 1. High: Move torch/numpy imports to function level
107 | 2. High: Implement lazy loading for non-essential nodes
108 | 3. Medium: Add import caching
109 | 4. Low: Profile and optimize remaining bottlenecks
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ComfyUI LLM Toolkit
2 |
3 | A custom node collection for integrating various LLM (Large Language Model) providers with ComfyUI.
4 | 
5 | ## NEW: General Image and Video Configs
6 |
7 |
8 | ## Features
9 | - True context-to-context node connections with a one-input / one-output design
10 |
11 |
12 | - Generators run even with nothing connected, even when they are the only node in the workflow
13 | 
14 |
15 | - Streaming output directly on the node UI
16 | 
17 |
18 | - Runs OpenAI's latest image model, GPT-image-1, and includes various workflow templates
19 |
20 | - Text generation using various LLM providers (OpenAI and local models, etc.)
21 | - Provider selection and configuration with dynamic model fetching
22 | - API key management
23 | - Seamless integration with ComfyUI workflows
24 |
25 | ## Installation
34 |
35 | 1. Clone this repository into your ComfyUI custom_nodes directory:
36 | ```bash
37 | cd /path/to/ComfyUI/custom_nodes
38 | git clone https://github.com/comfy-deploy/comfyui-llm-toolkit.git
39 | ```
40 |
41 | 2. Install the required dependencies:
42 | ```bash
43 | cd comfyui-llm-toolkit
44 | pip install -r requirements.txt
45 | ```
46 |
47 | 3. Restart ComfyUI
48 |
49 | ## Dependencies
50 |
51 | The following Python packages are required:
52 | - aiohttp
53 | - pyyaml
54 | - python-dotenv
55 | - requests
56 | - openai
56 |
57 | ## Configuration
58 |
59 | 1. API keys for various providers can be stored in a `.env` file in the root directory:
60 | ```
61 | OPENAI_API_KEY=your_openai_key
62 | # Add other provider keys as needed
63 | ```
64 |
65 | 2. Alternatively, you can provide API keys directly in the node interface.
66 |
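67 | Under the hood this is the standard python-dotenv pattern; a minimal sketch (assuming `python-dotenv` from the dependency list):
68 |
69 | ```python
70 | # Minimal sketch of .env-based key lookup with python-dotenv.
71 | import os
72 | from dotenv import load_dotenv
73 |
74 | load_dotenv()  # reads .env from the working directory into os.environ
75 | api_key = os.getenv("OPENAI_API_KEY", "")
76 | if not api_key:
77 |     print("OPENAI_API_KEY not set; provide it in the node interface instead")
78 | ```
79 |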
67 | ## Usage
68 |
69 | ### LLM Provider Selector Node
70 | Use this node to select the LLM provider and model. It outputs provider configuration within the wildcard "context" output.
71 |
72 | - When you change the provider, the model dropdown updates dynamically with available models
73 | - IP and Port fields appear/disappear based on whether the provider needs them
74 | - The output is a single "context" type that contains the provider configuration
75 |
76 | ### Generate Text Node
77 | Basic text generation with the selected provider and model.
78 |
79 | - Connect it to the LLM Provider Selector by connecting their "context" ports
80 | - The node automatically detects if provider config is present in the input
81 | - You can override the model with the dropdown
82 | - The "context" output contains both the original input data and the LLM response
83 |
84 | ## Unified ONE-INPUT / ONE-OUTPUT Architecture
85 |
86 | The LLM Toolkit uses a single "context" input/output approach for maximum flexibility:
87 |
88 | 1. **Single Connection Point**: Each node has just one wildcard input and one wildcard output named "context"
89 | 2. **Smart Data Handling**:
90 | - Provider config is embedded within the "context" data structure
91 | - Each node intelligently extracts the data it needs from the "context" input
92 | - Nodes preserve all input data and add their own data to the "context" output
93 | 3. **Cascading Data Flow**: As data flows through nodes, it accumulates in the "context" structure
94 |
95 | For example, with nodes A → B → C:
96 | - Node A creates a "context" with provider config
97 | - Node B receives A's "context", extracts the provider config, and adds LLM response to the "context"
98 | - Node C receives B's "context" which now contains both provider config and LLM response
99 |
100 | This allows you to:
101 | - Chain multiple LLM operations with a single connection
102 | - Preserve and accumulate data throughout the workflow
103 | - Easily integrate with other ComfyUI nodes
104 |
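105 | To make the cascading flow concrete, here is a toy sketch of the accumulation in plain Python (not the toolkit's actual code; the key names are illustrative):
106 |
107 | ```python
108 | # Toy model of the context flow for A -> B -> C (key names illustrative).
109 | context = {}
110 |
111 | # Node A: the provider selector embeds its config
112 | context["provider_config"] = {"provider_name": "openai", "llm_model": "gpt-4o-mini"}
113 |
114 | # Node B: the generator reads the config, then adds its own output
115 | provider = context["provider_config"]
116 | context["llm_response"] = f"(text generated with {provider['llm_model']})"
117 |
118 | # Node C: sees everything accumulated so far
119 | assert {"provider_config", "llm_response"} <= context.keys()
120 | ```
121 |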
105 | ## Supported Providers
106 |
107 | - OpenAI (Default: gpt-4o-mini)
108 | - Ollama (local)
109 |
110 |
111 | ## Troubleshooting
112 |
113 | If you encounter model list update issues:
114 | 1. Make sure ComfyUI is running with the correct server configuration
115 | 2. Check that JavaScript is enabled in your browser
116 | 3. Verify that your API keys are correctly set in the .env file or provided in the node
117 |
118 | If you encounter import errors:
119 | 1. Make sure you've installed all dependencies: `pip install -r requirements.txt`
120 | 2. Verify that you've placed the custom node in the correct directory
121 | 3. Restart ComfyUI after installation
122 |
123 | ## Testing Your Installation
124 |
125 | Run the included test script to verify your setup:
126 | ```bash
127 | cd /path/to/ComfyUI/custom_nodes/comfyui-llm-toolkit
128 | python test_js_integration.py
129 | ```
130 |
131 |
132 |
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | @author: ComfyDeploy
3 | @title: llm toolkit
4 | @nickname: llm_toolkit
5 | @description: llm toolkit
6 | """
7 | import os
8 | import sys
9 | import inspect
10 | import importlib
11 | import re
12 | import importlib.util
13 |
14 | sys.path.append(os.path.join(os.path.dirname(__file__)))
15 |
16 | ag_path = os.path.join(os.path.dirname(__file__))
17 |
18 | def get_python_files(path):
19 | return [f[:-3] for f in os.listdir(path) if f.endswith(".py")]
20 |
21 | def append_to_sys_path(path):
22 | if path not in sys.path:
23 | sys.path.append(path)
24 |
25 | paths = ["comfy-nodes"]
26 | files = []
27 |
28 | for path in paths:
29 | full_path = os.path.join(ag_path, path)
30 | append_to_sys_path(full_path)
31 | files.extend(get_python_files(full_path))
32 |
33 | NODE_CLASS_MAPPINGS = {}
34 | NODE_DISPLAY_NAME_MAPPINGS = {}
35 |
36 | def split_camel_case(name):
37 | # Split on underscores first, then split each part on camelCase
38 | parts = []
39 | for part in name.split('_'):
40 | # Find all camelCase boundaries
41 | words = re.findall('[A-Z][^A-Z]*', part)
42 | if not words: # If no camelCase found, use the whole part
43 | words = [part]
44 | parts.extend(words)
45 | return parts
46 |
47 | # Import all the modules and append their mappings
48 | for file in files:
49 | module = importlib.import_module(file)
50 |
51 | # Check if the module has explicit mappings
52 | if hasattr(module, "NODE_CLASS_MAPPINGS"):
53 | NODE_CLASS_MAPPINGS.update(module.NODE_CLASS_MAPPINGS)
54 | if hasattr(module, "NODE_DISPLAY_NAME_MAPPINGS"):
55 | NODE_DISPLAY_NAME_MAPPINGS.update(module.NODE_DISPLAY_NAME_MAPPINGS)
56 |
57 | # Auto-discover classes with ComfyUI node attributes
58 | for name, obj in inspect.getmembers(module):
59 | # Check if it's a class and has the required ComfyUI node attributes
60 | if inspect.isclass(obj) and hasattr(obj, "INPUT_TYPES") and hasattr(obj, "RETURN_TYPES"):
61 | # Set or override class attributes here
62 | if not hasattr(obj, "CATEGORY"):
63 | setattr(obj, "CATEGORY", "llm_toolkit")
64 | if not hasattr(obj, "FUNCTION"):
65 | setattr(obj, "FUNCTION", "run")
66 | # Use the class name as the key if not already in mappings
67 | if name not in NODE_CLASS_MAPPINGS:
68 | NODE_CLASS_MAPPINGS[name] = obj
69 | # Create a display name by converting camelCase to Title Case with spaces
70 | words = split_camel_case(name)
71 | display_name = " ".join(word.capitalize() for word in words)
72 | # print(display_name, name)
73 | NODE_DISPLAY_NAME_MAPPINGS[name] = display_name
74 |
75 | # WEB_DIRECTORY points to the directory where your frontend files should be served from
76 | WEB_DIRECTORY = os.path.join(os.path.dirname(os.path.realpath(__file__)), "web", "js")
77 |
78 | __all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS", "WEB_DIRECTORY"]
79 |
--------------------------------------------------------------------------------
/api/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | API sub-package providing namespacing for all *_api helper modules.
3 |
4 | This file dynamically re-exports the original top-level modules under the
5 | `api` namespace so that both of the following patterns are valid:
6 |
7 | from openai_api import send_openai_request # legacy
8 | from api.openai_api import send_openai_request # new preferred form
9 |
10 | The re-export happens eagerly at package import time, but the helper modules are lightweight, so the overhead is minimal.
11 | """
12 |
13 | import importlib
14 | import sys
15 | from typing import List
16 |
17 | # ---------------------------------------------------------------------------
18 | # List every *_api module shipped with the toolkit.
19 | # Add new entries here whenever a new provider helper is introduced.
20 | # ---------------------------------------------------------------------------
21 | _api_modules: List[str] = [
22 |     "anthropic_api",
23 |     "bfl_api",
24 |     "deepseek_api",
25 |     "gemini_api",
26 |     "gemini_image_api",
27 |     "groq_api",
28 |     "ollama_api",
29 |     "openai_api",
30 |     "openrouter_api",
31 |     "suno_api",
32 |     "transformers_api",
33 |     "wavespeed_image_api",
34 | ]
31 |
32 | _pkg_name = __name__
33 | for _mod_name in _api_modules:
34 | # Prefer relative import inside this package (i.e. api.)
35 | try:
36 | _module = importlib.import_module(f".{_mod_name}", package=_pkg_name)
37 | except ModuleNotFoundError:
38 | # Fall back to legacy top-level module if it still exists
39 | _module = importlib.import_module(_mod_name)
40 |
41 | # Expose under both the new and old namespaces
42 | sys.modules[f"{_pkg_name}.{_mod_name}"] = _module # api.openai_api, etc.
43 | sys.modules[_mod_name] = _module # openai_api, etc. (back-compat)
44 | setattr(sys.modules[_pkg_name], _mod_name, _module)
45 |
46 | # Optional: make the modules discoverable via `api.__all__`
47 | __all__ = _api_modules.copy()
48 |
49 | # Cleanup internals to avoid leaking symbols
50 | for _tmp in ("importlib", "sys", "List", "_pkg_name", "_api_modules", "_mod_name", "_module"):
51 | globals().pop(_tmp, None)
--------------------------------------------------------------------------------
/api/anthropic_api.py:
--------------------------------------------------------------------------------
1 | # api/anthropic_api.py
2 | import aiohttp
3 | import json
4 | import logging
5 | from typing import List, Optional, Dict, Any
6 |
7 | logger = logging.getLogger(__name__)
8 |
9 | async def send_anthropic_request(
10 | api_url: Optional[str],
11 | model: str,
12 | system_message: str,
13 | user_message: str,
14 | messages: List[Dict[str, Any]],
15 | api_key: Optional[str],
16 | temperature: float = 0.7,
17 | max_tokens: int = 1024,
18 | top_p: float = 0.9,
19 | top_k: int = 40,
20 | stop_sequences: Optional[List[str]] = None,
21 | **kwargs, # absorb extra arguments
22 | ) -> Dict[str, Any]:
23 | if not api_key:
24 | return {"choices": [{"message": {"content": "Error: API key is required for Anthropic requests"}}]}
25 |
26 | endpoint = api_url or "https://api.anthropic.com/v1/messages"
27 | headers = {
28 | "x-api-key": api_key,
29 | "anthropic-version": "2023-06-01",
30 | "content-type": "application/json",
31 | }
32 |
33 | # Construct messages list for Anthropic
34 | anthropic_messages = []
35 | if messages:
36 | for msg in messages:
37 | # Skip system messages if we are passing it as a top-level parameter
38 | if msg.get("role") != "system":
39 | anthropic_messages.append(msg)
40 |
41 | # Add current user message
42 | if user_message:
43 | anthropic_messages.append({"role": "user", "content": user_message})
44 |
45 | body = {
46 | "model": model,
47 | "max_tokens": max_tokens,
48 | "messages": anthropic_messages,
49 | "temperature": temperature,
50 | "top_p": top_p,
51 | "top_k": top_k,
52 | }
53 | if system_message:
54 | body["system"] = system_message
55 | if stop_sequences:
56 | body["stop_sequences"] = stop_sequences
57 |
58 | try:
59 | async with aiohttp.ClientSession() as session:
60 | async with session.post(endpoint, headers=headers, json=body) as response:
61 | if response.status != 200:
62 | err_txt = await response.text()
63 | logger.error(f"Anthropic API error: {response.status} - {err_txt}")
64 | return {"choices": [{"message": {"content": f"Anthropic API error: {response.status}. {err_txt}"}}]}
65 |
66 | data = await response.json()
67 |
68 | # Convert response to OpenAI-compatible format
69 | text_content = ""
70 | if "content" in data and isinstance(data["content"], list):
71 | for block in data["content"]:
72 | if block.get("type") == "text":
73 | text_content += block.get("text", "")
74 |
75 | return {"choices": [{"message": {"content": text_content}}]}
76 |
77 | except Exception as e:
78 | logger.error(f"Exception during Anthropic API call: {e}", exc_info=True)
79 | return {"choices": [{"message": {"content": f"Exception during Anthropic API call: {str(e)}"}}]}
80 |
--------------------------------------------------------------------------------
/api/deepseek_api.py:
--------------------------------------------------------------------------------
1 | # api/deepseek_api.py
2 |
3 | import logging
4 | from typing import List, Dict, Any, Optional
5 | from .openai_api import send_openai_request # Re-use the openai compatible request sender
6 |
7 | logger = logging.getLogger(__name__)
8 |
9 | async def send_deepseek_request(
10 | api_url: Optional[str],
11 | base64_images: Optional[List[str]],
12 | model: str,
13 | system_message: str,
14 | user_message: str,
15 | messages: List[Dict[str, Any]],
16 | api_key: Optional[str],
17 | seed: Optional[int] = None,
18 | temperature: float = 0.7,
19 | max_tokens: int = 1024,
20 | top_p: float = 0.9,
21 | repeat_penalty: float = 1.1,
22 | tools: Optional[Any] = None,
23 | tool_choice: Optional[Any] = None,
24 | **kwargs, # absorb extra arguments
25 | ) -> Dict[str, Any]:
26 | """
27 | Sends a request to the DeepSeek API using the OpenAI-compatible endpoint.
28 | """
29 | deepseek_api_url = api_url or "https://api.deepseek.com/v1/chat/completions"
30 |
31 | logger.info(f"Sending request to DeepSeek API: model={model}")
32 |
33 | # DeepSeek API is OpenAI-compatible, so we can reuse the same request function.
34 | return await send_openai_request(
35 | api_url=deepseek_api_url,
36 | base64_images=base64_images,
37 | model=model,
38 | system_message=system_message,
39 | user_message=user_message,
40 | messages=messages,
41 | api_key=api_key,
42 | seed=seed,
43 | temperature=temperature,
44 | max_tokens=max_tokens,
45 | top_p=top_p,
46 | repeat_penalty=repeat_penalty,
47 | tools=tools,
48 | tool_choice=tool_choice,
49 | )
50 |
--------------------------------------------------------------------------------
/api/groq_api.py:
--------------------------------------------------------------------------------
1 | import aiohttp
2 | import json
3 | import logging
4 | from typing import List, Dict, Any, Optional, Union
5 |
6 | logger = logging.getLogger(__name__)
7 |
8 | # Re-use the OpenAI utilities for message preparation so we stay DRY
9 | try:
10 | from api.openai_api import prepare_openai_messages
11 | except ImportError: # Fallback stub so import errors do not crash plugin load
12 | def prepare_openai_messages(*args, **kwargs):
13 | raise RuntimeError("prepare_openai_messages missing – openai_api not available")
14 |
15 |
16 | async def send_groq_request(
17 | *,
18 | base64_images: Optional[List[str]] = None,
19 | model: str,
20 | system_message: str,
21 | user_message: str,
22 | messages: List[Dict[str, Any]],
23 | api_key: str,
24 | seed: Optional[int] = None,
25 | temperature: float = 0.7,
26 | max_tokens: int = 2048,
27 | top_p: float = 0.9,
28 | reasoning_format: Optional[str] = None,
29 | tools: Optional[Any] = None,
30 | tool_choice: Optional[Any] = None,
31 | **kwargs,
32 | ) -> Union[str, Dict[str, Any]]:
33 | """Send a chat completion request to Groq's OpenAI-compatible endpoint.
34 |
35 | Parameters largely mirror the OpenAI helper. `reasoning_format` is a Groq-
36 | specific extra that controls how content is returned for supported
37 | reasoning models ("raw", "hidden", "parsed").
38 | """
39 | api_url = "https://api.groq.com/openai/v1/chat/completions"
40 |
41 | # Validate essentials
42 | if not api_key:
43 | logger.error("Groq API key missing – cannot send request")
44 | return {"choices": [{"message": {"content": "Error: Missing GROQ_API_KEY"}}]}
45 | if not model:
46 | logger.error("Groq request missing model param")
47 | return {"choices": [{"message": {"content": "Error: Missing model"}}]}
48 |
49 | headers = {
50 | "Authorization": f"Bearer {api_key}",
51 | "Content-Type": "application/json",
52 | }
53 |
54 | # --- Vision model checks ---
55 | # Only certain Groq models support vision. If images are provided for a
56 | # non-vision model, we must strip them to avoid an API error.
57 | is_vision_model = "scout" in model or "maverick" in model
58 | images_to_send = base64_images
59 |
60 | if images_to_send and not is_vision_model:
61 | logger.warning("Model '%s' may not support images. Sending request without them.", model)
62 | images_to_send = None
63 | elif images_to_send and len(images_to_send) > 5:
64 | logger.warning("Groq supports a max of 5 images, but %s were provided. Taking the first 5.", len(images_to_send))
65 | images_to_send = images_to_send[:5]
66 |
67 | groq_messages = prepare_openai_messages(
68 | images_to_send,
69 | system_message,
70 | user_message,
71 | messages,
72 | )
73 |
74 | payload: Dict[str, Any] = {
75 | "model": model,
76 | "messages": groq_messages,
77 | "temperature": temperature,
78 | "max_completion_tokens": max_tokens, # Correct key for Groq API
79 | "top_p": top_p,
80 | }
81 | # Optional params
82 | if seed is not None:
83 | payload["seed"] = seed
84 | if reasoning_format is not None:
85 | payload["reasoning_format"] = reasoning_format
86 | if tools is not None:
87 | payload["tools"] = tools
88 | if tool_choice is not None:
89 | payload["tool_choice"] = tool_choice
90 |
91 | # Debug – mask key
92 | logger.debug(
93 | "Sending Groq request: %s", {k: (v if k != "messages" else f"[{len(v)} messages]") for k, v in payload.items()}
94 | )
95 |
96 | try:
97 | async with aiohttp.ClientSession() as session:
98 | async with session.post(api_url, headers=headers, json=payload) as resp:
99 | if resp.status != 200:
100 | err_text = await resp.text()
101 | logger.error("Groq API error %s: %s", resp.status, err_text[:200])
102 | return {"choices": [{"message": {"content": f"Groq API error {resp.status}: {err_text}"}}]}
103 | data = await resp.json()
104 | return data
105 | except Exception as exc:
106 | logger.error("Exception when calling Groq API: %s", exc, exc_info=True)
107 | return {"choices": [{"message": {"content": str(exc)}}]}
--------------------------------------------------------------------------------
/api/openrouter_api.py:
--------------------------------------------------------------------------------
1 | # api/openrouter_api.py
2 | import aiohttp
3 | import logging
4 | import json
5 | from typing import Optional, Dict, Any, List
6 |
7 | logger = logging.getLogger(__name__)
8 |
9 | async def send_openrouter_image_generation_request(
10 | api_key: str,
11 | model: str,
12 | prompt: str,
13 | n: int = 1,
14 | timeout: int = 120,
15 | input_image_base64: Optional[List[str]] = None,
16 | **kwargs
17 | ) -> Dict[str, Any]:
18 | """
19 | Sends a request to OpenRouter's chat completions endpoint for image generation,
20 | optionally including input images for context.
21 | """
22 | api_url = "https://openrouter.ai/api/v1/chat/completions"
23 | headers = {
24 | "Authorization": f"Bearer {api_key}",
25 | "Content-Type": "application/json",
26 | }
27 |
28 | # Build the message content
29 | content_parts = []
30 | if prompt:
31 | content_parts.append({"type": "text", "text": prompt})
32 |
33 | if input_image_base64:
34 | for b64_image in input_image_base64:
35 | content_parts.append({
36 | "type": "image_url",
37 | "image_url": {
38 | "url": f"data:image/jpeg;base64,{b64_image}"
39 | }
40 | })
41 |
42 | messages = [{"role": "user", "content": content_parts}]
43 |
44 | payload = {
45 | "model": model,
46 | "messages": messages,
47 | "modalities": ["image", "text"],
48 | }
49 |
50 | # Add 'n' if it's greater than 1, as some models may support it.
51 | if n > 1:
52 | payload['n'] = n
53 |
54 |     # Add only `seed` from kwargs, as it is a documented advanced parameter.
55 |     # 'size' is not standard for chat completions and would likely cause 400 errors.
56 | if 'seed' in kwargs and kwargs.get('seed') != -1 and kwargs.get('seed') is not None:
57 | payload['seed'] = kwargs['seed']
58 |
59 | # Log a sanitized version of the payload for debugging
60 | log_payload = payload.copy()
61 | if log_payload.get("messages") and log_payload["messages"][0].get("content"):
62 | import copy
63 | log_payload = copy.deepcopy(payload)
64 | for part in log_payload["messages"][0]["content"]:
65 | if part.get("type") == "image_url":
66 | part["image_url"]["url"] = "[base64 data omitted]"
67 | logger.info(f"Sending image generation request to OpenRouter with payload: {json.dumps(log_payload, indent=2)}")
68 |
69 | try:
70 | async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=timeout)) as session:
71 | async with session.post(api_url, headers=headers, json=payload) as response:
72 |                 if response.status != 200:
73 |                     error_body = await response.text()
74 |                     logger.error(f"HTTP error from OpenRouter API: {response.status}. Body: {error_body}")
75 |                     return {"error": f"HTTP error: {response.status}. Body: {error_body}", "data": []}
76 |                 response_json = await response.json()
73 | response_json = await response.json()
74 |
75 | if response_json.get("choices"):
76 | image_data = []
77 | for choice in response_json["choices"]:
78 | message = choice.get("message", {})
79 | if "images" in message and message["images"]:
80 | for img in message["images"]:
81 | base64_string = img.get("image_url", {}).get("url", "")
82 | if "base64," in base64_string:
83 | base64_string = base64_string.split("base64,")[1]
84 | image_data.append({"b64_json": base64_string})
85 |
86 | if image_data:
87 | return {"data": image_data}
88 |
89 | logger.error(f"OpenRouter API response did not contain expected image data. Response: {response_json}")
90 | return {"error": "No image data in response", "data": []}
91 |
106 | except Exception as e:
107 | logger.error(f"Error during OpenRouter API call: {e}", exc_info=True)
108 | return {"error": str(e), "data": []}
109 |
110 | async def send_openrouter_request(
111 | api_url: str,
112 | model: str,
113 | messages: List[Dict[str, Any]],
114 | api_key: str,
115 | **kwargs,
116 | ) -> Dict[str, Any]:
117 | """Generic OpenRouter chat completions handler."""
118 | headers = {
119 | "Authorization": f"Bearer {api_key}",
120 | "Content-Type": "application/json",
121 | }
122 | payload = {"model": model, "messages": messages, **kwargs}
123 | payload = {k: v for k, v in payload.items() if v is not None}
124 |
125 | # Log sanitized payload
126 | log_payload = payload.copy()
127 | logger.info(f"Sending request to OpenRouter with payload: {json.dumps(log_payload, indent=2)}")
128 |
129 | try:
130 | async with aiohttp.ClientSession() as session:
131 | async with session.post(
132 | api_url, headers=headers, json=payload
133 | ) as response:
134 |                 if response.status != 200:
135 |                     error_body = await response.text()
136 |                     logger.error(f"HTTP error from OpenRouter API: {response.status}. Body: {error_body}")
137 |                     return {"error": f"HTTP error: {response.status}. Body: {error_body}"}
138 |                 return await response.json()
141 | except Exception as e:
142 | logger.error(f"Error during OpenRouter API call: {e}", exc_info=True)
143 | return {"error": str(e)}
144 |
--------------------------------------------------------------------------------
/comfy-nodes/api_key_input.py:
--------------------------------------------------------------------------------
1 | # api_key_input.py
2 | import os
3 | import sys
4 | import logging
5 | from typing import Any, Dict, Optional, Tuple
6 |
7 | # Ensure parent directory is in path
8 | current_dir = os.path.dirname(os.path.abspath(__file__))
9 | parent_dir = os.path.dirname(current_dir)
10 | if parent_dir not in sys.path:
11 | sys.path.insert(0, parent_dir)
12 |
13 | from context_payload import extract_context
14 |
15 | logger = logging.getLogger(__name__)
16 |
17 | class APIKeyInput:
18 | """
19 | A node to input API keys for various providers through context.
20 | This allows dynamic API key management without hardcoding keys in provider nodes.
21 | The API key is passed through context and can override provider-specific keys.
22 | """
23 |
24 | SUPPORTED_PROVIDERS = [
25 | "openai", "gemini", "google", "anthropic", "groq", "huggingface",
26 | "bfl", "wavespeed", "ollama", "lmstudio", "textgen", "kobold",
27 | "llamacpp", "vllm", "transformers", "custom"
28 | ]
29 |
30 | @classmethod
31 | def INPUT_TYPES(cls):
32 | return {
33 | "required": {
34 | "provider": (cls.SUPPORTED_PROVIDERS, {
35 | "default": "gemini",
36 | "tooltip": "Select the provider this API key is for"
37 | }),
38 | "api_key": ("STRING", {
39 | "multiline": False,
40 | "default": "",
41 | "tooltip": "Enter the API key for the selected provider"
42 | }),
43 | },
44 | "optional": {
45 | "context": ("*", {"tooltip": "Optional context to merge with"}),
46 | "override_existing": ("BOOLEAN", {
47 | "default": False,
48 | "tooltip": "Whether to override existing API keys in context/provider config"
49 | }),
50 | }
51 | }
52 |
53 | RETURN_TYPES = ("*",)
54 | RETURN_NAMES = ("context",)
55 | FUNCTION = "set_api_key"
56 | CATEGORY = "🔗llm_toolkit/config"
57 |
58 | def set_api_key(
59 | self,
60 | provider: str,
61 | api_key: str,
62 | context: Optional[Dict[str, Any]] = None,
63 | override_existing: bool = False
64 | ) -> Tuple[Dict[str, Any]]:
65 | """
66 | Sets an API key for a specific provider in the context.
67 |
68 | Args:
69 | provider: The provider name (e.g., 'gemini', 'openai')
70 | api_key: The API key string
71 | context: Existing context to merge with
72 | override_existing: Whether to override existing API keys
73 |
74 | Returns:
75 | Updated context with API key information
76 | """
77 | logger.info(f"APIKeyInput: Setting API key for provider '{provider}'")
78 |
79 | # Validate inputs
80 | if not api_key or not api_key.strip():
81 | logger.warning(f"APIKeyInput: Empty API key provided for provider '{provider}'")
82 |
83 | api_key = api_key.strip()
84 |
85 | # Initialize or copy context
86 | if context is None:
87 | output_context = {}
88 | elif isinstance(context, dict):
89 | output_context = context.copy()
90 | else:
91 | # Handle ContextPayload or other formats
92 | unwrapped = extract_context(context)
93 | if isinstance(unwrapped, dict):
94 | output_context = unwrapped.copy()
95 | output_context.setdefault("passthrough_data", context)
96 | else:
97 | output_context = {"passthrough_data": context}
98 |
99 | # Initialize api_keys section in context
100 | if "api_keys" not in output_context:
101 | output_context["api_keys"] = {}
102 |
103 | # Check if we should set the key
104 | existing_key = output_context["api_keys"].get(provider)
105 |
106 | if existing_key and not override_existing:
107 | # Secure logging: only show first 5 characters
108 | masked_existing = existing_key[:5] + "..." if len(existing_key) > 5 else "..."
109 | masked_new = api_key[:5] + "..." if len(api_key) > 5 else "..."
110 | logger.info(f"APIKeyInput: Provider '{provider}' already has API key ({masked_existing}), keeping existing (override=False)")
111 | else:
112 | # Set the new key
113 | output_context["api_keys"][provider] = api_key
114 |
115 | # Secure logging: only show first 5 characters
116 | masked_key = api_key[:5] + "..." if len(api_key) > 5 else "..."
117 | action = "overriding" if existing_key else "setting"
118 | logger.info(f"APIKeyInput: {action} API key for provider '{provider}' ({masked_key})")
119 |
120 | # For backward compatibility, also update provider_config if it exists and matches
121 | provider_config = output_context.get("provider_config")
122 | if provider_config and isinstance(provider_config, dict):
123 | config_provider = provider_config.get("provider_name", "").lower()
124 |
125 | # Check if this API key is for the current provider config
126 | if config_provider == provider.lower() or (provider == "google" and config_provider == "gemini"):
127 | if override_existing or not provider_config.get("api_key") or provider_config.get("api_key") == "1234":
128 | provider_config["api_key"] = api_key
129 | logger.info(f"APIKeyInput: Updated provider_config API key for '{config_provider}'")
130 |
131 | logger.info(f"APIKeyInput: Context now contains API keys for: {list(output_context['api_keys'].keys())}")
132 |
133 | return (output_context,)
134 |
135 |
136 | # --- Node Mappings ---
137 | NODE_CLASS_MAPPINGS = {
138 | "APIKeyInput": APIKeyInput
139 | }
140 | NODE_DISPLAY_NAME_MAPPINGS = {
141 | "APIKeyInput": "API Key Input (🔗LLMToolkit)"
142 | }
--------------------------------------------------------------------------------
/comfy-nodes/audio_duration_frames.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import math
3 | from typing import Tuple, List, Union
4 |
5 | # Initialize logger
6 | logger = logging.getLogger(__name__)
7 |
8 | class AudioDurationFrames:
9 | """ComfyUI node that reads an AUDIO dict (from LoadAudio or other audio-producing node),
10 | calculates its duration in milliseconds (named *a*), then applies the equation
11 | frames = a / 1000 * b
12 | where *b* is the chosen frame-rate (fps).
13 |
14 | The node outputs two integers:
15 | 1. duration_ms – the audio length in milliseconds
16 | 2. frames – the computed frame count according to the selected fps
17 | """
18 |
19 | # ---------------------------------------------------------------------
20 | # ComfyUI required class attributes
21 | # ---------------------------------------------------------------------
22 | @classmethod
23 | def INPUT_TYPES(cls):
24 | """Define node inputs.
25 | `fps` is an integer slider ranging from 8 to 30 frames-per-second.
26 | """
27 | return {
28 | "required": {
29 | "audio": ("AUDIO", {}),
30 | # fps slider: integer between 8 and 30
31 | "fps": ("INT", {"default": 25, "min": 8, "max": 30, "step": 1, "tooltip": "Frames-per-second to use in the a/1000*b equation."}),
32 | "num_frames": ("INT", {"default": 1, "min": 1, "max": 10000, "tooltip": "Value used to divide total frames (context = frames / num_frames)."}),
33 | }
34 | }
35 |
36 | RETURN_TYPES = ("INT", "INT", "INT", "INT", "STRING")
37 | RETURN_NAMES = (
38 | "duration_ms",
39 | "frames",
40 | "context_times",
41 | "maximum_length_allowed",
42 | "end_time",
43 | )
44 | FUNCTION = "calculate"
45 | CATEGORY = "🔗llm_toolkit/utils/audio"
46 |
47 | # ---------------------------------------------------------------------
48 | # Core logic
49 | # ---------------------------------------------------------------------
50 | def calculate(self, audio: dict, fps: Union[str, int], num_frames: int) -> Tuple[int, int, int, int, str]:
51 | """Compute duration in milliseconds and resulting frame count.
52 |
53 | Args:
54 | audio: Dict produced by LoadAudio (expects keys `waveform` and `sample_rate`).
55 | fps: Selected frame-rate – integer between 8 and 30 (frames-per-second).
56 | num_frames: Value used to divide total frames (context = frames / num_frames).
57 |
58 | Returns:
59 | Tuple(duration_ms, frames, context_times, maximum_length_allowed, end_time) all as ints and str.
60 | """
61 | try:
62 | waveform = audio.get("waveform")
63 | sample_rate = audio.get("sample_rate")
64 | if waveform is None or sample_rate is None:
65 | raise ValueError("Audio dictionary must contain 'waveform' and 'sample_rate'.")
66 |
67 |             # Expected shape: (batch, channels, samples) or (channels, samples).
68 |             # Either way the sample count is the size of the last axis, so the
69 |             # duration is the same for every item in the batch.
70 |             if waveform.ndim in (2, 3):
71 |                 num_samples = waveform.shape[-1]
72 |             else:
73 |                 raise ValueError(f"Unexpected waveform dimensions: {waveform.shape}")
75 |
76 | duration_ms = int(round(num_samples / sample_rate * 1000))
77 |
78 | fps_int = int(fps) if isinstance(fps, str) else fps
79 | frames = int(round(duration_ms / 1000 * fps_int))
80 |
81 | # Compute how many times num_frames fits into frames (floor division)
82 | if num_frames <= 0:
83 | raise ValueError("num_frames must be a positive integer")
84 | context_times = frames // num_frames
85 | maximum_length_allowed = context_times * num_frames
86 |
87 | # Compute end_time string in M:SS format based on maximum_length_allowed frames
88 | seconds_total = maximum_length_allowed / fps_int if fps_int else 0
89 | seconds_int = math.ceil(seconds_total)
90 | minutes = seconds_int // 60
91 | seconds_rem = seconds_int % 60
92 | end_time_str = f"{minutes}:{seconds_rem:02d}"
93 |
94 | logger.debug(
95 | "AudioDurationFrames: num_samples=%s, sample_rate=%s, duration_ms=%s, fps=%s, frames=%s",
96 | num_samples, sample_rate, duration_ms, fps_int, frames,
97 | )
98 |
99 | return (
100 | duration_ms,
101 | frames,
102 | context_times,
103 | maximum_length_allowed,
104 | end_time_str,
105 | )
106 | except Exception as e:
107 | logger.error("AudioDurationFrames: error during calculation: %s", e, exc_info=True)
108 | # Return zeros on failure to avoid breaking the workflow
109 | return (0, 0, 0, 0, "0:00")
110 |
111 |
112 | # -------------------------------------------------------------------------
113 | # ComfyUI mappings so the node is discoverable
114 | # -------------------------------------------------------------------------
115 | NODE_CLASS_MAPPINGS = {
116 | "AudioDurationFrames": AudioDurationFrames,
117 | }
118 |
119 | NODE_DISPLAY_NAME_MAPPINGS = {
120 | "AudioDurationFrames": "Audio Duration → Frames (🔗LLMToolkit)",
121 | }
--------------------------------------------------------------------------------
/comfy-nodes/blank_image.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from typing import Tuple
3 |
4 | logger = logging.getLogger(__name__)
5 |
6 | class BlankImage:
7 | """ComfyUI node that creates a solid-color image tensor of the desired size.
8 |
9 | The color is chosen via a color-picker UI (hex string) and size via width/height ints.
10 |     Output format: torch.Tensor shaped (1, H, W, 3) with values in [0,1], matching ComfyUI's IMAGE layout.
11 | """
12 |
13 | @classmethod
14 | def INPUT_TYPES(cls):
15 | return {
16 | "required": {
17 | "width": ("INT", {"default": 512, "min": 1, "max": 4096, "step": 1, "tooltip": "Image width in pixels"}),
18 | "height": ("INT", {"default": 512, "min": 1, "max": 4096, "step": 1, "tooltip": "Image height in pixels"}),
19 | "color": ("COLOR", {"default": "#000000", "tooltip": "Solid fill color"}),
20 | },
21 | }
22 |
23 | RETURN_TYPES = ("IMAGE",)
24 | RETURN_NAMES = ("image",)
25 | FUNCTION = "create"
26 | CATEGORY = "🔗llm_toolkit/utils/image"
27 |
28 | # ------------------------------------------------------------------
29 | # Core logic
30 | # ------------------------------------------------------------------
31 | def _hex_to_rgb(self, hex_color: str) -> Tuple[float, float, float]:
32 | """Convert hex string (6- or 8-digit) to 0-1 float RGB tuple."""
33 | hex_color = hex_color.lstrip("#")
34 | if len(hex_color) not in {6, 8}:
35 | raise ValueError("Invalid hex color string")
36 | r = int(hex_color[0:2], 16)
37 | g = int(hex_color[2:4], 16)
38 | b = int(hex_color[4:6], 16)
39 | return r / 255.0, g / 255.0, b / 255.0
40 |
41 | def create(self, width: int, height: int, color: str):
42 | import torch
43 | import numpy as np
44 |
45 | try:
46 | r, g, b = self._hex_to_rgb(color)
47 | arr = np.stack([
48 | np.full((height, width), r, dtype=np.float32),
49 | np.full((height, width), g, dtype=np.float32),
50 | np.full((height, width), b, dtype=np.float32),
51 |             ], axis=-1)  # shape (H, W, 3) - channels-last, as ComfyUI expects
52 |             img_tensor = torch.from_numpy(arr).unsqueeze(0)  # (1, H, W, 3)
53 | return (img_tensor,)
54 | except Exception as e:
55 | logger.error("BlankImage: Failed to create image – %s", e, exc_info=True)
56 |             return (torch.zeros((1, height, width, 3), dtype=torch.float32),)
57 |
58 | # --------------------------------------------------------------------
59 | # Mappings for ComfyUI
60 | # --------------------------------------------------------------------
61 | NODE_CLASS_MAPPINGS = {
62 | "BlankImage": BlankImage,
63 | }
64 |
65 | NODE_DISPLAY_NAME_MAPPINGS = {
66 | "BlankImage": "Blank Image (🔗LLMToolkit)",
67 | }
--------------------------------------------------------------------------------
/comfy-nodes/check_image_empty.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import logging
3 | from typing import Optional, Any, Tuple
4 |
5 | # Initialize logger
6 | logger = logging.getLogger(__name__)
7 |
8 | class CheckImageEmpty:
9 | """
10 | Checks if an incoming image is null or empty and returns a boolean.
11 | Returns True if the image is null, empty, or invalid, False if it contains valid image data.
12 | """
13 | def __init__(self):
14 | self.type = "utility"
15 |
16 | @classmethod
17 | def INPUT_TYPES(cls):
18 | return {
19 | "required": {},
20 | "optional": {
21 | "image": ("IMAGE", {"tooltip": "Image to check for null/empty state"}),
22 | },
23 | "hidden": {},
24 | }
25 |
26 | RETURN_TYPES = ("BOOLEAN", "STRING")
27 | RETURN_NAMES = ("is_empty", "status_message")
28 | FUNCTION = "check_image_empty"
29 | OUTPUT_NODE = False # This is a utility node, not an output node
30 | CATEGORY = "🔗llm_toolkit/utils/logic"
31 |
32 | def check_image_empty(self, image: Optional[Any] = None) -> Tuple[bool, str]:
33 | """
34 | Checks if the provided image is null or empty.
35 |
36 | Args:
37 | image: Optional image tensor to check
38 |
39 | Returns:
40 | Tuple containing:
41 | - is_empty: Boolean indicating if image is null/empty
42 | - status_message: Human-readable status description
43 | """
44 | logger.info("CheckImageEmpty node executing...")
45 |
46 | # Initialize return values
47 | is_empty = True
48 | status_message = "Image is empty or null"
49 |
50 | import torch
51 |
52 | try:
53 | # Check if image is None
54 | if image is None:
55 | is_empty = True
56 | status_message = "Image is None (null)"
57 | logger.info("Image check result: None (null)")
58 |
59 | # Check if image is a valid tensor
60 | elif not isinstance(image, torch.Tensor):
61 | is_empty = True
62 | status_message = f"Image is not a valid tensor (type: {type(image).__name__})"
63 | logger.info(f"Image check result: Invalid type {type(image).__name__}")
64 |
65 | # Check if tensor is empty or has no data
66 | elif image.numel() == 0:
67 | is_empty = True
68 | status_message = "Image tensor is empty (no elements)"
69 | logger.info("Image check result: Empty tensor")
70 |
71 | # Check if tensor has invalid dimensions for an image
72 | elif len(image.shape) < 3:
73 | is_empty = True
74 | status_message = f"Image tensor has insufficient dimensions: {image.shape}"
75 | logger.info(f"Image check result: Invalid dimensions {image.shape}")
76 |
77 | # Check if any dimension is zero
78 | elif any(dim == 0 for dim in image.shape):
79 | is_empty = True
80 | status_message = f"Image tensor has zero-sized dimension: {image.shape}"
81 | logger.info(f"Image check result: Zero dimension in {image.shape}")
82 |
83 | # Image appears to be valid
84 | else:
85 | is_empty = False
86 | status_message = f"Image is valid with shape: {image.shape}"
87 | logger.info(f"Image check result: Valid image with shape {image.shape}")
88 |
89 | except Exception as e:
90 | # If any error occurs during checking, consider image as empty/invalid
91 | is_empty = True
92 | status_message = f"Error checking image: {str(e)}"
93 | logger.error(f"Error in CheckImageEmpty: {e}", exc_info=True)
94 |
95 | # Log the final result
96 | logger.info(f"CheckImageEmpty completed: is_empty={is_empty}, message='{status_message}'")
97 |
98 | return (is_empty, status_message)
99 |
100 | # --- Node Mappings for ComfyUI ---
101 | NODE_CLASS_MAPPINGS = {
102 | "CheckImageEmpty": CheckImageEmpty
103 | }
104 |
105 | NODE_DISPLAY_NAME_MAPPINGS = {
106 | "CheckImageEmpty": "Check Image Empty (🔗LLMToolkit)"
107 | }
--------------------------------------------------------------------------------
/comfy-nodes/config_generate_image_bfl.py:
--------------------------------------------------------------------------------
1 | # config_generate_image_bfl.py
2 | import os
3 | import sys
4 | import logging
5 | from typing import Any, Dict, Optional, Tuple
6 |
7 | from context_payload import extract_context
8 |
9 | # Ensure parent directory is in path if running standalone for testing
10 | current_dir = os.path.dirname(os.path.abspath(__file__))
11 | parent_dir = os.path.dirname(current_dir)
12 | if parent_dir not in sys.path:
13 | sys.path.insert(0, parent_dir)
14 |
15 | logger = logging.getLogger(__name__)
16 |
17 | class ConfigGenerateImageBFL:
18 | """
19 | Configures parameters specifically for BFL Flux Kontext Max image generation.
20 | """
21 | # BFL specific options
22 | ASPECT_RATIO_OPTIONS = ["1:1", "3:4", "4:3", "9:16", "16:9", "21:9", "9:21"]
23 | OUTPUT_FORMAT_OPTIONS = ["png", "jpeg"]
24 |
25 | @classmethod
26 | def INPUT_TYPES(cls):
27 | return {
28 | "required": {},
29 | "optional": {
30 | "context": ("*", {}),
31 | # BFL parameters
32 | "aspect_ratio": (cls.ASPECT_RATIO_OPTIONS, {"default": "1:1", "tooltip": "Aspect ratio between 21:9 and 9:21."}),
33 | "prompt_upsampling": ("BOOLEAN", {"default": False, "tooltip": "Automatically enhance prompt for more creative generation."}),
34 | "safety_tolerance": ("INT", {"default": 2, "min": 0, "max": 6, "step": 1, "tooltip": "Safety tolerance (0=strict, 6=relaxed). Limit of 2 for image editing."}),
35 | "output_format": (cls.OUTPUT_FORMAT_OPTIONS, {"default": "png", "tooltip": "Output image format."}),
36 | "seed": ("INT", {"default": -1, "min": -1, "max": 0xffffffffffffffff, "tooltip": "Seed for reproducible generation (-1 for random)."}),
37 | }
38 | }
39 |
40 | RETURN_TYPES = ("*",)
41 | RETURN_NAMES = ("context",)
42 | FUNCTION = "configure"
43 | CATEGORY = "🔗llm_toolkit/config/image/bfl"
44 |
45 | def configure(self, context: Optional[Dict[str, Any]] = None, **kwargs) -> Tuple[Dict[str, Any]]:
46 | """
47 | Adds BFL-specific image generation parameters to the context.
48 | """
49 | logger.info("ConfigGenerateImageBFL executing...")
50 |
51 | # Initialize or copy the context
52 | if context is None:
53 | output_context = {}
54 | elif isinstance(context, dict):
55 | output_context = context.copy()
56 | else:
57 | # Try to unwrap ContextPayload
58 | unwrapped = extract_context(context)
59 | if isinstance(unwrapped, dict):
60 | output_context = unwrapped.copy()
61 | output_context.setdefault("passthrough_data", context)
62 | else:
63 | output_context = {"passthrough_data": context}
64 |
65 | # Initialize generation_config dictionary
66 | generation_config = output_context.get("generation_config", {})
67 | if not isinstance(generation_config, dict):
68 | generation_config = {}
69 |
70 | # Add BFL parameters - context values take precedence
71 | generation_config['aspect_ratio'] = generation_config.get('aspect_ratio', kwargs.get('aspect_ratio', '1:1'))
72 | generation_config['prompt_upsampling'] = generation_config.get('prompt_upsampling', kwargs.get('prompt_upsampling', False))
73 | generation_config['safety_tolerance'] = generation_config.get('safety_tolerance', kwargs.get('safety_tolerance', 2))
74 | generation_config['output_format_bfl'] = generation_config.get('output_format_bfl', kwargs.get('output_format', 'png'))
75 |
76 | # Seed handling
77 | seed_val = generation_config.get('seed', kwargs.get('seed', -1))
78 | if seed_val != -1:
79 | generation_config['seed'] = seed_val
80 |
81 | # BFL always generates 1 image at a time
82 | generation_config['n'] = 1
83 |
84 | # Add the config to the main context
85 | output_context["generation_config"] = generation_config
86 | logger.info(f"ConfigGenerateImageBFL: Updated context with generation_config")
87 |
88 | return (output_context,)
89 |
90 | # --- Node Mappings ---
91 | NODE_CLASS_MAPPINGS = {
92 | "ConfigGenerateImageBFL": ConfigGenerateImageBFL
93 | }
94 | NODE_DISPLAY_NAME_MAPPINGS = {
95 | "ConfigGenerateImageBFL": "Configure Image Generation - BFL (🔗LLMToolkit)"
96 | }
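
A minimal usage sketch of the precedence rule implemented above - keys already present in the incoming context win over widget values, while the seed sentinel and the forced n=1 behave as coded (assumes the class is importable alongside this file):

    node = ConfigGenerateImageBFL()
    upstream = {"generation_config": {"aspect_ratio": "16:9"}}
    (ctx,) = node.configure(context=upstream, aspect_ratio="1:1", seed=42)

    cfg = ctx["generation_config"]
    print(cfg["aspect_ratio"])  # 16:9 - the upstream value is kept
    print(cfg["seed"])          # 42   - stored because it is not the -1 sentinel
    print(cfg["n"])             # 1    - BFL always generates one image per call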
--------------------------------------------------------------------------------
/comfy-nodes/config_generate_image_flux_dev.py:
--------------------------------------------------------------------------------
1 | # comfy-nodes/config_generate_image_flux_dev.py
2 | import os
3 | import sys
4 | import logging
5 | from typing import Any, Dict, Optional, Tuple
6 |
7 | current_dir = os.path.dirname(os.path.abspath(__file__))
8 | parent_dir = os.path.dirname(current_dir)
9 | if parent_dir not in sys.path:
10 | sys.path.insert(0, parent_dir)
11 |
12 | from context_payload import extract_context
13 |
14 | logger = logging.getLogger(__name__)
15 |
16 | class ConfigGenerateImageFluxDev:
17 | """
18 | Configures parameters for WaveSpeedAI FLUX.1 Kontext Multi Ultra Fast [dev].
19 | """
20 | @classmethod
21 | def INPUT_TYPES(cls):
22 | return {
23 | "required": {},
24 | "optional": {
25 | "context": ("*", {}),
26 | "n": ("INT", {"default": 1, "min": 1, "max": 4, "step": 1, "tooltip": "Number of images to generate (1-4)."}),
27 | "size": ("STRING", {"default": "1024x1024", "multiline": False, "tooltip": "The size of the generated image (e.g., '1024x1024')."}),
28 | "num_inference_steps": ("INT", {"default": 28, "min": 1, "max": 50, "step": 1, "tooltip": "Number of inference steps to perform."}),
29 | "guidance_scale": ("FLOAT", {"default": 2.5, "min": 1.0, "max": 20.0, "step": 0.1, "tooltip": "How closely the model follows the prompt."}),
30 | "seed": ("INT", {"default": -1, "min": -1, "max": 2147483647, "tooltip": "Seed for reproducible generation (-1 for random)."}),
31 | "enable_safety_checker": ("BOOLEAN", {"default": True, "tooltip": "Enable the safety checker."}),
32 | }
33 | }
34 |
35 | RETURN_TYPES = ("*",)
36 | RETURN_NAMES = ("context",)
37 | FUNCTION = "configure"
38 | CATEGORY = "🔗llm_toolkit/config/image/wavespeed"
39 |
40 | def configure(self, context: Optional[Dict[str, Any]] = None, **kwargs) -> Tuple[Dict[str, Any]]:
41 | logger.info("ConfigGenerateImageFluxDev executing...")
42 |
43 | if context is None:
44 | output_context = {}
45 | elif isinstance(context, dict):
46 | output_context = context.copy()
47 | else:
48 | unwrapped = extract_context(context)
49 | if isinstance(unwrapped, dict):
50 | output_context = unwrapped.copy()
51 | output_context.setdefault("passthrough_data", context)
52 | else:
53 | output_context = {"passthrough_data": context}
54 |
55 | generation_config = output_context.get("generation_config", {})
56 | if not isinstance(generation_config, dict):
57 | generation_config = {}
58 |
59 | # Add Flux Dev parameters - context values take precedence
60 | generation_config['n'] = generation_config.get('n', kwargs.get('n', 1))
61 | generation_config['size'] = generation_config.get('size', kwargs.get('size', '1024x1024'))
62 | generation_config['num_inference_steps'] = generation_config.get('num_inference_steps', kwargs.get('num_inference_steps', 28))
63 | generation_config['guidance_scale'] = generation_config.get('guidance_scale', kwargs.get('guidance_scale', 2.5))
64 | generation_config['enable_safety_checker'] = generation_config.get('enable_safety_checker', kwargs.get('enable_safety_checker', True))
65 |
66 | seed_val = generation_config.get('seed', kwargs.get('seed', -1))
67 | if seed_val != -1:
68 | generation_config['seed'] = seed_val
69 |
70 | output_context["generation_config"] = generation_config
71 | logger.info(f"ConfigGenerateImageFluxDev: Updated context with generation_config")
72 |
73 | return (output_context,)
74 |
75 | # --- Node Mappings ---
76 | NODE_CLASS_MAPPINGS = {
77 | "ConfigGenerateImageFluxDev": ConfigGenerateImageFluxDev
78 | }
79 | NODE_DISPLAY_NAME_MAPPINGS = {
80 | "ConfigGenerateImageFluxDev": "Configure Image Generation - Flux Dev (🔗LLMToolkit)"
81 | }
--------------------------------------------------------------------------------
/comfy-nodes/config_generate_image_gemini.py:
--------------------------------------------------------------------------------
1 | # config_generate_image_gemini.py
2 | import os
3 | import sys
4 | import logging
5 | from typing import Any, Dict, Optional, Tuple
6 |
7 | # Ensure parent directory is in path if running standalone for testing
8 | current_dir = os.path.dirname(os.path.abspath(__file__))
9 | parent_dir = os.path.dirname(current_dir)
10 | if parent_dir not in sys.path:
11 |     sys.path.insert(0, parent_dir)
12 |
13 | from context_payload import extract_context
14 |
15 | logger = logging.getLogger(__name__)
16 |
17 | class ConfigGenerateImageGemini:
18 | """
19 | Configures parameters specifically for Gemini/Imagen image generation.
20 | """
21 | # Gemini/Imagen specific options
22 | ASPECT_RATIO_OPTIONS = ["1:1", "3:4", "4:3", "9:16", "16:9"]
23 | PERSON_GENERATION_OPTIONS = ["dont_allow", "allow_adult", "allow_all"]
24 | SAFETY_FILTER_OPTIONS = ["block_low_and_above", "block_medium_and_above", "block_high_and_above"]
25 | LANGUAGE_OPTIONS = ["auto", "en", "es-MX", "ja-JP", "zh-CN", "hi-IN"]
26 |
27 | @classmethod
28 | def INPUT_TYPES(cls):
29 | return {
30 | "required": {},
31 | "optional": {
32 | "context": ("*", {}),
33 | # Common parameters
34 | "n": ("INT", {"default": 1, "min": 1, "max": 4, "step": 1, "tooltip": "Number of images (1-4). Imagen 4 Ultra only supports 1."}),
35 | "aspect_ratio": (cls.ASPECT_RATIO_OPTIONS, {"default": "1:1", "tooltip": "Aspect ratio for generated images."}),
36 |
37 | # Imagen specific
38 | "person_generation": (cls.PERSON_GENERATION_OPTIONS, {"default": "allow_adult", "tooltip": "Policy for generating people in images."}),
39 | "safety_filter_level": (cls.SAFETY_FILTER_OPTIONS, {"default": "block_medium_and_above", "tooltip": "Content safety filter level."}),
40 | "language": (cls.LANGUAGE_OPTIONS, {"default": "auto", "tooltip": "Language hint for generation (best results with listed languages)."}),
41 |
42 | # Gemini native specific
43 | "temperature": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 2.0, "step": 0.1, "tooltip": "Generation temperature for Gemini native models."}),
44 | "max_tokens": ("INT", {"default": 8192, "min": 1, "max": 32768, "step": 1, "tooltip": "Max tokens for Gemini native generation response."}),
45 |
46 | # Seed
47 | "seed": ("INT", {"default": -1, "min": -1, "max": 0xffffffffffffffff, "tooltip": "Seed for reproducible generation (-1 for random)."}),
48 | }
49 | }
50 |
51 | RETURN_TYPES = ("*",)
52 | RETURN_NAMES = ("context",)
53 | FUNCTION = "configure"
54 | CATEGORY = "🔗llm_toolkit/config/image/gemini"
55 |
56 | def configure(self, context: Optional[Dict[str, Any]] = None, **kwargs) -> Tuple[Dict[str, Any]]:
57 | """
58 | Adds Gemini/Imagen-specific image generation parameters to the context.
59 | """
60 | logger.info("ConfigGenerateImageGemini executing...")
61 |
62 | # Initialize or copy the context
63 | if context is None:
64 | output_context = {}
65 | elif isinstance(context, dict):
66 | output_context = context.copy()
67 | else:
68 | # Try to unwrap ContextPayload
69 | unwrapped = extract_context(context)
70 | if isinstance(unwrapped, dict):
71 | output_context = unwrapped.copy()
72 | output_context.setdefault("passthrough_data", context)
73 | else:
74 | output_context = {"passthrough_data": context}
75 |
76 | # Initialize generation_config dictionary
77 | generation_config = output_context.get("generation_config", {})
78 | if not isinstance(generation_config, dict):
79 | generation_config = {}
80 |
81 | # Add Gemini/Imagen parameters
82 | # Context values from upstream nodes (like ResolutionSelector) take precedence
83 | generation_config['n'] = generation_config.get('n', kwargs.get('n', 1))
84 |
85 | # Set aspect ratio: prioritize upstream 'aspect_ratio', then allow 'size' to be converted downstream,
86 | # and finally use the widget value as a fallback.
87 | if 'aspect_ratio' not in generation_config and 'size' not in generation_config:
88 | generation_config['aspect_ratio'] = kwargs.get('aspect_ratio', '1:1')
89 |
90 | generation_config['person_generation'] = generation_config.get('person_generation', kwargs.get('person_generation', 'allow_adult'))
91 |         generation_config['safety_filter_level'] = generation_config.get('safety_filter_level', kwargs.get('safety_filter_level', 'block_medium_and_above'))
92 |
93 | language = generation_config.get('language', kwargs.get('language', 'auto'))
94 | if language != 'auto':
95 | generation_config['language'] = language
96 |
97 | # Gemini native specific (store with _gemini suffix for clarity)
98 | generation_config['temperature_gemini'] = generation_config.get('temperature_gemini', kwargs.get('temperature', 0.7))
99 | generation_config['max_tokens_gemini'] = generation_config.get('max_tokens_gemini', kwargs.get('max_tokens', 8192))
100 |
101 | # Seed handling
102 | seed_val = generation_config.get('seed', kwargs.get('seed', -1))
103 | if seed_val != -1:
104 | generation_config['seed'] = seed_val
105 |
106 | # Add the config to the main context
107 | output_context["generation_config"] = generation_config
108 | logger.info(f"ConfigGenerateImageGemini: Updated context with generation_config")
109 |
110 | return (output_context,)
111 |
112 | # --- Node Mappings ---
113 | NODE_CLASS_MAPPINGS = {
114 | "ConfigGenerateImageGemini": ConfigGenerateImageGemini
115 | }
116 | NODE_DISPLAY_NAME_MAPPINGS = {
117 | "ConfigGenerateImageGemini": "Configure Image Generation - Gemini (🔗LLMToolkit)"
118 | }
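
A short sketch of the aspect-ratio guard above: when an upstream node (e.g. ResolutionSelector) has already supplied 'size', the widget's aspect_ratio is deliberately not written so the size can be converted downstream instead:

    node = ConfigGenerateImageGemini()

    (ctx,) = node.configure(context={"generation_config": {"size": "1024x768"}},
                            aspect_ratio="9:16")
    print("aspect_ratio" in ctx["generation_config"])  # False - 'size' takes priority

    (ctx2,) = node.configure(aspect_ratio="9:16")
    print(ctx2["generation_config"]["aspect_ratio"])   # 9:16 - widget used as fallback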
--------------------------------------------------------------------------------
/comfy-nodes/config_generate_image_openai.py:
--------------------------------------------------------------------------------
1 | # config_generate_image_openai.py
2 | import os
3 | import sys
4 | import logging
5 | from typing import Any, Dict, Optional, Tuple
6 |
7 | # Ensure parent directory is in path
8 | current_dir = os.path.dirname(os.path.abspath(__file__))
9 | parent_dir = os.path.dirname(current_dir)
10 | if parent_dir not in sys.path:
11 |     sys.path.insert(0, parent_dir)
12 |
13 | from context_payload import extract_context
14 |
15 | logger = logging.getLogger(__name__)
16 |
17 | class ConfigGenerateImageOpenAI:
18 | """
19 | Configures parameters specifically for OpenAI's DALL-E and GPT-Image models.
20 | """
21 | # DALL-E 2/3 Options
22 | SIZE_OPTIONS_DALLE3 = ["1024x1024", "1792x1024", "1024x1792"]
23 | SIZE_OPTIONS_DALLE2 = ["256x256", "512x512", "1024x1024"]
24 |     ALL_DALLE_SIZES = sorted(set(SIZE_OPTIONS_DALLE3 + SIZE_OPTIONS_DALLE2))
25 |
26 | QUALITY_OPTIONS_DALLE3 = ["standard", "hd"]
27 | STYLE_OPTIONS_DALLE3 = ["vivid", "natural"]
28 |
29 | # GPT-Image-1 Options
30 | QUALITY_OPTIONS_GPT = ["auto", "low", "medium", "high"]
31 | BACKGROUND_OPTIONS_GPT = ["auto", "opaque", "transparent"]
32 | OUTPUT_FORMAT_OPTIONS_GPT = ["png", "jpeg", "webp"]
33 |
34 | @classmethod
35 | def INPUT_TYPES(cls):
36 | return {
37 | "required": {},
38 | "optional": {
39 | "context": ("*", {}),
40 | # Common parameters
41 | "n": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1, "tooltip": "Number of images (DALL-E 3 only supports 1)."}),
42 | "size": (cls.ALL_DALLE_SIZES, {"default": "1024x1024", "tooltip": "Image dimensions (supported sizes vary by model)."}),
43 | "response_format": (["url", "b64_json"], {"default": "b64_json", "tooltip": "Return format (b64_json recommended for ComfyUI). gpt-image-1 always uses b64_json."}),
44 | "user": ("STRING", {"default": "", "multiline": False, "tooltip": "Optional user ID for moderation tracking."}),
45 |
46 | # DALL-E 3 specific
47 | "quality_dalle3": (cls.QUALITY_OPTIONS_DALLE3, {"default": "standard", "tooltip": "DALL-E 3 quality (standard/hd)."}),
48 | "style_dalle3": (cls.STYLE_OPTIONS_DALLE3, {"default": "vivid", "tooltip": "DALL-E 3 style (vivid/natural)."}),
49 |
50 | # GPT-Image-1 specific
51 | "quality_gpt": (cls.QUALITY_OPTIONS_GPT, {"default": "auto", "tooltip": "GPT-Image-1 quality."}),
52 | "background_gpt": (cls.BACKGROUND_OPTIONS_GPT, {"default": "auto", "tooltip": "GPT-Image-1 background."}),
53 | "output_format_gpt": (cls.OUTPUT_FORMAT_OPTIONS_GPT, {"default": "png", "tooltip": "GPT-Image-1 output format."}),
54 | "moderation_gpt": (["auto", "low"], {"default": "auto", "tooltip": "GPT-Image-1 content moderation level."}),
55 | "output_compression_gpt": ("INT", {"default": 100, "min": 0, "max": 100, "step": 1, "tooltip": "GPT-Image-1 compression (0-100) for webp/jpeg."}),
56 | }
57 | }
58 |
59 | RETURN_TYPES = ("*",)
60 | RETURN_NAMES = ("context",)
61 | FUNCTION = "configure"
62 | CATEGORY = "🔗llm_toolkit/config/image/openai"
63 |
64 | def configure(self, context: Optional[Dict[str, Any]] = None, **kwargs) -> Tuple[Dict[str, Any]]:
65 | logger.info("ConfigGenerateImageOpenAI executing...")
66 |
67 | if context is None:
68 | output_context = {}
69 | elif isinstance(context, dict):
70 | output_context = context.copy()
71 | else:
72 | unwrapped = extract_context(context)
73 | if isinstance(unwrapped, dict):
74 | output_context = unwrapped.copy()
75 | output_context.setdefault("passthrough_data", context)
76 | else:
77 | output_context = {"passthrough_data": context}
78 |
79 | generation_config = output_context.get("generation_config", {})
80 | if not isinstance(generation_config, dict):
81 | generation_config = {}
82 |
83 | # Common - context values take precedence
84 | generation_config['n'] = generation_config.get('n', kwargs.get('n', 1))
85 | generation_config['size'] = generation_config.get('size', kwargs.get('size', '1024x1024'))
86 | generation_config['response_format'] = generation_config.get('response_format', kwargs.get('response_format', 'b64_json'))
87 | user = generation_config.get('user', kwargs.get('user', "")).strip()
88 | if user:
89 | generation_config['user'] = user
90 |
91 | # DALL-E 3
92 | generation_config['quality_dalle3'] = generation_config.get('quality_dalle3', kwargs.get('quality_dalle3', 'standard'))
93 | generation_config['style_dalle3'] = generation_config.get('style_dalle3', kwargs.get('style_dalle3', 'vivid'))
94 |
95 | # GPT-Image-1
96 | generation_config['quality_gpt'] = generation_config.get('quality_gpt', kwargs.get('quality_gpt', 'auto'))
97 | generation_config['background_gpt'] = generation_config.get('background_gpt', kwargs.get('background_gpt', 'auto'))
98 | generation_config['output_format_gpt'] = generation_config.get('output_format_gpt', kwargs.get('output_format_gpt', 'png'))
99 | generation_config['moderation_gpt'] = generation_config.get('moderation_gpt', kwargs.get('moderation_gpt', 'auto'))
100 | generation_config['output_compression_gpt'] = generation_config.get('output_compression_gpt', kwargs.get('output_compression_gpt', 100))
101 |
102 | output_context["generation_config"] = generation_config
103 | logger.info("ConfigGenerateImageOpenAI: Updated context with generation_config.")
104 |
105 | return (output_context,)
106 |
107 | # --- Node Mappings ---
108 | NODE_CLASS_MAPPINGS = {
109 | "ConfigGenerateImageOpenAI": ConfigGenerateImageOpenAI
110 | }
111 | NODE_DISPLAY_NAME_MAPPINGS = {
112 | "ConfigGenerateImageOpenAI": "Configure Image Generation - OpenAI (🔗LLMToolkit)"
113 | }
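
A usage sketch showing how the suffixed keys let a single config dict serve both model families; the downstream generator is expected to read whichever keys match the selected model:

    (ctx,) = ConfigGenerateImageOpenAI().configure(n=2, size="512x512",
                                                   quality_dalle3="hd",
                                                   quality_gpt="high")
    cfg = ctx["generation_config"]
    print(cfg["n"], cfg["size"])                      # 2 512x512
    print(cfg["quality_dalle3"], cfg["quality_gpt"])  # hd high - stored side by side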
--------------------------------------------------------------------------------
/comfy-nodes/config_generate_image_openrouter.py:
--------------------------------------------------------------------------------
1 | # config_generate_image_openrouter.py
2 | import os
3 | import sys
4 | import logging
5 | from typing import Any, Dict, Optional, Tuple
6 |
7 | # Ensure parent directory is in path
8 | current_dir = os.path.dirname(os.path.abspath(__file__))
9 | parent_dir = os.path.dirname(current_dir)
10 | if parent_dir not in sys.path:
11 |     sys.path.insert(0, parent_dir)
12 |
13 | from context_payload import extract_context
14 |
15 | logger = logging.getLogger(__name__)
16 |
17 | class ConfigGenerateImageOpenRouter:
18 | """
19 | Configures parameters for OpenRouter image generation models.
20 | """
21 | @classmethod
22 | def INPUT_TYPES(cls):
23 | return {
24 | "required": {},
25 | "optional": {
26 | "n": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1, "tooltip": "Number of images to generate."}),
27 | "context": ("*", {}),
28 | "seed": ("INT", {"default": -1, "min": -1, "max": 0xffffffffffffffff, "tooltip": "Seed for reproducible generation (-1 for random)."}),
29 | }
30 | }
31 |
32 | RETURN_TYPES = ("*",)
33 | RETURN_NAMES = ("context",)
34 | FUNCTION = "configure"
35 | CATEGORY = "🔗llm_toolkit/config/image/openrouter"
36 |
37 | def configure(self, context: Optional[Dict[str, Any]] = None, **kwargs) -> Tuple[Dict[str, Any]]:
38 | logger.info("ConfigGenerateImageOpenRouter executing...")
39 |
40 | if context is None:
41 | output_context = {}
42 | elif isinstance(context, dict):
43 | output_context = context.copy()
44 | else:
45 | unwrapped = extract_context(context)
46 | if isinstance(unwrapped, dict):
47 | output_context = unwrapped.copy()
48 | output_context.setdefault("passthrough_data", context)
49 | else:
50 | output_context = {"passthrough_data": context}
51 |
52 | generation_config = output_context.get("generation_config", {})
53 | if not isinstance(generation_config, dict):
54 | generation_config = {}
55 |
56 |         # Common parameters - seed: context value wins; n: the widget value wins when provided
57 | seed_val = generation_config.get('seed', kwargs.get('seed', -1))
58 | if seed_val != -1:
59 | generation_config['seed'] = seed_val
60 |
61 | if 'n' in kwargs and kwargs['n'] is not None:
62 | generation_config['n'] = kwargs['n']
63 |
64 | output_context["generation_config"] = generation_config
65 | logger.info("ConfigGenerateImageOpenRouter: Updated context with generation_config.")
66 |
67 | return (output_context,)
68 |
69 | # --- Node Mappings ---
70 | NODE_CLASS_MAPPINGS = {
71 | "ConfigGenerateImageOpenRouter": ConfigGenerateImageOpenRouter
72 | }
73 | NODE_DISPLAY_NAME_MAPPINGS = {
74 | "ConfigGenerateImageOpenRouter": "Configure Image Generation - OpenRouter (🔗LLMToolkit)"
75 | }
76 |
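Because the widget's n deliberately overrides any upstream value here (the reverse of the precedence rule in the other image config nodes), wiring this node after another config node replaces its image count; a minimal sketch:

    node = ConfigGenerateImageOpenRouter()
    upstream = {"generation_config": {"n": 4}}
    (ctx,) = node.configure(context=upstream, n=1)
    print(ctx["generation_config"]["n"])  # 1 - the widget value replaces the upstream 4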
--------------------------------------------------------------------------------
/comfy-nodes/config_generate_image_portrait.py:
--------------------------------------------------------------------------------
1 | # comfy-nodes/config_generate_image_portrait.py
2 | import os
3 | import sys
4 | import logging
5 | from typing import Any, Dict, Optional, Tuple
6 |
7 | # Ensure parent directory is in path
8 | current_dir = os.path.dirname(os.path.abspath(__file__))
9 | parent_dir = os.path.dirname(current_dir)
10 | if parent_dir not in sys.path:
11 | sys.path.insert(0, parent_dir)
12 |
13 | from context_payload import extract_context
14 |
15 | logger = logging.getLogger(__name__)
16 |
17 | class ConfigGenerateImagePortrait:
18 | """
19 | Configures parameters specifically for WaveSpeedAI Bytedance Portrait enhancement.
20 | """
21 | @classmethod
22 | def INPUT_TYPES(cls):
23 | return {
24 | "required": {},
25 | "optional": {
26 | "context": ("*", {}),
27 | "seed": ("INT", {"default": -1, "min": -1, "max": 2147483647, "tooltip": "Seed for reproducible generation (-1 for random)."}),
28 | }
29 | }
30 |
31 | RETURN_TYPES = ("*",)
32 | RETURN_NAMES = ("context",)
33 | FUNCTION = "configure"
34 | CATEGORY = "🔗llm_toolkit/config/image/wavespeed"
35 |
36 | def configure(self, context: Optional[Dict[str, Any]] = None, **kwargs) -> Tuple[Dict[str, Any]]:
37 | logger.info("ConfigGenerateImagePortrait executing...")
38 |
39 | if context is None:
40 | output_context = {}
41 | elif isinstance(context, dict):
42 | output_context = context.copy()
43 | else:
44 | unwrapped = extract_context(context)
45 | if isinstance(unwrapped, dict):
46 | output_context = unwrapped.copy()
47 | output_context.setdefault("passthrough_data", context)
48 | else:
49 | output_context = {"passthrough_data": context}
50 |
51 | generation_config = output_context.get("generation_config", {})
52 | if not isinstance(generation_config, dict):
53 | generation_config = {}
54 |
55 | seed_val = kwargs.get('seed', -1)
56 | if seed_val != -1:
57 | generation_config['seed'] = seed_val
58 |
59 | output_context["generation_config"] = generation_config
60 | logger.info(f"ConfigGenerateImagePortrait: Updated context with generation_config")
61 |
62 | return (output_context,)
63 |
64 | # --- Node Mappings ---
65 | NODE_CLASS_MAPPINGS = {
66 | "ConfigGenerateImagePortrait": ConfigGenerateImagePortrait
67 | }
68 | NODE_DISPLAY_NAME_MAPPINGS = {
69 | "ConfigGenerateImagePortrait": "Configure Image Generation - Portrait (🔗LLMToolkit)"
70 | }
--------------------------------------------------------------------------------
/comfy-nodes/config_generate_image_seedream.py:
--------------------------------------------------------------------------------
1 | # config_generate_image_seedream.py
2 | import os
3 | import sys
4 | import logging
5 | from typing import Any, Dict, Optional, Tuple
6 |
7 | # Ensure parent directory is in path
8 | current_dir = os.path.dirname(os.path.abspath(__file__))
9 | parent_dir = os.path.dirname(current_dir)
10 | if parent_dir not in sys.path:
11 | sys.path.insert(0, parent_dir)
12 |
13 | from context_payload import extract_context
14 |
15 | logger = logging.getLogger(__name__)
16 |
17 | class ConfigGenerateImageSeedream:
18 | """
19 | Unified configuration for WaveSpeedAI's Seedream V4 and SeedEdit V3 models.
20 | """
21 |
22 | MODEL_FAMILIES = ["seedream_v4", "seededit_v3"]
23 |
24 | @classmethod
25 | def INPUT_TYPES(cls):
26 | return {
27 | "required": {
28 | "model_family": (cls.MODEL_FAMILIES, {"default": "seedream_v4"}),
29 | },
30 | "optional": {
31 | "context": ("*", {}),
32 | # Seedream V4 parameters
33 | "size": ("STRING", {"default": "2048*2048", "tooltip": "[V4] The size of the generated media in pixels (width*height)."}),
34 | "max_images": ("INT", {"default": 1, "min": 1, "max": 16, "step": 1, "tooltip": "[V4] Number of images for sequential models."}),
35 |
36 | # SeedEdit V3 parameters
37 | "guidance_scale": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.1, "tooltip": "[V3] Controls how strongly the model follows the editing instruction."}),
38 |
39 | # Common parameters
40 | "seed": ("INT", {"default": -1, "min": -1, "max": 2147483647, "tooltip": "[V3 & V4] Seed for reproducible generation (-1 for random)."}),
41 | }
42 | }
43 |
44 | RETURN_TYPES = ("*",)
45 | RETURN_NAMES = ("context",)
46 | FUNCTION = "configure"
47 | CATEGORY = "🔗llm_toolkit/config/image/wavespeed"
48 |
49 | def configure(self, model_family: str, context: Optional[Dict[str, Any]] = None, **kwargs) -> Tuple[Dict[str, Any]]:
50 | """
51 | Adds model-specific image generation parameters to the context based on the selected family.
52 | """
53 | logger.info(f"ConfigGenerateImageSeedream executing for model family: {model_family}")
54 |
55 | if context is None:
56 | output_context = {}
57 | elif isinstance(context, dict):
58 | output_context = context.copy()
59 | else:
60 | unwrapped = extract_context(context)
61 | if isinstance(unwrapped, dict):
62 | output_context = unwrapped.copy()
63 | output_context.setdefault("passthrough_data", context)
64 | else:
65 | output_context = {"passthrough_data": context}
66 |
67 | generation_config = output_context.get("generation_config", {})
68 | if not isinstance(generation_config, dict):
69 | generation_config = {}
70 |
71 | # Apply parameters based on the selected model family
72 | if model_family == "seedream_v4":
73 | generation_config['size'] = kwargs.get('size', "2048*2048")
74 | generation_config['max_images'] = kwargs.get('max_images', 1)
75 |             # 'n' is the common image-count key, so mirror max_images into it for V4
76 | generation_config['n'] = kwargs.get('max_images', 1)
77 |
78 | elif model_family == "seededit_v3":
79 | generation_config['guidance_scale'] = kwargs.get('guidance_scale', 0.5)
80 |
81 | # Apply common parameters
82 | seed_val = kwargs.get('seed', -1)
83 | if seed_val != -1:
84 | generation_config['seed'] = seed_val
85 |
86 | output_context["generation_config"] = generation_config
87 | logger.info(f"ConfigGenerateImageSeedream: Updated context with generation_config for {model_family}: {generation_config}")
88 |
89 | return (output_context,)
90 |
91 | # --- Node Mappings ---
92 | NODE_CLASS_MAPPINGS = {
93 | "ConfigGenerateImageSeedream": ConfigGenerateImageSeedream
94 | }
95 | NODE_DISPLAY_NAME_MAPPINGS = {
96 | "ConfigGenerateImageSeedream": "Configure Image Gen - Seedream (🔗LLMToolkit)"
97 | }
98 |
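A quick sketch of the family switch: only the selected family's parameters are written, and for V4 the max_images value is mirrored into the common 'n' key:

    node = ConfigGenerateImageSeedream()

    (v4_ctx,) = node.configure("seedream_v4", max_images=4)
    print(v4_ctx["generation_config"]["n"])       # 4 - mirrored from max_images

    (v3_ctx,) = node.configure("seededit_v3", guidance_scale=0.8)
    print("size" in v3_ctx["generation_config"])  # False - V4-only key is not set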
--------------------------------------------------------------------------------
/comfy-nodes/config_generate_speech.py:
--------------------------------------------------------------------------------
1 | # comfy-nodes/config_generate_speech.py
2 | """ComfyUI helper node – prepares generation_config for Gemini TTS.
3 |
4 | The node mirrors *config_generate_image.py* but specialises in the
5 | parameters needed by the Gemini 2.5 TTS models.
6 | """
7 |
8 | import os
9 | import sys
10 | import logging
11 | from typing import Any, Dict, Optional, Tuple
12 |
13 | # Ensure parent dir in path
14 | current_dir = os.path.dirname(os.path.abspath(__file__))
15 | parent_dir = os.path.dirname(current_dir)
16 | if parent_dir not in sys.path:
17 |     sys.path.insert(0, parent_dir)
18 |
19 | from context_payload import extract_context
20 |
21 | logger = logging.getLogger(__name__)
22 |
23 | # Pre-built voice names published by Google (see docs)
24 | VOICE_OPTIONS = [
25 | "Zephyr", "Puck", "Charon", "Kore", "Fenrir", "Leda", "Orus", "Aoede",
26 | "Callirrhoe", "Autonoe", "Enceladus", "Iapetus", "Umbriel", "Algieba",
27 | "Despina", "Erinome", "Algenib", "Rasalgethi", "Laomedeia", "Achernar",
28 | "Alnilam", "Schedar", "Gacrux", "Pulcherrima", "Achird", "Zubenelgenubi",
29 | "Vindemiatrix", "Sadachbia", "Sadaltager", "Sulafat",
30 | ]
31 |
32 |
33 | class ConfigGenerateSpeech:
34 | """Adds text-to-speech parameters to the context for downstream nodes."""
35 |
36 | @classmethod
37 | def INPUT_TYPES(cls):
38 | return {
39 | "required": {},
40 | "optional": {
41 | "context": ("*", {}),
42 | "voice_name": (VOICE_OPTIONS, {"default": "Kore"}),
43 | # Sample rate & channels kept here for future flexibility
44 | "sample_rate": ("INT", {"default": 24000, "min": 8000, "max": 48000}),
45 | "channels": ("INT", {"default": 1, "min": 1, "max": 2}),
46 | },
47 | }
48 |
49 | RETURN_TYPES = ("*",)
50 | RETURN_NAMES = ("context",)
51 | FUNCTION = "configure"
52 | CATEGORY = "🔗llm_toolkit/config/speech"
53 |
54 | def configure(
55 | self,
56 | context: Optional[Dict[str, Any]] = None,
57 | voice_name: str = "Kore",
58 | sample_rate: int = 24000,
59 | channels: int = 1,
60 | ) -> Tuple[Dict[str, Any]]:
61 | logger.info("ConfigGenerateSpeech executing…")
62 |
63 | # Unwrap / copy context
64 | if context is None:
65 | output_context: Dict[str, Any] = {}
66 | elif isinstance(context, dict):
67 | output_context = context.copy()
68 | else:
69 | output_context = extract_context(context)
70 | if not isinstance(output_context, dict):
71 | output_context = {"passthrough_data": context}
72 |
73 | gen_cfg = output_context.get("generation_config", {})
74 | if not isinstance(gen_cfg, dict):
75 | gen_cfg = {}
76 |
77 | gen_cfg.update(
78 | {
79 | "voice_name": voice_name,
80 | "sample_rate": sample_rate,
81 | "channels": channels,
82 | }
83 | )
84 |
85 | output_context["generation_config"] = gen_cfg
86 | logger.info("ConfigGenerateSpeech: Updated generation_config with voice %s", voice_name)
87 | return (output_context,)
88 |
89 |
90 | NODE_CLASS_MAPPINGS = {"ConfigGenerateSpeech": ConfigGenerateSpeech}
91 | NODE_DISPLAY_NAME_MAPPINGS = {
92 | "ConfigGenerateSpeech": "Configure Speech Generation (🔗LLMToolkit)",
93 | }
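
Unlike the image config nodes above, this node's widget values always overwrite existing context keys (a plain dict.update rather than context-first precedence). A minimal sketch:

    (ctx,) = ConfigGenerateSpeech().configure(voice_name="Puck")
    print(ctx["generation_config"])
    # {'voice_name': 'Puck', 'sample_rate': 24000, 'channels': 1}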
--------------------------------------------------------------------------------
/comfy-nodes/config_generate_video.py:
--------------------------------------------------------------------------------
1 | """Unified Configure Video Generation node for LLM Toolkit."""
2 |
3 | import os
4 | import sys
5 | import logging
6 | from typing import Any, Dict, Optional, Tuple
7 |
8 | current_dir = os.path.dirname(os.path.abspath(__file__))
9 | parent_dir = os.path.dirname(current_dir)
10 | if parent_dir not in sys.path:
11 |     sys.path.insert(0, parent_dir)
12 |
13 | from context_payload import extract_context
14 |
15 | from video_generation_capabilities import (
16 | DEFAULT_MODEL,
17 | MANAGED_KEYS,
18 | normalize_generation_config,
19 | resolve_canonical_model,
20 | )
21 |
22 | logger = logging.getLogger(__name__)
23 |
24 | ASPECT_CHOICES = ["21:9", "16:9", "4:3", "1:1", "3:4", "9:16", "9:21"]
25 | RESOLUTION_CHOICES = ["720p", "1080p"]
26 | DURATION_CHOICES = ["5", "8", "10"]
27 | DEFAULT_RESOLUTION = RESOLUTION_CHOICES[-1]
28 | DEFAULT_DURATION_SECONDS = int(DURATION_CHOICES[1])
29 | NEGATIVE_PROMPT_DEFAULT = "bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy"
30 |
31 |
32 | class ConfigGenerateVideo:
33 | """Collect video generation parameters for supported providers/models."""
34 |
35 | @classmethod
36 | def INPUT_TYPES(cls):
37 | return {
38 | "required": {},
39 | "optional": {
40 | "context": ("*", {}),
41 | "aspect_ratio": (ASPECT_CHOICES, {"default": "16:9"}),
42 | "video_resolution": (RESOLUTION_CHOICES, {"default": DEFAULT_RESOLUTION}),
43 | "duration_seconds": (DURATION_CHOICES, {"default": str(DEFAULT_DURATION_SECONDS)}),
44 | "guidance_scale": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
45 | "negative_prompt": ("STRING", {"default": NEGATIVE_PROMPT_DEFAULT, "multiline": True}),
46 | "enhance_prompt": ("BOOLEAN", {"default": True}),
47 | "enable_prompt_expansion": ("BOOLEAN", {"default": True}),
48 | "generate_audio": ("BOOLEAN", {"default": True}),
49 | "seed": ("INT", {"default": -1, "min": -1, "max": 0x7FFFFFFF}),
50 | },
51 | }
52 |
53 | RETURN_TYPES = ("*",)
54 | RETURN_NAMES = ("context",)
55 | FUNCTION = "configure"
56 | CATEGORY = "🔗llm_toolkit/config"
57 |
58 | def configure(
59 | self,
60 | context: Optional[Any] = None,
61 | aspect_ratio: str = "16:9",
62 | video_resolution: str = DEFAULT_RESOLUTION,
63 | duration_seconds: str = str(DEFAULT_DURATION_SECONDS),
64 | guidance_scale: float = 0.5,
65 | negative_prompt: str = NEGATIVE_PROMPT_DEFAULT,
66 | enhance_prompt: bool = True,
67 | enable_prompt_expansion: bool = True,
68 | generate_audio: bool = True,
69 | seed: int = -1,
70 | ) -> Tuple[Dict[str, Any]]:
71 | logger.info("ConfigGenerateVideo executing.")
72 |
73 | if context is None:
74 | out_ctx: Dict[str, Any] = {}
75 | elif isinstance(context, dict):
76 | out_ctx = context.copy()
77 | else:
78 | out_ctx = extract_context(context)
79 | if not isinstance(out_ctx, dict):
80 | out_ctx = {"passthrough_data": context}
81 |
82 | provider_cfg = out_ctx.get("provider_config", {})
83 | if not isinstance(provider_cfg, dict):
84 | provider_cfg = {}
85 |
86 | gen_cfg = out_ctx.get("generation_config", {})
87 | if not isinstance(gen_cfg, dict):
88 | gen_cfg = {}
89 |
90 | model_hint = (
91 | provider_cfg.get("llm_model")
92 | or gen_cfg.get("model_id")
93 | or DEFAULT_MODEL
94 | )
95 | canonical_model = resolve_canonical_model(model_hint)
96 |
97 | try:
98 | duration_value = int(duration_seconds)
99 | except (TypeError, ValueError):
100 | duration_value = DEFAULT_DURATION_SECONDS
101 |
102 | requested_values: Dict[str, Any] = {
103 | "aspect_ratio": aspect_ratio,
104 | "video_resolution": video_resolution,
105 | "duration_seconds": duration_value,
106 | "guidance_scale": guidance_scale,
107 | "negative_prompt": negative_prompt,
108 | "enhance_prompt": enhance_prompt,
109 | "enable_prompt_expansion": enable_prompt_expansion,
110 | "generate_audio": generate_audio,
111 | "seed": seed,
112 | }
113 |
114 | _, sanitized_payload = normalize_generation_config(
115 | canonical_model,
116 | requested=requested_values,
117 | existing=gen_cfg,
118 | )
119 |
120 | for key in list(gen_cfg.keys()):
121 | if key in MANAGED_KEYS and key not in sanitized_payload:
122 | gen_cfg.pop(key)
123 |
124 | gen_cfg.update(sanitized_payload)
125 | out_ctx["generation_config"] = gen_cfg
126 |
127 | logger.info(
128 | "ConfigGenerateVideo: model=%s payload=%s",
129 | canonical_model,
130 | sanitized_payload,
131 | )
132 | return (out_ctx,)
133 |
134 |
135 | NODE_CLASS_MAPPINGS = {"ConfigGenerateVideo": ConfigGenerateVideo}
136 | NODE_DISPLAY_NAME_MAPPINGS = {"ConfigGenerateVideo": "Configure Video Generation (🔗LLMToolkit)"}
137 |
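The stale-key cleanup at lines 120-124 is the subtle step: managed keys that the sanitizer dropped for the resolved model are also purged from a pre-existing config. A standalone sketch with an illustrative MANAGED_KEYS subset (the real set lives in video_generation_capabilities and is not shown in this dump):

    MANAGED_KEYS = {"generate_audio", "negative_prompt"}  # illustrative subset only
    gen_cfg = {"generate_audio": True, "seed": 7}         # left over from a previous model
    sanitized_payload = {"seed": 7}                       # sanitizer omitted audio support

    for key in list(gen_cfg.keys()):
        if key in MANAGED_KEYS and key not in sanitized_payload:
            gen_cfg.pop(key)
    gen_cfg.update(sanitized_payload)
    print(gen_cfg)  # {'seed': 7} - no stale generate_audio flag reaches the provider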
--------------------------------------------------------------------------------
/comfy-nodes/config_generate_video_hailuo_i2v_pro.py:
--------------------------------------------------------------------------------
1 | # comfy-nodes/config_generate_video_hailuo_i2v_pro.py
2 | """Configure Video Generation for Minimax Hailuo 02 I2V Pro.
3 |
4 | Handles image-to-video parameters specific to the
5 | `minimax/hailuo-02/i2v-pro` endpoint.
6 | """
7 |
8 | from __future__ import annotations
9 |
10 | import os
11 | import sys
12 | import logging
13 | from typing import Any, Dict, Optional, Tuple
14 |
15 | current_dir = os.path.dirname(os.path.abspath(__file__))
16 | parent_dir = os.path.dirname(current_dir)
17 | if parent_dir not in sys.path:
18 |     sys.path.insert(0, parent_dir)
19 |
20 | from context_payload import extract_context  # type: ignore
21 |
22 | logger = logging.getLogger(__name__)
23 |
24 | class ConfigGenerateVideoHailuoI2VPro:
25 | MODEL_ID = "minimax/hailuo-02/i2v-pro"
26 |
27 | @classmethod
28 | def INPUT_TYPES(cls):
29 | return {
30 | "required": {},
31 | "optional": {
32 | "context": ("*", {}),
33 | "image_url": (
34 | "STRING",
35 | {
36 | "multiline": False,
37 | "default": "",
38 | "tooltip": "Input image URL (.jpg/.png)",
39 | },
40 | ),
41 | "enable_prompt_expansion": ("BOOLEAN", {"default": True}),
42 | },
43 | }
44 |
45 | RETURN_TYPES = ("*",)
46 | RETURN_NAMES = ("context",)
47 | FUNCTION = "configure"
48 | CATEGORY = "🔗llm_toolkit/config/video/hailuo"
49 |
50 | def configure(
51 | self,
52 | context: Optional[Any] = None,
53 | image_url: str = "",
54 | enable_prompt_expansion: bool = True,
55 | ) -> Tuple[Dict[str, Any]]:
56 | logger.info("ConfigGenerateVideoHailuoI2VPro executing…")
57 |
58 | if context is None:
59 | out_ctx: Dict[str, Any] = {}
60 | elif isinstance(context, dict):
61 | out_ctx = context.copy()
62 | else:
63 | out_ctx = extract_context(context)
64 | if not isinstance(out_ctx, dict):
65 | out_ctx = {"passthrough_data": context}
66 |
67 | gen_cfg = out_ctx.get("generation_config", {})
68 | if not isinstance(gen_cfg, dict):
69 | gen_cfg = {}
70 |
71 | gen_cfg.update(
72 | {
73 | "model_id": self.MODEL_ID,
74 | **({"image": image_url.strip()} if image_url.strip() else {}),
75 | "enable_prompt_expansion": enable_prompt_expansion,
76 | }
77 | )
78 |
79 | out_ctx["generation_config"] = gen_cfg
80 | logger.info("ConfigGenerateVideoHailuoI2VPro: saved config %s", gen_cfg)
81 | return (out_ctx,)
82 |
83 |
84 | NODE_CLASS_MAPPINGS = {
85 | "ConfigGenerateVideoHailuoI2VPro": ConfigGenerateVideoHailuoI2VPro
86 | }
87 | NODE_DISPLAY_NAME_MAPPINGS = {
88 | "ConfigGenerateVideoHailuoI2VPro": "Configure Hailuo 02 I2V Pro (🔗LLMToolkit)"
89 | }
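
The conditional-splat idiom used here (and in the Kling and Seedance nodes below) guarantees that a blank image_url widget never clobbers an image set upstream; a standalone sketch with a hypothetical URL:

    image_url = ""  # widget left empty
    gen_cfg = {"image": "https://example.com/ref.png"}  # placed upstream (hypothetical)
    gen_cfg.update({
        "model_id": "minimax/hailuo-02/i2v-pro",
        **({"image": image_url.strip()} if image_url.strip() else {}),
    })
    print(gen_cfg["image"])  # the upstream URL survives the empty widget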
--------------------------------------------------------------------------------
/comfy-nodes/config_generate_video_hailuo_i2v_standard.py:
--------------------------------------------------------------------------------
1 | # comfy-nodes/config_generate_video_hailuo_i2v_standard.py
2 | """Configure Video Generation for Minimax Hailuo 02 I2V Standard.
3 |
4 | Handles image-to-video parameters specific to the
5 | `minimax/hailuo-02/i2v-standard` endpoint.
6 | """
7 |
8 | from __future__ import annotations
9 |
10 | import os
11 | import sys
12 | import logging
13 | from typing import Any, Dict, Optional, Tuple
14 |
15 | current_dir = os.path.dirname(os.path.abspath(__file__))
16 | parent_dir = os.path.dirname(current_dir)
17 | if parent_dir not in sys.path:
18 |     sys.path.insert(0, parent_dir)
19 |
20 | from context_payload import extract_context  # type: ignore
21 |
22 | logger = logging.getLogger(__name__)
23 |
24 | class ConfigGenerateVideoHailuoI2VStandard:
25 | MODEL_ID = "minimax/hailuo-02/i2v-standard"
26 |
27 | @classmethod
28 | def INPUT_TYPES(cls):
29 | return {
30 | "required": {},
31 | "optional": {
32 | "context": ("*", {}),
33 | "image_url": (
34 | "STRING",
35 | {
36 | "multiline": False,
37 | "default": "",
38 | "tooltip": "Input image URL (.jpg/.png)",
39 | },
40 | ),
41 | "duration": (["6", "10"], {"default": "6"}),
42 | "enable_prompt_expansion": ("BOOLEAN", {"default": True}),
43 | },
44 | }
45 |
46 | RETURN_TYPES = ("*",)
47 | RETURN_NAMES = ("context",)
48 | FUNCTION = "configure"
49 | CATEGORY = "🔗llm_toolkit/config/video/hailuo"
50 |
51 | def configure(
52 | self,
53 | context: Optional[Any] = None,
54 | image_url: str = "",
55 | duration: str = "6",
56 | enable_prompt_expansion: bool = True,
57 | ) -> Tuple[Dict[str, Any]]:
58 | logger.info("ConfigGenerateVideoHailuoI2VStandard executing…")
59 |
60 | if context is None:
61 | out_ctx: Dict[str, Any] = {}
62 | elif isinstance(context, dict):
63 | out_ctx = context.copy()
64 | else:
65 | out_ctx = extract_context(context)
66 | if not isinstance(out_ctx, dict):
67 | out_ctx = {"passthrough_data": context}
68 |
69 | gen_cfg = out_ctx.get("generation_config", {})
70 | if not isinstance(gen_cfg, dict):
71 | gen_cfg = {}
72 |
73 | gen_cfg.update(
74 | {
75 | "model_id": self.MODEL_ID,
76 | **({"image": image_url.strip()} if image_url.strip() else {}),
77 | "duration": int(duration),
78 | "enable_prompt_expansion": enable_prompt_expansion,
79 | }
80 | )
81 |
82 | out_ctx["generation_config"] = gen_cfg
83 | logger.info("ConfigGenerateVideoHailuoI2VStandard: saved config %s", gen_cfg)
84 | return (out_ctx,)
85 |
86 |
87 | NODE_CLASS_MAPPINGS = {
88 | "ConfigGenerateVideoHailuoI2VStandard": ConfigGenerateVideoHailuoI2VStandard
89 | }
90 | NODE_DISPLAY_NAME_MAPPINGS = {
91 | "ConfigGenerateVideoHailuoI2VStandard": "Configure Hailuo 02 I2V Standard (🔗LLMToolkit)"
92 | }
--------------------------------------------------------------------------------
/comfy-nodes/config_generate_video_hailuo_t2v_pro.py:
--------------------------------------------------------------------------------
1 | # comfy-nodes/config_generate_video_hailuo_t2v_pro.py
2 | """Configure Video Generation for Minimax Hailuo 02 T2V Pro.
3 |
4 | Handles text-to-video parameters specific to the
5 | `minimax/hailuo-02/t2v-pro` endpoint.
6 | """
7 |
8 | from __future__ import annotations
9 |
10 | import os
11 | import sys
12 | import logging
13 | from typing import Any, Dict, Optional, Tuple
14 |
15 | current_dir = os.path.dirname(os.path.abspath(__file__))
16 | parent_dir = os.path.dirname(current_dir)
17 | if parent_dir not in sys.path:
18 |     sys.path.insert(0, parent_dir)
19 |
20 | from context_payload import extract_context  # type: ignore
21 |
22 | logger = logging.getLogger(__name__)
23 |
24 | class ConfigGenerateVideoHailuoT2VPro:
25 | MODEL_ID = "minimax/hailuo-02/t2v-pro"
26 |
27 | @classmethod
28 | def INPUT_TYPES(cls):
29 | return {
30 | "required": {},
31 | "optional": {
32 | "context": ("*", {}),
33 | "enable_prompt_expansion": ("BOOLEAN", {"default": True}),
34 | },
35 | }
36 |
37 | RETURN_TYPES = ("*",)
38 | RETURN_NAMES = ("context",)
39 | FUNCTION = "configure"
40 | CATEGORY = "🔗llm_toolkit/config/video/hailuo"
41 |
42 | def configure(
43 | self,
44 | context: Optional[Any] = None,
45 | enable_prompt_expansion: bool = True,
46 | ) -> Tuple[Dict[str, Any]]:
47 | logger.info("ConfigGenerateVideoHailuoT2VPro executing…")
48 |
49 | if context is None:
50 | out_ctx: Dict[str, Any] = {}
51 | elif isinstance(context, dict):
52 | out_ctx = context.copy()
53 | else:
54 | out_ctx = extract_context(context)
55 | if not isinstance(out_ctx, dict):
56 | out_ctx = {"passthrough_data": context}
57 |
58 | gen_cfg = out_ctx.get("generation_config", {})
59 | if not isinstance(gen_cfg, dict):
60 | gen_cfg = {}
61 |
62 | gen_cfg.update(
63 | {
64 | "model_id": self.MODEL_ID,
65 | "enable_prompt_expansion": enable_prompt_expansion,
66 | }
67 | )
68 |
69 | out_ctx["generation_config"] = gen_cfg
70 | logger.info("ConfigGenerateVideoHailuoT2VPro: saved config %s", gen_cfg)
71 | return (out_ctx,)
72 |
73 |
74 | NODE_CLASS_MAPPINGS = {
75 | "ConfigGenerateVideoHailuoT2VPro": ConfigGenerateVideoHailuoT2VPro
76 | }
77 | NODE_DISPLAY_NAME_MAPPINGS = {
78 | "ConfigGenerateVideoHailuoT2VPro": "Configure Hailuo 02 T2V Pro (🔗LLMToolkit)"
79 | }
--------------------------------------------------------------------------------
/comfy-nodes/config_generate_video_hailuo_t2v_standard.py:
--------------------------------------------------------------------------------
1 | # comfy-nodes/config_generate_video_hailuo_t2v_standard.py
2 | """Configure Video Generation for Minimax Hailuo 02 T2V Standard.
3 |
4 | Handles text-to-video parameters specific to the
5 | `minimax/hailuo-02/t2v-standard` endpoint.
6 | """
7 |
8 | from __future__ import annotations
9 |
10 | import os
11 | import sys
12 | import logging
13 | from typing import Any, Dict, Optional, Tuple
14 |
15 | current_dir = os.path.dirname(os.path.abspath(__file__))
16 | parent_dir = os.path.dirname(current_dir)
17 | if parent_dir not in sys.path:
18 |     sys.path.insert(0, parent_dir)
19 |
20 | from context_payload import extract_context  # type: ignore
21 |
22 | logger = logging.getLogger(__name__)
23 |
24 | class ConfigGenerateVideoHailuoT2VStandard:
25 | MODEL_ID = "minimax/hailuo-02/t2v-standard"
26 |
27 | @classmethod
28 | def INPUT_TYPES(cls):
29 | return {
30 | "required": {},
31 | "optional": {
32 | "context": ("*", {}),
33 | "duration": (["6", "10"], {"default": "6"}),
34 | "enable_prompt_expansion": ("BOOLEAN", {"default": True}),
35 | },
36 | }
37 |
38 | RETURN_TYPES = ("*",)
39 | RETURN_NAMES = ("context",)
40 | FUNCTION = "configure"
41 | CATEGORY = "🔗llm_toolkit/config/video/hailuo"
42 |
43 | def configure(
44 | self,
45 | context: Optional[Any] = None,
46 | duration: str = "6",
47 | enable_prompt_expansion: bool = True,
48 | ) -> Tuple[Dict[str, Any]]:
49 | logger.info("ConfigGenerateVideoHailuoT2VStandard executing…")
50 |
51 | if context is None:
52 | out_ctx: Dict[str, Any] = {}
53 | elif isinstance(context, dict):
54 | out_ctx = context.copy()
55 | else:
56 | out_ctx = extract_context(context)
57 | if not isinstance(out_ctx, dict):
58 | out_ctx = {"passthrough_data": context}
59 |
60 | gen_cfg = out_ctx.get("generation_config", {})
61 | if not isinstance(gen_cfg, dict):
62 | gen_cfg = {}
63 |
64 | gen_cfg.update(
65 | {
66 | "model_id": self.MODEL_ID,
67 | "duration": int(duration),
68 | "enable_prompt_expansion": enable_prompt_expansion,
69 | }
70 | )
71 |
72 | out_ctx["generation_config"] = gen_cfg
73 | logger.info("ConfigGenerateVideoHailuoT2VStandard: saved config %s", gen_cfg)
74 | return (out_ctx,)
75 |
76 |
77 | NODE_CLASS_MAPPINGS = {
78 | "ConfigGenerateVideoHailuoT2VStandard": ConfigGenerateVideoHailuoT2VStandard
79 | }
80 | NODE_DISPLAY_NAME_MAPPINGS = {
81 | "ConfigGenerateVideoHailuoT2VStandard": "Configure Hailuo 02 T2V Standard (🔗LLMToolkit)"
82 | }
--------------------------------------------------------------------------------
/comfy-nodes/config_generate_video_kling_i2v_master.py:
--------------------------------------------------------------------------------
1 | # comfy-nodes/config_generate_video_kling_i2v_master.py
2 | """Configure Video Generation for Kling V2.1 I2V Master.
3 |
4 | Handles image-to-video parameters specific to the
5 | `kwaivgi/kling-v2.1-i2v-master` endpoint.
6 | """
7 |
8 | from __future__ import annotations
9 |
10 | import os
11 | import sys
12 | import logging
13 | from typing import Any, Dict, Optional, Tuple
14 |
15 | current_dir = os.path.dirname(os.path.abspath(__file__))
16 | parent_dir = os.path.dirname(current_dir)
17 | if parent_dir not in sys.path:
18 |     sys.path.insert(0, parent_dir)
19 |
20 | from context_payload import extract_context  # type: ignore
21 |
22 | logger = logging.getLogger(__name__)
23 |
24 | class ConfigGenerateVideoKlingI2VMaster:
25 | MODEL_ID = "kwaivgi/kling-v2.1-i2v-master"
26 |
27 | @classmethod
28 | def INPUT_TYPES(cls):
29 | return {
30 | "required": {},
31 | "optional": {
32 | "context": ("*", {}),
33 | "image_url": (
34 | "STRING",
35 | {
36 | "multiline": False,
37 | "default": "",
38 | "tooltip": "Input image URL (.jpg/.png)",
39 | },
40 | ),
41 | "negative_prompt": ("STRING", {"multiline": True, "default": ""}),
42 | "guidance_scale": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
43 | "duration": (["5", "10"], {"default": "5"}),
44 | },
45 | }
46 |
47 | RETURN_TYPES = ("*",)
48 | RETURN_NAMES = ("context",)
49 | FUNCTION = "configure"
50 | CATEGORY = "🔗llm_toolkit/config/video/kling"
51 |
52 | def configure(
53 | self,
54 | context: Optional[Any] = None,
55 | image_url: str = "",
56 | negative_prompt: str = "",
57 | guidance_scale: float = 0.5,
58 | duration: str = "5",
59 | ) -> Tuple[Dict[str, Any]]:
60 | logger.info("ConfigGenerateVideoKlingI2VMaster executing…")
61 |
62 | if context is None:
63 | out_ctx: Dict[str, Any] = {}
64 | elif isinstance(context, dict):
65 | out_ctx = context.copy()
66 | else:
67 | out_ctx = extract_context(context)
68 | if not isinstance(out_ctx, dict):
69 | out_ctx = {"passthrough_data": context}
70 |
71 | gen_cfg = out_ctx.get("generation_config", {})
72 | if not isinstance(gen_cfg, dict):
73 | gen_cfg = {}
74 |
75 | gen_cfg.update(
76 | {
77 | "model_id": self.MODEL_ID,
78 | **({"image": image_url.strip()} if image_url.strip() else {}),
79 | **({"negative_prompt": negative_prompt.strip()} if negative_prompt.strip() else {}),
80 | "guidance_scale": float(guidance_scale),
81 | "duration": duration,
82 | }
83 | )
84 |
85 | out_ctx["generation_config"] = gen_cfg
86 | logger.info("ConfigGenerateVideoKlingI2VMaster: saved config %s", gen_cfg)
87 | return (out_ctx,)
88 |
89 |
90 | NODE_CLASS_MAPPINGS = {
91 | "ConfigGenerateVideoKlingI2VMaster": ConfigGenerateVideoKlingI2VMaster
92 | }
93 | NODE_DISPLAY_NAME_MAPPINGS = {
94 | "ConfigGenerateVideoKlingI2VMaster": "Configure Kling 2.1 I2V Master (🔗LLMToolkit)"
95 | }
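
One family difference worth noting: the Hailuo nodes cast duration to int before storing it, while the Kling nodes pass the widget string through unchanged (presumably what the kwaivgi endpoints expect):

    (ctx,) = ConfigGenerateVideoKlingI2VMaster().configure(duration="10")
    print(type(ctx["generation_config"]["duration"]).__name__)  # str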
--------------------------------------------------------------------------------
/comfy-nodes/config_generate_video_kling_i2v_pro.py:
--------------------------------------------------------------------------------
1 | # comfy-nodes/config_generate_video_kling_i2v_pro.py
2 | """Configure Video Generation for Kling V2.1 I2V Pro.
3 |
4 | Handles image-to-video parameters specific to the
5 | `kwaivgi/kling-v2.1-i2v-pro` endpoint.
6 | """
7 |
8 | from __future__ import annotations
9 |
10 | import os
11 | import sys
12 | import logging
13 | from typing import Any, Dict, Optional, Tuple
14 |
15 | current_dir = os.path.dirname(os.path.abspath(__file__))
16 | parent_dir = os.path.dirname(current_dir)
17 | if parent_dir not in sys.path:
18 |     sys.path.insert(0, parent_dir)
19 |
20 | from context_payload import extract_context  # type: ignore
21 |
22 | logger = logging.getLogger(__name__)
23 |
24 | class ConfigGenerateVideoKlingI2VPro:
25 | MODEL_ID = "kwaivgi/kling-v2.1-i2v-pro"
26 |
27 | @classmethod
28 | def INPUT_TYPES(cls):
29 | return {
30 | "required": {},
31 | "optional": {
32 | "context": ("*", {}),
33 | "image_url": (
34 | "STRING",
35 | {
36 | "multiline": False,
37 | "default": "",
38 | "tooltip": "Input image URL (.jpg/.png)",
39 | },
40 | ),
41 | "negative_prompt": ("STRING", {"multiline": True, "default": ""}),
42 | "guidance_scale": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
43 | "duration": (["5", "10"], {"default": "5"}),
44 | },
45 | }
46 |
47 | RETURN_TYPES = ("*",)
48 | RETURN_NAMES = ("context",)
49 | FUNCTION = "configure"
50 | CATEGORY = "🔗llm_toolkit/config/video/kling"
51 |
52 | def configure(
53 | self,
54 | context: Optional[Any] = None,
55 | image_url: str = "",
56 | negative_prompt: str = "",
57 | guidance_scale: float = 0.5,
58 | duration: str = "5",
59 | ) -> Tuple[Dict[str, Any]]:
60 | logger.info("ConfigGenerateVideoKlingI2VPro executing…")
61 |
62 | if context is None:
63 | out_ctx: Dict[str, Any] = {}
64 | elif isinstance(context, dict):
65 | out_ctx = context.copy()
66 | else:
67 | out_ctx = extract_context(context)
68 | if not isinstance(out_ctx, dict):
69 | out_ctx = {"passthrough_data": context}
70 |
71 | gen_cfg = out_ctx.get("generation_config", {})
72 | if not isinstance(gen_cfg, dict):
73 | gen_cfg = {}
74 |
75 | gen_cfg.update(
76 | {
77 | "model_id": self.MODEL_ID,
78 | **({"image": image_url.strip()} if image_url.strip() else {}),
79 | **({"negative_prompt": negative_prompt.strip()} if negative_prompt.strip() else {}),
80 | "guidance_scale": float(guidance_scale),
81 | "duration": duration,
82 | }
83 | )
84 |
85 | out_ctx["generation_config"] = gen_cfg
86 | logger.info("ConfigGenerateVideoKlingI2VPro: saved config %s", gen_cfg)
87 | return (out_ctx,)
88 |
89 |
90 | NODE_CLASS_MAPPINGS = {
91 | "ConfigGenerateVideoKlingI2VPro": ConfigGenerateVideoKlingI2VPro
92 | }
93 | NODE_DISPLAY_NAME_MAPPINGS = {
94 | "ConfigGenerateVideoKlingI2VPro": "Configure Kling 2.1 I2V Pro (🔗LLMToolkit)"
95 | }
--------------------------------------------------------------------------------
/comfy-nodes/config_generate_video_kling_i2v_standard.py:
--------------------------------------------------------------------------------
1 | # comfy-nodes/config_generate_video_kling_i2v_standard.py
2 | """Configure Video Generation for Kling V2.1 I2V Standard.
3 |
4 | Handles image-to-video parameters specific to the
5 | `kwaivgi/kling-v2.1-i2v-standard` endpoint.
6 | """
7 |
8 | from __future__ import annotations
9 |
10 | import os
11 | import sys
12 | import logging
13 | from typing import Any, Dict, Optional, Tuple
14 |
15 | current_dir = os.path.dirname(os.path.abspath(__file__))
16 | parent_dir = os.path.dirname(current_dir)
17 | if parent_dir not in sys.path:
18 |     sys.path.insert(0, parent_dir)
19 |
20 | from context_payload import extract_context  # type: ignore
21 |
22 | logger = logging.getLogger(__name__)
23 |
24 | class ConfigGenerateVideoKlingI2VStandard:
25 | MODEL_ID = "kwaivgi/kling-v2.1-i2v-standard"
26 |
27 | @classmethod
28 | def INPUT_TYPES(cls):
29 | return {
30 | "required": {},
31 | "optional": {
32 | "context": ("*", {}),
33 | "image_url": (
34 | "STRING",
35 | {
36 | "multiline": False,
37 | "default": "",
38 | "tooltip": "Input image URL (.jpg/.png)",
39 | },
40 | ),
41 | "negative_prompt": ("STRING", {"multiline": True, "default": ""}),
42 | "guidance_scale": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
43 | "duration": (["5", "10"], {"default": "5"}),
44 | },
45 | }
46 |
47 | RETURN_TYPES = ("*",)
48 | RETURN_NAMES = ("context",)
49 | FUNCTION = "configure"
50 | CATEGORY = "🔗llm_toolkit/config/video/kling"
51 |
52 | def configure(
53 | self,
54 | context: Optional[Any] = None,
55 | image_url: str = "",
56 | negative_prompt: str = "",
57 | guidance_scale: float = 0.5,
58 | duration: str = "5",
59 | ) -> Tuple[Dict[str, Any]]:
60 | logger.info("ConfigGenerateVideoKlingI2VStandard executing…")
61 |
62 | if context is None:
63 | out_ctx: Dict[str, Any] = {}
64 | elif isinstance(context, dict):
65 | out_ctx = context.copy()
66 | else:
67 | out_ctx = extract_context(context)
68 | if not isinstance(out_ctx, dict):
69 | out_ctx = {"passthrough_data": context}
70 |
71 | gen_cfg = out_ctx.get("generation_config", {})
72 | if not isinstance(gen_cfg, dict):
73 | gen_cfg = {}
74 |
75 | gen_cfg.update(
76 | {
77 | "model_id": self.MODEL_ID,
78 | **({"image": image_url.strip()} if image_url.strip() else {}),
79 | **({"negative_prompt": negative_prompt.strip()} if negative_prompt.strip() else {}),
80 | "guidance_scale": float(guidance_scale),
81 | "duration": duration,
82 | }
83 | )
84 |
85 | out_ctx["generation_config"] = gen_cfg
86 | logger.info("ConfigGenerateVideoKlingI2VStandard: saved config %s", gen_cfg)
87 | return (out_ctx,)
88 |
89 |
90 | NODE_CLASS_MAPPINGS = {
91 | "ConfigGenerateVideoKlingI2VStandard": ConfigGenerateVideoKlingI2VStandard
92 | }
93 | NODE_DISPLAY_NAME_MAPPINGS = {
94 | "ConfigGenerateVideoKlingI2VStandard": "Configure Kling 2.1 I2V Standard (🔗LLMToolkit)"
95 | }
--------------------------------------------------------------------------------
/comfy-nodes/config_generate_video_seedance_pro_i2v_720p.py:
--------------------------------------------------------------------------------
1 | # comfy-nodes/config_generate_video_seedance_pro_i2v_720p.py
2 | """Configure Video Generation for Bytedance Seedance Pro I2V (WaveSpeedAI).
3 |
4 | Handles image-to-video parameters specific to the
5 | `bytedance-seedance-v1-pro-i2v-720p` endpoint. Prompt text is supplied via
6 | PromptManager or GenerateVideo; this node only adds model-specific settings to
7 | generation_config.
8 | """
9 |
10 | from __future__ import annotations
11 |
12 | import os
13 | import sys
14 | import logging
15 | from typing import Any, Dict, Optional, Tuple
16 |
17 | current_dir = os.path.dirname(os.path.abspath(__file__))
18 | parent_dir = os.path.dirname(current_dir)
19 | if parent_dir not in sys.path:
20 |     sys.path.insert(0, parent_dir)
21 |
22 | from context_payload import extract_context  # type: ignore
23 |
24 | logger = logging.getLogger(__name__)
25 |
26 | class ConfigGenerateVideoSeedanceProI2V:
27 | MODEL_ID = "bytedance-seedance-v1-pro-i2v-720p"
28 |
29 | @classmethod
30 | def INPUT_TYPES(cls):
31 | return {
32 | "required": {},
33 | "optional": {
34 | "context": ("*", {}),
35 | "image_url": (
36 | "STRING",
37 | {
38 | "multiline": False,
39 | "default": "",
40 | "tooltip": "Input image URL (.jpg/.png)",
41 | },
42 | ),
43 | "duration": ("INT", {"default": 5, "min": 5, "max": 10}),
44 | "seed": ("INT", {"default": -1, "min": -1, "max": 0x7FFFFFFF}),
45 | },
46 | }
47 |
48 | RETURN_TYPES = ("*",)
49 | RETURN_NAMES = ("context",)
50 | FUNCTION = "configure"
51 | CATEGORY = "🔗llm_toolkit/config/video/seedance"
52 |
53 | def configure(
54 | self,
55 | context: Optional[Any] = None,
56 | image_url: str = "",
57 | duration: int = 5,
58 | seed: int = -1,
59 | ) -> Tuple[Dict[str, Any]]:
60 | logger.info("ConfigGenerateVideoSeedanceProI2V executing…")
61 |
62 | # Prepare context dict
63 | if context is None:
64 | out_ctx: Dict[str, Any] = {}
65 | elif isinstance(context, dict):
66 | out_ctx = context.copy()
67 | else:
68 | out_ctx = extract_context(context)
69 | if not isinstance(out_ctx, dict):
70 | out_ctx = {"passthrough_data": context}
71 |
72 | gen_cfg = out_ctx.get("generation_config", {})
73 | if not isinstance(gen_cfg, dict):
74 | gen_cfg = {}
75 |
76 | # Only set/update keys relevant to this model, keep others intact
77 | gen_cfg.update(
78 | {
79 | "model_id": self.MODEL_ID,
80 | **({"image": image_url.strip()} if image_url.strip() else {}),
81 | "duration": int(duration),
82 | "seed": int(seed),
83 | }
84 | )
85 |
86 | out_ctx["generation_config"] = gen_cfg
87 | logger.info("ConfigGenerateVideoSeedanceProI2V: saved config %s", gen_cfg)
88 | return (out_ctx,)
89 |
90 |
91 | NODE_CLASS_MAPPINGS = {
92 | "ConfigGenerateVideoSeedanceProI2V": ConfigGenerateVideoSeedanceProI2V
93 | }
94 | NODE_DISPLAY_NAME_MAPPINGS = {
95 | "ConfigGenerateVideoSeedanceProI2V": "Configure Seedance Pro I2V (🔗LLMToolkit)"
96 | }
--------------------------------------------------------------------------------
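All of the WaveSpeed config nodes in this folder share one merge pattern: copy the incoming context, update only the model-specific keys inside generation_config, and pass the dict through so later nodes can layer their own settings on top. A minimal sketch of that composition, assuming standalone use outside ComfyUI (the image URL is hypothetical):

node = ConfigGenerateVideoSeedanceProI2V()
(ctx,) = node.configure(context=None,
                        image_url="https://example.com/frame.png",
                        duration=5, seed=42)
# ctx == {"generation_config": {
#     "model_id": "bytedance-seedance-v1-pro-i2v-720p",
#     "image": "https://example.com/frame.png",
#     "duration": 5, "seed": 42}}

# Re-running configure() on the same context overwrites only its own keys.
# The **({"image": ...} if image_url.strip() else {}) idiom omits "image"
# entirely when the URL is empty, so the earlier value survives dict.update():
(ctx,) = node.configure(context=ctx, image_url="", duration=8, seed=-1)
# ctx["generation_config"]["image"] is unchanged; "duration" is now 8.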
/comfy-nodes/config_generate_video_seedance_pro_t2v_720p.py:
--------------------------------------------------------------------------------
1 | # comfy-nodes/config_generate_video_seedance_pro_t2v_720p.py
2 | """Configure Video Generation for Bytedance Seedance Pro T2V (WaveSpeedAI).
3 |
4 | This node follows the same pattern as *config_generate_video.py* but exposes the
5 | parameters that are specific to the *bytedance-seedance-v1-pro-t2v-720p* model
6 | on WaveSpeedAI.
7 | """
8 |
9 | from __future__ import annotations
10 |
11 | import os
12 | import sys
13 | import logging
14 | from typing import Any, Dict, Optional, Tuple
15 |
16 | current_dir = os.path.dirname(os.path.abspath(__file__))
17 | parent_dir = os.path.dirname(current_dir)
18 | if parent_dir not in sys.path:
19 |     sys.path.insert(0, parent_dir)
20 | 
21 | from context_payload import extract_context  # type: ignore
22 |
23 | logger = logging.getLogger(__name__)
24 |
25 | ASPECT_OPTIONS = ["21:9", "16:9", "4:3", "1:1", "3:4", "9:16", "9:21"]
26 |
27 | class ConfigGenerateVideoSeedanceProT2V:
28 | """Builds *generation_config* for the Seedance Pro T2V WaveSpeed model."""
29 |
30 | MODEL_ID = "bytedance-seedance-v1-pro-t2v-720p"
31 |
32 | # ------------------------------------------------------------------
33 | @classmethod
34 | def INPUT_TYPES(cls):
35 | return {
36 | "required": {},
37 | "optional": {
38 | "context": ("*", {}),
39 | "aspect_ratio": (ASPECT_OPTIONS, {"default": "16:9"}),
40 | "duration": ("INT", {"default": 5, "min": 5, "max": 10}),
41 | "seed": ("INT", {"default": -1, "min": -1, "max": 0x7FFFFFFF}),
42 | },
43 | }
44 |
45 | RETURN_TYPES = ("*",)
46 | RETURN_NAMES = ("context",)
47 | FUNCTION = "configure"
48 | CATEGORY = "🔗llm_toolkit/config/video/seedance"
49 |
50 | # ------------------------------------------------------------------
51 | def configure(
52 | self,
53 | context: Optional[Any] = None,
54 | aspect_ratio: str = "16:9",
55 | duration: int = 5,
56 | seed: int = -1,
57 | ) -> Tuple[Dict[str, Any]]:
58 | logger.info("ConfigGenerateVideoSeedanceProT2V executing…")
59 |
60 | # Convert incoming context to dict or create new one
61 | if context is None:
62 | out_ctx: Dict[str, Any] = {}
63 | elif isinstance(context, dict):
64 | out_ctx = context.copy()
65 | else:
66 | out_ctx = extract_context(context)
67 | if not isinstance(out_ctx, dict):
68 | out_ctx = {"passthrough_data": context}
69 |
70 | gen_cfg = out_ctx.get("generation_config", {})
71 | if not isinstance(gen_cfg, dict):
72 | gen_cfg = {}
73 |
74 | gen_cfg.update(
75 | {
76 | "model_id": self.MODEL_ID,
77 | "aspect_ratio": aspect_ratio,
78 | "duration": int(duration),
79 | "seed": int(seed),
80 | }
81 | )
82 |
83 | out_ctx["generation_config"] = gen_cfg
84 | logger.info("ConfigGenerateVideoSeedanceProT2V: saved config %s", gen_cfg)
85 | return (out_ctx,)
86 |
87 |
88 | # ------------------------------------------------------------------
89 | # Node registration
90 | # ------------------------------------------------------------------
91 | NODE_CLASS_MAPPINGS = {
92 | "ConfigGenerateVideoSeedanceProT2V": ConfigGenerateVideoSeedanceProT2V
93 | }
94 | NODE_DISPLAY_NAME_MAPPINGS = {
95 | "ConfigGenerateVideoSeedanceProT2V": "Configure Seedance Pro T2V (🔗LLMToolkit)"
96 | }
--------------------------------------------------------------------------------
/comfy-nodes/config_generate_video_veo2_i2v.py:
--------------------------------------------------------------------------------
1 | # comfy-nodes/config_generate_video_veo2_i2v.py
2 | """Configure Video Generation for WaveSpeedAI Veo2 I2V.
3 |
4 | Handles image-to-video parameters specific to the
5 | `wavespeed-ai/veo2-i2v` endpoint. Prompt text is supplied via
6 | PromptManager or GenerateVideo; this node only adds model-specific settings to
7 | generation_config.
8 | """
9 |
10 | from __future__ import annotations
11 |
12 | import os
13 | import sys
14 | import logging
15 | from typing import Any, Dict, Optional, Tuple
16 |
17 | current_dir = os.path.dirname(os.path.abspath(__file__))
18 | parent_dir = os.path.dirname(current_dir)
19 | if parent_dir not in sys.path:
20 |     sys.path.insert(0, parent_dir)
21 | 
22 | from context_payload import extract_context  # type: ignore
23 |
24 | logger = logging.getLogger(__name__)
25 |
26 | class ConfigGenerateVideoVeo2I2V:
27 | MODEL_ID = "wavespeed-ai/veo2-i2v"
28 |
29 | @classmethod
30 | def INPUT_TYPES(cls):
31 | return {
32 | "required": {},
33 | "optional": {
34 | "context": ("*", {}),
35 | "image_url": (
36 | "STRING",
37 | {
38 | "multiline": False,
39 | "default": "",
40 | "tooltip": "Input image URL (.jpg/.png)",
41 | },
42 | ),
43 | "aspect_ratio": (
44 | ["16:9", "9:16", "1:1", "4:3", "3:4"],
45 | {"default": "16:9"}
46 | ),
47 | "duration": (
48 | ["5s", "6s", "7s", "8s"],
49 | {"default": "5s"}
50 | ),
51 | },
52 | }
53 |
54 | RETURN_TYPES = ("*",)
55 | RETURN_NAMES = ("context",)
56 | FUNCTION = "configure"
57 | CATEGORY = "🔗llm_toolkit/config/video/veo"
58 |
59 | def configure(
60 | self,
61 | context: Optional[Any] = None,
62 | image_url: str = "",
63 | aspect_ratio: str = "16:9",
64 | duration: str = "5s",
65 | ) -> Tuple[Dict[str, Any]]:
66 | logger.info("ConfigGenerateVideoVeo2I2V executing…")
67 |
68 | # Prepare context dict
69 | if context is None:
70 | out_ctx: Dict[str, Any] = {}
71 | elif isinstance(context, dict):
72 | out_ctx = context.copy()
73 | else:
74 | out_ctx = extract_context(context)
75 | if not isinstance(out_ctx, dict):
76 | out_ctx = {"passthrough_data": context}
77 |
78 | gen_cfg = out_ctx.get("generation_config", {})
79 | if not isinstance(gen_cfg, dict):
80 | gen_cfg = {}
81 |
82 | # Only set/update keys relevant to this model, keep others intact
83 | gen_cfg.update(
84 | {
85 | "model_id": self.MODEL_ID,
86 | **({"image": image_url.strip()} if image_url.strip() else {}),
87 | "aspect_ratio": aspect_ratio,
88 | "duration": duration,
89 | }
90 | )
91 |
92 | out_ctx["generation_config"] = gen_cfg
93 | logger.info("ConfigGenerateVideoVeo2I2V: saved config %s", gen_cfg)
94 | return (out_ctx,)
95 |
96 |
97 | NODE_CLASS_MAPPINGS = {
98 | "ConfigGenerateVideoVeo2I2V": ConfigGenerateVideoVeo2I2V
99 | }
100 | NODE_DISPLAY_NAME_MAPPINGS = {
101 | "ConfigGenerateVideoVeo2I2V": "Configure Veo2 I2V (🔗LLMToolkit)"
102 | }
--------------------------------------------------------------------------------
/comfy-nodes/config_generate_video_veo2_t2v.py:
--------------------------------------------------------------------------------
1 | # comfy-nodes/config_generate_video_veo2_t2v.py
2 | """Configure Video Generation for WaveSpeedAI Veo2 T2V.
3 |
4 | Handles text-to-video parameters specific to the
5 | `wavespeed-ai/veo2-t2v` endpoint. Prompt text is supplied via
6 | PromptManager or GenerateVideo; this node only adds model-specific settings to
7 | generation_config.
8 | """
9 |
10 | from __future__ import annotations
11 |
12 | import os
13 | import sys
14 | import logging
15 | from typing import Any, Dict, Optional, Tuple
16 |
17 | current_dir = os.path.dirname(os.path.abspath(__file__))
18 | parent_dir = os.path.dirname(current_dir)
19 | if parent_dir not in sys.path:
20 |     sys.path.insert(0, parent_dir)
21 | 
22 | from context_payload import extract_context  # type: ignore
23 |
24 | logger = logging.getLogger(__name__)
25 |
26 | class ConfigGenerateVideoVeo2T2V:
27 | MODEL_ID = "wavespeed-ai/veo2-t2v"
28 |
29 | @classmethod
30 | def INPUT_TYPES(cls):
31 | return {
32 | "required": {},
33 | "optional": {
34 | "context": ("*", {}),
35 | "aspect_ratio": (
36 | ["16:9", "9:16", "1:1", "4:3", "3:4"],
37 | {"default": "16:9"}
38 | ),
39 | "duration": (
40 | ["5s", "6s", "7s", "8s"],
41 | {"default": "5s"}
42 | ),
43 | },
44 | }
45 |
46 | RETURN_TYPES = ("*",)
47 | RETURN_NAMES = ("context",)
48 | FUNCTION = "configure"
49 | CATEGORY = "🔗llm_toolkit/config/video/veo"
50 |
51 | def configure(
52 | self,
53 | context: Optional[Any] = None,
54 | aspect_ratio: str = "16:9",
55 | duration: str = "5s",
56 | ) -> Tuple[Dict[str, Any]]:
57 | logger.info("ConfigGenerateVideoVeo2T2V executing…")
58 |
59 | # Prepare context dict
60 | if context is None:
61 | out_ctx: Dict[str, Any] = {}
62 | elif isinstance(context, dict):
63 | out_ctx = context.copy()
64 | else:
65 | out_ctx = extract_context(context)
66 | if not isinstance(out_ctx, dict):
67 | out_ctx = {"passthrough_data": context}
68 |
69 | gen_cfg = out_ctx.get("generation_config", {})
70 | if not isinstance(gen_cfg, dict):
71 | gen_cfg = {}
72 |
73 | # Only set/update keys relevant to this model, keep others intact
74 | gen_cfg.update(
75 | {
76 | "model_id": self.MODEL_ID,
77 | "aspect_ratio": aspect_ratio,
78 | "duration": duration,
79 | }
80 | )
81 |
82 | out_ctx["generation_config"] = gen_cfg
83 | logger.info("ConfigGenerateVideoVeo2T2V: saved config %s", gen_cfg)
84 | return (out_ctx,)
85 |
86 |
87 | NODE_CLASS_MAPPINGS = {
88 | "ConfigGenerateVideoVeo2T2V": ConfigGenerateVideoVeo2T2V
89 | }
90 | NODE_DISPLAY_NAME_MAPPINGS = {
91 | "ConfigGenerateVideoVeo2T2V": "Configure Veo2 T2V (🔗LLMToolkit)"
92 | }
--------------------------------------------------------------------------------
/comfy-nodes/config_generate_video_veo3.py:
--------------------------------------------------------------------------------
1 | # comfy-nodes/config_generate_video_veo3.py
2 | """Configure Video Generation for Google Veo 3 (WaveSpeedAI).
3 |
4 | Veo 3 is Google DeepMind's text-to-video model with native audio. This
5 | configuration node handles model-specific parameters (currently only *seed*).
6 | Prompt text itself is supplied via the *Prompt Manager* node or the
7 | GenerateVideo node argument – we intentionally keep it out of
8 | *generation_config* so later nodes can override it cleanly.
9 | """
10 |
11 | from __future__ import annotations
12 |
13 | import os
14 | import sys
15 | import logging
16 | from typing import Any, Dict, Optional, Tuple
17 |
18 | current_dir = os.path.dirname(os.path.abspath(__file__))
19 | parent_dir = os.path.dirname(current_dir)
20 | if parent_dir not in sys.path:
21 |     sys.path.insert(0, parent_dir)
22 | 
23 | from context_payload import extract_context  # type: ignore
24 |
25 | logger = logging.getLogger(__name__)
26 |
27 | class ConfigGenerateVideoVeo3:
28 | MODEL_ID = "google-veo3"
29 |
30 | @classmethod
31 | def INPUT_TYPES(cls):
32 | return {
33 | "required": {},
34 | "optional": {
35 | "context": ("*", {}),
36 | # prompt handled via PromptManager; not included here
37 | "seed": ("INT", {"default": -1, "min": -1, "max": 0x7FFFFFFF}),
38 | },
39 | }
40 |
41 | RETURN_TYPES = ("*",)
42 | RETURN_NAMES = ("context",)
43 | FUNCTION = "configure"
44 | CATEGORY = "🔗llm_toolkit/config/video/veo"
45 |
46 | # ------------------------------------------------------------------
47 | def configure(
48 | self,
49 | context: Optional[Any] = None,
50 | seed: int = -1,
51 | ) -> Tuple[Dict[str, Any]]:
52 | logger.info("ConfigGenerateVideoVeo3 executing…")
53 |
54 | if context is None:
55 | out_ctx: Dict[str, Any] = {}
56 | elif isinstance(context, dict):
57 | out_ctx = context.copy()
58 | else:
59 | out_ctx = extract_context(context)
60 | if not isinstance(out_ctx, dict):
61 | out_ctx = {"passthrough_data": context}
62 |
63 | gen_cfg = out_ctx.get("generation_config", {})
64 | if not isinstance(gen_cfg, dict):
65 | gen_cfg = {}
66 |
67 | gen_cfg.update(
68 | {
69 | "model_id": self.MODEL_ID,
70 | # 'prompt' handled elsewhere; only include seed here
71 | "seed": int(seed),
72 | }
73 | )
74 |
75 | out_ctx["generation_config"] = gen_cfg
76 | logger.info("ConfigGenerateVideoVeo3: saved config %s", gen_cfg)
77 | return (out_ctx,)
78 |
79 |
80 | # ------------------------------------------------------------------
81 | # Node registration
82 | # ------------------------------------------------------------------
83 | NODE_CLASS_MAPPINGS = {
84 | "ConfigGenerateVideoVeo3": ConfigGenerateVideoVeo3
85 | }
86 | NODE_DISPLAY_NAME_MAPPINGS = {
87 | "ConfigGenerateVideoVeo3": "Configure Veo 3 (🔗LLMToolkit)"
88 | }
--------------------------------------------------------------------------------
/comfy-nodes/config_generate_video_veo3_fast.py:
--------------------------------------------------------------------------------
1 | # comfy-nodes/config_generate_video_veo3_fast.py
2 | """Configure Video Generation for Google VEO3 Fast.
3 |
4 | Handles text-to-video parameters specific to the
5 | `google/veo3-fast` endpoint. Prompt text is supplied via
6 | PromptManager or GenerateVideo; this node only adds model-specific settings to
7 | generation_config.
8 | """
9 |
10 | from __future__ import annotations
11 |
12 | import os
13 | import sys
14 | import logging
15 | from typing import Any, Dict, Optional, Tuple
16 |
17 | current_dir = os.path.dirname(os.path.abspath(__file__))
18 | parent_dir = os.path.dirname(current_dir)
19 | if parent_dir not in sys.path:
20 |     sys.path.insert(0, parent_dir)
21 | 
22 | from context_payload import extract_context  # type: ignore
23 |
24 | logger = logging.getLogger(__name__)
25 |
26 | class ConfigGenerateVideoVeo3Fast:
27 | MODEL_ID = "google/veo3-fast"
28 |
29 | @classmethod
30 | def INPUT_TYPES(cls):
31 | return {
32 | "required": {},
33 | "optional": {
34 | "context": ("*", {}),
35 | "aspect_ratio": (
36 | ["16:9", "9:16", "1:1", "4:3", "3:4"],
37 | {"default": "16:9"}
38 | ),
39 | "duration": ("INT", {"default": 8, "min": 8, "max": 8}), # Fixed at 8s
40 | "negative_prompt": ("STRING", {"multiline": True, "default": ""}),
41 | "enable_prompt_expansion": ("BOOLEAN", {"default": True}),
42 | "generate_audio": ("BOOLEAN", {"default": False}),
43 | "seed": ("INT", {"default": -1, "min": -1, "max": 0x7FFFFFFF}),
44 | },
45 | }
46 |
47 | RETURN_TYPES = ("*",)
48 | RETURN_NAMES = ("context",)
49 | FUNCTION = "configure"
50 | CATEGORY = "🔗llm_toolkit/config/video/veo"
51 |
52 | def configure(
53 | self,
54 | context: Optional[Any] = None,
55 | aspect_ratio: str = "16:9",
56 | duration: int = 8,
57 | negative_prompt: str = "",
58 | enable_prompt_expansion: bool = True,
59 | generate_audio: bool = False,
60 | seed: int = -1,
61 | ) -> Tuple[Dict[str, Any]]:
62 | logger.info("ConfigGenerateVideoVeo3Fast executing…")
63 |
64 | if context is None:
65 | out_ctx: Dict[str, Any] = {}
66 | elif isinstance(context, dict):
67 | out_ctx = context.copy()
68 | else:
69 | out_ctx = extract_context(context)
70 | if not isinstance(out_ctx, dict):
71 | out_ctx = {"passthrough_data": context}
72 |
73 | gen_cfg = out_ctx.get("generation_config", {})
74 | if not isinstance(gen_cfg, dict):
75 | gen_cfg = {}
76 |
77 | gen_cfg.update(
78 | {
79 | "model_id": self.MODEL_ID,
80 | "aspect_ratio": aspect_ratio,
81 | "duration": int(duration),
82 | "enable_prompt_expansion": enable_prompt_expansion,
83 | "generate_audio": generate_audio,
84 | "seed": int(seed),
85 | **({"negative_prompt": negative_prompt.strip()} if negative_prompt.strip() else {}),
86 | }
87 | )
88 |
89 | out_ctx["generation_config"] = gen_cfg
90 | logger.info("ConfigGenerateVideoVeo3Fast: saved config %s", gen_cfg)
91 | return (out_ctx,)
92 |
93 |
94 | NODE_CLASS_MAPPINGS = {
95 | "ConfigGenerateVideoVeo3Fast": ConfigGenerateVideoVeo3Fast
96 | }
97 | NODE_DISPLAY_NAME_MAPPINGS = {
98 | "ConfigGenerateVideoVeo3Fast": "Configure VEO3 Fast (🔗LLMToolkit)"
99 | }
--------------------------------------------------------------------------------
/comfy-nodes/frames_to_seconds.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from typing import Tuple
3 |
4 | # Initialize logger
5 | logger = logging.getLogger(__name__)
6 |
7 | class FramesToSeconds:
8 | """ComfyUI node that converts a frame count into seconds based on the
9 | frames-per-second (fps) value.
10 |
11 | The node outputs both a floating-point and an integer representation of the
12 | duration in seconds, plus it echoes the fps and frames values.
13 | """
14 |
15 | # ------------------------------------------------------------------
16 | # ComfyUI required class attributes
17 | # ------------------------------------------------------------------
18 | @classmethod
19 | def INPUT_TYPES(cls):
20 | """Define node inputs.
21 | `frames` is the number of frames to convert.
22 | `fps` is an integer slider ranging from 8 to 30 frames-per-second.
23 | """
24 | return {
25 | "required": {
26 | "frames": (
27 | "INT",
28 | {
29 | "default": 1,
30 | "min": 0,
31 | "max": 1_000_000,
32 | "tooltip": "Frame count representing the duration to convert.",
33 | },
34 | ),
35 | "fps": (
36 | "INT",
37 | {
38 | "default": 25,
39 | "min": 8,
40 | "max": 30,
41 | "step": 1,
42 | "tooltip": "Frames-per-second for the conversion (8–30).",
43 | },
44 | ),
45 | }
46 | }
47 |
48 | RETURN_TYPES = ("FLOAT", "INT", "FLOAT", "INT")
49 | RETURN_NAMES = (
50 | "duration_seconds",
51 | "duration_seconds_int",
52 | "fps",
53 | "frames",
54 | )
55 | FUNCTION = "convert"
56 | CATEGORY = "🔗llm_toolkit/utils/audio"
57 |
58 | # ------------------------------------------------------------------
59 | # Core logic
60 | # ------------------------------------------------------------------
61 | def convert(self, frames: int, fps: int) -> Tuple[float, int, float, int]:
62 | """Convert a frame count into seconds.
63 |
64 | Args:
65 | frames: Total number of frames.
66 | fps: Frames-per-second value used for conversion.
67 |
68 | Returns:
69 | Tuple(duration_seconds_float, duration_seconds_int, fps_float, frames_int)
70 | """
71 | try:
72 | if fps <= 0:
73 | raise ValueError("fps must be a positive integer")
74 |
75 | duration_seconds = frames / fps
76 | duration_seconds_int = int(round(duration_seconds))
77 |
78 | logger.debug(
79 | "FramesToSeconds: frames=%s, fps=%s, duration_seconds=%s",
80 | frames,
81 | fps,
82 | duration_seconds,
83 | )
84 |
85 | return duration_seconds, duration_seconds_int, float(fps), frames
86 | except Exception as e:
87 | logger.error("FramesToSeconds: error during conversion: %s", e, exc_info=True)
88 | # Return zeros on failure
89 | return 0.0, 0, 0.0, 0
90 |
91 |
92 | # -------------------------------------------------------------------------
93 | # ComfyUI mappings so the node is discoverable
94 | # -------------------------------------------------------------------------
95 | NODE_CLASS_MAPPINGS = {
96 | "FramesToSeconds": FramesToSeconds,
97 | }
98 |
99 | NODE_DISPLAY_NAME_MAPPINGS = {
100 | "FramesToSeconds": "Frames → Seconds (🔗LLMToolkit)",
101 | }
--------------------------------------------------------------------------------
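The conversion above is plain division plus a rounded integer echo; a quick worked check against the class (values arbitrary):

node = FramesToSeconds()
print(node.convert(frames=75, fps=25))  # (3.0, 3, 25.0, 75)
print(node.convert(frames=40, fps=30))  # (1.333..., 1, 30.0, 40) – the INT output rounds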
/comfy-nodes/high_low_snr.py:
--------------------------------------------------------------------------------
1 | # high_low_snr.py
2 | from __future__ import annotations
3 |
4 | from typing import Tuple
5 |
6 |
7 | class HighLowSNR:
8 | """Map diffusion total steps to the corresponding High-SNR steps.
9 |
10 | This is a lightweight utility node intended for quick schedule mapping.
11 | """
12 |
13 | @classmethod
14 | def INPUT_TYPES(cls):
15 | return {
16 | "required": {
17 | "TotalSteps": (
18 | "INT",
19 | {
20 | "default": 8,
21 | "min": 1,
22 | "max": 4000,
23 | "step": 1,
24 | "tooltip": "Total sampling steps for your scheduler/pipeline.",
25 | },
26 | ),
27 | },
28 | }
29 |
30 | RETURN_TYPES = ("INT",)
31 | RETURN_NAMES = ("HighSteps",)
32 | FUNCTION = "map"
33 | CATEGORY = "🔗llm_toolkit/utils"
34 |
35 | def map(self, TotalSteps: int) -> Tuple[int]:
36 | mapping = {
37 | 4: 2,
38 | 6: 2,
39 | 8: 3,
40 | 10: 4,
41 | 12: 4,
42 | 14: 5,
43 | 16: 6,
44 | 18: 7,
45 | 20: 8,
46 | }
47 | return (mapping.get(int(TotalSteps), 3),)
48 |
49 |
50 | # --- Node registration ---
51 | NODE_CLASS_MAPPINGS = {
52 | "HighLowSNR": HighLowSNR,
53 | }
54 |
55 | NODE_DISPLAY_NAME_MAPPINGS = {
56 | "HighLowSNR": "High/Low SNR Mapper (🔗LLMToolkit)",
57 | }
58 |
59 |
--------------------------------------------------------------------------------
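The mapper is a fixed lookup with a fallback of 3 high-SNR steps for any total not in the table; a small usage sketch:

node = HighLowSNR()
assert node.map(8) == (3,)    # listed in the table
assert node.map(20) == (8,)   # listed in the table
assert node.map(7) == (3,)    # not listed → default fallback of 3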
/comfy-nodes/image_comparer.py:
--------------------------------------------------------------------------------
1 | from nodes import PreviewImage
2 |
3 |
4 | class ImageComparer(PreviewImage):
5 | """A node that compares two images in the UI."""
6 |
7 | @classmethod
8 | def INPUT_TYPES(cls):
9 | return {
10 | "required": {},
11 | "optional": {
12 | "image_a": ("IMAGE",),
13 | "image_b": ("IMAGE",),
14 | },
15 | "hidden": {
16 | "prompt": "PROMPT",
17 | "extra_pnginfo": "EXTRA_PNGINFO"
18 | },
19 | }
20 |
21 | RETURN_TYPES = ()
22 | FUNCTION = "compare_images"
23 | CATEGORY = "🔗llm_toolkit/utils"
24 | OUTPUT_NODE = True
25 |
26 | def compare_images(self,
27 | image_a=None,
28 | image_b=None,
29 | filename_prefix="llmtoolkit.compare.",
30 | prompt=None,
31 | extra_pnginfo=None):
32 |
33 | result = {"ui": {"a_images": [], "b_images": []}}
34 | if image_a is not None and len(image_a) > 0:
35 | result['ui']['a_images'] = self.save_images(image_a, filename_prefix, prompt, extra_pnginfo)['ui']['images']
36 |
37 | if image_b is not None and len(image_b) > 0:
38 | result['ui']['b_images'] = self.save_images(image_b, filename_prefix, prompt, extra_pnginfo)['ui']['images']
39 |
40 | return result
41 |
42 |
43 | NODE_CLASS_MAPPINGS = {
44 | "ImageComparer": ImageComparer,
45 | }
46 |
47 | NODE_DISPLAY_NAME_MAPPINGS = {
48 | "ImageComparer": "Image Comparer (🔗LLMToolkit)",
49 | }
--------------------------------------------------------------------------------
/comfy-nodes/load_audio_from_path.py:
--------------------------------------------------------------------------------
1 | # comfy-nodes/load_audio_from_path.py
2 | """Load Audio From Path (🔗LLMToolkit)
3 |
4 | Utility node that takes a string path to an audio file, loads it,
5 | previews it in the UI, and returns an `AUDIO` object for use with other
6 | audio nodes. Unlike the built-in LoadAudio node, this one accepts an
7 | arbitrary path from an input.
8 | """
9 |
10 | from __future__ import annotations
11 |
12 | import os
13 | import shutil
14 | import logging
15 | import random
16 |
17 | try:
18 | import torchaudio
19 | import torch
20 | except ImportError:
21 | # This should not happen in a ComfyUI environment with audio nodes.
22 | torchaudio = None
23 | torch = None
24 |
25 | try:
26 | import folder_paths
27 | except ImportError:
28 | # Mock for local development
29 | class MockFolderPaths:
30 | def get_output_directory(self): return "output"
31 | def get_input_directory(self): return "input"
32 | def get_temp_directory(self): return "temp"
33 | folder_paths = MockFolderPaths()
34 |
35 | # Import save_audio function from core nodes
36 | try:
37 | from comfy_extras.nodes_audio import save_audio, SaveAudio
38 | except ImportError:
39 | save_audio = None
40 | SaveAudio = object
41 |
42 | logger = logging.getLogger(__name__)
43 |
44 | class LoadAudioFromPath(SaveAudio if SaveAudio is not object else object):
45 | def __init__(self):
46 | if SaveAudio is not object:
47 | self.output_dir = folder_paths.get_temp_directory()
48 | self.type = "temp"
49 |         self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstuvwxyz") for _ in range(5))
50 |
51 | @classmethod
52 | def INPUT_TYPES(cls):
53 | return {
54 | "required": {
55 | "audio_path": (
56 | "STRING",
57 | {
58 | "multiline": False,
59 | "placeholder": "/absolute/or/relative/path.wav",
60 | "tooltip": "Full path to the audio file.",
61 | },
62 | ),
63 | },
64 | "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
65 | }
66 |
67 | CATEGORY = "🔗llm_toolkit/utils/audio"
68 |
69 | RETURN_TYPES = ("AUDIO", "STRING")
70 | RETURN_NAMES = ("audio", "audio_path")
71 | FUNCTION = "load_and_preview"
72 | OUTPUT_NODE = True
73 |
74 | def load_and_preview(self, audio_path: str, prompt=None, extra_pnginfo=None):
75 | if torchaudio is None or torch is None:
76 | logger.error("torchaudio or torch is not installed. Cannot load or preview audio.")
77 |             return {"ui": {"audio": []}, "result": (None, audio_path)}
78 |
79 | audio_path = audio_path.strip()
80 | if not audio_path or not os.path.exists(audio_path):
81 | logger.warning("LoadAudioFromPath: Audio path is empty or file does not exist: %s", audio_path)
82 |             return {"ui": {"audio": []}, "result": (None, audio_path)}
83 |
84 | # Load audio
85 | try:
86 | waveform, sample_rate = torchaudio.load(audio_path)
87 | audio_out = {"waveform": waveform.unsqueeze(0), "sample_rate": sample_rate}
88 | except Exception as e:
89 | logger.error("Failed to load audio file %s: %s", audio_path, e, exc_info=True)
90 | return {"ui": {"audio": []}, "result": (None, audio_path)}
91 |
92 | # --- Preview generation --------------------------------------------------
93 | preview_dict = {"ui": {"audio": []}}
94 | if save_audio and SaveAudio is not object:
95 | try:
96 | preview_dict = save_audio(
97 | self,
98 | audio_out,
99 | filename_prefix="preview",
100 | format="flac",
101 | prompt=prompt,
102 | extra_pnginfo=extra_pnginfo,
103 | )
104 | except Exception as e:
105 | logger.warning("save_audio preview failed: %s", e, exc_info=True)
106 |
107 | # Ensure we return the correct structure
108 | if not isinstance(preview_dict, dict):
109 | preview_dict = {"ui": {"audio": []}}
110 |
111 | preview_dict["result"] = (audio_out, audio_path)
112 | return preview_dict
113 |
114 |
115 | NODE_CLASS_MAPPINGS = {"LoadAudioFromPath": LoadAudioFromPath}
116 | NODE_DISPLAY_NAME_MAPPINGS = {"LoadAudioFromPath": "Load Audio From Path (🔗LLMToolkit)"}
--------------------------------------------------------------------------------
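For reference, the AUDIO object this node emits follows the dict convention used in load_and_preview above: a batched waveform tensor plus a sample rate. A minimal sketch of building one by hand (torch only, values arbitrary):

import torch

waveform = torch.zeros(2, 44100)        # (channels, samples), as torchaudio.load returns
audio = {
    "waveform": waveform.unsqueeze(0),  # batched to (1, channels, samples)
    "sample_rate": 44100,
}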
/comfy-nodes/load_video_from_path.py:
--------------------------------------------------------------------------------
1 | # comfy-nodes/load_video_from_path.py
2 | """Load Video From Path (🔗LLMToolkit)
3 |
4 | Utility node that takes a string path to a video file (e.g. the output of
5 | GenerateVideo) and returns an `IO.VIDEO` object so it can be previewed or saved
6 | with standard ComfyUI video nodes. Unlike the built-in LoadVideo node, this
7 | one accepts an arbitrary path — you don’t have to place the file in the
8 | `input/` folder.
9 | """
10 |
11 | from __future__ import annotations
12 |
13 | import os
14 | import sys
15 | from typing import Tuple
16 |
17 | from comfy.comfy_types import IO, ComfyNodeABC
18 | from comfy_api.input_impl import VideoFromFile
19 | from comfy_api.util import VideoContainer
20 |
21 | class LoadVideoFromPath(ComfyNodeABC):
22 | @classmethod
23 | def INPUT_TYPES(cls):
24 | return {
25 | "required": {
26 | "video_path": (
27 | "STRING",
28 | {
29 | "multiline": False,
30 | "placeholder": "/absolute/or/relative/path.mp4",
31 | "tooltip": "Full path to the video file produced by GenerateVideo",
32 | },
33 | ),
34 | }
35 | }
36 |
37 | CATEGORY = "🔗llm_toolkit/utils/video"
38 |
39 | RETURN_TYPES = (IO.VIDEO, "STRING")
40 | RETURN_NAMES = ("video", "video_path")
41 | FUNCTION = "load"
42 |
43 | def load(self, video_path: str) -> Tuple[VideoFromFile, str]:
44 | video_path = video_path.strip().replace("\\", "/")
45 | if not video_path or not os.path.exists(video_path):
46 | raise FileNotFoundError(f"LoadVideoFromPath: file does not exist – {video_path}")
47 |
48 | video = VideoFromFile(video_path)
49 | return (video, video_path)
50 |
51 |
52 | NODE_CLASS_MAPPINGS = {"LoadVideoFromPath": LoadVideoFromPath}
53 | NODE_DISPLAY_NAME_MAPPINGS = {"LoadVideoFromPath": "Load Video From Path (🔗LLMToolkit)"}
--------------------------------------------------------------------------------
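Usage sketch for the node above, assuming a ComfyUI runtime where comfy_api is importable (the path is hypothetical):

node = LoadVideoFromPath()
video, path = node.load("output/generated.mp4")  # raises FileNotFoundError if the file is missing
# `video` is a VideoFromFile wrapper that standard save/preview video nodes accept.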
/comfy-nodes/logic_preview_image.py:
--------------------------------------------------------------------------------
1 | import os
2 | import logging
3 | from typing import Tuple, Optional, List
4 |
5 | # ComfyUI utility for temp/output directories (if available)
6 | try:
7 | import folder_paths # type: ignore
8 | except ImportError:
9 | folder_paths = None # Fallback: will use working dir
10 |
11 | logger = logging.getLogger(__name__)
12 |
13 |
14 | def _hex_to_rgb_floats(hex_color: str) -> Tuple[float, float, float]:
15 | """Convert #RRGGBB hex to normalized floats (0-1)."""
16 | hex_color = hex_color.lstrip("#")
17 | if len(hex_color) != 6:
18 | raise ValueError("Color must be #RRGGBB")
19 | r = int(hex_color[0:2], 16) / 255.0
20 | g = int(hex_color[2:4], 16) / 255.0
21 | b = int(hex_color[4:6], 16) / 255.0
22 | return r, g, b
23 |
24 |
25 | class PreviewImageLogic:
26 | """Preview/save an image but fall back to a generated blank if no image provided.
27 |
28 | Useful in logic branches where an IMAGE might be None (e.g., from a switch).
29 | """
30 |
31 | @classmethod
32 | def INPUT_TYPES(cls):
33 | return {
34 | "required": {
35 | "save_image": ("BOOLEAN", {"default": False, "tooltip": "Save the resulting image to disk"}),
36 | "width": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}),
37 | "height": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}),
38 | "color": ("COLOR", {"default": "#FFFFFF", "tooltip": "Fill color when generating blank image"}),
39 | },
40 | "optional": {
41 | "file_name": ("STRING", {"default": "logic_preview", "multiline": False, "tooltip": "Base filename when saving (no extension)"}),
42 | "image": ("IMAGE", {"tooltip": "Optional image tensor; if None blank is generated"}),
43 | },
44 | }
45 |
46 | RETURN_TYPES = ("IMAGE",)
47 | RETURN_NAMES = ("image",)
48 | FUNCTION = "preview"
49 | OUTPUT_NODE = False
50 | CATEGORY = "🔗llm_toolkit/utils/logic"
51 |
52 | # ------------------------------------------------------------------
53 | def _make_blank(self, width: int, height: int, color: str):
54 | import torch
55 | import numpy as np
56 |
57 | r, g, b = _hex_to_rgb_floats(color)
58 | arr = np.stack([
59 | np.full((height, width), r, dtype=np.float32),
60 | np.full((height, width), g, dtype=np.float32),
61 | np.full((height, width), b, dtype=np.float32),
62 | ], axis=-1) # (H,W,3)
63 | arr = arr[np.newaxis, ...] # (1,H,W,3)
64 | return torch.from_numpy(arr)
65 |
66 | def _save(self, img_tensor, name_prefix: str) -> str:
67 | # Lazy import PIL only when saving
68 | from PIL import Image # type: ignore
69 | import numpy as np
70 |
71 | # Accept (B,H,W,3) or (B,3,H,W). Convert to (H,W,3) for PIL.
72 | if img_tensor.ndim != 4:
73 | raise ValueError("Expected image tensor (B,3,H,W) or (B,H,W,3)")
74 | t = img_tensor[0].clamp(0, 1).cpu()
75 | if t.shape[0] == 3: # (3,H,W)
76 | img_np = (t.numpy() * 255).astype(np.uint8).transpose(1, 2, 0)
77 | else: # (H,W,3)
78 | img_np = (t.numpy() * 255).astype(np.uint8)
79 | h, w = img_np.shape[:2]
80 | img = Image.fromarray(img_np)
81 |
82 | output_dir = folder_paths.get_output_directory() if folder_paths else os.getcwd()
83 | os.makedirs(output_dir, exist_ok=True)
84 | filename = f"{name_prefix}_{os.getpid()}_{np.random.randint(0,1e6):06d}.png"
85 | path = os.path.join(output_dir, filename)
86 | img.save(path)
87 | return path
88 |
89 | # ------------------------------------------------------------------
90 | def preview(
91 | self,
92 | save_image: bool,
93 | width: int,
94 | height: int,
95 | color: str,
96 | file_name: str = "logic_preview",
97 | image=None,
98 | ):
99 | import torch
100 |
101 | try:
102 | # Handle ComfyUI IMAGE formats: could be torch.Tensor, list[torch.Tensor], or None
103 | if image is None:
104 | out_img = None
105 | elif isinstance(image, list):
106 | out_img = image[0] if image else None
107 | else:
108 | out_img = image # assume torch.Tensor
109 |
110 | if out_img is None:
111 | logger.debug("PreviewImageLogic: Creating blank image %dx%d color %s", width, height, color)
112 | out_img = self._make_blank(width, height, color)
113 |
114 | if save_image:
115 | try:
116 | path = self._save(out_img, file_name or "logic_preview")
117 | logger.info("PreviewImageLogic: Saved image to %s", path)
118 | except Exception as save_err:
119 | logger.error("PreviewImageLogic: Failed to save image – %s", save_err, exc_info=True)
120 | return (out_img,)
121 | except Exception as e:
122 | logger.error("PreviewImageLogic: %s", e, exc_info=True)
123 | # On catastrophic error, return blank 1x1 black to keep graph alive
124 | import torch
125 | fallback = torch.zeros((1, 3, 1, 1), dtype=torch.float32)
126 | return (fallback,)
127 |
128 |
129 | # ---------------------------------------------------------------------------
130 | NODE_CLASS_MAPPINGS = {
131 | "PreviewImageLogic": PreviewImageLogic,
132 | }
133 |
134 | NODE_DISPLAY_NAME_MAPPINGS = {
135 | "PreviewImageLogic": "Preview Image Logic (🔗LLMToolkit)",
136 | }
--------------------------------------------------------------------------------
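The blank-image fallback in _make_blank returns a ComfyUI IMAGE tensor in (B, H, W, 3) float32 layout with values normalized to [0, 1]; a quick shape check against the class above:

node = PreviewImageLogic()
img = node._make_blank(width=4, height=2, color="#FF0000")
assert tuple(img.shape) == (1, 2, 4, 3)
assert img[0, 0, 0].tolist() == [1.0, 0.0, 0.0]  # pure red, normalized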
/comfy-nodes/preview_video.py:
--------------------------------------------------------------------------------
1 | # comfy-nodes/preview_video.py
2 | """Preview Video (🔗LLMToolkit)
3 |
4 | A node that displays a video from a given file path without re-encoding or
5 | saving a new version. It's designed to preview videos that already exist on
6 | the hard drive, such as those produced by the GenerateVideo or
7 | LoadVideoFromPath nodes.
8 | """
9 |
10 | from __future__ import annotations
11 |
12 | import os
13 | import shutil
14 | import logging
15 | from pathlib import Path
16 |
17 | try:
18 | from comfy.comfy_types import IO
19 | from comfy_api.input_impl import VideoFromFile
20 | except ImportError:
21 | # Mock for local development if comfy isn't available
22 | IO = type("IO", (), {"VIDEO": "VIDEO"})
23 | VideoFromFile = None
24 |
25 | try:
26 | import folder_paths
27 | except ImportError:
28 | # Mock for local development
29 | class MockFolderPaths:
30 | def get_output_directory(self): return "output"
31 | def get_input_directory(self): return "input"
32 | def get_temp_directory(self): return "temp"
33 | folder_paths = MockFolderPaths()
34 |
35 | logger = logging.getLogger(__name__)
36 |
37 | class PreviewVideo:
38 | @classmethod
39 | def INPUT_TYPES(cls):
40 | return {
41 | "required": {
42 | "video_path": (
43 | "STRING",
44 | {
45 | "multiline": False,
46 | "placeholder": "/path/to/video.mp4",
47 | "tooltip": "Absolute or relative path to the video file to preview.",
48 | },
49 | ),
50 | }
51 | }
52 |
53 | RETURN_TYPES = (IO.VIDEO, "STRING")
54 | RETURN_NAMES = ("video", "video_path")
55 | FUNCTION = "preview"
56 | OUTPUT_NODE = True
57 | CATEGORY = "🔗llm_toolkit/utils/video"
58 |
59 | def preview(self, video_path: str):
60 | if not video_path or not os.path.exists(video_path):
61 | logger.warning("PreviewVideo: Video path is empty or file does not exist: %s", video_path)
62 |             return {"ui": {"images": []}, "result": (None, video_path)}
63 |
64 | video_path = os.path.abspath(video_path)
65 |
66 | video = None
67 | if VideoFromFile:
68 | try:
69 | video = VideoFromFile(video_path)
70 | except Exception as e:
71 | logger.error("Failed to create VideoFromFile object: %s", e)
72 |
73 | # Check if the video is in a web-accessible directory
74 | for dir_type, dir_path in [
75 | ("output", folder_paths.get_output_directory()),
76 | ("input", folder_paths.get_input_directory()),
77 | ("temp", folder_paths.get_temp_directory()),
78 | ]:
79 | try:
80 | abs_dir_path = os.path.abspath(dir_path)
81 | if os.path.commonpath([video_path, abs_dir_path]) == abs_dir_path:
82 | relative_path = os.path.relpath(video_path, abs_dir_path)
83 | subfolder, filename = os.path.split(relative_path)
84 | return {
85 | "ui": {
86 | "images": [
87 | {
88 | "filename": filename,
89 | "subfolder": subfolder,
90 | "type": dir_type,
91 | }
92 | ],
93 | "animated": (True,),
94 | },
95 | "result": (video, video_path),
96 | }
97 | except Exception as e:
98 | logger.error("Error checking path %s against %s: %s", video_path, dir_path, e)
99 |
100 |
101 | # If not, copy to temp directory to make it accessible
102 | try:
103 | temp_dir = folder_paths.get_temp_directory()
104 | filename = os.path.basename(video_path)
105 | dest_path = os.path.join(temp_dir, filename)
106 |
107 | # To avoid re-copying, check if it already exists
108 | if not os.path.exists(dest_path) or os.path.getmtime(video_path) != os.path.getmtime(dest_path):
109 | shutil.copy2(video_path, dest_path)
110 | logger.info("Copied video to temp for preview: %s", dest_path)
111 |
112 | return {
113 | "ui": {
114 | "images": [
115 | {"filename": filename, "subfolder": "", "type": "temp"}
116 | ],
117 | "animated": (True,),
118 | },
119 | "result": (video, video_path),
120 | }
121 | except Exception as e:
122 | logger.error("Failed to copy video to temp directory for preview: %s", e, exc_info=True)
123 |             return {"ui": {"images": []}, "result": (video, video_path)}
124 |
125 |
126 | NODE_CLASS_MAPPINGS = {"PreviewVideo": PreviewVideo}
127 | NODE_DISPLAY_NAME_MAPPINGS = {"PreviewVideo": "Preview Video From Path (🔗LLMToolkit)"}
--------------------------------------------------------------------------------
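The directory loop in preview() treats a file as web-accessible when the common path of the file and a served directory is the directory itself; a self-contained sketch of that containment test (paths hypothetical, POSIX-style):

import os

def is_inside(file_path: str, dir_path: str) -> bool:
    # True when file_path sits at or below dir_path.
    file_path = os.path.abspath(file_path)
    dir_path = os.path.abspath(dir_path)
    return os.path.commonpath([file_path, dir_path]) == dir_path

assert is_inside("/tmp/comfy/output/run1/v.mp4", "/tmp/comfy/output")
assert not is_inside("/home/user/v.mp4", "/tmp/comfy/output")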
/comfy-nodes/string_utils.py:
--------------------------------------------------------------------------------
1 | # comfy-nodes/string_utils.py
2 | import logging
3 | from typing import Any, Dict, List, Tuple
4 |
5 | logger = logging.getLogger(__name__)
6 |
7 |
8 | class JoinStringsMulti:
9 | """
10 | A node to join multiple strings with a specified delimiter.
11 | Can return either a single concatenated string or a list of strings.
12 | The number of string inputs is dynamically adjustable in the UI.
13 | """
14 |
15 | _last_input_count: int = 2 # Tracks the last known input count for dynamic UI generation
16 |
17 | # ------------------------------------------------------------------
18 | # ComfyUI metadata
19 | # ------------------------------------------------------------------
20 | RETURN_TYPES = ("STRING",)
21 | RETURN_NAMES = ("string",)
22 | FUNCTION = "join_strings"
23 | CATEGORY = "🔗llm_toolkit/utils/text"
24 | OUTPUT_NODE = False # Utility node, not an output node
25 |
26 | # ------------------------------------------------------------------
27 | @classmethod
28 | def INPUT_TYPES(cls) -> Dict[str, Any]:
29 | """Defines the input types for the node (dynamic based on `input_count`)."""
30 | # Base fields that are always present
31 | inputs: Dict[str, Any] = {
32 | "required": {
33 | "input_count": ("INT", {"default": 2, "min": 2, "max": 100, "step": 1}),
34 | "delimiter": ("STRING", {"default": " ", "multiline": False}),
35 | "return_list": ("BOOLEAN", {"default": False}),
36 | }
37 | }
38 |
39 | # Add string_i fields (at least two by default, or `_last_input_count` if bigger)
40 | max_fields = max(2, getattr(cls, "_last_input_count", 2))
41 | for i in range(1, max_fields + 1):
42 | # Force first two inputs; others optional
43 | inputs["required"][f"string_{i}"] = (
44 | "STRING",
45 | {
46 | "default": "",
47 | "forceInput": i <= 2, # First two must be wired, rest optional
48 | "multiline": False,
49 | },
50 | )
51 |
52 | return inputs
53 |
54 | # ------------------------------------------------------------------
55 | # Dynamic update hook – triggers the "Update" button in ComfyUI
56 | # ------------------------------------------------------------------
57 | @classmethod
58 | def IS_CHANGED(cls, input_count: int = 2, **_ignored):
59 | """Return a value that changes when `input_count` changes to trigger UI update."""
60 | try:
61 | input_count = int(input_count)
62 | except Exception:
63 | input_count = 2
64 |
65 | input_count = max(2, min(100, input_count))
66 | cls._last_input_count = input_count
67 | return input_count
68 |
69 | # ------------------------------------------------------------------
70 | # Main execution
71 | # ------------------------------------------------------------------
72 | def join_strings(self, **kwargs) -> Tuple[List[str] | str]:
73 | # Pull settings
74 | input_count = int(kwargs.get("input_count", 2))
75 | delimiter = kwargs.get("delimiter", " ")
76 | return_list = bool(kwargs.get("return_list", False))
77 |
78 | logger.debug(
79 | "JoinStringsMulti: Joining %s strings with delimiter '%s'.", input_count, delimiter
80 | )
81 |
82 | # Collect strings based on the current input_count
83 | collected: List[str] = []
84 | for i in range(1, input_count + 1):
85 | key = f"string_{i}"
86 | if key not in kwargs:
87 | continue # Skip if not provided (shouldn't happen after update)
88 | val = kwargs[key]
89 | if isinstance(val, list):
90 | collected.extend([str(item) for item in val])
91 | else:
92 | collected.append(str(val))
93 |
94 | if return_list:
95 | return (collected,)
96 | else:
97 | return (delimiter.join(collected),)
98 |
99 |
100 | # --- Node Mappings ---
101 | NODE_CLASS_MAPPINGS = {
102 | "JoinStringsMulti": JoinStringsMulti,
103 | }
104 |
105 | NODE_DISPLAY_NAME_MAPPINGS = {
106 | "JoinStringsMulti": "Join Strings Multi (🔗LLMToolkit)",
107 | }
--------------------------------------------------------------------------------
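Because the joiner reads its inputs from **kwargs as string_1..string_N, it can be exercised directly; a short sketch:

node = JoinStringsMulti()
(joined,) = node.join_strings(input_count=3, delimiter=", ",
                              string_1="red", string_2="green", string_3="blue")
assert joined == "red, green, blue"

# With return_list=True the collected strings come back as one list output,
# and list-valued inputs are flattened into it:
(items,) = node.join_strings(input_count=2, return_list=True,
                             string_1="a", string_2=["b", "c"])
assert items == ["a", "b", "c"]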
/comfy-nodes/test_api_key_context.py:
--------------------------------------------------------------------------------
1 | # test_api_key_context.py
2 | import os
3 | import sys
4 | import logging
5 | from typing import Any, Dict, Optional, Tuple
6 |
7 | # Ensure parent directory is in path
8 | current_dir = os.path.dirname(os.path.abspath(__file__))
9 | parent_dir = os.path.dirname(current_dir)
10 | if parent_dir not in sys.path:
11 | sys.path.insert(0, parent_dir)
12 |
13 | from context_payload import extract_context
14 |
15 | logger = logging.getLogger(__name__)
16 |
17 | class TestAPIKeyContext:
18 | """
19 | A test node to verify that API key context handling doesn't interfere with other providers.
20 | This node displays what API keys are available in the context and what the active provider config is.
21 | """
22 |
23 | @classmethod
24 | def INPUT_TYPES(cls):
25 | return {
26 | "required": {},
27 | "optional": {
28 | "context": ("*", {"tooltip": "Context to inspect for API keys"}),
29 | }
30 | }
31 |
32 | RETURN_TYPES = ("STRING",)
33 | RETURN_NAMES = ("report",)
34 | FUNCTION = "test_context"
35 | CATEGORY = "🔗llm_toolkit/debug"
36 | OUTPUT_NODE = True
37 |
38 | def test_context(self, context: Optional[Dict[str, Any]] = None) -> Tuple[str]:
39 | """
40 | Tests and reports on API key context handling.
41 |
42 | Args:
43 | context: Context to inspect
44 |
45 | Returns:
46 | A report string showing API key status
47 | """
48 | logger.info("TestAPIKeyContext: Analyzing context...")
49 |
50 | # Initialize or copy context
51 | if context is None:
52 | output_context = {}
53 | elif isinstance(context, dict):
54 | output_context = context.copy()
55 | else:
56 | # Handle ContextPayload or other formats
57 | unwrapped = extract_context(context)
58 | if isinstance(unwrapped, dict):
59 | output_context = unwrapped.copy()
60 | else:
61 | output_context = {}
62 |
63 | report_lines = ["=== API Key Context Test Report ===\n"]
64 |
65 | # Check for context-based API keys
66 | context_api_keys = output_context.get("api_keys", {})
67 | if context_api_keys:
68 | report_lines.append("📋 Context API Keys Found:")
69 | for provider, key in context_api_keys.items():
70 | masked_key = key[:5] + "..." if len(key) > 5 else "..."
71 | report_lines.append(f" • {provider}: {masked_key}")
72 | else:
73 | report_lines.append("📋 No context API keys found")
74 |
75 | report_lines.append("")
76 |
77 | # Check provider config
78 | provider_config = output_context.get("provider_config", {})
79 | if provider_config:
80 | provider_name = provider_config.get("provider_name", "unknown")
81 | config_key = provider_config.get("api_key", "")
82 | masked_config_key = config_key[:5] + "..." if len(config_key) > 5 else "none"
83 |
84 | report_lines.append("⚙️ Provider Config:")
85 | report_lines.append(f" • Provider: {provider_name}")
86 | report_lines.append(f" • API Key: {masked_config_key}")
87 |
88 | # Check if context key would override
89 | if context_api_keys:
90 | context_key = context_api_keys.get(provider_name)
91 | if not context_key and provider_name == "google":
92 | context_key = context_api_keys.get("gemini")
93 | elif not context_key and provider_name == "gemini":
94 | context_key = context_api_keys.get("google")
95 |
96 | if context_key:
97 | masked_context_key = context_key[:5] + "..." if len(context_key) > 5 else "..."
98 | report_lines.append(f" • Context Override: {masked_context_key} ✅")
99 | else:
100 | report_lines.append(f" • Context Override: none")
101 | else:
102 | report_lines.append("⚙️ No provider config found")
103 |
104 | report_lines.append("")
105 |
106 | # Check for potential conflicts
107 | report_lines.append("🔍 Compatibility Check:")
108 |
109 | if context_api_keys and provider_config:
110 | provider_name = provider_config.get("provider_name", "").lower()
111 | matching_keys = []
112 |
113 | for ctx_provider in context_api_keys.keys():
114 | if ctx_provider.lower() == provider_name or \
115 | (ctx_provider == "google" and provider_name == "gemini") or \
116 | (ctx_provider == "gemini" and provider_name == "google"):
117 | matching_keys.append(ctx_provider)
118 |
119 | if matching_keys:
120 | report_lines.append(f" • Found matching context key(s): {', '.join(matching_keys)} ✅")
121 | else:
122 | report_lines.append(f" • No matching context keys for provider '{provider_name}' ✅")
123 | report_lines.append(" • This won't interfere with the provider's API key")
124 | else:
125 | report_lines.append(" • No potential conflicts detected ✅")
126 |
127 | report = "\n".join(report_lines)
128 | logger.info("TestAPIKeyContext: Report generated")
129 | print(report) # Also print to console for debugging
130 |
131 | return (report,)
132 |
133 |
134 | # --- Node Mappings ---
135 | NODE_CLASS_MAPPINGS = {
136 | "TestAPIKeyContext": TestAPIKeyContext
137 | }
138 | NODE_DISPLAY_NAME_MAPPINGS = {
139 | "TestAPIKeyContext": "Test API Key Context (LLMToolkit Debug)"
140 | }
--------------------------------------------------------------------------------
/context_payload.py:
--------------------------------------------------------------------------------
1 | from typing import Optional, List
2 |
3 | class ContextPayload(str):
4 | """A string that carries a full context dictionary and optional images.
5 |
6 | Because it subclasses ``str`` it can be connected anywhere a plain STRING is
7 | expected in ComfyUI. The extra attributes remain accessible to nodes that
8 | know about them (``.context`` and ``.images``).
9 | """
10 |
11 | def __new__(cls, text: str, context: Optional[dict] = None, images: Optional[List] = None):
12 | # Create the actual string instance
13 | obj = str.__new__(cls, text if text is not None else "")
14 | # Attach additional payload attributes
15 | obj.context = context or {}
16 | obj.images = images or []
17 | return obj
18 |
19 |
20 | def extract_context(value):
21 | """Return the context dictionary embedded in *value* if possible.
22 |
23 | If *value* is already a ``dict`` it is returned unchanged. If it is a
24 | :class:`ContextPayload` the embedded ``.context`` dict is returned. In all
25 | other cases an empty dict is returned.
26 | """
27 | if isinstance(value, dict):
28 | return value
29 | return getattr(value, "context", {})
30 |
31 |
32 | def extract_images(value):
33 | """Return list of images embedded in *value* (or empty list)."""
34 | return getattr(value, "images", [])
--------------------------------------------------------------------------------
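Because ContextPayload subclasses str, it can travel through any STRING socket while context-aware nodes recover the attached dict; a small usage sketch against the module above:

payload = ContextPayload(
    "final answer text",
    context={"provider_config": {"provider_name": "openai"}},
)
assert payload == "final answer text"         # behaves like a plain string
assert extract_context(payload)["provider_config"]["provider_name"] == "openai"
assert extract_context("plain string") == {}  # non-payload values yield an empty dict
assert extract_images(payload) == []          # no images attached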
/example_workflows/LLMToolkit_banana-Gemini-api.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comfy-deploy/comfyui-llm-toolkit/d0a526e850a90d1bac0025ddd977b96a09989144/example_workflows/LLMToolkit_banana-Gemini-api.jpg
--------------------------------------------------------------------------------
/example_workflows/LLMToolkit_banana-OR-api.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comfy-deploy/comfyui-llm-toolkit/d0a526e850a90d1bac0025ddd977b96a09989144/example_workflows/LLMToolkit_banana-OR-api.jpg
--------------------------------------------------------------------------------
/example_workflows/RESTYLE_KONTEXT.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comfy-deploy/comfyui-llm-toolkit/d0a526e850a90d1bac0025ddd977b96a09989144/example_workflows/RESTYLE_KONTEXT.jpg
--------------------------------------------------------------------------------
/example_workflows/kontext_edit_images_with_guided_auto_prompts.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comfy-deploy/comfyui-llm-toolkit/d0a526e850a90d1bac0025ddd977b96a09989144/example_workflows/kontext_edit_images_with_guided_auto_prompts.jpg
--------------------------------------------------------------------------------
/example_workflows/kontext_simple_single_image_edit.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comfy-deploy/comfyui-llm-toolkit/d0a526e850a90d1bac0025ddd977b96a09989144/example_workflows/kontext_simple_single_image_edit.jpg
--------------------------------------------------------------------------------
/example_workflows/llm_toolkit_dall-e-2_variations.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comfy-deploy/comfyui-llm-toolkit/d0a526e850a90d1bac0025ddd977b96a09989144/example_workflows/llm_toolkit_dall-e-2_variations.jpg
--------------------------------------------------------------------------------
/example_workflows/llm_toolkit_gpt-image-1_combine_images.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comfy-deploy/comfyui-llm-toolkit/d0a526e850a90d1bac0025ddd977b96a09989144/example_workflows/llm_toolkit_gpt-image-1_combine_images.jpg
--------------------------------------------------------------------------------
/example_workflows/llm_toolkit_gpt-image-1_generate_image.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comfy-deploy/comfyui-llm-toolkit/d0a526e850a90d1bac0025ddd977b96a09989144/example_workflows/llm_toolkit_gpt-image-1_generate_image.jpg
--------------------------------------------------------------------------------
/example_workflows/llm_toolkit_gpt-image1_inpainting.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comfy-deploy/comfyui-llm-toolkit/d0a526e850a90d1bac0025ddd977b96a09989144/example_workflows/llm_toolkit_gpt-image1_inpainting.jpg
--------------------------------------------------------------------------------
/example_workflows/llm_toolkit_ollama_generate-stream.json:
--------------------------------------------------------------------------------
1 | {"id":"539ddabc-3e07-4691-b4b0-3b30d7ee929c","revision":0,"last_node_id":39,"last_link_id":50,"nodes":[{"id":39,"type":"LLMToolkitTextGeneratorStream","pos":[-1405.5787353515625,533.3887329101562],"size":[435.7979431152344,379.8169250488281],"flags":{},"order":1,"mode":0,"inputs":[{"localized_name":"context","name":"context","shape":7,"type":"*","link":50},{"localized_name":"llm_model","name":"llm_model","type":"COMBO","widget":{"name":"llm_model"},"link":null},{"localized_name":"prompt","name":"prompt","type":"STRING","widget":{"name":"prompt"},"link":null}],"outputs":[{"localized_name":"context","name":"context","type":"*","links":null}],"properties":{"aux_id":"comfy-deploy/comfyui-llm-toolkit","ver":"a3e39758d7b4758943d8b4c0f1ea39572c8e875d","Node name for S&R":"LLMToolkitTextGeneratorStream"},"widgets_values":["gemma3:1b","Write a detailed description of a futuristic city.",""]},{"id":27,"type":"OpenAIProviderNode","pos":[-1816.65625,531.9752197265625],"size":[352.79998779296875,58],"flags":{},"order":0,"mode":4,"inputs":[{"localized_name":"context","name":"context","shape":7,"type":"*","link":null},{"localized_name":"llm_model","name":"llm_model","type":"STRING","widget":{"name":"llm_model"},"link":null}],"outputs":[{"localized_name":"context","name":"context","type":"*","links":[50]}],"properties":{"aux_id":"comfy-deploy/comfyui-llm-toolkit","ver":"a3e39758d7b4758943d8b4c0f1ea39572c8e875d","Node name for S&R":"OpenAIProviderNode","llm_model":"gpt-4.1"},"widgets_values":["gpt-4.1"]}],"links":[[50,27,0,39,0,"*"]],"groups":[],"config":{},"extra":{"ds":{"scale":0.876922695000001,"offset":[2218.6969636246245,-264.81114741626334]},"frontendVersion":"1.17.11","VHS_latentpreview":false,"VHS_latentpreviewrate":0},"version":0.4}
--------------------------------------------------------------------------------
/example_workflows/llm_toolkit_openai_provider_generate-stream.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comfy-deploy/comfyui-llm-toolkit/d0a526e850a90d1bac0025ddd977b96a09989144/example_workflows/llm_toolkit_openai_provider_generate-stream.jpg
--------------------------------------------------------------------------------
/example_workflows/llm_toolkit_openai_provider_generate-stream.json:
--------------------------------------------------------------------------------
1 | {
2 | "id": "b153b51e-37f1-49be-95b1-49304afbf418",
3 | "revision": 0,
4 | "last_node_id": 39,
5 | "last_link_id": 50,
6 | "nodes": [
7 | {
8 | "id": 39,
9 | "type": "LLMToolkitTextGeneratorStream",
10 | "pos": [
11 | -1413.9561767578125,
12 | 516.63427734375
13 | ],
14 | "size": [
15 | 435.7979431152344,
16 | 379.8169250488281
17 | ],
18 | "flags": {},
19 | "order": 0,
20 | "mode": 0,
21 | "inputs": [
22 | {
23 | "name": "context",
24 | "shape": 7,
25 | "type": "*",
26 | "link": null
27 | }
28 | ],
29 | "outputs": [
30 | {
31 | "name": "context",
32 | "type": "*",
33 | "links": null
34 | },
35 | {
36 | "name": "text",
37 | "type": "STRING",
38 | "links": null
39 | }
40 | ],
41 | "properties": {
42 | "Node name for S&R": "LLMToolkitTextGeneratorStream",
43 | "aux_id": "comfy-deploy/comfyui-llm-toolkit",
44 | "ver": "a3e39758d7b4758943d8b4c0f1ea39572c8e875d"
45 | },
46 | "widgets_values": [
47 | "create and small story about a robot",
48 | false,
49 | "In a quiet town nestled between rolling hills, there lived a little robot named Pip. Pip was no ordinary robot; he was designed to help with daily chores, but he had a secret passion for painting. Every evening, when the sun dipped below the horizon, Pip would sneak into the town's abandoned art studio, where he would unleash his creativity on blank canvases.\n\nOne day, the town announced a festival celebrating local artists. Pip, feeling a spark of courage, decided to enter the competition. He worked tirelessly, creating a vibrant mural that depicted the beauty of the hills, the laughter of children, and the warmth of community.\n\nOn the day of the festival, the townsfolk were surprised to see a robot’s artwork among the submissions. When the judges unveiled Pip’s mural, there was a moment of silence, followed by a wave of applause. The colors danced across the canvas, telling a story that resonated with everyone.\n\nPip won the competition, but more importantly, he won the hearts of the townspeople. They realized that art knows no boundaries, and Pip became a beloved member of the community. From that day on, the little robot continued to create, reminding everyone that creativity can come from the most unexpected places."
50 | ]
51 | }
52 | ],
53 | "links": [],
54 | "groups": [],
55 | "config": {},
56 | "extra": {
57 | "ds": {
58 | "scale": 1.0610764609500016,
59 | "offset": [
60 | 1984.6580529595008,
61 | -190.14283125132894
62 | ]
63 | },
64 | "frontendVersion": "1.25.11",
65 | "VHS_latentpreview": false,
66 | "VHS_latentpreviewrate": 0,
67 | "VHS_MetadataImage": true,
68 | "VHS_KeepIntermediate": true
69 | },
70 | "version": 0.4
71 | }
--------------------------------------------------------------------------------
/example_workflows/llmtoolki-BFL-Kontext-image-api.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comfy-deploy/comfyui-llm-toolkit/d0a526e850a90d1bac0025ddd977b96a09989144/example_workflows/llmtoolki-BFL-Kontext-image-api.jpg
--------------------------------------------------------------------------------
/example_workflows/llmtoolkit_Hailuo_I2VPro_api.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comfy-deploy/comfyui-llm-toolkit/d0a526e850a90d1bac0025ddd977b96a09989144/example_workflows/llmtoolkit_Hailuo_I2VPro_api.jpg
--------------------------------------------------------------------------------
/example_workflows/llmtoolkit_Seedance_api.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comfy-deploy/comfyui-llm-toolkit/d0a526e850a90d1bac0025ddd977b96a09989144/example_workflows/llmtoolkit_Seedance_api.jpg
--------------------------------------------------------------------------------
/example_workflows/llmtoolkit_VEO2_api.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comfy-deploy/comfyui-llm-toolkit/d0a526e850a90d1bac0025ddd977b96a09989144/example_workflows/llmtoolkit_VEO2_api.jpg
--------------------------------------------------------------------------------
/example_workflows/llmtoolkit_VEO3_api.json:
--------------------------------------------------------------------------------
1 | {"id":"20c46b82-7390-473b-b16e-8d73f971b678","revision":0,"last_node_id":137,"last_link_id":209,"nodes":[{"id":56,"type":"GenerateVideo","pos":[103.28775787353516,770.3394165039062],"size":[400,200],"flags":{},"order":3,"mode":0,"inputs":[{"localized_name":"context","name":"context","shape":7,"type":"*","link":207},{"localized_name":"prompt","name":"prompt","type":"STRING","widget":{"name":"prompt"},"link":null}],"outputs":[{"localized_name":"context","name":"context","type":"*","links":[]},{"localized_name":"video_paths","name":"video_paths","type":"STRING","links":[97,209]}],"properties":{"cnr_id":"llm-toolkit","ver":"1.0.2","Node name for S&R":"GenerateVideo"},"widgets_values":["\"A wise old owl, wearing spectacles and a tweed jacket, sits on a branch and delivers a philosophical monologue about the nature of time to a curious squirrel.\". Aim for an upbeat, heartwarming tone with bright, cheerful colors and playful animation."]},{"id":135,"type":"ConfigGenerateVideoVeo3","pos":[96.63573455810547,495.44708251953125],"size":[400,200],"flags":{},"order":2,"mode":0,"inputs":[{"localized_name":"context","name":"context","shape":7,"type":"*","link":206},{"localized_name":"prompt","name":"prompt","shape":7,"type":"STRING","widget":{"name":"prompt"},"link":null},{"localized_name":"seed","name":"seed","shape":7,"type":"INT","widget":{"name":"seed"},"link":null}],"outputs":[{"localized_name":"context","name":"context","type":"*","links":[207]}],"properties":{"cnr_id":"llm-toolkit","ver":"1.0.2","Node name for S&R":"ConfigGenerateVideoVeo3"},"widgets_values":["A cinematic handheld selfie-style shot of Bigfoot pacing in a lush forest clearing, angry and flustered, surrounded by human litter.\n• Camera: Selfie GoPro chest-mounted\n• Lens: Slight fish-eye lens for expressive distortion\n• Time of Day: Morning, golden shafts of light through eucalyptus trees\n• Lighting: Soft morning haze, lens flare streaks\n• Audio: Bigfoot ranting — “Your rubbish is choking the land!” — with rustling leaves and birds\n• Background: Mossy stumps, soda cans, plastic bags scattered\n• Mood: Frustrated but funny, classic Bigfoot awkward charm\n• Motion: Slight bobs as he stomps\n• Action: Suddenly hears a faint puppy whimper mid-rant",1046583340,"randomize"]},{"id":133,"type":"WaveSpeedProviderNode","pos":[87.16798400878906,349.90155029296875],"size":[311.8003845214844,82],"flags":{},"order":0,"mode":0,"inputs":[{"localized_name":"context","name":"context","shape":7,"type":"*","link":null},{"localized_name":"llm_model","name":"llm_model","type":"COMBO","widget":{"name":"llm_model"},"link":null},{"localized_name":"external_api_key","name":"external_api_key","shape":7,"type":"STRING","widget":{"name":"external_api_key"},"link":null}],"outputs":[{"localized_name":"context","name":"context","type":"*","links":[206]}],"properties":{"cnr_id":"llm-toolkit","ver":"1.0.2","Node name for S&R":"WaveSpeedProviderNode"},"widgets_values":["google-veo3",""]},{"id":57,"type":"PreviewAny","pos":[561.5684204101562,571.1472778320312],"size":[232.24462890625,104.17790985107422],"flags":{},"order":4,"mode":0,"inputs":[{"localized_name":"source","name":"source","type":"*","link":97}],"outputs":[],"properties":{"cnr_id":"comfy-core","ver":"0.3.43","Node name for 
S&R":"PreviewAny"},"widgets_values":[]},{"id":137,"type":"VHS_LoadVideoPath","pos":[567.2537231445312,762.7056274414062],"size":[231.8896484375,286],"flags":{},"order":5,"mode":0,"inputs":[{"localized_name":"meta_batch","name":"meta_batch","shape":7,"type":"VHS_BatchManager","link":null},{"localized_name":"vae","name":"vae","shape":7,"type":"VAE","link":null},{"localized_name":"video","name":"video","type":"STRING","widget":{"name":"video"},"link":209},{"localized_name":"force_rate","name":"force_rate","type":"FLOAT","widget":{"name":"force_rate"},"link":null},{"localized_name":"custom_width","name":"custom_width","type":"INT","widget":{"name":"custom_width"},"link":null},{"localized_name":"custom_height","name":"custom_height","type":"INT","widget":{"name":"custom_height"},"link":null},{"localized_name":"frame_load_cap","name":"frame_load_cap","type":"INT","widget":{"name":"frame_load_cap"},"link":null},{"localized_name":"skip_first_frames","name":"skip_first_frames","type":"INT","widget":{"name":"skip_first_frames"},"link":null},{"localized_name":"select_every_nth","name":"select_every_nth","type":"INT","widget":{"name":"select_every_nth"},"link":null},{"localized_name":"format","name":"format","shape":7,"type":"COMBO","widget":{"name":"format"},"link":null}],"outputs":[{"localized_name":"IMAGE","name":"IMAGE","type":"IMAGE","links":null},{"localized_name":"frame_count","name":"frame_count","type":"INT","links":null},{"localized_name":"audio","name":"audio","type":"AUDIO","links":null},{"localized_name":"video_info","name":"video_info","type":"VHS_VIDEOINFO","links":null}],"properties":{"cnr_id":"comfyui-videohelpersuite","ver":"a7ce59e381934733bfae03b1be029756d6ce936d","Node name for S&R":"VHS_LoadVideoPath"},"widgets_values":{"video":"","force_rate":0,"custom_width":0,"custom_height":0,"frame_load_cap":0,"skip_first_frames":0,"select_every_nth":1,"format":"AnimateDiff","videopreview":{"hidden":false,"paused":false,"params":{"filename":"","type":"path","format":"video/","force_rate":0,"custom_width":0,"custom_height":0,"frame_load_cap":0,"skip_first_frames":0,"select_every_nth":1}}}},{"id":131,"type":"Note","pos":[537.356201171875,346.5885314941406],"size":[363.4027404785156,140.74798583984375],"flags":{},"order":1,"mode":4,"inputs":[],"outputs":[],"properties":{},"widgets_values":["Optional set the machine secrets if you are using Comfy-Deploy\n\nGEMINI_API_KEY=\nSet the system environment variable or the .env file inside the root of llm-toolki custom node folder ore set the external api key \n\nWord of caution VEO3 is super expensive be carefull"],"color":"#432","bgcolor":"#653"}],"links":[[97,56,1,57,0,"*"],[206,133,0,135,0,"*"],[207,135,0,56,0,"*"],[209,56,1,137,2,"STRING"]],"groups":[],"config":{},"extra":{"ds":{"scale":0.9890030211481178,"offset":[104.19205764533054,-161.58024992971252]}},"version":0.4}
--------------------------------------------------------------------------------
/example_workflows/llmtoolkit_gemini_speech_api.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comfy-deploy/comfyui-llm-toolkit/d0a526e850a90d1bac0025ddd977b96a09989144/example_workflows/llmtoolkit_gemini_speech_api.jpg
--------------------------------------------------------------------------------
/example_workflows/radial_attn.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comfy-deploy/comfyui-llm-toolkit/d0a526e850a90d1bac0025ddd977b96a09989144/example_workflows/radial_attn.jpg
--------------------------------------------------------------------------------
/model_lists_output/anthropic_models.json:
--------------------------------------------------------------------------------
1 | {
2 | "provider": "anthropic",
3 | "generated": "2025-09-02T13:07:33.180253",
4 | "models": [
5 | "No models found for anthropic"
6 | ]
7 | }
--------------------------------------------------------------------------------
/model_lists_output/anthropic_models.py:
--------------------------------------------------------------------------------
1 | # Auto-generated model list for ANTHROPIC
2 | # Generated: 2025-09-02T13:07:33.172250
3 | # Total models: 1
4 |
5 | ANTHROPIC_MODELS = [
6 | "No models found for anthropic",
7 | ]
8 |
9 | # Usage in node:
10 | # @classmethod
11 | # def INPUT_TYPES(cls):
12 | # return {
13 | # "required": {
14 | # "llm_model": (ANTHROPIC_MODELS, {"default": "No models found for anthropic"}),
15 | # }
16 | # }
17 |
--------------------------------------------------------------------------------
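When a provider key is missing at generation time, the exporter still writes a list whose only entry is the literal placeholder string ("No models found for anthropic" above), and the suggested INPUT_TYPES snippet would then surface that placeholder as a selectable model. A consumer may want to filter it out first; a small sketch, assuming the generated module layout shown above (the helper below is illustrative, not toolkit API):

    # Illustrative guard, not part of the toolkit's API.
    from model_lists_output.anthropic_models import ANTHROPIC_MODELS

    def usable_models(models):
        """Drop 'No models found for <provider>' placeholder entries."""
        real = [m for m in models if not m.startswith("No models found")]
        return real or ["none"]  # keep the dropdown non-empty

    print(usable_models(ANTHROPIC_MODELS))  # -> ['none'] for this snapshot
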
/model_lists_output/deepseek_models.json:
--------------------------------------------------------------------------------
1 | {
2 | "provider": "deepseek",
3 | "generated": "2025-09-01T21:14:46.750233",
4 | "models": [
5 | "No models found for deepseek"
6 | ]
7 | }
--------------------------------------------------------------------------------
/model_lists_output/deepseek_models.py:
--------------------------------------------------------------------------------
1 | # Auto-generated model list for DEEPSEEK
2 | # Generated: 2025-09-01T21:14:46.750233
3 | # Total models: 1
4 |
5 | DEEPSEEK_MODELS = [
6 | "No models found for deepseek",
7 | ]
8 |
9 | # Usage in node:
10 | # @classmethod
11 | # def INPUT_TYPES(cls):
12 | # return {
13 | # "required": {
14 | # "llm_model": (DEEPSEEK_MODELS, {"default": "No models found for deepseek"}),
15 | # }
16 | # }
17 |
--------------------------------------------------------------------------------
/model_lists_output/gemini_models.json:
--------------------------------------------------------------------------------
1 | {
2 | "provider": "gemini",
3 | "generated": "2025-09-01T21:15:02.806115",
4 | "models": [
5 | "aqa",
6 | "embedding-001",
7 | "embedding-gecko-001",
8 | "gemini-1.5-flash",
9 | "gemini-1.5-flash-002",
10 | "gemini-1.5-flash-8b",
11 | "gemini-1.5-flash-8b-001",
12 | "gemini-1.5-flash-8b-latest",
13 | "gemini-1.5-flash-latest",
14 | "gemini-1.5-pro",
15 | "gemini-1.5-pro-002",
16 | "gemini-1.5-pro-latest",
17 | "gemini-2.0-flash",
18 | "gemini-2.0-flash-001",
19 | "gemini-2.0-flash-exp",
20 | "gemini-2.0-flash-lite",
21 | "gemini-2.0-flash-lite-001",
22 | "gemini-2.0-flash-lite-preview",
23 | "gemini-2.0-flash-lite-preview-02-05",
24 | "gemini-2.0-flash-thinking-exp",
25 | "gemini-2.0-flash-thinking-exp-01-21",
26 | "gemini-2.0-flash-thinking-exp-1219",
27 | "gemini-2.0-pro-exp",
28 | "gemini-2.0-pro-exp-02-05",
29 | "gemini-2.5-flash",
30 | "gemini-2.5-flash-image-preview",
31 | "gemini-2.5-flash-lite",
32 | "gemini-2.5-flash-lite-preview-06-17",
33 | "gemini-2.5-flash-preview-05-20",
34 | "gemini-2.5-flash-preview-tts",
35 | "gemini-2.5-pro",
36 | "gemini-2.5-pro-preview-03-25",
37 | "gemini-2.5-pro-preview-05-06",
38 | "gemini-2.5-pro-preview-06-05",
39 | "gemini-2.5-pro-preview-tts",
40 | "gemini-embedding-001",
41 | "gemini-embedding-exp",
42 | "gemini-embedding-exp-03-07",
43 | "gemini-exp-1206",
44 | "gemma-3-12b-it",
45 | "gemma-3-1b-it",
46 | "gemma-3-27b-it",
47 | "gemma-3-4b-it",
48 | "gemma-3n-e2b-it",
49 | "gemma-3n-e4b-it",
50 | "imagen-3.0-generate-002",
51 | "imagen-4.0-generate-preview-06-06",
52 | "imagen-4.0-ultra-generate-preview-06-06",
53 | "learnlm-2.0-flash-experimental",
54 | "text-embedding-004"
55 | ]
56 | }
--------------------------------------------------------------------------------
/model_lists_output/gemini_models.py:
--------------------------------------------------------------------------------
1 | # Auto-generated model list for GEMINI
2 | # Generated: 2025-09-01T21:15:02.805116
3 | # Total models: 50
4 |
5 | GEMINI_MODELS = [
6 | "aqa",
7 | "embedding-001",
8 | "embedding-gecko-001",
9 | "gemini-1.5-flash",
10 | "gemini-1.5-flash-002",
11 | "gemini-1.5-flash-8b",
12 | "gemini-1.5-flash-8b-001",
13 | "gemini-1.5-flash-8b-latest",
14 | "gemini-1.5-flash-latest",
15 | "gemini-1.5-pro",
16 | "gemini-1.5-pro-002",
17 | "gemini-1.5-pro-latest",
18 | "gemini-2.0-flash",
19 | "gemini-2.0-flash-001",
20 | "gemini-2.0-flash-exp",
21 | "gemini-2.0-flash-lite",
22 | "gemini-2.0-flash-lite-001",
23 | "gemini-2.0-flash-lite-preview",
24 | "gemini-2.0-flash-lite-preview-02-05",
25 | "gemini-2.0-flash-thinking-exp",
26 | "gemini-2.0-flash-thinking-exp-01-21",
27 | "gemini-2.0-flash-thinking-exp-1219",
28 | "gemini-2.0-pro-exp",
29 | "gemini-2.0-pro-exp-02-05",
30 | "gemini-2.5-flash",
31 | "gemini-2.5-flash-image-preview",
32 | "gemini-2.5-flash-lite",
33 | "gemini-2.5-flash-lite-preview-06-17",
34 | "gemini-2.5-flash-preview-05-20",
35 | "gemini-2.5-flash-preview-tts",
36 | "gemini-2.5-pro",
37 | "gemini-2.5-pro-preview-03-25",
38 | "gemini-2.5-pro-preview-05-06",
39 | "gemini-2.5-pro-preview-06-05",
40 | "gemini-2.5-pro-preview-tts",
41 | "gemini-embedding-001",
42 | "gemini-embedding-exp",
43 | "gemini-embedding-exp-03-07",
44 | "gemini-exp-1206",
45 | "gemma-3-12b-it",
46 | "gemma-3-1b-it",
47 | "gemma-3-27b-it",
48 | "gemma-3-4b-it",
49 | "gemma-3n-e2b-it",
50 | "gemma-3n-e4b-it",
51 | "imagen-3.0-generate-002",
52 | "imagen-4.0-generate-preview-06-06",
53 | "imagen-4.0-ultra-generate-preview-06-06",
54 | "learnlm-2.0-flash-experimental",
55 | "text-embedding-004",
56 | ]
57 |
58 | # Usage in node:
59 | # @classmethod
60 | # def INPUT_TYPES(cls):
61 | # return {
62 | # "required": {
63 | # "llm_model": (GEMINI_MODELS, {"default": "aqa"}),
64 | # }
65 | # }
66 |
--------------------------------------------------------------------------------
/model_lists_output/groq_models.json:
--------------------------------------------------------------------------------
1 | {
2 | "provider": "groq",
3 | "generated": "2025-09-01T21:23:14.031770",
4 | "models": [
5 | "allam-2-7b",
6 | "compound-beta",
7 | "compound-beta-mini",
8 | "deepseek-r1-distill-llama-70b",
9 | "gemma2-9b-it",
10 | "llama-3.1-8b-instant",
11 | "llama-3.3-70b-versatile",
12 | "meta-llama/llama-4-maverick-17b-128e-instruct",
13 | "meta-llama/llama-4-scout-17b-16e-instruct",
14 | "meta-llama/llama-guard-4-12b",
15 | "meta-llama/llama-prompt-guard-2-22m",
16 | "meta-llama/llama-prompt-guard-2-86m",
17 | "moonshotai/kimi-k2-instruct",
18 | "openai/gpt-oss-120b",
19 | "openai/gpt-oss-20b",
20 | "playai-tts",
21 | "playai-tts-arabic",
22 | "qwen/qwen3-32b",
23 | "whisper-large-v3",
24 | "whisper-large-v3-turbo"
25 | ]
26 | }
--------------------------------------------------------------------------------
/model_lists_output/groq_models.py:
--------------------------------------------------------------------------------
1 | # Auto-generated model list for GROQ
2 | # Generated: 2025-09-01T21:23:14.031770
3 | # Total models: 20
4 |
5 | GROQ_MODELS = [
6 | "allam-2-7b",
7 | "compound-beta",
8 | "compound-beta-mini",
9 | "deepseek-r1-distill-llama-70b",
10 | "gemma2-9b-it",
11 | "llama-3.1-8b-instant",
12 | "llama-3.3-70b-versatile",
13 | "meta-llama/llama-4-maverick-17b-128e-instruct",
14 | "meta-llama/llama-4-scout-17b-16e-instruct",
15 | "meta-llama/llama-guard-4-12b",
16 | "meta-llama/llama-prompt-guard-2-22m",
17 | "meta-llama/llama-prompt-guard-2-86m",
18 | "moonshotai/kimi-k2-instruct",
19 | "openai/gpt-oss-120b",
20 | "openai/gpt-oss-20b",
21 | "playai-tts",
22 | "playai-tts-arabic",
23 | "qwen/qwen3-32b",
24 | "whisper-large-v3",
25 | "whisper-large-v3-turbo",
26 | ]
27 |
28 | # Usage in node:
29 | # @classmethod
30 | # def INPUT_TYPES(cls):
31 | # return {
32 | # "required": {
33 | # "llm_model": (GROQ_MODELS, {"default": "allam-2-7b"}),
34 | # }
35 | # }
36 |
--------------------------------------------------------------------------------
/model_lists_output/mistral_models.json:
--------------------------------------------------------------------------------
1 | {
2 | "provider": "mistral",
3 | "generated": "2025-09-01T21:30:54.660436",
4 | "models": [
5 | "No models found for mistral"
6 | ]
7 | }
--------------------------------------------------------------------------------
/model_lists_output/mistral_models.py:
--------------------------------------------------------------------------------
1 | # Auto-generated model list for MISTRAL
2 | # Generated: 2025-09-01T21:30:54.659437
3 | # Total models: 1
4 |
5 | MISTRAL_MODELS = [
6 | "No models found for mistral",
7 | ]
8 |
9 | # Usage in node:
10 | # @classmethod
11 | # def INPUT_TYPES(cls):
12 | # return {
13 | # "required": {
14 | # "llm_model": (MISTRAL_MODELS, {"default": "No models found for mistral"}),
15 | # }
16 | # }
17 |
--------------------------------------------------------------------------------
/model_lists_output/openai_models.json:
--------------------------------------------------------------------------------
1 | {
2 | "provider": "openai",
3 | "generated": "2025-09-01T21:20:46.456204",
4 | "models": [
5 | "babbage-002",
6 | "chatgpt-4o-latest",
7 | "chatgpt-5-latest",
8 | "codex-mini-latest",
9 | "computer-use-preview",
10 | "computer-use-preview-2025-03-11",
11 | "dall-e-2",
12 | "dall-e-3",
13 | "davinci-002",
14 | "gpt-3.5-turbo",
15 | "gpt-3.5-turbo-0125",
16 | "gpt-3.5-turbo-1106",
17 | "gpt-3.5-turbo-16k",
18 | "gpt-3.5-turbo-instruct",
19 | "gpt-3.5-turbo-instruct-0914",
20 | "gpt-4",
21 | "gpt-4-0125-preview",
22 | "gpt-4-0613",
23 | "gpt-4-1106-preview",
24 | "gpt-4-turbo",
25 | "gpt-4-turbo-2024-04-09",
26 | "gpt-4-turbo-preview",
27 | "gpt-4.1",
28 | "gpt-4.1-2025-04-14",
29 | "gpt-4.1-mini",
30 | "gpt-4.1-mini-2025-04-14",
31 | "gpt-4.1-nano",
32 | "gpt-4.1-nano-2025-04-14",
33 | "gpt-4.5-preview",
34 | "gpt-4.5-preview-2025-02-27",
35 | "gpt-4o",
36 | "gpt-4o-2024-05-13",
37 | "gpt-4o-2024-08-06",
38 | "gpt-4o-2024-11-20",
39 | "gpt-4o-audio-preview",
40 | "gpt-4o-audio-preview-2024-10-01",
41 | "gpt-4o-audio-preview-2024-12-17",
42 | "gpt-4o-audio-preview-2025-06-03",
43 | "gpt-4o-mini",
44 | "gpt-4o-mini-2024-07-18",
45 | "gpt-4o-mini-audio-preview",
46 | "gpt-4o-mini-audio-preview-2024-12-17",
47 | "gpt-4o-mini-realtime-preview",
48 | "gpt-4o-mini-realtime-preview-2024-12-17",
49 | "gpt-4o-mini-search-preview",
50 | "gpt-4o-mini-search-preview-2025-03-11",
51 | "gpt-4o-mini-transcribe",
52 | "gpt-4o-mini-tts",
53 | "gpt-4o-realtime-preview",
54 | "gpt-4o-realtime-preview-2024-10-01",
55 | "gpt-4o-realtime-preview-2024-12-17",
56 | "gpt-4o-realtime-preview-2025-06-03",
57 | "gpt-4o-search-preview",
58 | "gpt-4o-search-preview-2025-03-11",
59 | "gpt-4o-transcribe",
60 | "gpt-5",
61 | "gpt-5-2025-08-07",
62 | "gpt-5-chat-latest",
63 | "gpt-5-mini",
64 | "gpt-5-mini-2025-08-07",
65 | "gpt-5-nano",
66 | "gpt-5-nano-2025-08-07",
67 | "gpt-audio",
68 | "gpt-audio-2025-08-28",
69 | "gpt-image-1",
70 | "gpt-realtime",
71 | "gpt-realtime-2025-08-28",
72 | "gpt40-0806-loco-vm",
73 | "o1",
74 | "o1-2024-12-17",
75 | "o1-mini",
76 | "o1-mini-2024-09-12",
77 | "o1-preview",
78 | "o1-preview-2024-09-12",
79 | "o1-pro",
80 | "o1-pro-2025-03-19",
81 | "o3",
82 | "o3-2025-04-16",
83 | "o3-deep-research",
84 | "o3-deep-research-2025-06-26",
85 | "o3-mini",
86 | "o3-mini-2025-01-31",
87 | "o3-pro",
88 | "o3-pro-2025-06-10",
89 | "o4-mini",
90 | "o4-mini-2025-04-16",
91 | "o4-mini-deep-research",
92 | "o4-mini-deep-research-2025-06-26",
93 | "omni-moderation-2024-09-26",
94 | "omni-moderation-latest",
95 | "text-embedding-3-large",
96 | "text-embedding-3-small",
97 | "text-embedding-ada-002",
98 | "tts-1",
99 | "tts-1-1106",
100 | "tts-1-hd",
101 | "tts-1-hd-1106",
102 | "tts-l-hd",
103 | "whisper-1",
104 | "whisper-I"
105 | ]
106 | }
--------------------------------------------------------------------------------
/model_lists_output/openai_models.py:
--------------------------------------------------------------------------------
1 | # Auto-generated model list for OPENAI
2 | # Generated: 2025-09-01T21:20:46.456204
3 | # Total models: 98
4 |
5 | OPENAI_MODELS = [
6 | "babbage-002",
7 | "chatgpt-4o-latest",
8 | "chatgpt-5-latest",
9 | "codex-mini-latest",
10 | "computer-use-preview",
11 | "computer-use-preview-2025-03-11",
12 | "dall-e-2",
13 | "dall-e-3",
14 | "davinci-002",
15 | "gpt-3.5-turbo",
16 | "gpt-3.5-turbo-0125",
17 | "gpt-3.5-turbo-1106",
18 | "gpt-3.5-turbo-16k",
19 | "gpt-3.5-turbo-instruct",
20 | "gpt-3.5-turbo-instruct-0914",
21 | "gpt-4",
22 | "gpt-4-0125-preview",
23 | "gpt-4-0613",
24 | "gpt-4-1106-preview",
25 | "gpt-4-turbo",
26 | "gpt-4-turbo-2024-04-09",
27 | "gpt-4-turbo-preview",
28 | "gpt-4.1",
29 | "gpt-4.1-2025-04-14",
30 | "gpt-4.1-mini",
31 | "gpt-4.1-mini-2025-04-14",
32 | "gpt-4.1-nano",
33 | "gpt-4.1-nano-2025-04-14",
34 | "gpt-4.5-preview",
35 | "gpt-4.5-preview-2025-02-27",
36 | "gpt-4o",
37 | "gpt-4o-2024-05-13",
38 | "gpt-4o-2024-08-06",
39 | "gpt-4o-2024-11-20",
40 | "gpt-4o-audio-preview",
41 | "gpt-4o-audio-preview-2024-10-01",
42 | "gpt-4o-audio-preview-2024-12-17",
43 | "gpt-4o-audio-preview-2025-06-03",
44 | "gpt-4o-mini",
45 | "gpt-4o-mini-2024-07-18",
46 | "gpt-4o-mini-audio-preview",
47 | "gpt-4o-mini-audio-preview-2024-12-17",
48 | "gpt-4o-mini-realtime-preview",
49 | "gpt-4o-mini-realtime-preview-2024-12-17",
50 | "gpt-4o-mini-search-preview",
51 | "gpt-4o-mini-search-preview-2025-03-11",
52 | "gpt-4o-mini-transcribe",
53 | "gpt-4o-mini-tts",
54 | "gpt-4o-realtime-preview",
55 | "gpt-4o-realtime-preview-2024-10-01",
56 | "gpt-4o-realtime-preview-2024-12-17",
57 | "gpt-4o-realtime-preview-2025-06-03",
58 | "gpt-4o-search-preview",
59 | "gpt-4o-search-preview-2025-03-11",
60 | "gpt-4o-transcribe",
61 | "gpt-5",
62 | "gpt-5-2025-08-07",
63 | "gpt-5-chat-latest",
64 | "gpt-5-mini",
65 | "gpt-5-mini-2025-08-07",
66 | "gpt-5-nano",
67 | "gpt-5-nano-2025-08-07",
68 | "gpt-audio",
69 | "gpt-audio-2025-08-28",
70 | "gpt-image-1",
71 | "gpt-realtime",
72 | "gpt-realtime-2025-08-28",
73 | "gpt40-0806-loco-vm",
74 | "o1",
75 | "o1-2024-12-17",
76 | "o1-mini",
77 | "o1-mini-2024-09-12",
78 | "o1-preview",
79 | "o1-preview-2024-09-12",
80 | "o1-pro",
81 | "o1-pro-2025-03-19",
82 | "o3",
83 | "o3-2025-04-16",
84 | "o3-deep-research",
85 | "o3-deep-research-2025-06-26",
86 | "o3-mini",
87 | "o3-mini-2025-01-31",
88 | "o3-pro",
89 | "o3-pro-2025-06-10",
90 | "o4-mini",
91 | "o4-mini-2025-04-16",
92 | "o4-mini-deep-research",
93 | "o4-mini-deep-research-2025-06-26",
94 | "omni-moderation-2024-09-26",
95 | "omni-moderation-latest",
96 | "text-embedding-3-large",
97 | "text-embedding-3-small",
98 | "text-embedding-ada-002",
99 | "tts-1",
100 | "tts-1-1106",
101 | "tts-1-hd",
102 | "tts-1-hd-1106",
103 | "tts-l-hd",
104 | "whisper-1",
105 | "whisper-I",
106 | ]
107 |
108 | # Usage in node:
109 | # @classmethod
110 | # def INPUT_TYPES(cls):
111 | # return {
112 | # "required": {
113 | # "llm_model": (OPENAI_MODELS, {"default": "babbage-002"}),
114 | # }
115 | # }
116 |
--------------------------------------------------------------------------------
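Note that the OpenAI snapshot above mixes chat, embedding, image, audio, and moderation models, so a text-generation node that feeds OPENAI_MODELS straight into a dropdown will offer many entries that cannot chat. One plausible prefix-based filter (the prefix set is an editorial heuristic, not something the toolkit defines):

    # Heuristic sketch: keep only chat-capable entries from the generated list.
    # The prefix set is an editorial guess, not toolkit behavior.
    CHAT_PREFIXES = ("gpt-3.5", "gpt-4", "gpt-5", "chatgpt", "o1", "o3", "o4")

    def chat_models(models):
        return [m for m in models if m.startswith(CHAT_PREFIXES)]

    # e.g. chat_models(OPENAI_MODELS) drops dall-e-*, tts-*, whisper-*,
    # and text-embedding-* entries while keeping the GPT and o-series models.
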
/package-lock.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "ComfyUI-llm-toolkit",
3 | "lockfileVersion": 3,
4 | "requires": true,
5 | "packages": {}
6 | }
7 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {}
2 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "llm-toolkit"
3 | description = "A custom node collection for integrating various LLM (Large Language Model) providers with ComfyUI."
4 | version = "1.3.0"
5 | license = {file = "LICENSE"}
6 | dependencies = [
7 | "aiohttp>=3.8.0",
8 | "pyyaml>=6.0",
9 | "python-dotenv>=0.20.0",
10 | "requests>=2.27.0",
11 | "pillow>=9.0.0",
12 | "numpy>=1.24.0",
13 | "tqdm>=4.64.0",
14 | "transformers>=4.51.0",
15 | "torch>=2.0",
16 | "openai",
17 | "accelerate>=0.28.0",
18 | "sentencepiece",
19 | "safetensors",
20 | "qwen-omni-utils",
21 | "pydub",
22 | "google-genai",
23 | "opencv-python",
24 | "tiktoken",
25 | "blobfile",
26 | "huggingface_hub",
27 | ]
28 | [project.urls]
29 | Repository = "https://github.com/comfy-deploy/comfyui-llm-toolkit"
30 | # Used by Comfy Registry https://comfyregistry.org
31 |
32 | [tool.comfy]
33 | PublisherId = "comfydeploy"
34 | DisplayName = "comfyui-llm-toolkit"
35 | Icon = ""
36 |
37 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | aiohttp>=3.8.0
2 | pyyaml>=6.0
3 | python-dotenv>=0.20.0
4 | requests>=2.27.0
5 | pillow>=9.0.0
6 | numpy>=1.24.0
7 | tqdm>=4.64.0
8 | transformers>=4.51.0
9 | torch>=2.0
10 | openai
11 | accelerate>=0.28.0
12 | sentencepiece
13 | safetensors
14 | qwen-omni-utils
15 | #playsound #install this if you have issues playing the audio
16 | #simpleaudio
17 | pydub
18 | google-genai
19 | opencv-python
20 | tiktoken
21 | blobfile
22 | huggingface_hub
23 | #https://github.com/bitsandbytes-foundation/bitsandbytes/releases/download/continuous-release_main/bitsandbytes-1.33.7.preview-py3-none-win_amd64.whl if you're using the local transformers provider, get bitsandbytes for Windows
24 | #https://github.com/bitsandbytes-foundation/bitsandbytes/releases/download/continuous-release_main/bitsandbytes-1.33.7.preview-py3-none-manylinux_2_24_x86_64.whl if you're using the local transformers provider, get bitsandbytes for Linux
25 | #autoawq #install this if you're using AWQ models with the local transformers provider
26 |
--------------------------------------------------------------------------------
/sounds/LetsBegin.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comfy-deploy/comfyui-llm-toolkit/d0a526e850a90d1bac0025ddd977b96a09989144/sounds/LetsBegin.mp3
--------------------------------------------------------------------------------
/sounds/faile_run.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comfy-deploy/comfyui-llm-toolkit/d0a526e850a90d1bac0025ddd977b96a09989144/sounds/faile_run.mp3
--------------------------------------------------------------------------------
/sounds/follow_IF_YT.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comfy-deploy/comfyui-llm-toolkit/d0a526e850a90d1bac0025ddd977b96a09989144/sounds/follow_IF_YT.mp3
--------------------------------------------------------------------------------
/sounds/oh_smth_wrong.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comfy-deploy/comfyui-llm-toolkit/d0a526e850a90d1bac0025ddd977b96a09989144/sounds/oh_smth_wrong.mp3
--------------------------------------------------------------------------------
/sounds/wellcometocomfydeploy.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comfy-deploy/comfyui-llm-toolkit/d0a526e850a90d1bac0025ddd977b96a09989144/sounds/wellcometocomfydeploy.mp3
--------------------------------------------------------------------------------
/sounds/workflow_staretd.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comfy-deploy/comfyui-llm-toolkit/d0a526e850a90d1bac0025ddd977b96a09989144/sounds/workflow_staretd.mp3
--------------------------------------------------------------------------------
/test_import.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import os
3 | import sys
4 | import importlib.util
5 |
6 | # Current directory paths
7 | current_dir = os.path.dirname(os.path.abspath(__file__))
8 | print(f"Current directory: {current_dir}")
9 |
10 | # Add current dir to path
11 | sys.path.append(current_dir)
12 | print(f"sys.path: {sys.path}")
13 |
14 | # Try importing the modules
15 | try:
16 | import send_request
17 | print("Successfully imported send_request")
18 | except ImportError as e:
19 | print(f"Failed to import send_request: {e}")
20 |
21 | try:
22 | import utils
23 | print("Successfully imported utils")
24 | except ImportError as e:
25 | print(f"Failed to import utils: {e}")
26 |
27 | # Try to import from comfy-nodes
28 | try:
29 | # Use importlib to handle the hyphen in the directory name
30 | spec = importlib.util.spec_from_file_location(
31 | "generate_text",
32 | os.path.join(current_dir, "comfy-nodes", "generate_text.py")
33 | )
34 | generate_text = importlib.util.module_from_spec(spec)
35 | spec.loader.exec_module(generate_text)
36 | print("Successfully imported generate_text")
37 | except Exception as e:
38 | print(f"Failed to import generate_text: {e}")
39 |
40 | print("Import test completed")
--------------------------------------------------------------------------------
/test_js_integration.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """
3 | Simple test script to verify that the JS and Python integration for model fetching works.
4 | Run this script from the command line with:
5 | python test_js_integration.py
6 | """
7 | import os
8 | import sys
9 | import json
10 | import logging
11 | import requests
12 |
13 | # Setup logging
14 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
15 | logger = logging.getLogger(__name__)
16 |
17 | # Add the current directory to the path
18 | current_dir = os.path.dirname(os.path.abspath(__file__))
19 | if current_dir not in sys.path:
20 | sys.path.insert(0, current_dir)
21 |
22 | # Import utility functions if possible
23 | try:
24 | from utils import get_api_key, get_models
25 | import_success = True
26 | except ImportError as e:
27 | logger.error(f"Could not import required functions: {e}")
28 | import_success = False
29 |
30 |
31 | def test_model_fetch(provider, base_ip="localhost", port="11434", api_key=None):
32 | """Test fetching models for a given provider."""
33 | if not import_success:
34 | return f"ERROR: Required modules not imported. Cannot run test."
35 |
36 | try:
37 | if not api_key:
38 | try:
39 | api_key_name = f"{provider.upper()}_API_KEY"
40 | api_key = get_api_key(api_key_name, provider)
41 | logger.info(f"Using API key from environment for {provider}")
42 | except Exception as e:
43 | logger.warning(f"Could not get API key for {provider}: {e}")
44 | api_key = None
45 |
46 | logger.info(f"Fetching models for {provider} (IP: {base_ip}, Port: {port}, API Key: {'Present' if api_key else 'None'})")
47 |
48 | models = get_models(provider, base_ip, port, api_key)
49 |
50 | if models:
51 | logger.info(f"SUCCESS: Found {len(models)} models for {provider}: {models[:5]}" +
52 | (f" and {len(models)-5} more..." if len(models) > 5 else ""))
53 | return models
54 | else:
55 | logger.warning(f"WARNING: No models found for {provider}")
56 | return []
57 |
58 | except Exception as e:
59 | logger.error(f"ERROR testing {provider}: {str(e)}")
60 | return f"ERROR: {str(e)}"
61 |
62 |
63 | def test_endpoint(provider, base_ip="localhost", port="11434", api_key=None):
64 | """Test the ComfyUI API endpoint for fetching models."""
65 | logger.info("Testing ComfyUI API endpoint - this requires ComfyUI to be running")
66 |
67 | try:
68 | url = "http://localhost:8188/ComfyLLMToolkit/get_provider_models"
69 | data = {
70 | "llm_provider": provider,
71 | "base_ip": base_ip,
72 | "port": port,
73 | "external_api_key": api_key or ""
74 | }
75 |
76 | response = requests.post(url, json=data, timeout=30)
77 |
78 | if response.status_code == 200:
79 | models = response.json()
80 | logger.info(f"API Endpoint SUCCESS: Found {len(models)} models for {provider}")
81 | return models
82 | else:
83 | logger.error(f"API Endpoint ERROR: Status code {response.status_code}")
84 | return f"ERROR: Status code {response.status_code}"
85 |
86 | except requests.exceptions.ConnectionError:
87 | logger.error("API Endpoint ERROR: Could not connect to ComfyUI server. Is it running?")
88 | return "ERROR: ConnectionError - Is ComfyUI running?"
89 | except Exception as e:
90 | logger.error(f"API Endpoint ERROR: {str(e)}")
91 | return f"ERROR: {str(e)}"
92 |
93 |
94 | if __name__ == "__main__":
95 | # Test a few providers
96 | providers_to_test = ["openai", "ollama"]
97 |
98 | print("\n=== Testing Direct Model Fetching ===")
99 | for provider in providers_to_test:
100 | result = test_model_fetch(provider)
101 | print(f"{provider}: {'SUCCESS' if isinstance(result, list) else 'FAILED'}")
102 |
103 | print("\n=== Testing API Endpoint (requires ComfyUI running) ===")
104 | for provider in providers_to_test:
105 | result = test_endpoint(provider)
106 | print(f"{provider}: {'SUCCESS' if isinstance(result, list) else 'FAILED'}")
107 |
108 | print("\nTests completed. Check logs for details.")
--------------------------------------------------------------------------------
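Beyond its direct get_models() test, test_js_integration.py documents the HTTP contract the frontend relies on: POST a JSON body with llm_provider, base_ip, port, and external_api_key to /ComfyLLMToolkit/get_provider_models on the ComfyUI server, and a JSON array of model names comes back. The same call as a standalone snippet (identical payload to test_endpoint() above; assumes ComfyUI is running on the default port 8188):

    import requests

    # Same endpoint and payload that test_endpoint() uses above.
    resp = requests.post(
        "http://localhost:8188/ComfyLLMToolkit/get_provider_models",
        json={"llm_provider": "ollama", "base_ip": "localhost",
              "port": "11434", "external_api_key": ""},
        timeout=30,
    )
    resp.raise_for_status()
    print(resp.json())  # expected: a list of model name strings
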
/web/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comfy-deploy/comfyui-llm-toolkit/d0a526e850a90d1bac0025ddd977b96a09989144/web/.DS_Store
--------------------------------------------------------------------------------
/web/js/GenerateTextNode.js:
--------------------------------------------------------------------------------
1 | import { app } from "/scripts/app.js";
2 | import { ComfyWidgets } from "/scripts/widgets.js";
3 |
4 | /* ------------------------------------------------------------------
5 | Extension: Comfy.GenerateTextNode
6 | Adds a read‑only multiline STRING widget to the "Generate Text (🔗LLMToolkit)"
7 | node that displays the LLM response directly on the node. The node still
8 | outputs the normal "context" so it can be connected to an external
9 | Display Text node as before.
10 | -------------------------------------------------------------------*/
11 |
12 | app.registerExtension({
13 | name: "Comfy.GenerateTextNode",
14 | async beforeRegisterNodeDef(nodeType, nodeData, app) {
15 | // Only act on our LLMToolkitTextGenerator node
16 | if (nodeData.name !== "LLMToolkitTextGenerator") return;
17 |
18 | /* -----------------------------
19 | 1. Inject widget on creation
20 | ----------------------------- */
21 | const origOnNodeCreated = nodeType.prototype.onNodeCreated;
22 | nodeType.prototype.onNodeCreated = function () {
23 | // Invoke original handler first
24 | const ret = origOnNodeCreated ? origOnNodeCreated.apply(this, arguments) : undefined;
25 |
26 | // Build a unique widget name so each node instance has its own key
27 | const existingNodes = app.graph._nodes.filter(n => n.type === nodeData.name);
28 | const widgetName = `${nodeData.name}_response_${existingNodes.length}`;
29 |
30 | // Create read‑only STRING widget for displaying response
31 | const widgetInfo = ComfyWidgets.STRING(
32 | this,
33 | widgetName,
34 | ["STRING", {
35 | default: "",
36 | placeholder: "LLM response will appear here …",
37 | multiline: true,
38 | }],
39 | app
40 | );
41 |
42 | // Make read‑only
43 | widgetInfo.widget.inputEl.readOnly = true;
44 |
45 | // Store reference for quick updates
46 | this.__llmResponseWidget = widgetInfo.widget || widgetInfo;
47 |
48 | // Ensure node size grows to fit another widget
49 | this.setSize(this.computeSize());
50 |
51 | return ret;
52 | };
53 |
54 | /* -----------------------------
55 | 2. Helper to update widget
56 | ----------------------------- */
57 | function updateResponseWidget(textContainer) {
58 | if (!this.__llmResponseWidget) return;
59 |
60 | // Extract final text string (textContainer may be array/string)
61 | let valueToSet = "";
62 | if (Array.isArray(textContainer)) {
63 | valueToSet = textContainer.join("\n");
64 | } else if (typeof textContainer === "string") {
65 | valueToSet = textContainer;
66 | }
67 |
68 | // Trim and write
69 | valueToSet = valueToSet.trim();
70 | this.__llmResponseWidget.value = valueToSet;
71 | if (this.__llmResponseWidget.inputEl) this.__llmResponseWidget.inputEl.value = valueToSet;
72 | app.graph.setDirtyCanvas(true);
73 | }
74 |
75 | /* -----------------------------
76 | 3. Hook into onExecuted to receive message
77 | ----------------------------- */
78 | const origOnExecuted = nodeType.prototype.onExecuted;
79 | nodeType.prototype.onExecuted = function (message) {
80 | if (origOnExecuted) origOnExecuted.apply(this, arguments);
81 |
82 | // message is typically { ui: { string: [...] }, result: (…) }
83 | let textData = undefined;
84 | if (message?.ui?.string) {
85 | textData = message.ui.string;
86 | } else if (message?.string) {
87 | textData = message.string;
88 | } else if (Array.isArray(message)) {
89 | textData = message;
90 | }
91 | updateResponseWidget.call(this, textData);
92 | };
93 |
94 | /* -----------------------------
95 | 4. Restore saved value on load
96 | ----------------------------- */
97 | const origOnConfigure = nodeType.prototype.onConfigure;
98 | nodeType.prototype.onConfigure = function (config) {
99 | if (origOnConfigure) origOnConfigure.apply(this, arguments);
100 |
101 | if (!this.__llmResponseWidget || !config?.widgets_values?.length) return;
102 |
103 | const idx = this.widgets.findIndex(w => w === this.__llmResponseWidget);
104 | if (idx !== -1 && config.widgets_values.length > idx) {
105 | const savedVal = config.widgets_values[idx] || "";
106 | this.__llmResponseWidget.value = savedVal;
107 | if (this.__llmResponseWidget.inputEl) this.__llmResponseWidget.inputEl.value = savedVal;
108 | }
109 | };
110 | },
111 | });
--------------------------------------------------------------------------------
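GenerateTextNode.js looks for the generated text under message.ui.string in onExecuted, which matches ComfyUI's convention that a node's execution function returns a dict with a "ui" key (rendered on the node) alongside "result" (routed to the output slots). For orientation, the Python side that feeds this widget ends roughly like the sketch below; the names are illustrative, and the toolkit's real implementation lives in comfy-nodes/generate_text.py:

    # Illustrative return shape only; see comfy-nodes/generate_text.py for the
    # toolkit's actual node.
    def generate(self, context=None, prompt=""):
        text = run_llm(prompt)  # hypothetical helper
        # "ui" -> picked up by the JS hook as message.ui.string;
        # "result" -> the (context, text) tuple for the output slots.
        return {"ui": {"string": [text]}, "result": (context, text)}
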
/web/js/LLMToolkitProviderSelectorNode.js:
--------------------------------------------------------------------------------
1 | // Minimal safe version to prevent freezes
2 | import { app } from "/scripts/app.js";
3 |
4 | app.registerExtension({
5 | name: "Comfy.LLMToolkitProviderSelector",
6 |
7 | async beforeRegisterNodeDef(nodeType, nodeData, appInstance) {
8 | if (nodeData.name === "LLMToolkitProviderSelector") {
9 | console.log("LLMToolkitProviderSelector: Using minimal safe mode");
10 |
11 | const originalOnNodeCreated = nodeType.prototype.onNodeCreated;
12 |
13 | nodeType.prototype.onNodeCreated = function() {
14 | // Call original if exists
15 | if (originalOnNodeCreated) {
16 | try {
17 | originalOnNodeCreated.apply(this, arguments);
18 | } catch (e) {
19 | console.error("LLMToolkitProviderSelector: Error in original onNodeCreated:", e);
20 | }
21 | }
22 |
23 | // Just log that we're here, don't modify widgets or make fetch calls
24 | console.log("LLMToolkitProviderSelector: Node created safely without modifications");
25 |
26 | // Ensure widgets exist but don't modify them
27 | if (!this.widgets) {
28 | this.widgets = [];
29 | }
30 | };
31 | }
32 | }
33 | });
--------------------------------------------------------------------------------