├── MANIFEST.in ├── assets ├── exceptions.png ├── log_streaming.png ├── menu_options.png └── threshold_example.png ├── comfy_annotations.py ├── .gitignore ├── example ├── __init__.py ├── example_nodes.py └── example_workflow.json ├── .github └── workflows │ └── publish.yml ├── easy_nodes ├── __init__.py ├── web │ ├── llm_debugging.js │ ├── config_service.js │ ├── retain_previews.js │ ├── log_streaming.js │ └── easy_nodes.js ├── comfy_types.py ├── config_service.py ├── log_streaming.py ├── llm_debugging.py └── easy_nodes.py ├── LICENSE ├── pyproject.toml └── readme.md /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include easy_nodes/*.py 2 | include easy_nodes/web/* -------------------------------------------------------------------------------- /assets/exceptions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/andrewharp/ComfyUI-EasyNodes/HEAD/assets/exceptions.png -------------------------------------------------------------------------------- /assets/log_streaming.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/andrewharp/ComfyUI-EasyNodes/HEAD/assets/log_streaming.png -------------------------------------------------------------------------------- /assets/menu_options.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/andrewharp/ComfyUI-EasyNodes/HEAD/assets/menu_options.png -------------------------------------------------------------------------------- /assets/threshold_example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/andrewharp/ComfyUI-EasyNodes/HEAD/assets/threshold_example.png -------------------------------------------------------------------------------- /comfy_annotations.py: 
-------------------------------------------------------------------------------- 1 | # This file is deprecated, and provided only for backwards compatibility with the original comfy_annotations module. 2 | import logging 3 | from easy_nodes import * # noqa: F403 4 | 5 | logging.warning("comfy_annotations is deprecated. Please import from easy_nodes instead.") 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # Don't check in personal configs. 7 | *config.json 8 | 9 | # C extensions 10 | *.so 11 | 12 | # Distribution / packaging 13 | dist/ 14 | build/ 15 | *.egg-info/ 16 | *.egg 17 | 18 | # Virtual environments 19 | venv/ 20 | env/ 21 | ENV/ 22 | 23 | # IDE specific files 24 | .idea/ 25 | .vscode/ 26 | 27 | # Compiled Python files 28 | *.pyc 29 | 30 | # Logs and databases 31 | *.log 32 | *.sqlite3 33 | *.db 34 | 35 | # OS generated files 36 | .DS_Store 37 | Thumbs.db 38 | -------------------------------------------------------------------------------- /example/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | import easy_nodes 3 | easy_nodes.initialize_easy_nodes(default_category="EasyNodes Examples", auto_register=False) 4 | 5 | # Simply importing your module gives the ComfyNode decorator a chance to register your nodes. 6 | from .example_nodes import * # noqa: F403, E402 7 | 8 | NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS = easy_nodes.get_node_mappings() 9 | __all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS"] 10 | 11 | # Optional: export the node list to a file so that e.g. ComfyUI-Manager can pick it up. 
12 | easy_nodes.save_node_list(os.path.join(os.path.dirname(__file__), "node_list.json")) 13 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish to PyPI 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | tags: 7 | - 'v*' 8 | 9 | jobs: 10 | publish: 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | - uses: actions/checkout@v2 15 | 16 | - name: Set up Python 17 | uses: actions/setup-python@v2 18 | with: 19 | python-version: '3.x' 20 | 21 | - name: Install dependencies 22 | run: | 23 | python -m pip install --upgrade pip 24 | pip install build twine 25 | 26 | - name: Build package 27 | run: python -m build 28 | 29 | - name: Publish to PyPI 30 | uses: pypa/gh-action-pypi-publish@master 31 | with: 32 | user: __token__ 33 | password: ${{ secrets.PYPI_API_TOKEN }} -------------------------------------------------------------------------------- /easy_nodes/__init__.py: -------------------------------------------------------------------------------- 1 | from easy_nodes.comfy_types import ( # noqa: F401 2 | Color, 3 | ConditioningTensor, 4 | ImageTensor, 5 | LatentTensor, 6 | MaskTensor, 7 | ModelTensor, 8 | NumberType, 9 | PhotoMaker, 10 | SigmasTensor, 11 | ) 12 | from easy_nodes.easy_nodes import ( # noqa: F401 13 | AnyType, 14 | AutoDescriptionMode, 15 | CheckSeverityMode, 16 | Choice, 17 | ComfyNode, 18 | CustomVerifier, 19 | NumberInput, 20 | StringInput, 21 | TensorVerifier, 22 | TypeVerifier, 23 | create_field_setter_node, 24 | get_node_mappings, 25 | initialize_easy_nodes, 26 | register_type, 27 | save_node_list, 28 | show_image, 29 | show_text, 30 | ) 31 | 32 | # For backwards compatibility with the original comfy_annotations module. 
33 | ComfyFunc = ComfyNode 34 | -------------------------------------------------------------------------------- /easy_nodes/web/llm_debugging.js: -------------------------------------------------------------------------------- 1 | import { app } from "../../scripts/app.js"; 2 | import { createSetting } from "./config_service.js"; 3 | 4 | app.registerExtension({ 5 | name: "easy_nodes.llm_debugging", 6 | async setup() { 7 | createSetting( 8 | "easy_nodes.llm_debugging", 9 | "🪄 LLM Debugging", 10 | "combo", 11 | "Off", 12 | (value) => [ 13 | { value: "On", text: "On", selected: value === "On" }, 14 | { value: "Off", text: "Off", selected: value === "Off" }, 15 | { value: "AutoFix", text: "AutoFix", selected: value === "AutoFix" }, 16 | ] 17 | ); 18 | 19 | createSetting( 20 | "easy_nodes.max_tries", 21 | "🪄 LLM Max Tries", 22 | "number", 23 | 3 24 | ); 25 | 26 | createSetting( 27 | "easy_nodes.llm_model", 28 | "🪄 LLM Model", 29 | "text", 30 | "gpt-4o" 31 | ); 32 | }, 33 | }); 34 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Andrew Harp 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=42", "wheel"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "ComfyUI-EasyNodes" 7 | version = "1.2.7" 8 | description = "Makes creating new nodes for ComfyUI a breeze." 9 | readme = "readme.md" 10 | authors = [{ name = "Andrew Harp", email = "andrew.harp@gmail.com" }] 11 | license = {file = "LICENSE"} 12 | requires-python = ">=3.10" 13 | dependencies = [ 14 | "torch", 15 | "pillow", 16 | "ansi2html", 17 | "colorama", 18 | "coloredlogs", 19 | "numpy", 20 | "openai", 21 | ] 22 | 23 | [project.urls] 24 | "Homepage" = "https://github.com/andrewharp/ComfyUI-EasyNodes" 25 | "Bug Reports" = "https://github.com/andrewharp/ComfyUI-EasyNodes/issues" 26 | "Source" = "https://github.com/andrewharp/ComfyUI-EasyNodes/" 27 | 28 | [tool.setuptools.packages.find] 29 | where = ["."] 30 | include = ["easy_nodes"] 31 | exclude = ["web"] 32 | 33 | [tool.setuptools.package-data] 34 | "*" = ["web/**/*"] 35 | 36 | [tool.poetry] 37 | name = "comfyui-easynodes" 38 | version = "1.0.6" 39 | description = "Makes creating new nodes for ComfyUI a breeze." 
40 | authors = ["Andrew Harp "] 41 | license = "MIT" 42 | homepage = "https://github.com/andrewharp/ComfyUI-EasyNodes" 43 | repository = "https://github.com/andrewharp/ComfyUI-EasyNodes" 44 | documentation = "https://github.com/andrewharp/ComfyUI-EasyNodes" 45 | readme = "readme.md" 46 | packages = [{ include = "easy_nodes" }] 47 | include = ["easy_nodes/web/**/*"] 48 | 49 | 50 | [tool.poetry.dependencies] 51 | python = ">=3.10" 52 | torch = "*" 53 | pillow = "*" 54 | ansi2html = "*" 55 | colorama = "*" 56 | coloredlogs = "*" 57 | numpy = "*" 58 | openai = "*" 59 | -------------------------------------------------------------------------------- /easy_nodes/comfy_types.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from comfy.clip_vision import ClipVisionModel 4 | from comfy.sd import VAE 5 | from easy_nodes.easy_nodes import AnythingVerifier, TensorVerifier, TypeVerifier, register_type, AnyType, any_type 6 | 7 | 8 | class ImageTensor(torch.Tensor): 9 | pass 10 | class MaskTensor(torch.Tensor): 11 | pass 12 | 13 | class LatentTensor(torch.Tensor): 14 | pass 15 | class ConditioningTensor(torch.Tensor): 16 | pass 17 | class ModelTensor(torch.Tensor): 18 | pass 19 | class SigmasTensor(torch.Tensor): 20 | pass 21 | 22 | # Maybe there's an actual class for this? 23 | class PhotoMaker: 24 | pass 25 | 26 | # Abstract type, not for instantiating. 27 | class NumberType: 28 | pass 29 | 30 | class Color(str): 31 | def __new__(cls, value): 32 | return super().__new__(cls, value) 33 | 34 | register_type(Color, "COLOR") 35 | 36 | # ComfyUI will get the special string that anytype is registered with, which is hardcoded to match anything. 
37 | register_type(AnyType, any_type, verifier=AnythingVerifier()) 38 | 39 | # Primitive types 40 | register_type(int, "INT") 41 | register_type(float, "FLOAT", verifier=TypeVerifier([float, int])) 42 | register_type(str, "STRING") 43 | register_type(bool, "BOOLEAN") 44 | register_type(NumberType, "NUMBER", verifier=TypeVerifier([float, int])) 45 | 46 | register_type(ImageTensor, "IMAGE", verifier=TensorVerifier("IMAGE", allowed_shapes=[4], allowed_channels=[1, 3, 4])) 47 | register_type(MaskTensor, "MASK", verifier=TensorVerifier("MASK", allowed_shapes=[3], allowed_range=[0, 1])) 48 | register_type(LatentTensor, "LATENT", verifier=TensorVerifier("LATENT")) 49 | register_type(ConditioningTensor, "CONDITIONING", verifier=TensorVerifier("CONDITIONING")) 50 | register_type(ModelTensor, "MODEL", verifier=TensorVerifier("MODEL")) 51 | register_type(SigmasTensor, "SIGMAS", verifier=TensorVerifier("SIGMAS")) 52 | 53 | register_type(ClipVisionModel, "CLIP_VISION", verifier=AnythingVerifier()) 54 | 55 | # Did I get the right class for VAE? 
56 | register_type(VAE, "VAE", verifier=AnythingVerifier()) 57 | register_type(PhotoMaker, "PHOTOMAKER", verifier=AnythingVerifier()) 58 | -------------------------------------------------------------------------------- /easy_nodes/web/config_service.js: -------------------------------------------------------------------------------- 1 | // Passes config values from JavaScript up to Python via config_service.py 2 | // Adapted from https://github.com/rgthree/rgthree-comfy 3 | import { app } from '../../scripts/app.js' 4 | 5 | export function getObjectValue(obj, objKey, def) { 6 | if (!obj || !objKey) 7 | return def; 8 | const keys = objKey.split("."); 9 | const key = keys.shift(); 10 | const found = obj[key]; 11 | if (keys.length) { 12 | return getObjectValue(found, keys.join("."), def); 13 | } 14 | return found; 15 | } 16 | 17 | export function setObjectValue(obj, objKey, value, createMissingObjects = true) { 18 | if (!obj || !objKey) 19 | return obj; 20 | const keys = objKey.split("."); 21 | const key = keys.shift(); 22 | if (obj[key] === undefined) { 23 | if (!createMissingObjects) { 24 | return; 25 | } 26 | obj[key] = {}; 27 | } 28 | if (!keys.length) { 29 | obj[key] = value; 30 | } 31 | else { 32 | if (typeof obj[key] != "object") { 33 | obj[key] = {}; 34 | } 35 | setObjectValue(obj[key], keys.join("."), value, createMissingObjects); 36 | } 37 | return obj; 38 | } 39 | 40 | class EasyNodeApi { 41 | constructor(baseUrl) { 42 | this.baseUrl = baseUrl || './easy_nodes/api'; 43 | } 44 | apiURL(route) { 45 | return `${this.baseUrl}${route}`; 46 | } 47 | fetchApi(route, options) { 48 | return fetch(this.apiURL(route), options); 49 | } 50 | async fetchJson(route, options) { 51 | const r = await this.fetchApi(route, options); 52 | return await r.json(); 53 | } 54 | } 55 | export const easyNodeApi = new EasyNodeApi(); 56 | 57 | const easyNodesConfig = {}; 58 | 59 | class ConfigService extends EventTarget { 60 | getConfigValue(key, def) { 61 | return 
getObjectValue(easyNodesConfig, key, def); 62 | } 63 | async setConfigValues(changed) { 64 | const body = new FormData(); 65 | body.append("json", JSON.stringify(changed)); 66 | const response = await easyNodeApi.fetchJson("/config", { method: "POST", body }); 67 | if (response.status === "ok") { 68 | for (const [key, value] of Object.entries(changed)) { 69 | setObjectValue(easyNodesConfig, key, value); 70 | this.dispatchEvent(new CustomEvent("config-change", { detail: { key, value } })); 71 | } 72 | } 73 | else { 74 | return false; 75 | } 76 | return true; 77 | } 78 | } 79 | export const SERVICE = new ConfigService(); 80 | 81 | 82 | export function createSetting(id, name, type, defaultValue, options) { 83 | app.ui.settings.addSetting({ 84 | id, 85 | name, 86 | type, 87 | defaultValue, 88 | options, 89 | onChange: (value) => { 90 | SERVICE.setConfigValues({ [id]: value }); 91 | }, 92 | }); 93 | }; 94 | -------------------------------------------------------------------------------- /easy_nodes/web/retain_previews.js: -------------------------------------------------------------------------------- 1 | import { app } from "../../scripts/app.js"; 2 | import { api } from "../../scripts/api.js"; 3 | import { createSetting } from "./config_service.js"; 4 | 5 | const retainPreviewsId = "easy_nodes.RetainPreviews"; 6 | 7 | app.registerExtension({ 8 | name: "Retain Previews", 9 | 10 | async setup() { 11 | createSetting( 12 | retainPreviewsId, 13 | "🪄 Save preview images across browser sessions. 
Requires initial refresh to activate/deactivate.", 14 | "boolean", 15 | false, 16 | ); 17 | }, 18 | 19 | async beforeRegisterNodeDef(nodeType, nodeData, app) { 20 | if (!app.ui.settings.getSettingValue(retainPreviewsId)) { 21 | return; 22 | } 23 | 24 | const previewTypes = ["PreviewImage", "SaveAnimatedPNG", "SaveAnimatedWEBP", "SaveImage"]; 25 | 26 | if (nodeData.description?.startsWith("EasyNodesInfo=") || 27 | nodeData?.isEasyNode || 28 | previewTypes.includes(nodeData.name)) { 29 | const onNodeCreated = nodeType.prototype.onNodeCreated; 30 | nodeType.prototype.onNodeCreated = function() { 31 | onNodeCreated?.apply(this); 32 | 33 | const node = this; 34 | const widget = { 35 | type: "dict", 36 | name: "Retain_Previews", 37 | options: { serialize: false }, 38 | _value: {}, 39 | set value(v) { 40 | if (v && v.images && v.images.length > 0) { 41 | Promise.all(v.images.map(async (params) => { 42 | try { 43 | const response = await api.fetchApi("/easy_nodes/verify_image?" + 44 | new URLSearchParams(params).toString() + 45 | (node.animatedImages ? 
"" : app.getPreviewFormatParam()) + app.getRandParam()); 46 | const data = await response.json(); 47 | return data.exists; 48 | } catch (error) { 49 | return false; 50 | } 51 | })).then((results) => { 52 | if (results.every(Boolean)) { 53 | this._value = v; 54 | app.nodeOutputs[node.id + ""] = v; 55 | } else { 56 | this._value = {}; 57 | app.nodeOutputs[node.id + ""] = {}; 58 | } 59 | }); 60 | } else { 61 | this._value = v; 62 | app.nodeOutputs[node.id + ""] = v; 63 | } 64 | }, 65 | get value() { 66 | return this._value; 67 | }, 68 | }; 69 | 70 | this.canvasWidget = this.addCustomWidget(widget); 71 | } 72 | 73 | const onExecuted = nodeType.prototype.onExecuted; 74 | nodeType.prototype.onExecuted = function (output) { 75 | onExecuted?.apply(this, [output]); 76 | this.canvasWidget.value = output; 77 | }; 78 | } 79 | }, 80 | }); -------------------------------------------------------------------------------- /easy_nodes/config_service.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import re 4 | 5 | from server import PromptServer 6 | from aiohttp import web 7 | 8 | # Receives config values from config_service.js 9 | # Code in this file adapted from https://github.com/rgthree/rgthree-comfy 10 | 11 | def get_dict_value(data: dict, dict_key: str, default = None): 12 | """ Gets a deeply nested value given a dot-delimited key.""" 13 | keys = dict_key.split('.') 14 | key = keys.pop(0) if len(keys) > 0 else None 15 | found = data[key] if key in data else None 16 | if found is not None and len(keys) > 0: 17 | return get_dict_value(found, '.'.join(keys), default) 18 | return found if found is not None else default 19 | 20 | 21 | def set_dict_value(data: dict, dict_key: str, value, create_missing_objects = True): 22 | """ Sets a deeply nested value given a dot-delimited key.""" 23 | keys = dict_key.split('.') 24 | key = keys.pop(0) if len(keys) > 0 else None 25 | if key not in data: 26 | if not 
create_missing_objects: 27 | return 28 | data[key] = {} 29 | if len(keys) == 0: 30 | data[key] = value 31 | else: 32 | set_dict_value(data[key], '.'.join(keys), value, create_missing_objects) 33 | 34 | return data 35 | 36 | 37 | def dict_has_key(data: dict, dict_key): 38 | """ Checks if a dict has a deeply nested dot-delimited key.""" 39 | keys = dict_key.split('.') 40 | key = keys.pop(0) if len(keys) > 0 else None 41 | if key is None or key not in data: 42 | return False 43 | if len(keys) == 0: 44 | return True 45 | return dict_has_key(data[key], '.'.join(keys)) 46 | 47 | 48 | def get_config_value(key, default = None): 49 | # logging.info(f"Getting config value for key: {key} from {USER_CONFIG}") 50 | return get_dict_value(USER_CONFIG, key, default) 51 | 52 | 53 | def extend_config(default_config, user_config): 54 | """ Returns a new config dict combining user_config into defined keys for default_config.""" 55 | cfg = {} 56 | for key, value in default_config.items(): 57 | if key not in user_config: 58 | cfg[key] = value 59 | elif isinstance(value, dict): 60 | cfg[key] = extend_config(value, user_config[key]) 61 | else: 62 | cfg[key] = user_config[key] if key in user_config else value 63 | return cfg 64 | 65 | 66 | def set_user_config(data: dict): 67 | """ Sets the user configuration.""" 68 | count = 0 69 | for key, value in data.items(): 70 | # if dict_has_key(USER_CONFIG, key): 71 | set_dict_value(USER_CONFIG, key, value, True) 72 | count+=1 73 | if count > 0: 74 | write_user_config() 75 | 76 | 77 | THIS_DIR = os.path.dirname(os.path.abspath(__file__)) 78 | DEFAULT_CONFIG_FILE = os.path.join(THIS_DIR, '..', 'easy_node_config.json.default') 79 | USER_CONFIG_FILE = os.path.join(THIS_DIR, '..', 'easy_node_config.json') 80 | 81 | 82 | def get_user_config(): 83 | """ Gets the user configuration.""" 84 | if os.path.exists(USER_CONFIG_FILE): 85 | with open(USER_CONFIG_FILE, 'r', encoding = 'UTF-8') as file: 86 | config = re.sub(r"(?:^|\s)//.*", "", file.read(), 
flags=re.MULTILINE) 87 | return json.loads(config) 88 | else: 89 | return {} 90 | 91 | 92 | def write_user_config(): 93 | """ Writes the user configuration.""" 94 | with open(USER_CONFIG_FILE, 'w+', encoding = 'UTF-8') as file: 95 | json.dump(USER_CONFIG, file, sort_keys=True, indent=2, separators=(",", ": ")) 96 | 97 | 98 | USER_CONFIG = get_user_config() 99 | 100 | 101 | # Migrate old config options into "features" 102 | needs_to_write_user_config = False 103 | 104 | 105 | if needs_to_write_user_config is True: 106 | print('writing new user config.') 107 | write_user_config() 108 | 109 | 110 | routes = PromptServer.instance.routes 111 | 112 | 113 | @routes.get('/easy_nodes/config.js') 114 | def api_get_user_config_file(request): 115 | """ Returns the user configuration as a javascript file. """ 116 | text=f'export const easy_node_config = {json.dumps(USER_CONFIG, sort_keys=True, indent=2, separators=(",", ": "))}' 117 | return web.Response(text=text, content_type='application/javascript') 118 | 119 | 120 | @routes.get('/easy_nodes/api/config') 121 | def api_get_user_config(request): 122 | """ Returns the user configuration. """ 123 | return web.json_response(json.dumps(USER_CONFIG)) 124 | 125 | 126 | @routes.post('/easy_nodes/api/config') 127 | async def api_set_user_config(request): 128 | """ Returns the user configuration. """ 129 | post = await request.post() 130 | data = json.loads(post.get("json")) 131 | set_user_config(data) 132 | return web.json_response({"status": "ok"}) 133 | -------------------------------------------------------------------------------- /example/example_nodes.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from random import random 3 | from easy_nodes import ( 4 | NumberInput, 5 | ComfyNode, 6 | MaskTensor, 7 | StringInput, 8 | ImageTensor, 9 | Choice, 10 | ) 11 | import easy_nodes 12 | import torch 13 | 14 | # Important! 
# Make sure easy_nodes.initialize_easy_nodes is called before any nodes are defined.
# See __init__.py for an example of how to do this.

# This is the converted example node from ComfyUI's example_node.py.example file.
@ComfyNode()
def annotated_example(
    image: ImageTensor,
    string_field: str = StringInput("Hello World!", multiline=False),
    int_field: int = NumberInput(0, 0, 4096, 64, "number"),
    float_field: float = NumberInput(1.0, 0, 10.0, 0.01, 0.001),
    print_to_screen: str = Choice(["enabled", "disabled"]),
) -> ImageTensor:
    """Invert the input image, optionally echoing the other inputs to the console.

    Bug fix: the Choice values are "enabled"/"disabled", so the original
    comparison against "enable" made this branch unreachable from the UI.
    """
    if print_to_screen == "enabled":
        print(
            f"""Your input contains:
                string_field aka input text: {string_field}
                int_field: {int_field}
                float_field: {float_field}
            """
        )
    # do some processing on the image, in this example I just invert it
    image = 1.0 - image
    return image  # Internally this gets auto-converted to (image,) for ComfyUI.


# You can wrap existing functions with ComfyFunc to expose them to ComfyUI as well.
def another_function(foo: float = 1.0):
    """Docstrings will be passed to the DESCRIPTION field on the node in ComfyUI."""
    print("Hello World!", foo)


# Bug fix: `random` is the *function* brought in via `from random import random`
# at the top of this file, so `random.random()` raised AttributeError whenever
# ComfyUI evaluated is_changed. Call it directly instead.
ComfyNode(is_changed=lambda: random())(another_function)


# You can register arbitrary classes to be used as inputs or outputs.
class MyFunClass:
    def __init__(self):
        # Default canvas dimensions and fill intensity used by the field-setter node.
        self.width = 640
        self.height = 640
        self.color = 0.5


easy_nodes.register_type(MyFunClass, "FUN_CLASS")


# If you don't want to create a node manually to create the class, you can use the
# create_field_setter_node to automatically create a node that sets the fields on the class.
easy_nodes.create_field_setter_node(MyFunClass)


@ComfyNode(is_output_node=True, color="#4F006F")
def my_fun_class_node_processor(fun_class: MyFunClass) -> ImageTensor:
    """Render a solid-noise image sized and shaded by the fields of `fun_class`."""
    my_image = torch.rand((1, fun_class.height, fun_class.width, 3)) * fun_class.color
    return my_image


@ComfyNode()
def create_random_image(width: int = NumberInput(128, 128, 1024),
                        height: int = NumberInput(128, 128, 1024)) -> ImageTensor:
    """Return a random RGB image of the requested size (batch of 1, HWC layout)."""
    return torch.rand((1, height, width, 3))


# You can also wrap a method on a class and thus maintain state between calls.
#
# Note that you can only expose one method per class, and you have to define the
# full class before manually calling the decorator on the method.
class ExampleClass:
    def __init__(self):
        self.counter = 42

    def my_method(self) -> int:
        """Increment and return the instance counter, demonstrating per-node state."""
        print(f"ExampleClass Hello World! {self.counter}")
        self.counter += 1
        return self.counter


def my_is_changed_func():
    # Returning a fresh random value forces ComfyUI to re-run the node every prompt.
    return random()


ComfyNode(
    is_changed=my_is_changed_func,
    description="Descriptions can also be passed in manually. This operation increments a counter",
)(ExampleClass.my_method)


# Preview text and images right in the nodes.
@ComfyNode(is_output_node=True)
def preview_example(str2: str = StringInput("")) -> str:
    """Show a greeting in the node UI and pass the input string through."""
    easy_nodes.show_text(f"hello: {str2}")
    return str2


# Wrapping a class method
class AnotherExampleClass:
    # Shared counter demonstrating class-level state across calls.
    class_counter = 42

    @classmethod
    def my_class_method(cls, foo: float):
        print(f"AnotherExampleClass Hello World! {cls.class_counter} {foo}")
        cls.class_counter += 1


# Bug fix: as above, `random` is the imported function, not the module, so
# `random.random()` raised AttributeError when ComfyUI called is_changed.
ComfyNode(is_changed=lambda: random())(
    AnotherExampleClass.my_class_method
)


# ImageTensors and MaskTensors are both just torch.Tensors.
# Use them in annotations to differentiate between images and masks in ComfyUI.
# This is purely cosmetic, and they are interchangeable in Python. If you annotate
# the type of a parameter as torch.Tensor it will be treated as an ImageTensor.
@ComfyNode(color="#00FF00")
def convert_to_image(mask: MaskTensor) -> ImageTensor:
    """Broadcast a single-channel mask into a 3-channel image tensor."""
    return mask.unsqueeze(-1).expand(-1, -1, -1, 3)


@ComfyNode()
def text_repeater(text: str = StringInput("Sample text"),
                  times: int = NumberInput(10, 1, 100)) -> list[str]:
    """Return `times` copies of `text` as a list output."""
    return [text for _ in range(times)]


# If you wrap your input types in list[], under the hood the decorator will make sure you get
# everything in a single call with the list inputs passed to you as lists automatically.
# If you don't, then you'll get multiple calls with a single item on each call.
@ComfyNode()
def combine_lists(
    image1: list[ImageTensor], image2: list[ImageTensor]
) -> list[ImageTensor]:
    """Concatenate two lists of images into one."""
    return [*image1, *image2]


# Adding a default for a param makes it optional, so ComfyUI won't require it to run your node.
@ComfyNode()
def add_images(
    image1: ImageTensor, image2: ImageTensor, image3: ImageTensor = None
) -> ImageTensor:
    """Element-wise sum of two (or optionally three) images."""
    total = image1 + image2
    return total if image3 is None else total + image3


@ComfyNode(is_output_node=True, color="#006600")
def example_show_mask(mask: MaskTensor) -> MaskTensor:
    """Display the mask in the node UI and pass it through unchanged."""
    easy_nodes.show_image(mask)
    return mask


# Multiple outputs can be returned by annotating with tuple[].
# Pass return_names if you want to give them labels in ComfyUI.
@ComfyNode("Example category", color="#0066cc", bg_color="#ffcc00", return_names=["Below", "Above"])
def threshold_image(image: ImageTensor, threshold_value: float = NumberInput(0.5, 0, 1, 0.0001, display="slider")) -> tuple[MaskTensor, MaskTensor]:
    """Returns separate masks for values above and below the threshold value."""
    below = torch.any(image < threshold_value, dim=-1).squeeze(-1)
    above = ~below

    logging.info(f"Number of pixels below threshold: {below.sum()}")
    logging.info(f"Number of pixels above threshold: {above.sum()}")

    return below.float(), above.float()


# ImageTensor and MaskTensor are just torch.Tensors, so you can treat them as such.
@ComfyNode(color="#0000FF", is_output_node=True)
def example_mask_image(image: ImageTensor,
                       mask: MaskTensor,
                       value: float = NumberInput(0, 0, 1, 0.0001, display="slider")) -> ImageTensor:
    """Just your basic image masking node."""
    # Emit a burst of log lines to demonstrate the log-streaming feature.
    for line_no in range(50):
        logging.info(f"Log line {line_no}")
    result = image.clone()
    result[mask == 0] = value
    easy_nodes.show_image(result)
    return result


# As long as Python is happy, ComfyUI will be happy with whatever you tell it the return type is.
# You can set the node color by passing in a color argument to the decorator.
@ComfyNode(color="#FF0000")
def convert_to_mask(image: ImageTensor, threshold: float = NumberInput(0.5, 0, 1, 0.0001, display="slider")) -> MaskTensor:
    """Binarize `image` against `threshold`, returning a float mask."""
    return (image > threshold).float()


# The decorated functions remain normal Python functions, so we can nest them inside each other too.
198 | @ComfyNode() 199 | def mask_image_with_image( 200 | image: ImageTensor, image_to_use_as_mask: ImageTensor 201 | ) -> ImageTensor: 202 | mask = convert_to_mask(image_to_use_as_mask) 203 | return example_mask_image(image, mask) 204 | 205 | 206 | # And of course you can use the code in normal Python scripts too. 207 | if __name__ == "__main__": 208 | tensor = torch.rand((5, 5)) 209 | tensor_inverted = annotated_example(tensor, "hello", 5, 0.5, "enable") 210 | assert torch.allclose(tensor, 1.0 - tensor_inverted) 211 | 212 | tensor_inverted_again = annotated_example(tensor_inverted, "Hi!") 213 | assert torch.allclose(tensor, tensor_inverted_again) 214 | -------------------------------------------------------------------------------- /easy_nodes/web/log_streaming.js: -------------------------------------------------------------------------------- 1 | import { api } from '../../scripts/api.js'; 2 | import { app } from "../../../scripts/app.js"; 3 | 4 | 5 | class FloatingLogWindow { 6 | constructor() { 7 | this.window = null; 8 | this.content = null; 9 | this.currentNodeId = null; 10 | this.currentPromptId = null; 11 | this.hideTimeout = null; 12 | this.activeStream = null; 13 | this.streamPromise = null; 14 | this.debounceTimeout = null; 15 | this.isFirstChunk = true; 16 | this.isClicked = false; 17 | this.isPinned = false; 18 | this.pinButton = null; 19 | } 20 | 21 | create() { 22 | if (this.window) return; 23 | 24 | this.window = document.createElement('div'); 25 | this.window.className = 'floating-log-window'; 26 | this.window.style.cssText = ` 27 | position: absolute; 28 | width: 400px; 29 | height: 300px; 30 | background-color: #1e1e1e; 31 | border: 1px solid #444; 32 | border-radius: 5px; 33 | box-shadow: 0 0 10px rgba(0,0,0,0.5); 34 | z-index: 1000; 35 | display: flex; 36 | flex-direction: column; 37 | overflow: hidden; 38 | resize: both; 39 | `; 40 | 41 | this.header = document.createElement('div'); 42 | this.header.style.cssText = ` 43 | padding: 5px 10px; 
44 | background-color: #2a2a2a; 45 | border-bottom: 1px solid #444; 46 | font-family: Arial, sans-serif; 47 | font-size: 14px; 48 | color: #e0e0e0; 49 | font-weight: bold; 50 | cursor: move; 51 | display: flex; 52 | justify-content: space-between; 53 | align-items: center; 54 | `; 55 | 56 | const title = document.createElement('span'); 57 | title.textContent = 'Node Log'; 58 | this.header.appendChild(title); 59 | 60 | this.pinButton = document.createElement('button'); 61 | this.pinButton.style.cssText = ` 62 | background: none; 63 | border: none; 64 | color: #e0e0e0; 65 | font-size: 18px; 66 | cursor: pointer; 67 | padding: 0 5px; 68 | `; 69 | this.pinButton.innerHTML = '📌'; 70 | this.pinButton.title = 'Pin window'; 71 | this.header.appendChild(this.pinButton); 72 | 73 | this.content = document.createElement('div'); 74 | this.content.style.cssText = ` 75 | flex-grow: 1; 76 | overflow-y: auto; 77 | margin: 0; 78 | padding: 10px; 79 | background-color: #252525; 80 | color: #e0e0e0; 81 | font-family: monospace; 82 | font-size: 12px; 83 | line-height: 1.4; 84 | white-space: pre-wrap; 85 | word-wrap: break-word; 86 | `; 87 | 88 | this.resizeHandle = document.createElement('div'); 89 | this.resizeHandle.style.cssText = ` 90 | position: absolute; 91 | right: 0; 92 | bottom: 0; 93 | width: 10px; 94 | height: 10px; 95 | cursor: nwse-resize; 96 | `; 97 | 98 | this.window.appendChild(this.header); 99 | this.window.appendChild(this.content); 100 | this.window.appendChild(this.resizeHandle); 101 | document.body.appendChild(this.window); 102 | 103 | this.addEventListeners(); 104 | } 105 | 106 | addEventListeners() { 107 | let isDragging = false; 108 | let isResizing = false; 109 | let startX, startY, startWidth, startHeight; 110 | 111 | const onMouseMove = (e) => { 112 | if (isDragging) { 113 | const dx = e.clientX - startX; 114 | const dy = e.clientY - startY; 115 | this.window.style.left = `${this.window.offsetLeft + dx}px`; 116 | this.window.style.top = 
`${this.window.offsetTop + dy}px`; 117 | startX = e.clientX; 118 | startY = e.clientY; 119 | } else if (isResizing) { 120 | const width = startWidth + (e.clientX - startX); 121 | const height = startHeight + (e.clientY - startY); 122 | this.window.style.width = `${Math.max(this.minWidth, width)}px`; 123 | this.window.style.height = `${Math.max(this.minHeight, height)}px`; 124 | } 125 | }; 126 | 127 | const onMouseUp = () => { 128 | isDragging = false; 129 | isResizing = false; 130 | document.removeEventListener('mousemove', onMouseMove); 131 | document.removeEventListener('mouseup', onMouseUp); 132 | }; 133 | 134 | this.header.addEventListener('mousedown', (e) => { 135 | if (e.target !== this.pinButton) { 136 | isDragging = true; 137 | startX = e.clientX; 138 | startY = e.clientY; 139 | document.addEventListener('mousemove', onMouseMove); 140 | document.addEventListener('mouseup', onMouseUp); 141 | } 142 | }); 143 | 144 | this.resizeHandle.addEventListener('mousedown', (e) => { 145 | isResizing = true; 146 | startX = e.clientX; 147 | startY = e.clientY; 148 | startWidth = parseInt(document.defaultView.getComputedStyle(this.window).width, 10); 149 | startHeight = parseInt(document.defaultView.getComputedStyle(this.window).height, 10); 150 | document.addEventListener('mousemove', onMouseMove); 151 | document.addEventListener('mouseup', onMouseUp); 152 | }); 153 | 154 | this.window.addEventListener('mouseenter', () => { 155 | this.cancelHideTimeout(); 156 | }); 157 | 158 | this.window.addEventListener('mouseleave', () => { 159 | if (!this.isClicked && !this.isPinned) { 160 | this.scheduleHide(); 161 | } 162 | }); 163 | 164 | // Add click event listener to the window 165 | this.window.addEventListener('click', (e) => { 166 | e.stopPropagation(); // Prevent the click from propagating to the document 167 | this.isClicked = true; 168 | this.cancelHideTimeout(); 169 | }); 170 | 171 | // Add global click event listener 172 | document.addEventListener('click', (e) => { 173 | 
if (this.window && this.window.style.display !== 'none' && !this.isPinned) { 174 | this.isClicked = false; 175 | this.hide(); 176 | } 177 | }); 178 | 179 | // Add pin button functionality 180 | this.pinButton.addEventListener('click', () => { 181 | this.isPinned = !this.isPinned; 182 | this.pinButton.innerHTML = this.isPinned ? '📍' : '📌'; 183 | this.pinButton.title = this.isPinned ? 'Unpin window' : 'Pin window'; 184 | }); 185 | } 186 | 187 | resetStream() { 188 | this.content.innerHTML = ''; // Clear previous content 189 | this.content.scrollTop = 0; // Reset scroll position 190 | this.streamLog(); 191 | } 192 | 193 | show(x, y, nodeId) { 194 | if (!this.window) this.create(); 195 | 196 | if (!this.isPinned) { 197 | // Convert canvas coordinates to screen coordinates 198 | const rect = app.canvas.canvas.getBoundingClientRect(); 199 | const screenX = (x + rect.left + app.canvas.ds.offset[0]) * app.canvas.ds.scale; 200 | const screenY = (y + rect.top + app.canvas.ds.offset[1]) * app.canvas.ds.scale; 201 | 202 | this.window.style.left = `${screenX}px`; 203 | this.window.style.top = `${screenY}px`; 204 | } 205 | 206 | this.window.style.display = 'flex'; 207 | 208 | if (this.currentNodeId !== nodeId) { 209 | this.currentNodeId = nodeId; 210 | this.content.innerHTML = ''; // Clear previous content 211 | this.content.scrollTop = 0; // Reset scroll position 212 | this.debouncedStreamLog(); 213 | } 214 | 215 | this.cancelHideTimeout(); 216 | } 217 | 218 | scheduleHide() { 219 | if (!this.isPinned) { 220 | this.cancelHideTimeout(); 221 | this.hideTimeout = setTimeout(() => this.hide(), 300); 222 | } 223 | } 224 | 225 | cancelHideTimeout() { 226 | if (this.hideTimeout) { 227 | clearTimeout(this.hideTimeout); 228 | this.hideTimeout = null; 229 | } 230 | } 231 | 232 | hide() { 233 | if (this.window && !this.isClicked && !this.isPinned) { 234 | this.window.style.display = 'none'; 235 | this.currentNodeId = null; 236 | this.cancelStream(); 237 | } 238 | } 239 | 240 | 
cancelStream() { 241 | if (this.activeStream) { 242 | this.activeStream.cancel(); 243 | this.activeStream = null; 244 | } 245 | if (this.streamPromise) { 246 | this.streamPromise.cancel(); 247 | this.streamPromise = null; 248 | } 249 | } 250 | 251 | debouncedStreamLog() { 252 | if (this.debounceTimeout) { 253 | clearTimeout(this.debounceTimeout); 254 | } 255 | this.debounceTimeout = setTimeout(() => { 256 | this.streamLog(); 257 | }, 100); 258 | } 259 | 260 | async streamLog() { 261 | if (!this.currentNodeId) return; 262 | 263 | // Cancel any existing stream 264 | this.cancelStream(); 265 | 266 | // Create a new AbortController for this stream 267 | const controller = new AbortController(); 268 | const signal = controller.signal; 269 | 270 | this.streamPromise = (async () => { 271 | try { 272 | const response = await api.fetchApi(`/easy_nodes/show_log?node=${this.currentNodeId}`, { signal }); 273 | const reader = response.body.getReader(); 274 | const decoder = new TextDecoder(); 275 | 276 | this.activeStream = reader; 277 | 278 | while (true) { 279 | const { value, done } = await reader.read(); 280 | if (done) break; 281 | let text = decoder.decode(value, { stream: true }); 282 | 283 | // Trim initial whitespace only for the first chunk 284 | if (this.isFirstChunk) { 285 | text = text.trimStart(); 286 | this.isFirstChunk = false; 287 | } 288 | 289 | // Render HTML 290 | this.content.insertAdjacentHTML('beforeend', text); 291 | 292 | // Only auto-scroll if the user hasn't scrolled up 293 | if (this.content.scrollHeight - this.content.scrollTop === this.content.clientHeight) { 294 | this.content.scrollTop = this.content.scrollHeight; 295 | } 296 | } 297 | } catch (error) { 298 | if (error.name !== 'AbortError') { 299 | console.error('Error in streamLog:', error); 300 | } 301 | } finally { 302 | this.activeStream = null; 303 | this.streamPromise = null; 304 | } 305 | })(); 306 | 307 | // Attach the cancel method to the promise 308 | this.streamPromise.cancel = () => 
{ 309 | controller.abort(); 310 | }; 311 | } 312 | } 313 | 314 | 315 | export const floatingLogWindow = new FloatingLogWindow(); 316 | 317 | 318 | api.addEventListener('logs_updated', ({ detail, }) => { 319 | let nodesWithLogs = detail.nodes_with_logs; 320 | let prompt_id = detail.prompt_id; 321 | 322 | app.graph._nodes.forEach((node) => { 323 | let strNodeId = "" + node.id; 324 | node.has_log = nodesWithLogs.includes(strNodeId); 325 | }); 326 | 327 | // If the floating log window is showing logs for a node that has a new log, refresh it: 328 | if (floatingLogWindow.currentPromptId != prompt_id && nodesWithLogs.includes(floatingLogWindow.currentNodeId + "")) { 329 | floatingLogWindow.resetStream(); 330 | } 331 | floatingLogWindow.currentPromptId = prompt_id; 332 | app.canvas.setDirty(true); 333 | }, false); 334 | 335 | app.registerExtension({ 336 | name: "EasyNodes.log_streaming", 337 | async setup(app) { 338 | api.addEventListener("status", ({ detail }) => { 339 | if (!detail) { 340 | app.graph._nodes.forEach((node) => { 341 | node.has_log = false; 342 | }); 343 | app.canvas.setDirty(true); 344 | } 345 | }); 346 | 347 | api.addEventListener("reconnected", () => { 348 | api.fetchApi('/easy_nodes/trigger_log', { method: 'POST' }); 349 | }); 350 | }, 351 | async afterConfigureGraph(missingNodeTypes) { 352 | app.graph._nodes.forEach((node) => { 353 | node.has_log = false; 354 | }); 355 | api.fetchApi('/easy_nodes/trigger_log', { method: 'POST' }); 356 | }, 357 | }); 358 | 359 | 360 | -------------------------------------------------------------------------------- /easy_nodes/log_streaming.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import html 3 | import io 4 | import logging 5 | import os 6 | import re 7 | import traceback 8 | from typing import Dict, List, Tuple 9 | 10 | import folder_paths 11 | from aiohttp import web 12 | from ansi2html import Ansi2HTMLConverter 13 | from colorama import Fore 14 | 
from server import PromptServer 15 | 16 | import easy_nodes.config_service as config_service 17 | 18 | routes = PromptServer.instance.routes 19 | 20 | 21 | class CloseableBufferWrapper: 22 | def __init__(self, buffer: io.StringIO): 23 | self._buffer: io.StringIO = buffer 24 | self._value: str = None 25 | self._closed: bool = False 26 | 27 | def close(self): 28 | self._value = self._buffer.getvalue() 29 | self._buffer.close() 30 | self._buffer = None 31 | self._closed = True 32 | 33 | async def stream_buffer(self, offset=0): 34 | try: 35 | buffer = self._buffer if not self._closed else io.StringIO(self._value) 36 | buffer.seek(0, io.SEEK_END) 37 | buffer_size = buffer.tell() 38 | if offset == -1: 39 | start_position = 0 40 | else: 41 | start_position = max(0, buffer_size - offset) 42 | 43 | buffer.seek(start_position) 44 | content = buffer.read() 45 | yield content 46 | 47 | last_position = buffer.tell() 48 | 49 | while True: 50 | # TODO: fix the microscopic chance for a race condition here. 51 | # close() needs to async so we can await a lock there and here. 52 | if self._closed: 53 | # Stream the remaining content and exit 54 | remaining_content = self._value[last_position:] 55 | if remaining_content: 56 | yield remaining_content 57 | break 58 | 59 | buffer.seek(0, io.SEEK_END) 60 | if buffer.tell() > last_position: 61 | buffer.seek(last_position) 62 | content = buffer.read() 63 | yield content 64 | last_position = buffer.tell() 65 | 66 | await asyncio.sleep(0.1) 67 | 68 | except Exception as _: 69 | logging.error(f"Error in stream_buffer: {traceback.format_exc()}") 70 | 71 | 72 | # Keyed on node ID, first value of tuple is prompt_id. 
73 | _buffers: Dict[str, Tuple[str, List[CloseableBufferWrapper]]] = {} 74 | _prompt_id = None 75 | _last_node_id = None 76 | 77 | 78 | async def tail_file(filename, offset): 79 | file_size = os.path.getsize(filename) 80 | if offset == -1: 81 | start_position = 0 82 | else: 83 | start_position = max(0, file_size - offset) 84 | 85 | with open(filename, 'r') as f: 86 | f.seek(start_position) 87 | # First, yield any existing content from the offset 88 | content = f.read() 89 | if content: 90 | yield content 91 | 92 | # Then, continue to tail the file 93 | f.seek(0, os.SEEK_END) 94 | while True: 95 | line = f.readline() 96 | if not line: 97 | await asyncio.sleep(0.1) 98 | continue 99 | yield line 100 | 101 | 102 | async def tail_string(content: str, offset: int): 103 | if offset == -1: 104 | yield content 105 | else: 106 | yield content[-offset:] 107 | 108 | 109 | def minify_html(html): 110 | # Remove comments 111 | html = re.sub(r'', '', html, flags=re.DOTALL) 112 | # Remove whitespace between tags 113 | html = re.sub(r'>\s+<', '><', html) 114 | # Remove leading and trailing whitespace 115 | html = html.strip() 116 | # Combine multiple spaces into one 117 | html = re.sub(r'\s+', ' ', html) 118 | return html 119 | 120 | 121 | header = minify_html(""" 122 | 123 | 124 | ComfyUI Log 125 | 131 | 132 | 133 |
""")
134 | 
135 | 
async def send_header(request) -> web.StreamResponse:
    """Start a streaming HTML response and emit the shared log-page header.

    Prepares an aiohttp StreamResponse for the incoming request, writes the
    minified HTML preamble (the module-level `header` string), and returns
    the still-open response so callers can keep appending log content.
    """
    stream = web.StreamResponse(status=200, reason='OK',
                                headers={'Content-Type': 'text/html'})
    await stream.prepare(request)
    await stream.write(header.encode('utf-8'))
    return stream
145 | 
146 | 
# Module-level converter, shared across requests, that rewrites ANSI escape
# sequences (e.g. colorama Fore.* codes used below) into styled HTML.
_converter = Ansi2HTMLConverter(inline=True)
148 | 
149 | 
def convert_text(text: str) -> bytes:
    """Convert a chunk of ANSI-colored log text into UTF-8 encoded HTML.

    ANSI escape sequences are rewritten to styled HTML via ansi2html, and
    recognized source locations ([[LINK:path:line]] markers plus Python
    stack-trace frames) become clickable editor links built from the
    configured ``easy_nodes.EditorPathPrefix``.
    """
    # Convert ANSI codes to HTML.
    converted = _converter.convert(text, full=False, ensure_trailing_newline=False)
    editor_prefix = config_service.get_config_value("easy_nodes.EditorPathPrefix", "")
    source_prefix = config_service.get_config_value("easy_nodes.SourcePathPrefix")

    def replace_with_link(match):
        full_path = match.group(1)
        line_no = match.group(2)
        full_link = f"{editor_prefix}{full_path}:{line_no}"

        # Remove the source path prefix from the *displayed* text only; the
        # href keeps the full path so the editor/IDE can resolve it.
        if source_prefix and full_path.startswith(source_prefix):
            full_path = full_path[len(source_prefix):]

        # Bug fix: full_link was computed but never used, so locations were
        # rendered as plain text. Wrap them in an anchor to make them
        # clickable deep links.
        return f'<a href="{full_link}">{full_path}:{line_no}</a>'

    # Regex pattern to match the [[LINK:filepath:lineno]] format.
    log_pattern = r'\[\[LINK:([^:]+):([^:]+)\]\]'
    converted = re.sub(log_pattern, replace_with_link, converted)

    # Also turn Python stack-trace frames into links, but only when an editor
    # prefix is configured (otherwise the raw trace text is left untouched).
    if editor_prefix:
        stack_trace_pattern = r'File "([^"]+)", line (\d+), in (\w+)'
        converted = re.sub(stack_trace_pattern,
                           lambda m: f'{replace_with_link(m)} in {m.group(3)}',
                           converted)

    return converted.encode('utf-8')
179 | 
180 | 
async def stream_content(response, content_generator):
    """Forward every chunk from ``content_generator`` to an open response.

    Chunks pass through send_text() (which applies the ANSI->HTML
    conversion). A client disconnect (ConnectionResetError) or task
    cancellation simply ends the stream; any other error is logged rather
    than propagated so the caller can still finish/close the response.

    Returns the response object in all cases.
    """
    try:
        async for chunk in content_generator:
            try:
                await send_text(response, chunk)
            except ConnectionResetError:
                # Client went away; nothing more to send.
                break
    except asyncio.CancelledError:
        pass
    except Exception:
        # Log with traceback via the logging module, consistent with the rest
        # of this file (was a bare print, which bypassed log capture).
        logging.error(f"Error in stream_content: {traceback.format_exc()}")
    # NOTE: this return previously sat in a `finally:` block, which would also
    # silently swallow BaseExceptions such as KeyboardInterrupt/SystemExit.
    return response
194 | 
195 | 
196 | async def send_footer(response):
197 |     await response.write(b"
") 198 | response.force_close() 199 | 200 | 201 | def send_node_update(): 202 | nodes_with_logs = [key for key in _buffers.keys()] 203 | PromptServer.instance.send_sync("logs_updated", {"nodes_with_logs": nodes_with_logs, "prompt_id": _prompt_id}, None) 204 | 205 | 206 | @routes.post("/easy_nodes/trigger_log") 207 | async def trigger_log(request): 208 | send_node_update() 209 | return web.Response(status=200) 210 | 211 | 212 | async def send_text(response: web.Request, text: str): 213 | debug = False 214 | if debug: 215 | with open("log_streaming.log", "a") as f: 216 | f.write(f"{text}") 217 | 218 | await response.write(convert_text(text)) 219 | await response.drain() 220 | 221 | 222 | @routes.get("/easy_nodes/show_log") 223 | async def show_log(request: web.Request): 224 | offset = int(request.rel_url.query.get("offset", "-1")) 225 | if "node" in request.rel_url.query: 226 | try: 227 | node_id = str(request.rel_url.query["node"]) 228 | if node_id not in _buffers: 229 | logging.error(f"Node {node_id} not found in buffers: {_buffers}") 230 | return web.json_response({"node not found": node_id, 231 | "valid nodes": [str(key) for key in _buffers.keys()]}, status=404) 232 | 233 | response = await send_header(request) 234 | await send_text(response, "Sent header!\n") 235 | node_class, prompt_id, buffer_list = _buffers[node_id] 236 | await send_text( 237 | response, 238 | f"Logs for node {Fore.GREEN}{node_id}{Fore.RESET}" 239 | + f" ({Fore.GREEN}{node_class}{Fore.RESET})" 240 | + f" in prompt {Fore.GREEN}{prompt_id}{Fore.RESET}\n\n", 241 | ) 242 | 243 | invocation = 1 244 | last_buffer_index = 0 245 | 246 | while True: 247 | for i in range(last_buffer_index, len(buffer_list)): 248 | input_desc, buffer = buffer_list[i] 249 | input_desc_str = "\n".join(input_desc) if isinstance(input_desc, list) else input_desc 250 | invocation_header = f"======== Node invocation {Fore.GREEN}{invocation:3d}{Fore.RESET} ========\n" 251 | await send_text(response, invocation_header) 252 | 
await send_text(response, f"Params passed to node:\n{Fore.CYAN}{input_desc_str}{Fore.RESET}\n--\n") 253 | invocation += 1 254 | await stream_content(response, buffer.stream_buffer(offset)) 255 | last_buffer_index = i + 1 256 | 257 | # Wait for a second to check for new logs in case there's more coming. 258 | if _last_node_id != node_id: 259 | logging.info(f"Node ID changed from {_last_node_id} to {node_id} {type(_last_node_id)} {type(node_id)}") 260 | break 261 | 262 | # If the next node wasn't an EasyNode or this was the actual last node in the prompt, we can't be completely 263 | # sure if there's more logs coming. So we'll just wait for a second and check again. 264 | if len(buffer_list) == last_buffer_index: 265 | await asyncio.sleep(0.5) 266 | if len(buffer_list) == last_buffer_index: 267 | break 268 | 269 | await send_text(response, "=====================================\n\nEnd of node logs.") 270 | await send_footer(response) 271 | except Exception as e: 272 | # Most exceptions seem to be related to the response object being closed (user closed the window) 273 | logging.debug(f"Error in show_log for node {node_id} (last node id: {_last_node_id}): {str(e)} {traceback.format_exc()}") 274 | return web.Response(status=500) 275 | return response 276 | 277 | response = await send_header(request) 278 | await stream_content(response, tail_file("comfyui.log", offset)) 279 | await send_footer(response) 280 | return response 281 | 282 | 283 | def add_log_buffer(node_id: str, node_class: str, prompt_id: str, input_desc: str, 284 | buffer_wrapper: CloseableBufferWrapper): 285 | global _prompt_id 286 | _prompt_id = prompt_id 287 | 288 | global _last_node_id 289 | _last_node_id = node_id 290 | 291 | node_id = str(node_id) 292 | 293 | if node_id in _buffers: 294 | existing_node_class, existing_prompt_id, buffers = _buffers[node_id] 295 | if existing_prompt_id != prompt_id: 296 | log_list = [] 297 | _buffers[node_id] = (node_class, prompt_id, log_list) 298 | else: 299 | 
log_list = buffers 300 | else: 301 | log_list = [] 302 | _buffers[node_id] = (node_class, prompt_id, log_list) 303 | 304 | log_list.append((input_desc, buffer_wrapper)) 305 | send_node_update() 306 | 307 | 308 | @routes.get("/easy_nodes/verify_image") 309 | async def verify_image(request): 310 | if "filename" in request.rel_url.query: 311 | filename = request.rel_url.query["filename"] 312 | filename, output_dir = folder_paths.annotated_filepath(filename) 313 | 314 | # validation for security: prevent accessing arbitrary path 315 | if filename[0] == '/' or '..' in filename: 316 | return web.Response(status=400) 317 | 318 | if output_dir is None: 319 | type = request.rel_url.query.get("type", "output") 320 | output_dir = folder_paths.get_directory_by_type(type) 321 | 322 | if output_dir is None: 323 | return web.Response(status=400) 324 | 325 | if "subfolder" in request.rel_url.query: 326 | full_output_dir = os.path.join(output_dir, request.rel_url.query["subfolder"]) 327 | if os.path.commonpath((os.path.abspath(full_output_dir), output_dir)) != output_dir: 328 | return web.Response(status=403) 329 | output_dir = full_output_dir 330 | 331 | file = os.path.join(output_dir, filename) 332 | return web.json_response({"exists": os.path.isfile(file)}) 333 | 334 | return web.Response(status=400) 335 | -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | # Effortless Nodes for ComfyUI 2 | 3 | This package aims to make adding new [ComfyUI](https://github.com/comfyanonymous/ComfyUI) nodes as easy as possible, allowing you to write basic annotated Python and automatically turn it into a ComfyUI node definition via a simple `@ComfyNode` annotation. 
4 | 5 | For example: 6 | ```python 7 | from easy_nodes import ComfyNode, ImageTensor, MaskTensor, NumberInput 8 | 9 | @ComfyNode(color="#0066cc", bg_color="#ffcc00", return_names=["Below", "Above"]) 10 | def threshold_image(image: ImageTensor, 11 | threshold: float = NumberInput(0.5, 0, 1, 0.01, display="slider")) -> tuple[MaskTensor, MaskTensor]: 12 | """Returns separate masks for values above and below the threshold value.""" 13 | mask_below = torch.any(image < threshold, dim=-1) 14 | return mask_below.float(), (~mask_below).float() 15 | ``` 16 | 17 | That (plus [a tiny bit of initialization](#installation) in `__init__.py`) and your node is ready for ComfyUI! 18 | 19 | In addition, it provides enhanced node customization previously only available with custom JavaScript (e.g. color, and adding preview images/text), and several general ComfyUI quality of life improvements. 20 | 21 | More examples can be found [here](example/example_nodes.py). 22 | 23 | ## Features 24 | 25 | ### Core Functionality 26 | - **@ComfyNode Decorator**: Simplifies custom node declaration with automagic node definition based on Python type annotations. Just write annotated Python and turn your functions into nodes with a simple @ComfyNode decorator. 27 | - **ComfyUI Type Support**: Includes common types (e.g., ImageTensor, MaskTensor) to support ComfyUI's connection semantics. Register your own custom types with `easy_nodes.register_type()` 28 | - **Widget Support**: Special classes (StringInput, NumberInput and Choice) provide full support for ComfyUI's widget behavior when used as defaults to int, float and string inputs. 29 | - **Automatic List and Tuple Handling**: Simplifies input/output for functions expecting or returning collections. 30 | - **Init-time andChecking**: Provides early alerts for common setup issues like duplicate node definitions. 
31 | - **Built-in Text and Image Previews**: Easily add previews to nodes without JavaScript using `easy_nodes.show_text()` and `easy_nodes.show_image()`. 32 | - **Change node defaults**: Set node colors and initial dimensions via the decorator, also avoiding the need to write custom JavaScript. 33 | 34 | ### Advanced Features 35 | - **Dynamic Node Creation**: Automatically create nodes from existing Python classes, adding widgets for every field (for basic types like string, int and float). 36 | - **ComfyUI Node Definition Support**: Includes options for validate_input, is_output_node, and other ComfyUI-specific features. 37 | - **Log Streaming**: Stream node logs directly to the browser for real-time debugging. Just hover the mouse over the 📜 icon to show the latest captured log from that node in a pop-up window, or click through to open in a new tab. 38 | - **Deep Source Links**: Quick access to the source code for your nodes in IDEs or GitHub when base paths are configured in options. 39 | - **Info Tooltips**: Auto-generated from function docstrings. 40 | - **Custom Type Verification**: Verify tensor shapes and data types according to catch problematic behavior early. 41 | - **LLM-based Debugging**: Optional ChatGPT-powered debugging and automatic code fixing for exceptions during node execution. 42 | 43 | ### ComfyUI Quality of Life Improvements 44 | - **Better Stack Traces**: Enhanced exception windows with deep source links when configured (works for all nodes). 45 | - **Preview Image Persistence**: Keep your preview images across browser sessions, so that you don't have to re-run your prompts just to see them again. 46 | - **Automatic Module Reloading**: Immediately see code changes to EasyNodes nodes on the next run with this optional setting, saving you time that would normally be spent restarting ComfyUI. 47 | 48 | ## New Features in Action 49 | 50 | ||| 51 | |------|-------| 52 | |basic exampleNew icons on node titlebars: Logs, Info, and Source.
Node colors set via @ComfyNode decorator.|Log streamingLive log streaming. Just hover over the 📜 icon, and click the pin to make the window persistent.| 53 | |New menu optionsAll options.|Better stack tracesBetter stack traces. Set the stack trace prefix to get prettier dialogues with links directly to the source locations.| 54 | 55 | 56 | ## Changelog 57 | 58 | ### New in 1.2: 59 | 60 | - Stream node logs right to your browser; when an EasyNode is run it will show a log icon on the title bar. Clicking this will open up a new tab where you can see the logs accumulated during that node's execution. Icon rendering can be disabled via settings option if you want to keep things cleaner; in this case access via right-click menu option. 61 | - Added save_node_list function to export nodes to a json file. This can be helpful e.g. for ingestion by ComfyUI-Manager. 62 | - Set default node width and height, and add force_input to NumberInput (thanks isaacwasserman) 63 | - Retain preview images across browser refreshes if option is enabled (applies to all ComfyUI nodes) 64 | - Bug fixes and cleanup. 65 | 66 | ### New in 1.1: 67 | 68 | - Custom verifiers for types on input and output for your nodes. For example, it will automatically verify that images always have 1, 3 or 4 channels (B&W, RGB and RGBA). Set `verify_level` when calling initialize_easy_nodes to either CheckSeverityMode OFF, WARN, or FATAL (default is WARN). You can write your own verifiers. See [comfy_types.py](easy_nodes/comfy_types.py) for examples of types with verifiers. 69 | - Expanded ComfyUI type support. See [comfy_types.py](easy_nodes/comfy_types.py) for the full list of registered types. 70 | - Added warnings if relying on node auto-registration without explicitly asking for it (while also supporting get_node_mappings() at the same time). 
This is because the default for auto_register will change to False in a future release, in order to make ComfyUI-EasyNodes more easily findable by indexers like ComfyUI-Manager (which expects your nodes to be found in your `__init__.py`). Options: 71 | - If you wish to retain the previous behavior, you can enable auto-registration explicitly with `easy_nodes.initialize_easy_nodes(auto_register=True)`. 72 | - Otherwise, export your nodes the normal way as shown in the [installation](#installation) section. 73 | 74 | ### New in 1.0: 75 | 76 | - Renamed to ComfyUI-EasyNodes from ComfyUI-Annotations to better reflect the package's goal (rather than the means) 77 | - Package is now `easy_nodes` rather than `comfy_annotations` 78 | - Now on pip/PyPI! ```pip install ComfyUI-EasyNodes``` 79 | - Set node foreground and background color via Python argument, no JS required: `@ComfyNode(color="FF0000", bg_color="00FF00")` 80 | - Add previews to nodes without JavaScript. Just drop either of these in the body of your node's function: 81 | - `easy_nodes.show_text("hello world")` 82 | - `easy_nodes.show_image(image)` 83 | - Automatically create nodes from existing Python classes. The dynamic node will automatically add a widget for every field. 84 | - Info tooltip on nodes auto-generated from your function's docstring 85 | - New optional settings features: 86 | - Make images persist across browser refreshes via a settings option (provided they're still on the server) 87 | - Automatic module reloading: if you turn on the setting, immediately see the changes to code on the next run. 88 | - LLM-based debugging: optionally have ChatGPT take a crack at fixing your code 89 | - Deep links to source code if you set a base source path (e.g. to github or your IDE) 90 | - Bug fixes 91 | 92 | ## Installation 93 | 94 | To use this module in your ComfyUI project, follow these steps: 95 | 96 | 1. 
**Install the Module**: Run the following command to install the ComfyUI-EasyNodes module: 97 | 98 | ```bash 99 | pip install ComfyUI-EasyNodes 100 | ``` 101 | or, if you want to have an editable version: 102 | ```bash 103 | git clone https://github.com/andrewharp/ComfyUI-EasyNodes 104 | pip install -e ComfyUI-EasyNodes 105 | ``` 106 | Note that this is not a typical ComfyUI nodepack, so does not itself live under custom_nodes. 107 | 108 | However, after installing you can copy the example node directory into custom_nodes to test them out: 109 | ```bash 110 | git clone --depth=1 https://github.com/andrewharp/ComfyUI-EasyNodes.git /tmp/easynodes 111 | mv /tmp/easynodes/example $COMFYUI_DIR/custom_nodes/easynodes 112 | ``` 113 | 114 | 3. **Integrate into Your Project**: 115 | In `__init__.py`: 116 | 117 | ```python 118 | import easy_nodes 119 | easy_nodes.initialize_easy_nodes(default_category=my_category, auto_register=False) 120 | 121 | # This must come after calling initialize_easy_nodes. 122 | import your_node_module # noqa: E402 123 | 124 | NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS = easy_nodes.get_node_mappings() 125 | 126 | # Export so that ComfyUI can pick them up. 127 | __all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS'] 128 | 129 | # Optional: export the node list to a file so that e.g. ComfyUI-Manager can pick it up. 130 | easy_nodes.save_node_list(os.path.join(os.path.dirname(__file__), "node_list.json")) 131 | ``` 132 | 133 | You can also initialize with auto_register=True, in which case you won't have to do anything else after the import. However, this may be problematic for having your nodes indexed so will default to False in a future update (currently not setting it explicitly will auto-register and complain). 134 | 135 | 136 | ## Initialization options 137 | 138 | The options passed to `easy_nodes.initialize_easy_nodes` will apply to all nodes registered until the next time `easy_nodes.initialize_easy_nodes` is called. 
139 | 140 | The settings mostly control defaults and some optional features that I find nice to have, but which may not work for everybody, so some are turned off by default. 141 | 142 | - `default_category`: The default category for nodes. Defaults to "EasyNodes". 143 | - `auto_register`: Whether to automatically register nodes with ComfyUI (so you don't have to export). Previously defaulted to True; now defaults to half-true (will auto-register, allow you to export, and print a warning). In a future release will default to False. 144 | - `docstring_mode`: The mode for generating node descriptions that show up in tooltips. Defaults to AutoDescriptionMode.FULL. 145 | - `verify_level`: Whether to verify tensors for shape and data type according to ComfyUI type (MASK, IMAGE, etc). Runs on inputs and outputs. Can be set to CheckSeverityMode.OFF, WARN, or FATAL. Defaults to WARN, as I've made some assumptions about shapes that may not be universal. 146 | - `auto_move_tensors`: Whether to automatically move torch Tensors to the GPU before your function gets called, and then to the CPU on output. Defaults to False. 147 | 148 | 149 | ## Using the decorator 150 | 151 | 1. **Annotate Functions with @ComfyNode**: Decorate your processing functions with `@ComfyNode`. The decorator accepts the following parameters: 152 | - `category`: Specifies the category under which the node will be listed in ComfyUI. Default is `"ComfyNode"`. 153 | - `display_name`: Optionally specifies a human-readable name for the node as it will appear in ComfyUI. If not provided, a name is generated based on the function name. 154 | - `workflow_name`: The internal unique identifier for this node type. If not provided, a name is generated based on the function name. 155 | - `description`: An optional description for the node. If not provided the function's docstring, if any, will be used according to `easy_nodes.docstring_mode`. 156 | - `is_output_node`: Maps to ComfyUI's IS_OUTPUT_NODE. 
157 | - `return_types`: Maps to ComfyUI's RETURN_TYPES. Use if the return type of the function itself is dynamic. 158 | - `return_names`: Maps to ComfyUI's RETURN_NAMES. 159 | - `validate_inputs`: Maps to ComfyUI's VALIDATE_INPUTS. 160 | - `is_changed`: Maps to ComfyUI's IS_CHANGED. 161 | - `always_run`: Makes the node always run by generating a random IS_CHANGED. 162 | - `debug`: A boolean that makes this node print out extra information during its lifecycle. 163 | - `color`: Changes the node's color. 164 | - `bg_color`: Changes the node's color. If color is set and not bg_color, bg_color will just be a slightly darker color. 165 | - `width`: Default width for this node type on creation. 166 | - `height`: Default height for this node type on creation. 167 | 168 | Example: 169 | ```python 170 | from easy_nodes import ComfyNode, ImageTensor, NumberInput 171 | 172 | @ComfyNode(category="Image Processing", 173 | display_name="Enhance Image", 174 | is_output_node=True, 175 | debug=True, 176 | color="#FF00FF") 177 | def enhance_image(image: ImageTensor, factor: NumberInput(0.5, 0, 1, 0.1)) -> ImageTensor: 178 | output_image = enhance_my_image(image, factor) 179 | easy_nodes.show_image(output_image) # Will show the image on the node, so you don't need a separate PreviewImage node. 180 | return output_image 181 | ``` 182 | 183 | 2. **Annotate your function inputs and outputs**: Fully annotate function parameters and return types, using `list` to wrap types as appropriate. `tuple[output1, output2]` should be used if you have multiple outputs, otherwise you can just return the naked type (in the example below, that would be `list[int]`). This information is used to generate the fields of the internal class definition `@ComfyNode` sends to ComfyUI. If you don't annotate the inputs, the input will be treated as a wildcard. If you don't annotate the output, you won't see anything at all in ComfyUI. 
184 | 185 | Example: 186 | ```python 187 | @ComfyNode("Utilities") 188 | def add_value(img_list: list[ImageTensor], val: int) -> list[int]: 189 | return [img + val for img in img_list] 190 | ``` 191 | 192 | ### Registering new types: 193 | 194 | Say you want a new type of special Tensor that ComfyUI will treat differently from Images; perhaps a rotation matrix. Just create a placeholder class for it and use that in your annotations -- it's just for semantics; internally your functions will get whatever type of class they're handed (though with the verification settings turned on, you can still be assured it's a Tensor object, and you are free to create your own custom verifier for more control). 195 | 196 | ```python 197 | class RotationMatrix(torch.Tensor): 198 | def __init__(self): 199 | raise TypeError("!") # Will never be instantiated 200 | 201 | easy_nodes.register_type(RotationMatrix, "ROTATION_MATRIX", verifier=TensorVerifier("ROTATION_MATRIX")) 202 | 203 | @ComfyNode() 204 | def rotate_matrix_more(rot1: RotationMatrix, rot2: RotationMatrix) -> RotationMatrix: 205 | return rot1 * rot2 206 | ``` 207 | 208 | Making the class extend a torch.Tensor is not necessary, but it will give you nice type hints in IDEs. 209 | 210 | ### Creating dynamic nodes from classes 211 | 212 | You can also automatically create nodes that will expose the fields of a class as widgets (as long as it has a default constructor). Say you have a complex options class from a third-party library you want to pass to a node. 213 | 214 | ```python 215 | from some_library import ComplexOptions 216 | 217 | easy_nodes.register_type(ComplexOptions) 218 | 219 | easy_nodes.create_field_setter_node(ComplexOptions) 220 | ``` 221 | 222 | Now you should find a node named ComplexOptions that will have all the basic field types (str, int, float, bool) exposed as widgets.
223 | 224 | ## Automatic LLM Debugging 225 | 226 | To enable the experimental LLM-based debugging, set your OPENAI_API_KEY prior to starting ComfyUI. 227 | 228 | e.g.: 229 | ```bash 230 | export OPENAI_API_KEY=sk-P#$@%J345jsd... 231 | python main.py 232 | ``` 233 | 234 | Then open settings and turn the LLM debugging option to either "On" or "AutoFix". 235 | 236 | Behavior: 237 | * "On": any exception in execution by an EasyNodes node (not regular nodes) will cause EasyNodes to collect all the relevant data and package it into a prompt for ChatGPT, which is instructed to reply with a fixed version of your function from which a patch is created. That patch is displayed in the console and also saved to disk for evaluation. 238 | * "AutoFix": All of the above, and EasyNodes will also apply the patch and attempt to run the prompt again. This will repeat up to the configurable retry limit. 239 | 240 | This feature is very experimental, and any contributions for things like improving the prompt flow and supporting other LLMs are welcome! You can find the implementation in [easy_nodes/llm_debugging.py](easy_nodes/llm_debugging.py). 241 | 242 | ## Contributing 243 | 244 | Contributions are welcome! Please submit pull requests or open issues for any bugs, features, or improvements.
245 | -------------------------------------------------------------------------------- /easy_nodes/web/easy_nodes.js: -------------------------------------------------------------------------------- 1 | import { app } from '../../scripts/app.js' 2 | import { api } from '../../scripts/api.js' 3 | import { ComfyWidgets } from "../../scripts/widgets.js"; 4 | import { createSetting } from "./config_service.js"; 5 | import { floatingLogWindow } from './log_streaming.js'; 6 | 7 | const sourcePathPrefixId = "easy_nodes.SourcePathPrefix"; 8 | const editorPathPrefixId = "easy_nodes.EditorPathPrefix"; 9 | const reloadOnEditId = "easy_nodes.ReloadOnEdit"; 10 | const renderIconsId = "easy_nodes.RenderIcons"; 11 | 12 | 13 | function resizeShowValueWidgets(node, numValues, app) { 14 | const numShowValueWidgets = (node.showValueWidgets?.length ?? 0); 15 | numValues = Math.max(numValues, 0); 16 | 17 | if (numValues > numShowValueWidgets) { 18 | for (let i = numShowValueWidgets; i < numValues; i++) { 19 | const showValueWidget = ComfyWidgets["STRING"](node, `output${i}`, ["STRING", { multiline: true }], app).widget; 20 | showValueWidget.inputEl.readOnly = true; 21 | if (!node.showValueWidgets) { 22 | node.showValueWidgets = []; 23 | } 24 | node.showValueWidgets.push(showValueWidget); 25 | } 26 | } else if (numValues < numShowValueWidgets) { 27 | const removedWidgets = node.showValueWidgets.splice(numValues); 28 | node.widgets.splice(node.origWidgetCount + numValues); 29 | 30 | // Remove the detached widgets from the DOM 31 | removedWidgets.forEach(widget => { 32 | widget.inputEl.parentNode.removeChild(widget.inputEl); 33 | }); 34 | } 35 | } 36 | 37 | const startOffset = 10; 38 | 39 | function renderSourceLinkAndInfo(node, ctx, titleHeight) { 40 | if (node?.flags?.collapsed) { 41 | return; 42 | } 43 | 44 | let currentX = node.size[0] - startOffset; 45 | if (node.sourceLoc) { 46 | node.srcLink = node.sourceLoc; 47 | 48 | const linkText = "src"; 49 | ctx.fillStyle = "#2277FF"; 50 | 
node.srcLinkWidth = ctx.measureText(linkText).width; 51 | currentX -= node.srcLinkWidth; 52 | ctx.fillText( 53 | linkText, 54 | currentX, 55 | LiteGraph.NODE_TITLE_TEXT_Y - titleHeight 56 | ); 57 | } else { 58 | node.srcLinkWidth = 0; 59 | } 60 | 61 | if (node.description?.trim()) { 62 | const infoText = " ℹ️ "; 63 | node.infoWidth = ctx.measureText(infoText).width; 64 | currentX -= node.infoWidth; 65 | ctx.fillText(infoText, currentX, LiteGraph.NODE_TITLE_TEXT_Y - titleHeight); 66 | } else { 67 | node.infoWidth = 0; 68 | } 69 | 70 | if (node?.has_log) { 71 | const logText = "📜"; 72 | node.logWidth = ctx.measureText(logText).width; 73 | currentX -= node.logWidth; 74 | ctx.fillText(logText, currentX, LiteGraph.NODE_TITLE_TEXT_Y - titleHeight); 75 | } else { 76 | node.logWidth = 0; 77 | } 78 | } 79 | 80 | 81 | function isInsideRectangle(x, y, left, top, width, height) { 82 | if (left < x && left + width > x && top < y && top + height > y) { 83 | return true; 84 | } 85 | return false; 86 | } 87 | 88 | 89 | app.registerExtension({ 90 | name: "EasyNodes", 91 | async setup() { 92 | createSetting( 93 | editorPathPrefixId, 94 | "🪄 Stack trace link prefix (makes stack traces clickable, e.g. 'vscode://vscode-remote/wsl+Ubuntu')", 95 | "text", 96 | "" 97 | ); 98 | createSetting( 99 | sourcePathPrefixId, 100 | "🪄 Stack trace remove prefix (common prefix to remove, e.g '/home/user/project/')", 101 | "text", 102 | "" 103 | ); 104 | createSetting( 105 | reloadOnEditId, 106 | "🪄 Auto-reload EasyNodes source files on edits.", 107 | "boolean", 108 | false, 109 | ); 110 | createSetting( 111 | renderIconsId, 112 | "🪄 Render src, log, and info icons in node titlebars. 
If false, can still be accessed via menu.", 113 | "boolean", 114 | true, 115 | ); 116 | }, 117 | 118 | async beforeRegisterNodeDef(nodeType, nodeData, app) { 119 | const easyNodesJsonPrefix = "EasyNodesInfo="; 120 | if (nodeData?.description?.startsWith(easyNodesJsonPrefix)) { 121 | nodeData.isEasyNode = true; 122 | // EasyNodes metadata will be crammed into the first line of the description in json format. 123 | const [nodeInfo, ...descriptionLines] = nodeData.description.split('\n'); 124 | const { color, bgColor, width, height, sourceLocation } = JSON.parse(nodeInfo.replace(easyNodesJsonPrefix, "")); 125 | 126 | nodeData.description = descriptionLines.join('\n'); 127 | 128 | const editorPathPrefix = app.ui.settings.getSettingValue(editorPathPrefixId); 129 | 130 | function applyColorsAndSource() { 131 | if (color) { 132 | this.color = color; 133 | } 134 | if (bgColor) { 135 | this.bgcolor = bgColor; 136 | } 137 | if (sourceLocation && editorPathPrefix) { 138 | this.sourceLoc = editorPathPrefix + sourceLocation; 139 | } 140 | this.description = nodeData.description; 141 | } 142 | 143 | // Apply colors and source location when the node is created 144 | const onNodeCreated = nodeType.prototype.onNodeCreated; 145 | nodeType.prototype.onNodeCreated = function () { 146 | onNodeCreated?.apply(this, arguments); 147 | applyColorsAndSource.call(this); 148 | if (width) { 149 | this.size[0] = width; 150 | } 151 | if (height) { 152 | this.size[1] = height; 153 | } 154 | this.origWidgetCount = this.widgets?.length ?? 0; 155 | }; 156 | 157 | // Apply color and source location when configuring the node 158 | const onConfigure = nodeType.prototype.onConfigure; 159 | nodeType.prototype.onConfigure = function () { 160 | onConfigure?.apply(this, arguments); 161 | applyColorsAndSource.call(this); 162 | 163 | this.origWidgetCount = this.widgets?.length ?? 0; 164 | const widgetValsLength = this.widgets_values?.length ?? 
0; 165 | 166 | const numShowVals = widgetValsLength - this.origWidgetCount; 167 | resizeShowValueWidgets(this, numShowVals, app); 168 | 169 | for (let i = 0; i < numShowVals; i++) { 170 | this.showValueWidgets[i].value = this.widgets_values[this.origWidgetCount + i]; 171 | } 172 | }; 173 | 174 | const onExecuted = nodeType.prototype.onExecuted; 175 | nodeType.prototype.onExecuted = function (message) { 176 | onExecuted?.apply(this, [message]); 177 | 178 | if (!message || !message.text) { 179 | return; 180 | } 181 | 182 | const numShowVals = message.text.length; 183 | 184 | resizeShowValueWidgets(this, numShowVals, app); 185 | 186 | for (let i = 0; i < numShowVals; i++) { 187 | this.showValueWidgets[i].value = message.text[i]; 188 | } 189 | 190 | this.setSize(this.computeSize()); 191 | this.setDirtyCanvas(true, true); 192 | app.graph.setDirtyCanvas(true, true); 193 | } 194 | 195 | const onDrawForeground = nodeType.prototype.onDrawForeground; 196 | nodeType.prototype.onDrawForeground = function (ctx, canvas, graphMouse) { 197 | onDrawForeground?.apply(this, arguments); 198 | if (app.ui.settings.getSettingValue(renderIconsId)) { 199 | renderSourceLinkAndInfo(this, ctx, LiteGraph.NODE_TITLE_HEIGHT); 200 | } 201 | }; 202 | 203 | const onDrawBackground = nodeType.prototype.onDrawBackground; 204 | nodeType.prototype.onDrawBackground = function (ctx, canvas) { 205 | onDrawBackground?.apply(this, arguments); 206 | } 207 | 208 | const onMouseDown = nodeType.prototype.onMouseDown; 209 | nodeType.prototype.onMouseDown = function (e, localPos, graphMouse) { 210 | onMouseDown?.apply(this, arguments); 211 | 212 | if (!app.ui.settings.getSettingValue(renderIconsId)) { 213 | return; 214 | } 215 | 216 | if (this.srcLink && !this.flags.collapsed && isInsideRectangle(localPos[0], localPos[1], this.size[0] - this.srcLinkWidth - startOffset, 217 | -LiteGraph.NODE_TITLE_HEIGHT, this.srcLinkWidth, LiteGraph.NODE_TITLE_HEIGHT)) { 218 | window.open(this.srcLink, "_blank"); 219 | return 
true; 220 | } 221 | 222 | const leftPos = this.size[0] - this.srcLinkWidth - this.logWidth - this.infoWidth - startOffset; 223 | 224 | // Check if log icon is clicked 225 | if (this?.has_log && !this.flags.collapsed && isInsideRectangle(localPos[0], localPos[1], leftPos, 226 | -LiteGraph.NODE_TITLE_HEIGHT, this.logWidth, LiteGraph.NODE_TITLE_HEIGHT)) { 227 | window.open(`/easy_nodes/show_log?node=${this.id}`, "_blank"); 228 | return true; 229 | } 230 | }; 231 | 232 | const getExtraMenuOptions = nodeType.prototype.getExtraMenuOptions; 233 | nodeType.prototype.getExtraMenuOptions = function (canvas, options) { 234 | getExtraMenuOptions?.apply(this, arguments); 235 | 236 | if (this.sourceLoc) { 237 | options.push({ 238 | content: "Open Source", 239 | callback: () => { 240 | window.open(this.sourceLoc, "_blank"); 241 | } 242 | }); 243 | } 244 | 245 | if (this.has_log) { 246 | options.push({ 247 | content: "View Log", 248 | callback: () => { 249 | window.open(`/easy_nodes/show_log?node=${this.id}`, "_blank"); 250 | } 251 | }); 252 | } 253 | 254 | return options; 255 | }; 256 | } 257 | }, 258 | }); 259 | 260 | 261 | const origProcessMouseMove = LGraphCanvas.prototype.processMouseMove; 262 | LGraphCanvas.prototype.processMouseMove = function(e) { 263 | const res = origProcessMouseMove.apply(this, arguments); 264 | 265 | if (!app.ui.settings.getSettingValue(renderIconsId)) { 266 | return res; 267 | } 268 | 269 | var node = this.graph.getNodeOnPos(e.canvasX,e.canvasY,this.visible_nodes); 270 | 271 | if (!node || !this.canvas || node.flags.collapsed) { 272 | return res; 273 | } 274 | 275 | var srcLinkWidth = node?.srcLinkWidth ?? 0; 276 | var linkHeight = LiteGraph.NODE_TITLE_HEIGHT; 277 | 278 | var infoWidth = node?.infoWidth ?? 0; 279 | var logWidth = node?.logWidth ?? 
0; 280 | 281 | var linkX = node.pos[0] + node.size[0] - srcLinkWidth - startOffset; 282 | var linkY = node.pos[1] - LiteGraph.NODE_TITLE_HEIGHT; 283 | 284 | var infoX = linkX - infoWidth; 285 | var infoY = linkY; 286 | 287 | var logX = infoX - logWidth; 288 | var logY = linkY; 289 | 290 | const desc = node.description?.trim(); 291 | if (node.srcLink && isInsideRectangle(e.canvasX, e.canvasY, linkX, linkY, srcLinkWidth, linkHeight)) { 292 | this.canvas.style.cursor = "pointer"; 293 | this.tooltip_text = node.srcLink; 294 | this.tooltip_pos = [e.canvasX, e.canvasY]; 295 | this.dirty_canvas = true; 296 | } else if (desc && isInsideRectangle(e.canvasX, e.canvasY, infoX, infoY, infoWidth, linkHeight)) { 297 | this.canvas.style.cursor = "help"; 298 | this.tooltip_text = desc; 299 | this.tooltip_pos = [e.canvasX, e.canvasY]; 300 | this.dirty_canvas = true; 301 | } else if (node?.has_log && isInsideRectangle(e.canvasX, e.canvasY, logX, logY, logWidth, linkHeight)) { 302 | this.canvas.style.cursor = "pointer"; 303 | this.tooltip_text = "View Log"; 304 | this.tooltip_pos = [e.canvasX, e.canvasY]; 305 | this.dirty_canvas = true; 306 | 307 | floatingLogWindow.show(e.canvasX, e.canvasY, node.id); 308 | } else { 309 | this.tooltip_text = null; 310 | floatingLogWindow.scheduleHide(); 311 | } 312 | 313 | return res; 314 | }; 315 | 316 | 317 | LGraphCanvas.prototype.drawNodeTooltip = function(ctx, text, pos) { 318 | if (text === null) return; 319 | 320 | ctx.save(); 321 | ctx.font = "14px Consolas, 'Courier New', monospace"; 322 | 323 | var lines = text.split('\n'); 324 | var lineHeight = 18; 325 | var totalHeight = lines.length * lineHeight; 326 | 327 | var w = 0; 328 | for (var i = 0; i < lines.length; i++) { 329 | var info = ctx.measureText(lines[i].trim()); 330 | w = Math.max(w, info.width); 331 | } 332 | w += 20; 333 | 334 | ctx.shadowColor = "rgba(0, 0, 0, 0.5)"; 335 | ctx.shadowOffsetX = 2; 336 | ctx.shadowOffsetY = 2; 337 | ctx.shadowBlur = 5; 338 | 339 | ctx.fillStyle = 
"#2E2E2E"; 340 | ctx.beginPath(); 341 | ctx.roundRect(pos[0] - w / 2, pos[1] - 15 - totalHeight, w, totalHeight, 5, 5); 342 | ctx.moveTo(pos[0] - 10, pos[1] - 15); 343 | ctx.lineTo(pos[0] + 10, pos[1] - 15); 344 | ctx.lineTo(pos[0], pos[1] - 5); 345 | ctx.fill(); 346 | 347 | ctx.shadowColor = "transparent"; 348 | ctx.textAlign = "left"; 349 | 350 | for (var i = 0; i < lines.length; i++) { 351 | var line = lines[i].trim(); 352 | 353 | // Render the colored line 354 | var el = document.createElement('div'); 355 | 356 | el.innerHTML = line; 357 | 358 | var parts = el.childNodes; 359 | var x = pos[0] - w / 2 + 10; 360 | 361 | for (var j = 0; j < parts.length; j++) { 362 | var part = parts[j]; 363 | ctx.fillStyle = "#E4E4E4"; 364 | ctx.fillText(part.textContent, x, pos[1] - 15 - totalHeight + (i + 0.8) * lineHeight); 365 | x += ctx.measureText(part.textContent).width; 366 | } 367 | } 368 | 369 | ctx.restore(); 370 | }; 371 | 372 | 373 | const origdrawFrontCanvas = LGraphCanvas.prototype.drawFrontCanvas; 374 | LGraphCanvas.prototype.drawFrontCanvas = function() { 375 | origdrawFrontCanvas.apply(this, arguments); 376 | if (this.tooltip_text) { 377 | this.ctx.save(); 378 | this.ds.toCanvasContext(this.ctx); 379 | this.drawNodeTooltip(this.ctx, this.tooltip_text, this.tooltip_pos); 380 | this.ctx.restore(); 381 | } 382 | }; 383 | 384 | 385 | const formatExecutionError = function(error) { 386 | if (error == null) { 387 | return "(unknown error)"; 388 | } 389 | 390 | // Joining the traceback if it's an array, or directly using it if it's already a string 391 | let traceback = Array.isArray(error.traceback) ? 
error.traceback.join("") : error.traceback; 392 | let exceptionMessage = error.exception_message; 393 | 394 | const nodeId = error.node_id; 395 | const nodeType = error.node_type; 396 | 397 | // Regular expression to match "File _, in_ " patterns 398 | const fileLineRegex = /File "(.+)", line (\d+), in .+/g; 399 | 400 | // Replace "File _, in_ " patterns with ":" 401 | traceback = traceback.replace(fileLineRegex, "$1:$2"); 402 | exceptionMessage = exceptionMessage.replace(fileLineRegex, "$1:$2"); 403 | 404 | const editorPathPrefix = this.ui.settings.getSettingValue(editorPathPrefixId); 405 | const filePathPrefix = this.ui.settings.getSettingValue(sourcePathPrefixId); 406 | 407 | let formattedExceptionMessage = exceptionMessage; 408 | let formattedTraceback = traceback; 409 | 410 | if (editorPathPrefix) { 411 | // Escape special characters in filePathPrefix to be used in a regular expression 412 | const escapedPathPrefix = filePathPrefix ? filePathPrefix.replace(/[-\/\\^$*+?.()|[\]{}]/g, '\\$&') : ""; 413 | 414 | // Creating the regular expression using RegExp constructor to match file paths 415 | const filePathRegex = new RegExp(`(${escapedPathPrefix || "/"})(.*?):(\\d+)`, 'g'); 416 | 417 | // Replace ":" patterns with links in the exception message 418 | formattedExceptionMessage = exceptionMessage.replace(filePathRegex, (match, prefix, p1, p2) => { 419 | const displayPath = filePathPrefix ? p1 : `${prefix}${p1}`; 420 | return `${displayPath}:${p2}`; 421 | }); 422 | 423 | // Check if the exception message contains ":" matches 424 | const hasFileLineMatches = filePathRegex.test(exceptionMessage); 425 | 426 | if (!hasFileLineMatches) { 427 | // Replace ":" patterns with links in the traceback 428 | formattedTraceback = traceback.replace(filePathRegex, (match, prefix, p1, p2) => { 429 | const displayPath = filePathPrefix ? 
p1 : `${prefix}${p1}`; 430 | return `${displayPath}:${p2}`; 431 | }); 432 | } 433 | } 434 | 435 | let formattedOutput = `Error occurred when executing ${nodeType} [${nodeId}]:\n\n` + 436 | `${formattedExceptionMessage}`; 437 | 438 | if (formattedTraceback !== exceptionMessage) { 439 | formattedOutput += `\n\n${formattedTraceback}`; 440 | } 441 | 442 | return formattedOutput; 443 | } 444 | 445 | var otherShow = null; 446 | const customShow = function(html) { 447 | // If this is not an exception let it through as normal. 448 | if (!html.includes("Error occurred when executing")) { 449 | return otherShow.apply(this, arguments); 450 | } 451 | 452 | // Since we know it's an exception now, only let it through 453 | // if the source is our event listener below, which will have 454 | // added the special tag to the error while reformatting. 455 | if (html.includes('class="custom-error"')) { 456 | return otherShow.apply(this, arguments); 457 | } 458 | }; 459 | 460 | api.addEventListener("execution_error", function(e) { 461 | // Make the dialog upgrade opt-in. 462 | // If the user hasn't set the editor path prefix or the file path prefix, don't do anything. 463 | const editorPathPrefix = app.ui.settings.getSettingValue(editorPathPrefixId); 464 | const filePathPrefix = app.ui.settings.getSettingValue(sourcePathPrefixId); 465 | if (!editorPathPrefix && !filePathPrefix) { 466 | console.log(editorPathPrefix, filePathPrefix); 467 | return; 468 | } 469 | 470 | // Replace the default dialog.show with our custom one if we haven't already. 471 | // We can't do it earlier because other extensions might run later and replace 472 | // it out from under us in that case. 
473 | if (!otherShow) { 474 | otherShow = app.ui.dialog.show; 475 | app.ui.dialog.show = customShow; 476 | } 477 | const formattedError = formatExecutionError.call(app, e.detail); 478 | app.ui.dialog.show(formattedError); 479 | app.canvas.draw(true, true); 480 | }); 481 | -------------------------------------------------------------------------------- /example/example_workflow.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 143, 3 | "last_link_id": 207, 4 | "nodes": [ 5 | { 6 | "id": 139, 7 | "type": "PreviewImage", 8 | "pos": [ 9 | 2496.723609474713, 10 | 1235.674149344319 11 | ], 12 | "size": { 13 | "0": 210, 14 | "1": 246 15 | }, 16 | "flags": {}, 17 | "order": 7, 18 | "mode": 0, 19 | "inputs": [ 20 | { 21 | "name": "images", 22 | "type": "IMAGE", 23 | "link": 203, 24 | "slot_index": 0 25 | } 26 | ], 27 | "properties": { 28 | "Node name for S&R": "PreviewImage" 29 | } 30 | }, 31 | { 32 | "id": 106, 33 | "type": "PreviewExample", 34 | "pos": [ 35 | 1232.0424397167956, 36 | 1251.8729675781246 37 | ], 38 | "size": { 39 | "0": 210, 40 | "1": 225.99998474121094 41 | }, 42 | "flags": {}, 43 | "order": 6, 44 | "mode": 0, 45 | "inputs": [ 46 | { 47 | "name": "str2", 48 | "type": "STRING", 49 | "link": 147, 50 | "widget": { 51 | "name": "str2" 52 | } 53 | } 54 | ], 55 | "outputs": [ 56 | { 57 | "name": "STRING", 58 | "type": "STRING", 59 | "links": null, 60 | "shape": 3 61 | } 62 | ], 63 | "properties": { 64 | "Node name for S&R": "PreviewExample" 65 | }, 66 | "widgets_values": [ 67 | "", 68 | "hello: Sample text", 69 | "hello: Sample text", 70 | "hello: Sample text", 71 | "hello: Sample text" 72 | ] 73 | }, 74 | { 75 | "id": 142, 76 | "type": "MyFunClass", 77 | "pos": [ 78 | 1808.7236094747136, 79 | 1245.674149344319 80 | ], 81 | "size": { 82 | "0": 315, 83 | "1": 106 84 | }, 85 | "flags": {}, 86 | "order": 0, 87 | "mode": 0, 88 | "outputs": [ 89 | { 90 | "name": "FUN_CLASS", 91 | "type": "FUN_CLASS", 92 | 
"links": [ 93 | 202 94 | ], 95 | "shape": 3, 96 | "slot_index": 0 97 | } 98 | ], 99 | "properties": { 100 | "Node name for S&R": "MyFunClass" 101 | }, 102 | "widgets_values": [ 103 | 0.5, 104 | 640, 105 | 640 106 | ] 107 | }, 108 | { 109 | "id": 4, 110 | "type": "LoadImage", 111 | "pos": [ 112 | 348, 113 | 402 114 | ], 115 | "size": { 116 | "0": 315, 117 | "1": 314 118 | }, 119 | "flags": {}, 120 | "order": 1, 121 | "mode": 0, 122 | "outputs": [ 123 | { 124 | "name": "IMAGE", 125 | "type": "IMAGE", 126 | "links": [ 127 | 100, 128 | 116, 129 | 182, 130 | 204 131 | ], 132 | "shape": 3, 133 | "slot_index": 0 134 | }, 135 | { 136 | "name": "MASK", 137 | "type": "MASK", 138 | "links": [], 139 | "shape": 3, 140 | "slot_index": 1 141 | } 142 | ], 143 | "properties": { 144 | "Node name for S&R": "LoadImage" 145 | }, 146 | "widgets_values": [ 147 | "dog.png", 148 | "image" 149 | ] 150 | }, 151 | { 152 | "id": 91, 153 | "type": "TextRepeater", 154 | "pos": [ 155 | 839.0424397167969, 156 | 1247.8729675781246 157 | ], 158 | "size": { 159 | "0": 298.5838928222656, 160 | "1": 82 161 | }, 162 | "flags": {}, 163 | "order": 2, 164 | "mode": 0, 165 | "outputs": [ 166 | { 167 | "name": "STRING", 168 | "type": "STRING", 169 | "links": [ 170 | 147 171 | ], 172 | "shape": 6, 173 | "slot_index": 0 174 | } 175 | ], 176 | "properties": { 177 | "Node name for S&R": "TextRepeater" 178 | }, 179 | "widgets_values": [ 180 | "Sample text", 181 | 4 182 | ] 183 | }, 184 | { 185 | "id": 119, 186 | "type": "ExampleMaskImage", 187 | "pos": [ 188 | 1234, 189 | 728 190 | ], 191 | "size": { 192 | "0": 303.48785400390625, 193 | "1": 371.7372741699219 194 | }, 195 | "flags": {}, 196 | "order": 11, 197 | "mode": 0, 198 | "inputs": [ 199 | { 200 | "name": "image", 201 | "type": "IMAGE", 202 | "link": 182 203 | }, 204 | { 205 | "name": "mask", 206 | "type": "MASK", 207 | "link": 206 208 | } 209 | ], 210 | "outputs": [ 211 | { 212 | "name": "IMAGE", 213 | "type": "IMAGE", 214 | "links": [ 215 | 184, 216 | 185 
217 | ], 218 | "shape": 3, 219 | "slot_index": 0 220 | } 221 | ], 222 | "properties": { 223 | "Node name for S&R": "ExampleMaskImage" 224 | }, 225 | "widgets_values": [ 226 | 0 227 | ], 228 | "color": "#0000FF", 229 | "bgcolor": "#000099" 230 | }, 231 | { 232 | "id": 143, 233 | "type": "ThresholdImage", 234 | "pos": [ 235 | 839, 236 | 589 237 | ], 238 | "size": { 239 | "0": 315, 240 | "1": 78 241 | }, 242 | "flags": {}, 243 | "order": 5, 244 | "mode": 0, 245 | "inputs": [ 246 | { 247 | "name": "image", 248 | "type": "IMAGE", 249 | "link": 204 250 | } 251 | ], 252 | "outputs": [ 253 | { 254 | "name": "Below", 255 | "type": "MASK", 256 | "links": [ 257 | 205, 258 | 207 259 | ], 260 | "shape": 3, 261 | "slot_index": 0 262 | }, 263 | { 264 | "name": "Above", 265 | "type": "MASK", 266 | "links": [ 267 | 206 268 | ], 269 | "shape": 3, 270 | "slot_index": 1 271 | } 272 | ], 273 | "properties": { 274 | "Node name for S&R": "ThresholdImage" 275 | }, 276 | "widgets_values": [ 277 | 0.5 278 | ], 279 | "color": "#0066cc", 280 | "bgcolor": "#ffcc00" 281 | }, 282 | { 283 | "id": 83, 284 | "type": "ExampleMaskImage", 285 | "pos": [ 286 | 1251, 287 | 339 288 | ], 289 | "size": { 290 | "0": 275.1528015136719, 291 | "1": 347.391845703125 292 | }, 293 | "flags": {}, 294 | "order": 9, 295 | "mode": 0, 296 | "inputs": [ 297 | { 298 | "name": "image", 299 | "type": "IMAGE", 300 | "link": 116 301 | }, 302 | { 303 | "name": "mask", 304 | "type": "MASK", 305 | "link": 205 306 | } 307 | ], 308 | "outputs": [ 309 | { 310 | "name": "IMAGE", 311 | "type": "IMAGE", 312 | "links": [ 313 | 163, 314 | 165 315 | ], 316 | "shape": 3, 317 | "slot_index": 0 318 | } 319 | ], 320 | "properties": { 321 | "Node name for S&R": "ExampleMaskImage" 322 | }, 323 | "widgets_values": [ 324 | 0 325 | ], 326 | "color": "#0000FF", 327 | "bgcolor": "#000099" 328 | }, 329 | { 330 | "id": 141, 331 | "type": "MyFunClassNodeProcessor", 332 | "pos": [ 333 | 2181.1306656359343, 334 | 1254.2489846781252 335 | ], 336 | 
"size": { 337 | "0": 257.3725891113281, 338 | "1": 46.323490142822266 339 | }, 340 | "flags": {}, 341 | "order": 3, 342 | "mode": 0, 343 | "inputs": [ 344 | { 345 | "name": "fun_class", 346 | "type": "FUN_CLASS", 347 | "link": 202 348 | } 349 | ], 350 | "outputs": [ 351 | { 352 | "name": "IMAGE", 353 | "type": "IMAGE", 354 | "links": [ 355 | 203 356 | ], 357 | "shape": 3, 358 | "slot_index": 0 359 | } 360 | ], 361 | "properties": { 362 | "Node name for S&R": "MyFunClassNodeProcessor" 363 | }, 364 | "color": "#4F006F", 365 | "bgcolor": "#2F0042" 366 | }, 367 | { 368 | "id": 109, 369 | "type": "CombineLists", 370 | "pos": [ 371 | 1817, 372 | 767 373 | ], 374 | "size": { 375 | "0": 210, 376 | "1": 46 377 | }, 378 | "flags": {}, 379 | "order": 13, 380 | "mode": 0, 381 | "inputs": [ 382 | { 383 | "name": "image1", 384 | "type": "IMAGE", 385 | "link": 163 386 | }, 387 | { 388 | "name": "image2", 389 | "type": "IMAGE", 390 | "link": 185 391 | } 392 | ], 393 | "outputs": [ 394 | { 395 | "name": "IMAGE", 396 | "type": "IMAGE", 397 | "links": [ 398 | 168 399 | ], 400 | "shape": 6, 401 | "slot_index": 0 402 | } 403 | ], 404 | "properties": { 405 | "Node name for S&R": "CombineLists" 406 | } 407 | }, 408 | { 409 | "id": 116, 410 | "type": "PreviewImage", 411 | "pos": [ 412 | 2321, 413 | 716 414 | ], 415 | "size": [ 416 | 387.5997876780352, 417 | 406.0673642007994 418 | ], 419 | "flags": {}, 420 | "order": 16, 421 | "mode": 0, 422 | "inputs": [ 423 | { 424 | "name": "images", 425 | "type": "IMAGE", 426 | "link": 193 427 | } 428 | ], 429 | "properties": { 430 | "Node name for S&R": "PreviewImage" 431 | } 432 | }, 433 | { 434 | "id": 84, 435 | "type": "CombineLists", 436 | "pos": [ 437 | 2078, 438 | 719 439 | ], 440 | "size": { 441 | "0": 210, 442 | "1": 46 443 | }, 444 | "flags": {}, 445 | "order": 15, 446 | "mode": 0, 447 | "inputs": [ 448 | { 449 | "name": "image1", 450 | "type": "IMAGE", 451 | "link": 125 452 | }, 453 | { 454 | "name": "image2", 455 | "type": "IMAGE", 456 | 
"link": 168 457 | } 458 | ], 459 | "outputs": [ 460 | { 461 | "name": "IMAGE", 462 | "type": "IMAGE", 463 | "links": [ 464 | 193 465 | ], 466 | "shape": 6, 467 | "slot_index": 0 468 | } 469 | ], 470 | "properties": { 471 | "Node name for S&R": "CombineLists" 472 | } 473 | }, 474 | { 475 | "id": 15, 476 | "type": "PreviewImage", 477 | "pos": [ 478 | 2329, 479 | 354 480 | ], 481 | "size": [ 482 | 290.5997876780352, 483 | 310.0673642007994 484 | ], 485 | "flags": {}, 486 | "order": 14, 487 | "mode": 0, 488 | "inputs": [ 489 | { 490 | "name": "images", 491 | "type": "IMAGE", 492 | "link": 112 493 | } 494 | ], 495 | "properties": { 496 | "Node name for S&R": "PreviewImage" 497 | } 498 | }, 499 | { 500 | "id": 81, 501 | "type": "AddImages", 502 | "pos": [ 503 | 2075, 504 | 356 505 | ], 506 | "size": { 507 | "0": 210, 508 | "1": 66 509 | }, 510 | "flags": {}, 511 | "order": 12, 512 | "mode": 0, 513 | "inputs": [ 514 | { 515 | "name": "image1", 516 | "type": "IMAGE", 517 | "link": 165 518 | }, 519 | { 520 | "name": "image2", 521 | "type": "IMAGE", 522 | "link": 184 523 | }, 524 | { 525 | "name": "image3", 526 | "type": "IMAGE", 527 | "link": null 528 | } 529 | ], 530 | "outputs": [ 531 | { 532 | "name": "IMAGE", 533 | "type": "IMAGE", 534 | "links": [ 535 | 112 536 | ], 537 | "shape": 3, 538 | "slot_index": 0 539 | } 540 | ], 541 | "properties": { 542 | "Node name for S&R": "AddImages" 543 | } 544 | }, 545 | { 546 | "id": 49, 547 | "type": "ConvertToImage", 548 | "pos": [ 549 | 1577, 550 | 603 551 | ], 552 | "size": { 553 | "0": 210, 554 | "1": 26 555 | }, 556 | "flags": {}, 557 | "order": 10, 558 | "mode": 0, 559 | "inputs": [ 560 | { 561 | "name": "mask", 562 | "type": "MASK", 563 | "link": 207, 564 | "slot_index": 0 565 | } 566 | ], 567 | "outputs": [ 568 | { 569 | "name": "IMAGE", 570 | "type": "IMAGE", 571 | "links": [ 572 | 125 573 | ], 574 | "shape": 3, 575 | "slot_index": 0 576 | } 577 | ], 578 | "properties": { 579 | "Node name for S&R": "ConvertToImage" 580 | }, 
581 | "color": "#00FF00", 582 | "bgcolor": "#009900" 583 | }, 584 | { 585 | "id": 125, 586 | "type": "PreviewImage", 587 | "pos": [ 588 | 1245.7276206015622, 589 | -139.76685204003934 590 | ], 591 | "size": [ 592 | 345.5997876780352, 593 | 368.0673642007994 594 | ], 595 | "flags": {}, 596 | "order": 8, 597 | "mode": 0, 598 | "inputs": [ 599 | { 600 | "name": "images", 601 | "type": "IMAGE", 602 | "link": 196 603 | } 604 | ], 605 | "properties": { 606 | "Node name for S&R": "PreviewImage" 607 | } 608 | }, 609 | { 610 | "id": 71, 611 | "type": "AnnotatedExample", 612 | "pos": [ 613 | 835.7276206015616, 614 | -138.76685204003934 615 | ], 616 | "size": [ 617 | 333.5997876780352, 618 | 147.1673642007991 619 | ], 620 | "flags": {}, 621 | "order": 4, 622 | "mode": 0, 623 | "inputs": [ 624 | { 625 | "name": "image", 626 | "type": "IMAGE", 627 | "link": 100 628 | } 629 | ], 630 | "outputs": [ 631 | { 632 | "name": "IMAGE", 633 | "type": "IMAGE", 634 | "links": [ 635 | 196 636 | ], 637 | "shape": 3, 638 | "slot_index": 0 639 | } 640 | ], 641 | "properties": { 642 | "Node name for S&R": "AnnotatedExample" 643 | }, 644 | "widgets_values": [ 645 | "Hello World!", 646 | 0, 647 | 1, 648 | "enabled" 649 | ] 650 | } 651 | ], 652 | "links": [ 653 | [ 654 | 100, 655 | 4, 656 | 0, 657 | 71, 658 | 0, 659 | "IMAGE" 660 | ], 661 | [ 662 | 112, 663 | 81, 664 | 0, 665 | 15, 666 | 0, 667 | "IMAGE" 668 | ], 669 | [ 670 | 116, 671 | 4, 672 | 0, 673 | 83, 674 | 0, 675 | "IMAGE" 676 | ], 677 | [ 678 | 125, 679 | 49, 680 | 0, 681 | 84, 682 | 0, 683 | "IMAGE" 684 | ], 685 | [ 686 | 147, 687 | 91, 688 | 0, 689 | 106, 690 | 0, 691 | "STRING" 692 | ], 693 | [ 694 | 163, 695 | 83, 696 | 0, 697 | 109, 698 | 0, 699 | "IMAGE" 700 | ], 701 | [ 702 | 165, 703 | 83, 704 | 0, 705 | 81, 706 | 0, 707 | "IMAGE" 708 | ], 709 | [ 710 | 168, 711 | 109, 712 | 0, 713 | 84, 714 | 1, 715 | "IMAGE" 716 | ], 717 | [ 718 | 182, 719 | 4, 720 | 0, 721 | 119, 722 | 0, 723 | "IMAGE" 724 | ], 725 | [ 726 | 184, 727 | 119, 
728 | 0, 729 | 81, 730 | 1, 731 | "IMAGE" 732 | ], 733 | [ 734 | 185, 735 | 119, 736 | 0, 737 | 109, 738 | 1, 739 | "IMAGE" 740 | ], 741 | [ 742 | 193, 743 | 84, 744 | 0, 745 | 116, 746 | 0, 747 | "IMAGE" 748 | ], 749 | [ 750 | 196, 751 | 71, 752 | 0, 753 | 125, 754 | 0, 755 | "IMAGE" 756 | ], 757 | [ 758 | 202, 759 | 142, 760 | 0, 761 | 141, 762 | 0, 763 | "FUN_CLASS" 764 | ], 765 | [ 766 | 203, 767 | 141, 768 | 0, 769 | 139, 770 | 0, 771 | "IMAGE" 772 | ], 773 | [ 774 | 204, 775 | 4, 776 | 0, 777 | 143, 778 | 0, 779 | "IMAGE" 780 | ], 781 | [ 782 | 205, 783 | 143, 784 | 0, 785 | 83, 786 | 1, 787 | "MASK" 788 | ], 789 | [ 790 | 206, 791 | 143, 792 | 1, 793 | 119, 794 | 1, 795 | "MASK" 796 | ], 797 | [ 798 | 207, 799 | 143, 800 | 0, 801 | 49, 802 | 0, 803 | "MASK" 804 | ] 805 | ], 806 | "groups": [ 807 | { 808 | "title": "Adding Preview Text", 809 | "bounding": [ 810 | 827, 811 | 1175, 812 | 624, 813 | 313 814 | ], 815 | "color": "#3f789e", 816 | "font_size": 24 817 | }, 818 | { 819 | "title": "Colorful Nodes and adding Preview Images", 820 | "bounding": [ 821 | 829, 822 | 265, 823 | 1890, 824 | 867 825 | ], 826 | "color": "#3f789e", 827 | "font_size": 24 828 | }, 829 | { 830 | "title": "Dynamic nodes from Python classes", 831 | "bounding": [ 832 | 1770, 833 | 1160, 834 | 947, 835 | 330 836 | ], 837 | "color": "#3f789e", 838 | "font_size": 24 839 | }, 840 | { 841 | "title": "ComfyUI example node", 842 | "bounding": [ 843 | 826, 844 | -214, 845 | 776, 846 | 452 847 | ], 848 | "color": "#3f789e", 849 | "font_size": 24 850 | } 851 | ], 852 | "config": {}, 853 | "extra": { 854 | "ds": { 855 | "scale": 0.5131581182307069, 856 | "offset": { 857 | "0": 1298.943361825788, 858 | "1": 936.8752894516717 859 | } 860 | } 861 | }, 862 | "version": 0.4 863 | } -------------------------------------------------------------------------------- /easy_nodes/llm_debugging.py: -------------------------------------------------------------------------------- 1 | import difflib 2 | import 
def create_openai_client() -> OpenAI:
    """Build an OpenAI client using the key in the OPENAI_API_KEY environment variable.

    Raises:
        ValueError: if the environment variable is unset or empty.
    """
    api_key = os.environ.get("OPENAI_API_KEY")
    if api_key:
        return OpenAI(api_key=api_key)
    raise ValueError("OpenAI API key not found in OPENAI_API_KEY environment variable. "
                     + "Please set the API key to use LLM debugging.")
def extract_function_or_class_name(source):
    """
    Extracts the name of the first function or class defined in the given source code.

    Parameters:
    - source: The source code of the function or class.

    Returns:
    - The name of the function or class, or None if no definition is found.
    """
    # \b prevents false matches inside identifiers such as "subclass" or
    # "undefined" (the old pattern extracted "Foo" from "# subclass Foo"),
    # while still matching "async def foo".
    pattern = r'\b(?:def|class)\s+(\w+)'

    match = re.search(pattern, source)

    return match.group(1) if match else None
75 | """ 76 | # Get the module object based on the module name 77 | module_obj = sys.modules.get(module_name) 78 | 79 | if module_obj is None: 80 | # If the module is not found, raise an error or return None 81 | raise ValueError(f"Module '{module_name}' not found.") 82 | 83 | # Get the file path of the module 84 | file_path = getattr(module_obj, '__file__', None) 85 | 86 | if file_path is None: 87 | # If the file path is not available, raise an error or return None 88 | raise ValueError(f"File path not found for module '{module_name}'.") 89 | 90 | # Normalize the file path 91 | file_path = os.path.abspath(file_path) 92 | 93 | # If the file path ends with '.pyc' or '.pyo', remove the 'c' or 'o' extension 94 | if file_path.endswith(('.pyc', '.pyo')): 95 | file_path = file_path[:-1] 96 | 97 | return file_path 98 | 99 | 100 | def split_top_level_entries(code): 101 | """ 102 | Splits the code into top-level entries (classes, functions, and statements). 103 | Parameters: 104 | - code: The code containing top-level entries. 105 | Returns: 106 | - A list of top-level entry code strings. 
107 | """ 108 | entries = [] 109 | current_entry = [] 110 | start_indices = [] 111 | 112 | for i, line in enumerate(code.split('\n')): 113 | indent_level = len(line) - len(line.lstrip()) 114 | line_empty = len(line.strip()) == 0 115 | 116 | # It's a new entry if the line has an indent level of 0 and is not empty or a comment, 117 | # and the previous line is not a decorator 118 | if ( 119 | indent_level == 0 120 | and not line_empty 121 | and not line.strip().startswith('#') 122 | and (not current_entry or not current_entry[-1].strip().startswith('@')) 123 | ): 124 | if current_entry: 125 | entries.append('\n'.join(current_entry)) 126 | current_entry = [] 127 | start_indices.append(i) 128 | 129 | current_entry.append(line) 130 | 131 | if current_entry: 132 | entries.append('\n'.join(current_entry)) 133 | 134 | return entries, start_indices 135 | 136 | 137 | def replace_source_with_updates(entry_code: str, original_source: dict[str, list[str]]): 138 | entry_name = extract_function_or_class_name(entry_code) 139 | logging.info("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!") 140 | logging.info(f"Replacing entry {entry_name}") 141 | logging.info("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!") 142 | 143 | # Find the original source code for the entry 144 | for module_name, source_items in original_source.items(): 145 | for source_item in source_items: 146 | if f"def {entry_name}(" in source_item or f"class {entry_name}(" in source_item: 147 | original_entry_code = source_item 148 | original_module_name = module_name 149 | 150 | assert original_entry_code is not None, f"Original source code for {entry_name} not found" 151 | assert original_module_name is not None, f"Original module name for {entry_name} not found" 152 | 153 | # Replace the original entry code with the modified entry code 154 | modified_source = original_entry_code.replace(original_entry_code, entry_code) 155 | 156 | # Determine the file path based on the module name 157 | original_file_path = 
def replace_source_with_updates(entry_code: str, original_source: dict[str, list[str]]):
    """
    Replace a function/class definition on disk with an updated version.

    Looks up the module containing the entry (by name) in original_source,
    writes the updated code to a temp copy of the containing file, produces a
    patch, and applies it to the original file when 'easy_nodes.llm_debugging'
    is set to 'AutoFix'.

    Parameters:
    - entry_code: The updated source for a single top-level entry.
    - original_source: Mapping of module name -> list of original source snippets.
    """
    entry_name = extract_function_or_class_name(entry_code)
    logging.info("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
    logging.info(f"Replacing entry {entry_name}")
    logging.info("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")

    # Find the module whose snippets contain the entry. The loop variables were
    # previously left unbound before the loop, which raised NameError (not the
    # intended AssertionError) when the entry was missing; it also kept
    # scanning after a match. Initialize and break out of both loops.
    original_module_name = None
    for module_name, source_items in original_source.items():
        for source_item in source_items:
            if f"def {entry_name}(" in source_item or f"class {entry_name}(" in source_item:
                original_module_name = module_name
                break
        if original_module_name is not None:
            break

    assert original_module_name is not None, f"Original module name for {entry_name} not found"

    # The file-level replacement only needs the new code: the old
    # `original_entry_code.replace(original_entry_code, entry_code)` was a
    # no-op wrapper that always evaluated to entry_code.
    modified_source = entry_code

    # Determine the file path based on the module name
    original_file_path = module_to_file_path(original_module_name)

    # Backup the full file first
    backup_file_path = create_backup_file(original_file_path)
    logging.info(f"Backed up {original_file_path} to '{backup_file_path}'")

    # Write the modified source code to a temp file for review
    tmp_file_path = replace_function_in_file(original_file_path, modified_source)
    logging.info(f"Modified entry written to temporary file for review: {tmp_file_path}")

    # Create a diff between the original and modified source
    _, patch_file = tempfile.mkstemp(suffix=".patch")
    create_patch(original_file_path, tmp_file_path, patch_file)
    logging.info(f"Patch file created: {patch_file}")
    print_patch_with_color(patch_file)

    if config_service.get_config_value("easy_nodes.llm_debugging", "Off") == "AutoFix":
        logging.info("Applying the patch to the original file...")
        # Apply the patch to a copy of the original file to test changes
        _, patched_file = tempfile.mkstemp()
        apply_patch(original_file_path, patch_file, patched_file)

        verify_same(tmp_file_path, patched_file)

        # If the diff was applied correctly, update the original file
        shutil.copy(patched_file, original_file_path)
        logging.info(f"Original file '{original_file_path}' updated with the patch.")
    else:
        logging.info("Skipping automatic patch application. Set 'easy_nodes.llm_debugging' to 'AutoFix' to apply the patch.")
197 | """ 198 | buffer_content = buffer.getvalue() 199 | prompt, original_source = create_llm_prompt(func, input_desc, buffer_content, exception) 200 | 201 | # Prepare the chat prompt for OpenAI 202 | messages = [ 203 | {"role": "system", "content": chatgpt_role_description}, 204 | {"role": "user", "content": prompt} 205 | ] 206 | 207 | openai_client = create_openai_client() 208 | 209 | model_name = config_service.get_config_value("easy_nodes.llm_model", "gpt-4o") 210 | 211 | # Send the prompt to OpenAI and get the response 212 | # Assuming send_prompt_to_openai returns a structured response with the modified function code 213 | response = send_prompt_to_openai( 214 | client=openai_client, 215 | max_tokens=4096, 216 | model=model_name, 217 | messages=messages, 218 | verbose=True 219 | ) 220 | 221 | # # Process the response to extract the modified source code 222 | function_code = response.choices[0].message.content 223 | # function_code = canned_response 224 | 225 | if function_code.strip().startswith("!"): 226 | logging.error(f"OpenAI was unable to provide a fix for the function: {function_code}") 227 | return 228 | 229 | function_code = remove_code_block_delimiters(function_code) 230 | logging.info(f"Modified function code:\n{function_code}") 231 | 232 | function_code = function_code[function_code.index("@ComfyFunc"):] 233 | 234 | # Split the function_code into top-level entries (classes and functions) 235 | top_level_entries, _ = split_top_level_entries(function_code) 236 | 237 | for entry_code in top_level_entries: 238 | replace_source_with_updates(entry_code, original_source) 239 | 240 | 241 | def create_patch(file_a_path, file_b_path, patch_file_path): 242 | """ 243 | Creates a patch file that can be applied to file A to produce file B. 244 | 245 | Parameters: 246 | - file_a_path: Path to the original file (file A). 247 | - file_b_path: Path to the modified file (file B). 248 | - patch_file_path: Path where the patch file will be saved. 
249 | """ 250 | with open(file_a_path, 'r') as file_a: 251 | file_a_lines = [line.rstrip('\n') for line in file_a.readlines()] 252 | 253 | with open(file_b_path, 'r') as file_b: 254 | file_b_lines = [line.rstrip('\n') for line in file_b.readlines()] 255 | 256 | diff = difflib.unified_diff( 257 | file_a_lines, file_b_lines, 258 | fromfile=file_a_path, tofile=file_b_path, 259 | lineterm='' 260 | ) 261 | 262 | with open(patch_file_path, 'w') as patch_file: 263 | for line in diff: 264 | patch_file.write(line + '\n') 265 | 266 | 267 | def find_first_indented_line(the_source): 268 | """ 269 | Finds the first indented line in the_source and returns a tuple containing 270 | the character position of the start of the line and the indentation level. 271 | """ 272 | char_location = 0 273 | for line in the_source.split("\n"): 274 | if line.strip() and not line.strip().startswith("#"): 275 | indent_level = len(line) - len(line.lstrip()) 276 | if indent_level > 0: 277 | return (char_location, indent_level) 278 | char_location += len(line) + 1 279 | return None 280 | 281 | 282 | def print_patch_with_color(patch_file_path): 283 | """ 284 | Prints the contents of a patch file with color highlighting using logging and colorama. 285 | 286 | Parameters: 287 | - patch_file_path: Path to the patch file containing changes. 
288 | """ 289 | # Open and read the patch file 290 | with open(patch_file_path, 'r') as patch_file: 291 | patch_lines = patch_file.readlines() 292 | 293 | # Iterate through each line in the patch file 294 | for line in patch_lines: 295 | if line.startswith('+'): 296 | logging.info(Fore.GREEN + line.rstrip()) # Green for additions 297 | elif line.startswith('-'): 298 | logging.info(Fore.RED + line.rstrip()) # Red for deletions 299 | elif line.startswith('@'): 300 | logging.info(Fore.CYAN + line.rstrip()) # Cyan for headers 301 | else: 302 | logging.info(line.rstrip()) # Default color for context and other lines 303 | 304 | 305 | def apply_patch(original_file_path, patch_file_path, output_file_path): 306 | """ 307 | Applies a patch file to an original file and writes the result to an output file. 308 | 309 | Parameters: 310 | - original_file_path: Path to the original file to which the patch will be applied. 311 | - patch_file_path: Path to the patch file containing changes. 312 | - output_file_path: Path where the modified file will be written. 
313 | """ 314 | cmd = ['patch', original_file_path, patch_file_path, '-o', output_file_path] 315 | subprocess.run(cmd, check=True) 316 | 317 | 318 | def replace_function_in_file(file_path, modified_source): 319 | with open(file_path, 'r') as file: 320 | file_source = file.read() 321 | 322 | entries, chunk_starts = split_top_level_entries(file_source) 323 | what_to_replace = extract_function_or_class_name(modified_source) 324 | 325 | for i, entry in enumerate(entries): 326 | if extract_function_or_class_name(entry) == what_to_replace: 327 | entries[i] = modified_source + "\n" 328 | break 329 | else: 330 | assert False, f"Function or class {what_to_replace} not found in file {file_path}" 331 | 332 | # Create a temporary file 333 | with tempfile.NamedTemporaryFile(mode='w', delete=False) as tmp_file: 334 | new_text = "\n".join(entries) 335 | tmp_file.write(new_text) 336 | tmp_file_path = tmp_file.name 337 | 338 | return tmp_file_path 339 | 340 | 341 | def send_prompt_to_openai(client: OpenAI, messages: list[dict], max_tokens: int, model: str, verbose: bool): 342 | # Validate messages format 343 | if not all(isinstance(message, dict) and 'role' in message and 'content' in message for message in messages): 344 | raise ValueError("All messages must be dictionaries with 'role' and 'content' keys") 345 | 346 | if verbose: 347 | print(Fore.BLUE + "Sending the following prompt to ChatGPT:" + Style.RESET_ALL) 348 | for message in messages: 349 | print( 350 | Fore.LIGHTBLACK_EX 351 | + f"{message['role'].title()}: {message['content']}" 352 | + Style.RESET_ALL 353 | ) 354 | 355 | response = client.chat.completions.create( 356 | model=model, messages=messages, max_tokens=max_tokens 357 | ) 358 | return response 359 | 360 | 361 | def get_source_from_exception_and_callable(exception, callable_obj, allowed_paths): 362 | source_dict = {} 363 | seen_names = set() 364 | 365 | tb = exception.__traceback__ 366 | 367 | # Convert allowed paths to absolute paths 368 | allowed_paths = 
[os.path.abspath(path) for path in allowed_paths] 369 | 370 | 371 | 372 | def add_exception_marker(source, lineno): 373 | if 0 <= lineno < len(source): 374 | source[lineno] = source[lineno].rstrip() + " # <------- NOTE(GPT): Exception here\n" 375 | 376 | def process_frame(frame, lineno, is_exception=True): 377 | module_name = frame.f_globals.get('__name__') 378 | file_path = os.path.abspath(frame.f_code.co_filename) 379 | if any(file_path.startswith(path) for path in allowed_paths): 380 | if module_name not in source_dict: 381 | source_dict[module_name] = [] 382 | try: 383 | # Check if the frame is associated with a class method 384 | class_name = frame.f_code.co_name.split('.')[0] 385 | if class_name != '': 386 | # Retrieve the source code for the entire class 387 | class_obj = frame.f_locals.get('self').__class__ 388 | try: 389 | source, _ = inspect.getsourcelines(class_obj) 390 | # Find the start line number of the class 391 | class_start_lineno = inspect.getsourcelines(class_obj)[1] 392 | # Adjust the line number relative to the class start 393 | class_lineno = lineno - class_start_lineno 394 | if 0 <= class_lineno < len(source) and is_exception: 395 | add_exception_marker(source, class_lineno) 396 | except TypeError: 397 | # Handle the case when class_obj is None 398 | source, start_lineno = inspect.getsourcelines(frame) 399 | if start_lineno <= lineno < start_lineno + len(source) and is_exception: 400 | add_exception_marker(source, lineno - start_lineno) 401 | else: 402 | # Retrieve the source code for the function or module level 403 | source, start_lineno = inspect.getsourcelines(frame) 404 | if start_lineno <= lineno < start_lineno + len(source) and is_exception: 405 | add_exception_marker(source, lineno - start_lineno) 406 | if ''.join(source) not in source_dict[module_name]: 407 | source_dict[module_name].append(''.join(source).strip()) 408 | seen_names.add(module_name + '.' 
+ frame.f_code.co_name) 409 | except OSError: 410 | pass 411 | 412 | # Process exception traceback 413 | while tb is not None: 414 | frame = tb.tb_frame 415 | lineno = frame.f_lineno 416 | process_frame(frame, lineno) 417 | tb = tb.tb_next 418 | 419 | # Process callable if it's not already in the source_dict 420 | callable_module_name = callable_obj.__module__ 421 | if callable_module_name not in source_dict: 422 | source_dict[callable_module_name] = [] 423 | try: 424 | callable_source, _ = inspect.getsourcelines(callable_obj) 425 | callable_source_str = ''.join(callable_source).strip() 426 | 427 | global_name = callable_module_name + '.' + callable_obj.__name__ 428 | logging.info(f"Global name: {global_name}") 429 | if global_name not in seen_names: 430 | # logging.info(f"Retrieving source for {callable_obj.__name__} : {callable_source_str}") 431 | # logging.info(f"Keys: {source_dict.keys()}") 432 | if callable_source_str not in source_dict[callable_module_name]: 433 | source_dict[callable_module_name].append(callable_source_str) 434 | else: 435 | logging.info(f"Skipping {callable_obj.__name__} as it's already in the source_dict") 436 | except OSError: 437 | logging.error(f"Failed to retrieve source for {callable_obj.__name__}") 438 | 439 | logging.error(f"Seen names: {seen_names}") 440 | 441 | return source_dict 442 | 443 | 444 | def create_llm_prompt(func, input_desc, buffer_content, e: Exception) -> tuple[str, dict[str, list[str]]]: 445 | """ 446 | Creates a prompt for the ChatGPT based on function details, input descriptions, 447 | execution logs, and the encountered exception. 448 | 449 | Args: 450 | func (Callable): The function that raised an exception during execution. 451 | input_desc (List[str]): Descriptions of the function's input parameters. 452 | buffer_content (str): Content captured from the execution's stdout and logging. 453 | e (Exception): The exception that was raised during function execution. 
def create_llm_prompt(func, input_desc, buffer_content, e: Exception) -> tuple[str, dict[str, list[str]]]:
    """
    Creates a prompt for ChatGPT based on function details, input descriptions,
    execution logs, and the encountered exception.

    Args:
        func (Callable): The function that raised an exception during execution.
        input_desc (List[str]): Descriptions of the function's input parameters.
        buffer_content (str): Content captured from the execution's stdout and logging.
        e (Exception): The exception that was raised during function execution.

    Returns:
        tuple[str, dict[str, list[str]]]: The assembled prompt text, and the
        mapping of module name -> original source snippets used to build it.
    """
    # Only consider source files in the same directory tree as the failing function.
    original_source = get_source_from_exception_and_callable(e, func, [os.path.dirname(func.__code__.co_filename)])
    combined_source = ""
    for k, v in original_source.items():
        logging.info(f"Original source for {k}")
        for item in v:
            combined_source += item + "\n\n\n"

    chat_gpt_prompt = [
        f"Details for function {func.__name__} in file {func.__code__.co_filename}:",
        "----------------------------------------------------",
        "Function argument name, type, value:",
        " " + "\n ".join(input_desc),
        "----------------------------------------------------",
        "Error:",
        f"```\n{str(e)}\n```",
        "----------------------------------------------------",
        "Stack trace:",
        f"```\n{traceback.format_exc()}\n```",
        "----------------------------------------------------",
        "Execution log:",
        f"```\n{buffer_content}```",
        "----------------------------------------------------",
        "Function source code:",
        f"```\n{combined_source}```",
    ]
    return "\n".join(chat_gpt_prompt), original_source
496 | """ 497 | base_name = original_file_path + ".bak" 498 | suffix = 0 499 | backup_file_path = f"{base_name}.{suffix:03d}" 500 | 501 | # Increment the suffix if the backup file already exists 502 | while os.path.exists(backup_file_path): 503 | suffix += 1 504 | backup_file_path = f"{base_name}.{suffix:03d}" 505 | 506 | # Copy the original file to the new backup file path 507 | with open(original_file_path, "rb") as original_file: 508 | with open(backup_file_path, "wb") as backup_file: 509 | backup_file.write(original_file.read()) 510 | 511 | logging.info(f"Backed up '{original_file_path}' to '{backup_file_path}'") 512 | return backup_file_path 513 | 514 | 515 | def remove_code_block_delimiters(text): 516 | """ 517 | Remove ```python at the start and ``` at the end of a code block. 518 | 519 | :param text: The text containing the code block delimiters. 520 | :return: The text with the code block delimiters removed. 521 | """ 522 | # This regex pattern matches ```python at the start of the string and ``` at the end of the string. 
def remove_code_block_delimiters(text):
    """
    Remove a leading ``` fence (with optional language tag) and a trailing
    ``` fence from a code block.

    :param text: The text containing the code block delimiters.
    :return: The text with the code block delimiters removed.
    """
    # Previously only a literal ```python fence was stripped; models also emit
    # bare ``` or other language tags, so accept any (possibly empty) tag.
    # [ \t]* (not \s*) after the closing fence keeps behavior identical to the
    # old pattern for inputs that end with a newline after the fence.
    pattern = r'^```[\w+-]*[ \t]*\n|\n```[ \t]*$'
    cleaned_text = re.sub(pattern, '', text, flags=re.MULTILINE)
    return cleaned_text


def verify_same(file_a_path, file_b_path):
    """
    Asserts that two files have identical contents, logging the first few
    differences when they do not.

    Raises:
        AssertionError: if the files differ.
    """
    with open(file_a_path, 'r') as new_file:
        updated_content_lines = new_file.readlines()

    with open(file_b_path, 'r') as tmp_file:
        patched_content_lines = tmp_file.readlines()

    # Use difflib to find differences
    differences = list(difflib.unified_diff(
        updated_content_lines, patched_content_lines,
        fromfile='updated_file', tofile='patched_file',
        lineterm=''
    ))

    if differences:
        for line in differences[:10]:
            logging.error(f"DIFFERENCE: {line}")
        # Raise explicitly: `assert False` is silently stripped under -O.
        raise AssertionError(f"The diff was not applied correctly between {file_a_path} and {file_b_path}")
    else:
        logging.info("Files are identical, no differences found.")
# Export the web directory so ComfyUI can pick up the JavaScript.
_web_path = os.path.join(os.path.dirname(__file__), "web")

if os.path.exists(_web_path):
    comfyui_nodes.EXTENSION_WEB_DIRS["ComfyUI-EasyNodes"] = _web_path
    logging.debug(f"Registered ComfyUI-EasyNodes web directory: '{_web_path}'")
else:
    logging.warning(f"ComfyUI-EasyNodes: Web directory not found at {_web_path}. Some features may not be available.")


class AutoDescriptionMode(Enum):
    """How much of a function's docstring is copied into the node description."""
    NONE = "none"
    BRIEF = "brief"
    FULL = "full"


class CheckSeverityMode(Enum):
    """How input/output verification failures are handled."""
    OFF = "off"
    WARN = "warn"
    FATAL = "fatal"


@dataclass
class EasyNodesConfig:
    """Configuration captured by initialize_easy_nodes() and consumed as nodes register."""
    default_category: str
    auto_register: bool
    docstring_mode: AutoDescriptionMode
    verify_level: CheckSeverityMode
    auto_move_tensors: bool
    NODE_CLASS_MAPPINGS: dict
    NODE_DISPLAY_NAME_MAPPINGS: dict
    num_registered: int = 0
    get_node_mappings_called: bool = False


# Keep track of the config from the last init, because different custom_nodes modules
# could possibly want different settings.
_current_config: EasyNodesConfig = None


# Changing the default of auto-register to false in 1.1, so catch if the user hasn't
# set it explicitly so we can give them a warning.
class AutoRegisterSentinel(enum.Enum):
    """Sentinel default distinguishing "not specified" from an explicit True/False."""
    DEFAULT = enum.auto()


def initialize_easy_nodes(default_category: str = "EasyNodes",
                          auto_register: bool = AutoRegisterSentinel.DEFAULT,
                          docstring_mode: AutoDescriptionMode = AutoDescriptionMode.FULL,
                          verify_level: CheckSeverityMode = CheckSeverityMode.WARN,
                          auto_move_tensors: bool = False):
    """
    Initializes the EasyNodes library with the specified configuration options.

    All nodes created after this call until the next call of init() will use the specified configuration options.

    Args:
        default_category (str, optional): The default category for nodes. Defaults to "EasyNodes".
        auto_register (bool, optional): Whether to automatically register nodes with ComfyUI
            (so you don't have to export). Defaults to a sentinel that behaves like True but
            logs a notice; the default will change to False in a future version. Experimental.
        docstring_mode (AutoDescriptionMode, optional): The mode for generating node docstrings.
            Defaults to AutoDescriptionMode.FULL.
        verify_level (CheckSeverityMode, optional): Whether to verify tensors for shape and
            data type according to ComfyUI type (MASK, IMAGE, etc). Runs on inputs and
            outputs. Defaults to CheckSeverityMode.WARN.
        auto_move_tensors (bool, optional): Whether to automatically move torch Tensors to the
            GPU before your function gets called, and then to the CPU on output. Defaults to False.
    """
    # If the user has already requested a prompt, that means auto-reload could conceivably re-import this module.
    # In that case, we should just return and not re-initialize.
    if _after_first_prompt:
        return

    global _current_config
    if _current_config:
        assert _current_config.num_registered > 0, "Re-initializing EasyNodes, but no Nodes have been registered since last initialization. This may indicate an issue."
        assert _current_config.auto_register or not _current_config.NODE_CLASS_MAPPINGS, (
            f"Auto-registration was turned off by previous initializer, but {len(_current_config.NODE_CLASS_MAPPINGS)} nodes were not picked up.")

    NODE_CLASS_MAPPINGS = {}
    NODE_DISPLAY_NAME_MAPPINGS = {}

    auto_register_message = ""
    if auto_register is AutoRegisterSentinel.DEFAULT:
        auto_register_message = " NOTE: Auto-registration not set explicitly, running in mixed-mode. The default will change to False in a future version. If already calling get_node_mappings(), you can ignore this message (or pass auto-register explicitly to make it go away)."

    logging.info(f"Initializing EasyNodes. Auto-registration: {auto_register}{auto_register_message}")

    if auto_register is True:
        # Register directly into ComfyUI's global node mappings.
        NODE_CLASS_MAPPINGS = comfyui_nodes.NODE_CLASS_MAPPINGS
        NODE_DISPLAY_NAME_MAPPINGS = comfyui_nodes.NODE_DISPLAY_NAME_MAPPINGS

    if auto_register is True or auto_register is AutoRegisterSentinel.DEFAULT:
        # Make sure the calling package exposes the mapping dicts ComfyUI looks for.
        frame = sys._getframe(1).f_globals['__name__']
        _ensure_package_dicts_exist(frame)

    _current_config = EasyNodesConfig(default_category, auto_register, docstring_mode, verify_level, auto_move_tensors,
                                      NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS)


def get_node_mappings():
    """Return (NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS) for manual export. May only be called once."""
    assert _current_config is not None, "EasyNodes not initialized. Call easy_nodes.initialize_easy_nodes() before using ComfyNode."
    assert _current_config.num_registered > 0, "No nodes registered. Use the @ComfyNode() decorator to register nodes after calling easy_nodes.initialize_easy_nodes()."
    assert _current_config.auto_register is not True, "Auto-node registration is on. Call easy_nodes.initialize_easy_nodes(auto_register=False) if you want to export manually."
    assert not _current_config.get_node_mappings_called, "get_node_mappings() already called. This function should only be called once."
    _current_config.get_node_mappings_called = True
    return _current_config.NODE_CLASS_MAPPINGS, _current_config.NODE_DISPLAY_NAME_MAPPINGS
147 | """ 148 | node_list = {} 149 | 150 | for workflow_name, display_name in _current_config.NODE_DISPLAY_NAME_MAPPINGS.items(): 151 | node_class = _current_config.NODE_CLASS_MAPPINGS[workflow_name] 152 | description = getattr(node_class, 'DESCRIPTION', '') 153 | 154 | # Remove the EasyNodesInfo from the description 155 | if description.startswith("EasyNodesInfo="): 156 | description = description.split("\n", 1)[1] 157 | 158 | node_list[display_name] = description.strip().split("\n")[0] 159 | 160 | with open(filepath, 'w') as f: 161 | json.dump(node_list, f, indent=2) 162 | 163 | print(f"Node list saved to {filepath}") 164 | 165 | 166 | def _get_curr_config() -> EasyNodesConfig: 167 | if _current_config is None: 168 | logging.warning("easy_nodes.initialize_easy_nodes() should be called prior to any other EasyNodes activity. Initializing now with easy_nodes.initialize_easy_nodes() for backwards compatibility.") 169 | easy_nodes.initialize_easy_nodes() 170 | return _current_config 171 | 172 | 173 | # Use as a default str value to show choices to the user. 
# Use as a default str value to show choices to the user.
class Choice(str):
    """A string default that carries a list of allowed options; the first option is the value."""

    def __new__(cls, choices: list[str]):
        obj = super().__new__(cls, choices[0])
        obj.choices = choices
        return obj

    def __str__(self):
        return self.choices[0]


class StringInput(str):
    """A string default carrying ComfyUI widget metadata (multiline, forced-input, etc.)."""

    def __new__(cls, value, multiline=False, force_input=False, optional=False, hidden=False):
        obj = super().__new__(cls, value)
        obj.value = value
        obj.multiline = multiline
        obj.force_input = force_input
        obj.optional = optional
        obj.hidden = hidden
        return obj

    def to_dict(self):
        """Return the ComfyUI INPUT_TYPES metadata dict for this input."""
        return {
            "default": self.value,
            "multiline": self.multiline,
            "display": "input",
            "forceInput": self.force_input,
        }


class NumberInput(float):
    """A numeric default carrying ComfyUI widget metadata (range, step, rounding, display mode)."""

    def __new__(
        cls,
        default,
        min=None,
        max=None,
        step=None,
        round=None,
        display: str = "number",
        optional=False,
        hidden=False,
        force_input=False
    ):
        # Validate the default against the declared range up front.
        if min is not None and default < min:
            raise ValueError(f"Value {default} is less than the minimum allowed {min}.")
        if max is not None and default > max:
            raise ValueError(
                f"Value {default} is greater than the maximum allowed {max}."
            )
        obj = super().__new__(cls, default)
        obj.min = min
        obj.max = max
        obj.display = display
        obj.step = step
        obj.round = round
        obj.optional = optional
        obj.hidden = hidden
        obj.force_input = force_input
        return obj

    def to_dict(self):
        """Return the ComfyUI INPUT_TYPES metadata dict, omitting unset (None) fields."""
        raw = {
            "default": self,
            "display": self.display,
            "min": self.min,
            "max": self.max,
            "step": self.step,
            "round": self.round,
            "forceInput": self.force_input,
        }
        return {key: val for key, val in raw.items() if val is not None}

    def __repr__(self):
        return f"{super().__repr__()} (Min: {self.min}, Max: {self.max})"


# Registries mapping Python annotations to ComfyUI type names (and back),
# plus per-type autoconversion / forced-input behavior.
_ANNOTATION_TO_COMFYUI_TYPE = {}
_SHOULD_AUTOCONVERT = {"str": True}
_DEFAULT_FORCE_INPUT = {}
_COMFYUI_TYPE_TO_ANNOTATION_CLS = {}

# Module state: flipped once the first prompt has been processed, plus
# bookkeeping for hot-reload support and live previews.
_after_first_prompt = False
_module_reload_times = {}
_module_dict = {}

_function_dict = {}
_function_checksums = {}
_function_update_times = {}

_curr_preview = {}
_curr_unique_id = None


class CustomVerifier:
    """Abstract base for input/output value verifiers; subclasses implement __init__ and __call__."""

    def __init__(self):
        raise NotImplementedError()

    def __call__(self, arg):
        raise NotImplementedError()
277 | class SubclassVerifier(CustomVerifier): 278 | def __init__(self, cls: type): 279 | self.cls = cls 280 | 281 | def __call__(self, arg): 282 | assert issubclass(self.cls, type(arg)) or issubclass(type(arg), self.cls), ( 283 | f"Expected a {self.cls.__name__}, but got received a value ({arg}) of type {type(arg).__name__}.") 284 | 285 | 286 | class TypeVerifier(CustomVerifier): 287 | def __init__(self, allowed_types: list): 288 | self.allowed_types = allowed_types 289 | 290 | def __call__(self, value): 291 | for allowed_type in self.allowed_types: 292 | if isinstance(value, allowed_type): 293 | return 294 | raise ValueError(f"Expected one of {self.allowed_types}, got {type(value)}") 295 | 296 | 297 | class AnythingVerifier(CustomVerifier): 298 | def __init__(self): 299 | pass 300 | def __call__(self, value): 301 | pass 302 | 303 | 304 | @dataclass 305 | class TensorVerifier(CustomVerifier): 306 | tensor_type_name: str 307 | allowed_shapes: list = None 308 | allowed_dims: list = None 309 | allowed_channels: list = None 310 | allowed_range: list = None 311 | 312 | def __call__(self, tensor): 313 | assert isinstance(tensor, torch.Tensor), f"Expected an {self.tensor_type_name}, got {type(tensor).__name__}" 314 | 315 | if self.allowed_range is not None: 316 | assert tensor.min() >= self.allowed_range[0] and tensor.max() <= self.allowed_range[1], f"{self.tensor_type_name} tensor must have values between {self.allowed_range[0]} and {self.allowed_range[1]}, got min {tensor.min()} and max {tensor.max()}" 317 | 318 | if self.allowed_shapes is not None: 319 | assert len(tensor.shape) in self.allowed_shapes, f"{self.tensor_type_name} tensor must have shape in {self.allowed_shapes}, got {tensor.shape}" 320 | 321 | if self.allowed_dims is not None: 322 | for i, dim in enumerate(self.allowed_dims): 323 | assert tensor.shape[i] <= dim, f"{self.tensor_type_name} tensor dimension {i} must be less than or equal to {dim}, got {tensor.shape[i]}" 324 | 325 | if self.allowed_channels 
is not None: 326 | assert tensor.shape[-1] in self.allowed_channels, f"{self.tensor_type_name} tensor must have the number of channels in {self.allowed_channels}, got {tensor.shape[-1]}" 327 | 328 | 329 | 330 | _custom_verifiers: Dict[str, CustomVerifier] = {} 331 | 332 | 333 | def _get_fully_qualified_name(cls: type) -> str: 334 | return f"{cls.__module__}.{cls.__qualname__}" 335 | 336 | 337 | def register_type( 338 | cls: type, 339 | name: str = None, 340 | should_autoconvert: bool = False, 341 | is_auto_register: bool = False, 342 | force_input: bool = False, 343 | verifier: CustomVerifier = None 344 | ): 345 | """Register a type for ComfyUI. 346 | 347 | Args: 348 | cls (type): The type to register. 349 | name (str): The name of the type. 350 | should_autoconvert (bool, optional): Whether the type should be automatically converted to the expected type before being passed to the wrapped function. Defaults to False. 351 | is_auto_register (bool, optional): Whether the type is automatically registered. Defaults to False. 352 | force_input (bool, optional): Whether the type should be forced as an input. Defaults to False. 353 | """ 354 | if _after_first_prompt: 355 | return 356 | 357 | if name is None: 358 | name = cls.__name__ 359 | 360 | key = _get_fully_qualified_name(cls) 361 | if not is_auto_register: 362 | assert key not in _ANNOTATION_TO_COMFYUI_TYPE, f"Type {cls} already registered." 363 | # assert name not in _COMFYUI_TYPE_TO_ANNOTATION, f"Type {name} already registered." 364 | 365 | if key in _ANNOTATION_TO_COMFYUI_TYPE: 366 | return 367 | 368 | # Assume the first type registered is the most general, and ignore later ones. 
369 | # This means you can register multiple types of string-like object as STRING for 370 | # semantic purposes, but return values will just be checked if they're isinstance(v, str) 371 | if name not in _COMFYUI_TYPE_TO_ANNOTATION_CLS: 372 | _COMFYUI_TYPE_TO_ANNOTATION_CLS[name] = cls 373 | if verifier: 374 | _custom_verifiers[name] = verifier 375 | else: 376 | _custom_verifiers[name] = SubclassVerifier(cls) 377 | elif verifier: 378 | logging.warning(f"Custom verifier for {name} already registered. Ignoring new one.") 379 | 380 | _ANNOTATION_TO_COMFYUI_TYPE[key] = name 381 | _SHOULD_AUTOCONVERT[key] = should_autoconvert 382 | _DEFAULT_FORCE_INPUT[key] = force_input 383 | 384 | 385 | # Made to match any and all other types. 386 | class AnyType(str): 387 | def __ne__(self, __value: object) -> bool: 388 | return False 389 | 390 | any_type = AnyType("*") 391 | 392 | 393 | def _get_type_str(the_type: type) -> str: 394 | key = _get_fully_qualified_name(the_type) 395 | if key not in _ANNOTATION_TO_COMFYUI_TYPE and get_origin(the_type) is list: 396 | return _get_type_str(get_args(the_type)[0]) 397 | 398 | if key not in _ANNOTATION_TO_COMFYUI_TYPE and the_type is not inspect._empty: 399 | logging.warning( 400 | f"Type '{the_type}' not registered with ComfyUI, treating as wildcard" 401 | ) 402 | raise ValueError(f"Type '{the_type}' not registered with ComfyUI") 403 | 404 | type_str = _ANNOTATION_TO_COMFYUI_TYPE.get(key, any_type) 405 | return type_str 406 | 407 | 408 | _gpu_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 409 | _cpu_device = torch.device("cpu") 410 | 411 | 412 | def show_image(image: torch.Tensor, type: str = None): 413 | if type is None: 414 | retain_previews = config_service.get_config_value("easy_nodes.RetainPreviews", False) 415 | type = "output" if retain_previews else "temp" 416 | 417 | images = image 418 | for image in images: 419 | if len(image.shape) == 2: 420 | image = image.unsqueeze(-1) 421 | 422 | if image.shape[-1] == 1: 
423 | image = torch.cat([image] * 3, axis=-1) 424 | 425 | image = image.cpu().numpy() 426 | 427 | image = Image.fromarray(np.clip(image * 255.0, 0, 255).astype(np.uint8)) 428 | 429 | import folder_paths 430 | 431 | unique = hashlib.md5(image.tobytes()).hexdigest()[:8] 432 | 433 | filename = f"preview-{_curr_unique_id}_{unique}.png" 434 | 435 | # TODO: make configurable. 436 | subfolder = "ComfyUI-EasyNodes" 437 | full_output_path = Path(folder_paths.get_directory_by_type(type)) / subfolder / filename 438 | 439 | full_output_path.parent.mkdir(parents=True, exist_ok=True) 440 | image.save(str(full_output_path), compress_level=4) 441 | 442 | if "images" not in _curr_preview: 443 | _curr_preview["images"] = [] 444 | _curr_preview["images"].append({"filename": filename, "subfolder": subfolder, "type": type}) 445 | 446 | 447 | def show_text(text: str): 448 | """Add a preview text to the ComfyUI node. 449 | 450 | Args: 451 | text (str): The text to display. 452 | """ 453 | if "text" not in _curr_preview: 454 | _curr_preview["text"] = [] 455 | _curr_preview["text"].append(text) 456 | 457 | 458 | def _verify_values(config: EasyNodesConfig, 459 | list_type: str, 460 | values: list[any], 461 | types: list[str], 462 | names: list[str], 463 | code_origin_loc: str, 464 | debug: bool=False): 465 | for i, val in enumerate(values): 466 | param_type = types[i] 467 | 468 | # It's a Choice. 
def _verify_values(config: EasyNodesConfig,
                   list_type: str,
                   values: list[any],
                   types: list[str],
                   names: list[str],
                   code_origin_loc: str,
                   debug: bool=False):
    """Run the registered CustomVerifier for each value against its declared type.

    Behavior depends on config.verify_level: WARN logs failures, FATAL re-raises
    them as ValueError; any other level skips verification entirely.

    Args:
        list_type: "INPUT" or "OUTPUT" — used only in messages.
        values/types/names: parallel lists of values, ComfyUI type strings, and
            display names (names may be empty/None).
        code_origin_loc: source-location suffix appended to error messages.
    """
    for i, val in enumerate(values):
        param_type = types[i]

        # It's a Choice.
        if isinstance(param_type, list):
            continue

        if val is None:
            continue

        if debug:
            logging.info(f"Result {i} is {type(val)}, expected {types[i]}")

        # Lists are verified element-wise, recursively.
        def recursive_verify(verifier: callable, val: any):
            if isinstance(val, list):
                for v in val:
                    recursive_verify(verifier, v)
            else:
                verifier(val)

        # def verify(verifier: callable, val: any, return_name: str, severity: CheckSeverityMode):

        param_name = f"'{names[i]}'" if names else f"{list_type}_{i}"

        if param_type in _custom_verifiers:
            if config.verify_level in [CheckSeverityMode.WARN, CheckSeverityMode.FATAL]:
                try:
                    recursive_verify(_custom_verifiers[param_type], val)
                except Exception as e:
                    error_str = f"Error verifying {list_type} {param_name}: {str(e)}\n{code_origin_loc}"
                    logging.error(error_str)
                    if config.verify_level == CheckSeverityMode.FATAL:
                        # `from None` keeps the already-logged traceback out of the chain.
                        raise ValueError(error_str) from None
        else:
            logging.warning(f"No verifier for {param_type}. Skipping verification.")


def _move_all_tensors_to_device(device: torch.device, tensor_or_tensors: Union[any, list]):
    """Recursively move a tensor, or any nesting of lists of tensors, to `device`.

    Non-tensor values pass through unchanged.
    """
    if isinstance(tensor_or_tensors, torch.Tensor):
        return tensor_or_tensors.to(device)
    elif isinstance(tensor_or_tensors, list):
        return [_move_all_tensors_to_device(device, a) for a in tensor_or_tensors]
    return tensor_or_tensors


def _image_info(image: Union[torch.Tensor, np.ndarray]) -> str:
    """Return a one-line stats summary of a tensor/ndarray for log messages.

    NOTE(review): returns None for any other input type — callers appear to
    gate on isinstance(torch.Tensor) first; verify before widening use.
    """
    if isinstance(image, torch.Tensor):
        # Integer/bool dtypes can't produce mean(); compute stats on a float copy.
        if image.dtype in [torch.long, torch.int, torch.int32, torch.int64, torch.bool]:
            image = image.float()

        return (f"shape={image.shape} dtype={image.dtype} min={image.min()} max={image.max()}"
                + f" mean={image.mean()} sum={image.sum()} device={image.device}")
    elif isinstance(image, np.ndarray):
        return f"shape={image.shape}, dtype={image.dtype}, min={image.min()}, max={image.max()} mean={image.mean()} sum={image.sum()} "


class BufferHandler(logging.Handler):
    """logging.Handler that appends formatted records to a StringIO buffer."""

    def __init__(self, buffer):
        logging.Handler.__init__(self)
        self.buffer = buffer  # file-like object; written to by emit()

    def emit(self, record):
        msg = self.format(record)
        self.buffer.write(msg + '\n')


class Tee(object):
    """Minimal file-like object that duplicates writes to several underlying files
    (used to mirror sys.stdout into the per-node log buffer)."""

    def __init__(self, *files):
        self.files = files

    def write(self, obj):
        for f in self.files:
            f.write(obj)

    def flush(self):
        for f in self.files:
            f.flush()


def _compute_function_checksum(func_to_check):
    """Return a SHA-256-derived int checksum of a function's source, or 0 if the
    source is unavailable (e.g. built-ins, exec'd code)."""
    try:
        source_code = inspect.getsource(func_to_check)
    except Exception as e:
        logging.debug(f"Could not get source code for {func_to_check}: {e}")
        return 0
    return int(hashlib.sha256(source_code.encode('utf-8')).hexdigest(), 16)
timestamp, f"Function {func.__qualname__} already registered with later timestamp! {_function_update_times[func.__qualname__]} < {timestamp}" 556 | assert _function_checksums[func.__qualname__] != checksum, f"Function {func.__qualname__} already registered with same checksum! {_function_checksums[func.__qualname__]} == {checksum}" 557 | 558 | _function_dict[func.__qualname__] = func 559 | _function_checksums[func.__qualname__] = checksum 560 | _function_update_times[func.__qualname__] = timestamp 561 | 562 | 563 | def _get_latest_version_of_module(module_name: str, debug: bool = False): 564 | if module_name not in _module_dict: 565 | _module_dict[module_name] = importlib.import_module(module_name) 566 | module = _module_dict[module_name] 567 | 568 | module_file = module.__file__ 569 | 570 | # First reload the module if it needs to be reloaded. 571 | current_modified_time = os.path.getmtime(module_file) 572 | module_reload_time = _module_reload_times.get(module_name, 0) 573 | if current_modified_time > module_reload_time: 574 | time_diff = current_modified_time - module_reload_time 575 | logging.info(f"{Fore.LIGHTMAGENTA_EX}Reloading module {module_name} because file was edited. ({time_diff:.1f}s between versions){Fore.RESET}") 576 | # Set _has_prompt_been_requested so that any calls to ComfyFunc will get 577 | # ignored rather than tripping the already-registered assert. 
578 | global _after_first_prompt 579 | _after_first_prompt = True 580 | importlib.reload(module) 581 | _module_reload_times[module_name] = current_modified_time 582 | elif debug: 583 | logging.info(f"{module_name} up to date: {current_modified_time} vs {module_reload_time}") 584 | 585 | return module, current_modified_time 586 | 587 | 588 | def _get_latest_version_of_func(func: callable, debug: bool = False): 589 | reload_modules = config_service.get_config_value("easy_nodes.ReloadOnEdit", False) 590 | if reload_modules and func.__module__: 591 | module, current_modified_time = _get_latest_version_of_module(func.__module__, debug) 592 | 593 | old_checksum = _function_checksums.get(func.__qualname__, 0) 594 | 595 | # Now pull the updated function from the module. 596 | last_function_update_time = _function_update_times.get(func.__qualname__, 0) 597 | if current_modified_time > last_function_update_time: 598 | time_diff = current_modified_time - last_function_update_time 599 | if hasattr(module, func.__name__): 600 | updated_func = getattr(module, func.__name__) 601 | current_checksum = _compute_function_checksum(updated_func) 602 | if current_checksum != old_checksum: 603 | logging.info(f"{Fore.LIGHTMAGENTA_EX}Updating {func.__qualname__} because function was modified. 
def get_formatter():
    """Build the colored log formatter; [[LINK:path:line]] is parsed by the
    web frontend into a clickable source link."""
    fmt="%(levelname)s [[LINK:%(pathname)s:%(lineno)s]] %(funcName)s: %(message)s"

    coloredFormatter = coloredlogs.ColoredFormatter(
        fmt=fmt,
        level_styles=dict(
            debug=dict(color="white"),
            info=dict(color="white"),
            warning=dict(color="yellow", bright=True),
            error=dict(color="red", bold=True, bright=True),
            critical=dict(color="black", bold=True, background="red"),
        ),
        field_styles=dict(
            funcName=dict(color="cyan"),
        ),
    )
    return coloredFormatter


def _call_function_and_verify_result(config: EasyNodesConfig, func: callable,
                                     args, kwargs, debug, input_desc, adjusted_return_types,
                                     wrapped_name, node_class, return_names=None):
    """Invoke a node's function with log capture, verify/convert its outputs,
    and wrap any previews into ComfyUI's {"ui": ..., "result": ...} shape.

    With LLM debugging in "AutoFix" mode, failed calls are retried up to
    easy_nodes.max_tries times; otherwise exceptions propagate after logging
    the formatted stack.
    """
    try_count = 0
    llm_debugging_mode = config_service.get_config_value("easy_nodes.llm_debugging", "Off")
    llm_debugging_enabled = llm_debugging_mode != "Off"
    max_tries = int(config_service.get_config_value("easy_nodes.max_tries", 1)) if llm_debugging_mode == "AutoFix" else 1

    logging.debug(f"Running {func.__qualname__} for {_curr_unique_id} with {max_tries} tries. {llm_debugging_enabled}")

    prompt_id = server.PromptServer.instance.last_prompt_id
    save_logs = True

    while try_count < max_tries:
        try_count += 1
        try:
            return_line_number = func.__code__.co_firstlineno
            # Capture both logging output and stdout into a per-node buffer
            # that the web UI can stream.
            node_logger = logging.getLogger()
            node_logger.setLevel(logging.INFO)
            buffer = io.StringIO()
            buffer_wrapper = log_streaming.CloseableBufferWrapper(buffer)

            buffer_handler = BufferHandler(buffer)
            node_logger.addHandler(buffer_handler)
            buffer_handler.setFormatter(get_formatter())
            sys.stdout = Tee(sys.stdout, buffer)

            if save_logs:
                log_streaming.add_log_buffer(str(_curr_unique_id), node_class, prompt_id, input_desc,
                                             buffer_wrapper)

            _curr_preview.clear()
            result = func(*args, **kwargs)

            code_origin_loc = f"\n Source: {func.__qualname__} {func.__code__.co_filename}:{return_line_number}"
            num_expected_returns = len(adjusted_return_types)
            if num_expected_returns == 0:
                assert result is None, f"{wrapped_name}: Return value is not None, but no return type specified.\n{code_origin_loc}"
                return (None,)

            # Normalize a bare return value into a 1-tuple.
            if not isinstance(result, tuple):
                result = (result,)
            assert len(result) == len(
                adjusted_return_types
            ), f"{wrapped_name}: Number of return values {len(result)} does not match number of return types {len(adjusted_return_types)}\n{code_origin_loc}"

            for i, ret in enumerate(result):
                if ret is None:
                    logging.warning(f"Result {i} is None")

            new_result = _move_all_tensors_to_device(_cpu_device, list(result)) if config.auto_move_tensors else list(result)
            _verify_values(config, "OUTPUT", result, adjusted_return_types, return_names, code_origin_loc, debug=debug)

            for i, ret in enumerate(new_result):
                new_result[i] = maybe_autoconvert(adjusted_return_types[i], ret)

            result = tuple(new_result)

            # If preview items were added, wrap the result.
            if _curr_preview:
                result = {"ui": _curr_preview.copy(), "result": result}
            return result

        except Exception as e:
            if llm_debugging_enabled:
                logging.info("Handling exception with LLM debugging")
                llm_debugging.process_exception_logic(func, e, input_desc, buffer)
                logging.info("Handled exception with LLM debugging")

            if try_count == max_tries:
                # Calculate the number of interesting stack levels.
                _, _, tb = sys.exc_info()
                the_stack = traceback.extract_tb(tb)
                e.num_interesting_levels = len(the_stack) - 1
                formatted_stack = "\n".join(traceback.format_exception(type(e), e, tb))
                logging.warning(f"\n\nException in node {_curr_unique_id} ({node_class}):\n{formatted_stack}")
                raise e

        finally:
            # Always restore the root logger and stdout, and close the stream buffer.
            node_logger.removeHandler(buffer_handler)
            sys.stdout = sys.__stdout__
            if buffer_wrapper:
                buffer_wrapper.close()

    assert False, "Should never reach this point"


def _ensure_package_dicts_exist(module_name: str):
    """Make sure the node's parent package exposes NODE_CLASS_MAPPINGS and
    NODE_DISPLAY_NAME_MAPPINGS (creating empty dicts and __all__ entries as
    needed) so ComfyUI's loader can discover auto-registered nodes.

    Raises:
        Exception: re-raises the underlying failure after logging guidance to
        use manual registration instead.
    """
    # module_name is dotted; the node module's package is the second-to-last part.
    package_name = module_name.split('.')[-2]

    try:
        package = import_module(package_name)

        if not package.__file__.endswith("__init__.py"):
            raise ValueError(f"Package {package_name} is not a package. Cannot export.")

        if not hasattr(package, '__all__'):
            package.__all__ = []

        def add_if_not_there(dict_name):
            if dict_name not in package.__all__:
                package.__all__.append(dict_name)
            if not hasattr(package, dict_name):
                setattr(package, dict_name, {})

        add_if_not_there('NODE_CLASS_MAPPINGS')
        add_if_not_there('NODE_DISPLAY_NAME_MAPPINGS')
    except Exception as e:
        error_str = (f"Could not automatically find import package {package_name}. "
                     + "Try initializing with easy_nodes.init(auto_register=False) and export manually in your __init__.py "
                     + "with easy_nodes.get_node_mappings()")
        logging.error(error_str)
        raise e
" 740 | + "Try initializing with easy_nodes.init(auto_register=False) and export manually in your __init__.py " 741 | + "with easy_nodes.get_node_mappings()") 742 | logging.error(error_str) 743 | raise e 744 | 745 | 746 | def maybe_autoconvert(comfyui_type_name: str, arg: any): 747 | # Choices don't come with a registered type, they're just a list of strings. 748 | if isinstance(comfyui_type_name, list): 749 | return arg 750 | 751 | if _SHOULD_AUTOCONVERT.get(comfyui_type_name, False): 752 | comfyui_type = _COMFYUI_TYPE_TO_ANNOTATION_CLS[comfyui_type_name] 753 | if isinstance(arg, list): 754 | arg = [comfyui_type(el) for el in arg] 755 | else: 756 | arg = comfyui_type(arg) 757 | return arg 758 | 759 | 760 | def ComfyNode( 761 | category: str = None, 762 | display_name: str = None, 763 | workflow_name: str = None, 764 | description: str = None, 765 | is_output_node: bool = False, 766 | return_types: list = None, 767 | return_names: list[str] = None, 768 | validate_inputs: Callable = None, 769 | is_changed: Callable = None, 770 | always_run: bool = False, 771 | debug: bool = False, 772 | color: str = None, 773 | bg_color: str = None, 774 | width: int = None, 775 | height: int = None, 776 | ): 777 | """ 778 | Decorator function for creating ComfyUI nodes. 779 | 780 | Args: 781 | category (str): The category of the node. 782 | display_name (str): The display name of the node. If not provided, it will be generated from the function name. 783 | workflow_name (str): The workflow name of the node. If not provided, it will be generated from the function name. 784 | description (str): The description of the node. If not set, it will be generated from the function docstring. 785 | is_output_node (bool): Indicates whether the node is an output node and should be run regardless of if anything depends on it. 786 | return_types (list): A list of types to return. If not provided, it will be inferred from the function's annotations. 
def ComfyNode(
    category: str = None,
    display_name: str = None,
    workflow_name: str = None,
    description: str = None,
    is_output_node: bool = False,
    return_types: list = None,
    return_names: list[str] = None,
    validate_inputs: Callable = None,
    is_changed: Callable = None,
    always_run: bool = False,
    debug: bool = False,
    color: str = None,
    bg_color: str = None,
    width: int = None,
    height: int = None,
):
    """
    Decorator function for creating ComfyUI nodes.

    Args:
        category (str): The category of the node.
        display_name (str): The display name of the node. If not provided, it will be generated from the function name.
        workflow_name (str): The workflow name of the node. If not provided, it will be generated from the function name.
        description (str): The description of the node. If not set, it will be generated from the function docstring.
        is_output_node (bool): Indicates whether the node is an output node and should be run regardless of if anything depends on it.
        return_types (list): A list of types to return. If not provided, it will be inferred from the function's annotations.
        return_names (list[str]): The names of the outputs. Must match the number of return types.
        validate_inputs (Callable): A function used to validate the inputs of the node.
        is_changed (Callable): A function used to determine if the node's inputs have changed.
        always_run (bool): Indicates whether the node should always run, regardless of whether its inputs have changed.
        debug (bool): Indicates whether to enable debug logging for this node.
        color (str): The color of the node.
        bg_color (str): The background color of the node.
        width (int): The default width of the node.
        height (int): The default height of the node.

    Returns:
        A callable used that can be used with a function to create a ComfyUI node.
    """
    curr_config = _get_curr_config()

    if not category:
        category = curr_config.default_category

    def decorator(func: callable):
        # After the first prompt, decoration is a no-op: hot-reload re-imports
        # user modules and we must not re-register their nodes.
        if _after_first_prompt:
            # Sorry, we're closed for business.
            return func

        assert func.__qualname__ not in _function_dict, f"Function {func.__qualname__} already registered"

        # NOTE(review): unreachable — the assert above fires first for
        # duplicates; presumably a leftover soft-fail path. Confirm intent.
        if func.__qualname__ in _function_dict:
            return func

        modify_time = os.path.getmtime(func.__code__.co_filename) if os.path.exists(func.__code__.co_filename) else 0
        _module_reload_times[func.__module__] = modify_time
        _register_function(func, _compute_function_checksum(func), modify_time)

        filename = func.__code__.co_filename

        wrapped_name = func.__qualname__ + "_comfynode_wrapper"
        # NOTE(review): `filename` above is unused and the location below is a
        # literal "(unknown)" — looks like it was meant to be {filename}; confirm.
        source_location = f"(unknown):{func.__code__.co_firstlineno}"
        code_origin_loc = f"\n Source: {func.__qualname__} {source_location}"
        original_is_changed = is_changed

        def wrapped_is_changed(*args, **kwargs):
            # Combines the source checksum with the user's is_changed value so
            # editing the code OR changing inputs both trigger re-execution.
            if always_run:
                if debug:
                    logging.info(f"Always running {func.__qualname__}")
                return float("nan")

            unique_id = kwargs["unique_id"]
            updated_func = _get_latest_version_of_func(func, debug)
            current_checksum = _function_checksums[updated_func.__qualname__]
            if debug:
                logging.info(f"{func.__qualname__} {unique_id} is_changed: Checking if {original_is_changed} with args {args} and kwargs {kwargs.keys()}")
                for key in kwargs.keys():
                    logging.info(f"kwarg {key}: {type(kwargs[key])} {kwargs[key].shape if isinstance(kwargs[key], torch.Tensor) else ''}")

            try:
                if original_is_changed:
                    # Only pass through the kwargs the user's is_changed accepts.
                    original_is_changed_params = inspect.signature(original_is_changed).parameters
                    filtered_kwargs = {key: value for key, value in kwargs.items() if key in original_is_changed_params}
                    original_num = original_is_changed(*args, **filtered_kwargs)
                    original_num = hash(original_num)
                else:
                    original_num = 0

                if math.isnan(original_num):
                    return float("nan")
            except Exception as e:
                logging.error(f"Error in is_changed function: {e} {func.__qualname__} {args} {kwargs.keys()}")
                raise e

            is_changed_val = current_checksum ^ original_num

            if debug:
                logging.info(f"{Fore.GREEN}{func.__qualname__}{Fore.RESET} {Fore.WHITE}{unique_id}{Fore.RESET} is_changed={Fore.LIGHTMAGENTA_EX}{is_changed_val}")
            return is_changed_val

        if debug:
            logger = logging.getLogger(wrapped_name)
            logger.info(
                "-------------------------------------------------------------------"
            )
            logger.info(f"Decorating {func.__qualname__}")

        node_class = _get_node_class(func)

        is_static = _is_static_method(node_class, func.__name__)
        is_cls_mth = _is_class_method(node_class, func.__name__)
        is_member = node_class is not None and not is_static and not is_cls_mth

        required_inputs, hidden_inputs, optional_inputs, input_is_list_map, input_type_map = (
            _infer_input_types_from_annotations(func, is_member, debug)
        )

        if debug:
            logger.info(f"{func.__name__} Is static: {is_static} Is member: {is_member} Class method: {is_cls_mth}")
            logger.info(f"Required inputs: {required_inputs} optional: {optional_inputs} input_is_list: {input_is_list_map} input_type_map: {input_type_map}")

        adjusted_return_types = []
        output_is_list = []
        if return_types is not None:
            adjusted_return_types, output_is_list = _infer_return_types_from_annotations(
                return_types, debug
            )
        else:
            adjusted_return_types, output_is_list = _infer_return_types_from_annotations(func, debug)

        if return_names:
            assert len(return_names) == len(
                adjusted_return_types
            ), f"Number of output names must match number of return types. Got {len(return_names)} names and {len(return_types)} return types."

        # There's not much point in a node that doesn't have any outputs
        # and isn't an output itself, so auto-promote in that case.
        force_output = len(adjusted_return_types) == 0
        name_parts = [x.title() for x in func.__name__.split("_")]
        input_is_list = any(input_is_list_map.values())

        sig = inspect.signature(func)
        param_names = list(sig.parameters.keys())

        final_display_name = display_name if display_name else " ".join(name_parts)
        final_workflow_name = workflow_name if workflow_name else "".join(name_parts)

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # This is what ComfyUI actually calls at execution time.
            if curr_config.auto_register == AutoRegisterSentinel.DEFAULT and curr_config.get_node_mappings_called is False:
                logging.warning("EasyNodes auto-registration not explicitly enabled, and easy_nodes.get_node_mappings() has not been called. "
                                + "In the future auto_register will default to False, so please set explicitly via easy_nodes.initialize_easy_nodes(auto_register=True), "
                                + "or use easy_nodes.get_node_mappings() to export to ComfyUI after all ComfyNodes have been created.")

            if debug:
                logger.info(
                    f"Calling {func.__name__} with {len(args)} args and {len(kwargs)} kwargs. Is class method: {is_cls_mth}"
                )
                for i, arg in enumerate(args):
                    logger.info(f"arg {i}: {type(arg)}")
                for key, arg in kwargs.items():
                    logger.info(f"kwarg {key}: {type(arg)}")

            all_inputs = {**required_inputs, **optional_inputs}

            input_desc = []
            keys = list(kwargs.keys())

            for key in keys:
                arg = kwargs[key]
                # Remove extra_pnginfo and unique_id from the kwargs if they weren't requested by the user.
                if key == "unique_id":
                    # logging.info(f"Setting unique_id to {arg}")
                    global _curr_unique_id
                    if isinstance(arg, list):
                        _curr_unique_id = arg[0]
                    else:
                        _curr_unique_id = arg

                if key not in param_names:
                    # logging.info(f"Removing extra kwarg {key}")
                    kwargs.pop(key)
                    continue

                arg = _move_all_tensors_to_device(_gpu_device, arg) if curr_config.auto_move_tensors else arg

                # TODO: Remove this special handling for mask once I remember what needed it.
                if (key in required_inputs and required_inputs[key][0] == "MASK"):
                    if isinstance(arg, torch.Tensor):
                        if len(arg.shape) == 2:
                            arg = arg.unsqueeze(0)
                    elif isinstance(arg, list):
                        for i, a in enumerate(arg):
                            if len(a.shape) == 2:
                                arg[i] = a.unsqueeze(0)

                # TODO: Move this into _call_function_and_verify_result
                if key in all_inputs:
                    arg = maybe_autoconvert(all_inputs[key][0], arg)

                desc_name = _get_fully_qualified_name(type(arg))
                if isinstance(arg, torch.Tensor):
                    input_desc.append(f"{key} ({desc_name}): {_image_info(arg)}")
                else:
                    input_desc.append(f"{key} ({desc_name}): {arg}")

                kwargs[key] = arg

            # TODO: Move this into _call_function_and_verify_result
            input_names = [key for key in list(kwargs.keys()) if key not in hidden_inputs]
            input_values = [kwargs[key] for key in input_names]
            input_types = [all_inputs[key][0] for key in input_names]
            _verify_values(curr_config,
                           "INPUT",
                           input_values,
                           input_types,
                           input_names,
                           code_origin_loc, debug=debug)

            # For some reason self still gets passed with class methods.
            if is_cls_mth:
                args = args[1:]

            # If the python function didn't annotate it as a list,
            # but INPUT_TYPES does, then we need to convert make it not a list.
            if input_is_list:
                for arg_name in kwargs.keys():
                    if debug:
                        logging.info(f"kwarg: {arg_name} {len(kwargs[arg_name])}")
                    if not input_is_list_map[arg_name]:
                        assert len(kwargs[arg_name]) == 1, f"Expected a single value for {arg_name}, but got {len(kwargs[arg_name])}"
                        kwargs[arg_name] = kwargs[arg_name][0]

            latest_func = _get_latest_version_of_func(func, debug)

            result = _call_function_and_verify_result(curr_config, latest_func, args, kwargs, debug, input_desc, adjusted_return_types, wrapped_name,
                                                      node_class=final_workflow_name, return_names=return_names)

            return result

        if node_class is None or is_static:
            wrapper = staticmethod(wrapper)

        if is_cls_mth:
            wrapper = classmethod(wrapper)

        the_description = description
        if the_description is None:
            the_description = ""
            if curr_config.docstring_mode is not AutoDescriptionMode.NONE and func.__doc__:
                the_description = func.__doc__.strip()
                if curr_config.docstring_mode == AutoDescriptionMode.BRIEF:
                    the_description = the_description.split("\n")[0]

        _create_comfy_node(
            wrapped_name,
            category,
            node_class,
            wrapper,
            final_display_name,
            final_workflow_name,
            required_inputs,
            hidden_inputs,
            optional_inputs,
            input_is_list,
            adjusted_return_types,
            return_names,
            output_is_list,
            description=the_description,
            is_output_node=is_output_node or force_output,
            validate_inputs=validate_inputs,
            is_changed=wrapped_is_changed,
            color=color,
            bg_color=bg_color,
            width=width,
            height=height,
            debug=debug,
            source_location=source_location,
            easy_nodes_config=curr_config,
        )

        # Return the original function so it can still be used as normal (only ComfyUI sees the wrapper function).
        return func

    return decorator
1043 | return func 1044 | 1045 | return decorator 1046 | 1047 | 1048 | def _annotate_input( 1049 | param_name, annotation, default=inspect.Parameter.empty, debug=False 1050 | ) -> tuple[tuple, bool, bool]: 1051 | type_name = _get_type_str(annotation) 1052 | 1053 | if isinstance(default, Choice): 1054 | return (default.choices,), False, False 1055 | 1056 | if debug: 1057 | logging.info(f"{param_name} Default: {default} type: {type(default)} {isinstance(default, float)} {isinstance(default, NumberInput)}") 1058 | 1059 | if isinstance(default, str) and not isinstance(default, StringInput): 1060 | default = StringInput(default) 1061 | elif isinstance(default, (int, float)) and not isinstance(default, NumberInput): 1062 | default = NumberInput(default) 1063 | 1064 | if isinstance(default, StringInput) or isinstance(default, NumberInput): 1065 | return (type_name, default.to_dict()), default.optional, default.hidden 1066 | 1067 | metadata = {} 1068 | if default is None: 1069 | # If the user specified None explicitly, assume they're ok with it being optional. 1070 | metadata["optional"] = True 1071 | metadata["forceInput"] = True 1072 | elif default == inspect.Parameter.empty: 1073 | # If they didn't give it a default value at all, then forceInput so that the UI 1074 | # doesn't end up giving them a default that they may not want. 1075 | metadata["forceInput"] = True 1076 | else: 1077 | metadata["default"] = default 1078 | 1079 | # This is the exception where they may have given it a default, but we still 1080 | # want to force it as an input because changing that value will be rare. 1081 | if _DEFAULT_FORCE_INPUT.get(_get_fully_qualified_name(annotation), False): 1082 | metadata["forceInput"] = True 1083 | 1084 | return (type_name, metadata), default != inspect.Parameter.empty, False 1085 | 1086 | 1087 | def _infer_input_types_from_annotations(func, skip_first, debug=False): 1088 | """ 1089 | Infer input types based on function annotations. 
1090 | """ 1091 | input_is_list = {} 1092 | input_type_map = {} 1093 | sig = inspect.signature(func) 1094 | required_inputs = {} 1095 | hidden_input_types = {"unique_id": "UNIQUE_ID", "extra_pnginfo": "EXTRA_PNGINFO"} 1096 | optional_input_types = {} 1097 | 1098 | params = list(sig.parameters.items()) 1099 | 1100 | if debug: 1101 | logging.info(f"ALL PARAMS {params}") 1102 | 1103 | if skip_first: 1104 | if debug: 1105 | logging.info(f"SKIPPING FIRST PARAM {params[0]}") 1106 | params = params[1:] 1107 | 1108 | for param_name, param in params: 1109 | origin = get_origin(param.annotation) 1110 | input_is_list[param_name] = origin is list 1111 | input_type_map[param_name] = param.annotation 1112 | 1113 | if debug: 1114 | logging.info(f"Param default: {param.default}") 1115 | 1116 | the_param, is_optional, is_hidden = _annotate_input(param_name, param.annotation, param.default, debug) 1117 | 1118 | if param_name == "unique_id" or param_name == "extra_pnginfo": 1119 | pass 1120 | elif not is_optional: 1121 | required_inputs[param_name] = the_param 1122 | elif is_hidden: 1123 | hidden_input_types[param_name] = the_param 1124 | else: 1125 | optional_input_types[param_name] = the_param 1126 | return required_inputs, hidden_input_types, optional_input_types, input_is_list, input_type_map 1127 | 1128 | 1129 | def _infer_return_types_from_annotations(func_or_types, debug=False): 1130 | """ 1131 | Infer whether each element in a function's return tuple is a list or a single item, 1132 | handling direct list inputs as well as function annotations. 
1133 | """ 1134 | if isinstance(func_or_types, list): 1135 | # Direct list of types provided 1136 | return_args = func_or_types 1137 | origin = tuple # Assume tuple if directly provided with a list 1138 | else: 1139 | # Assuming it's a function, inspect its return annotation 1140 | return_annotation = inspect.signature(func_or_types).return_annotation 1141 | return_args = get_args(return_annotation) 1142 | origin = get_origin(return_annotation) 1143 | 1144 | if debug: 1145 | logging.info(f"return_annotation: '{return_annotation}'") 1146 | logging.info(f"return_args: '{return_args}'") 1147 | logging.info(f"origin: '{origin}'") 1148 | logging.info(f"{type(return_annotation)}, {return_annotation}") 1149 | 1150 | types_mapped = [] 1151 | output_is_list = [] 1152 | 1153 | if origin is tuple: 1154 | for arg in return_args: 1155 | if get_origin(arg) is list: 1156 | output_is_list.append(True) 1157 | list_arg = get_args(arg)[0] 1158 | types_mapped.append(_get_type_str(list_arg)) 1159 | else: 1160 | output_is_list.append(False) 1161 | types_mapped.append(_get_type_str(arg)) 1162 | elif origin is list: 1163 | if debug: 1164 | logging.info(_get_type_str(return_annotation)) 1165 | logging.info(return_annotation) 1166 | logging.info(return_args) 1167 | types_mapped.append(_get_type_str(return_args[0])) 1168 | output_is_list.append(origin is list) 1169 | elif return_annotation is not inspect.Parameter.empty: 1170 | types_mapped.append(_get_type_str(return_annotation)) 1171 | output_is_list.append(False) 1172 | 1173 | return_types_tuple = tuple(types_mapped) 1174 | output_is_lists_tuple = tuple(output_is_list) 1175 | if debug: 1176 | logging.info( 1177 | f"return_types_tuple: '{return_types_tuple}', output_is_lists_tuple: '{output_is_lists_tuple}'" 1178 | ) 1179 | 1180 | return return_types_tuple, output_is_lists_tuple 1181 | 1182 | 1183 | def hex_to_color(color: str) -> list[float]: 1184 | col = color.strip('#').strip().upper() 1185 | assert len(col) == 6, f"Color must be a hex 
color code, got {color}" 1186 | assert all(c in "0123456789ABCDEF" for c in col), f"Color must be a hex color code, got {color}" 1187 | color_rgb = [int(col[i : i + 2], 16) for i in [0, 2, 4]] 1188 | return color_rgb 1189 | 1190 | 1191 | def _create_comfy_node( 1192 | cname, 1193 | category, 1194 | node_class, 1195 | process_function, 1196 | display_name, 1197 | workflow_name, 1198 | required_inputs, 1199 | hidden_inputs, 1200 | optional_inputs, 1201 | input_is_list, 1202 | return_types, 1203 | return_names, 1204 | output_is_list, 1205 | description=None, 1206 | is_output_node=False, 1207 | validate_inputs=None, 1208 | is_changed=None, 1209 | color=None, 1210 | bg_color=None, 1211 | width=None, 1212 | height=None, 1213 | source_location=None, 1214 | debug=False, 1215 | easy_nodes_config: EasyNodesConfig=None, 1216 | ): 1217 | all_inputs = {"required": required_inputs, "hidden": hidden_inputs, "optional": optional_inputs} 1218 | 1219 | node_info = {} 1220 | if color is not None: 1221 | color_rgb = hex_to_color(color) 1222 | node_info["color"] = color 1223 | if not bg_color: 1224 | bg_color = "#" + "".join(f"{int(c * 0.6):02X}" for c in color_rgb) 1225 | 1226 | if bg_color is not None: 1227 | _ = hex_to_color(bg_color) # Check that it's a valid color 1228 | node_info["bgColor"] = bg_color 1229 | 1230 | if width is not None: 1231 | node_info["width"] = width 1232 | 1233 | if height is not None: 1234 | node_info["height"] = height 1235 | 1236 | if source_location is not None: 1237 | node_info["sourceLocation"] = source_location 1238 | 1239 | # Smuggle it in with the description. A bit hacky, but it works and I 1240 | # don't know of a better way to do it without modifying the ComfyUI code. 
1241 | if node_info: 1242 | description = f"EasyNodesInfo={json.dumps(node_info)}\n" + description 1243 | 1244 | # Initial class dictionary setup 1245 | class_dict = { 1246 | "INPUT_TYPES": classmethod(lambda cls: all_inputs), 1247 | "CATEGORY": category, 1248 | "RETURN_TYPES": return_types, 1249 | "FUNCTION": cname, 1250 | "INPUT_IS_LIST": input_is_list, 1251 | "OUTPUT_IS_LIST": output_is_list, 1252 | "OUTPUT_NODE": is_output_node, 1253 | "RETURN_NAMES": return_names, 1254 | "VALIDATE_INPUTS": validate_inputs, 1255 | "IS_CHANGED": is_changed, 1256 | "DESCRIPTION": description, 1257 | cname: process_function, 1258 | } 1259 | class_dict = {k: v for k, v in class_dict.items() if v is not None} 1260 | 1261 | if debug: 1262 | logger = logging.getLogger(cname) 1263 | for key, value in class_dict.items(): 1264 | logger.info(f"{key}: {value}") 1265 | 1266 | class_map = easy_nodes_config.NODE_CLASS_MAPPINGS 1267 | display_map = easy_nodes_config.NODE_DISPLAY_NAME_MAPPINGS 1268 | 1269 | if not _after_first_prompt: 1270 | all_workflow_names = set(class_map.keys()) | set(comfyui_nodes.NODE_CLASS_MAPPINGS.keys()) 1271 | all_display_names = set(display_map.values()) | set(comfyui_nodes.NODE_DISPLAY_NAME_MAPPINGS.values()) 1272 | all_node_classes = set(class_map.values()) | set(comfyui_nodes.NODE_CLASS_MAPPINGS.values()) 1273 | 1274 | assert workflow_name not in all_workflow_names, f"Node class '{workflow_name} ({cname})' already exists!" 1275 | assert display_name not in all_display_names, f"Display name '{display_name}' already exists!" 1276 | assert node_class not in all_node_classes, f"Only one method from '{node_class}' can be used as a ComfyUI node." 
1277 | 1278 | if node_class: 1279 | for key, value in class_dict.items(): 1280 | setattr(node_class, key, value) 1281 | else: 1282 | node_class = type(workflow_name, (object,), class_dict) 1283 | 1284 | class_map[workflow_name] = node_class 1285 | display_map[workflow_name] = display_name 1286 | 1287 | # Temporary for backwards compatibility. 1288 | if easy_nodes_config.auto_register is AutoRegisterSentinel.DEFAULT: 1289 | comfyui_nodes.NODE_CLASS_MAPPINGS[workflow_name] = node_class 1290 | comfyui_nodes.NODE_DISPLAY_NAME_MAPPINGS[workflow_name] = display_name 1291 | 1292 | easy_nodes_config.num_registered += 1 1293 | 1294 | 1295 | def _is_static_method(cls, attr): 1296 | """Check if a method is a static method.""" 1297 | if cls is None: 1298 | return False 1299 | attr_value = inspect.getattr_static(cls, attr, None) 1300 | is_static = isinstance(attr_value, staticmethod) 1301 | return is_static 1302 | 1303 | 1304 | def _is_class_method(cls, attr): 1305 | if cls is None: 1306 | return False 1307 | attr_value = inspect.getattr_static(cls, attr, None) 1308 | is_class_method = isinstance(attr_value, classmethod) 1309 | return is_class_method 1310 | 1311 | 1312 | def _get_node_class(func): 1313 | split_name = func.__qualname__.split(".") 1314 | 1315 | if len(split_name) > 1: 1316 | class_name = split_name[-2] 1317 | node_class = globals().get(class_name, None) 1318 | if node_class is None and hasattr(func, "__globals__"): 1319 | node_class = func.__globals__.get(class_name, None) 1320 | return node_class 1321 | return None 1322 | 1323 | 1324 | T = typing.TypeVar("T") 1325 | 1326 | 1327 | def create_field_setter_node(cls: type, category=None, debug=False) -> typing.Callable[..., T]: 1328 | if category is None: 1329 | category = _get_curr_config().default_category 1330 | if debug: 1331 | logging.info(f"Registering setter for class '{cls.__name__}'") 1332 | key = _get_fully_qualified_name(cls) 1333 | assert key in _ANNOTATION_TO_COMFYUI_TYPE, f"Type '{key}' not registered 
with ComfyUI, call register_type() and give it a name first." 1334 | dynamic_function = _create_dynamic_setter(cls, debug=debug) 1335 | ComfyNode(category, display_name=cls.__name__, workflow_name=cls.__name__, debug=debug)( 1336 | dynamic_function) 1337 | 1338 | 1339 | def _create_dynamic_setter(cls: type, debug=False) -> typing.Callable[..., T]: 1340 | obj = cls() 1341 | func_name = cls.__name__ 1342 | setter_name = func_name + "_setter" 1343 | 1344 | properties = {} 1345 | all_type_names = set([]) 1346 | 1347 | # Collect properties and infer types from their current instantiated values. 1348 | for attr_name in dir(obj): 1349 | attr = getattr(obj, attr_name, None) 1350 | if attr is not None and not callable(attr) and not attr_name.startswith("__"): 1351 | if isinstance(attr, property) and attr.fset is not None: 1352 | # Handle properties 1353 | current_value = getattr(obj, attr_name, None) 1354 | prop_type = type(current_value) if current_value is not None else typing.Any 1355 | properties[attr_name] = (prop_type, current_value) 1356 | 1357 | if debug: 1358 | logging.info( 1359 | f"Property '{attr_name}' has type '{prop_type}' and value '{current_value}'" 1360 | ) 1361 | else: 1362 | # Handle instance attributes 1363 | current_value = getattr(obj, attr_name, None) 1364 | prop_type = type(current_value) if current_value is not None else typing.Any 1365 | properties[attr_name] = (prop_type, current_value) 1366 | 1367 | if debug: 1368 | logging.info( 1369 | f"Instance attribute '{attr_name}' has type '{prop_type}' and value '{current_value}'" 1370 | ) 1371 | 1372 | # Automatically register the type and its subtypes, allowing duplicates 1373 | register_type( 1374 | prop_type, _get_fully_qualified_name(prop_type), is_auto_register=True 1375 | ) 1376 | if hasattr(prop_type, "__args__"): 1377 | for subtype in prop_type.__args__: 1378 | register_type( 1379 | subtype, 1380 | _get_fully_qualified_name(subtype), 1381 | is_auto_register=True, 1382 | ) 1383 | 1384 | # Extract 
module name from the property type 1385 | fully_qualled_name = _get_fully_qualified_name(prop_type) 1386 | if "." in fully_qualled_name: 1387 | all_type_names.add(fully_qualled_name) 1388 | 1389 | def get_default_value(prop_type, current_value): 1390 | default_values = { 1391 | int: f"NumberInput({current_value})", 1392 | float: f"NumberInput({current_value}, -1000000, 10000000, 0.0001)", 1393 | str: f"StringInput('{current_value}')", 1394 | bool: f"{current_value}", 1395 | } 1396 | return default_values.get(prop_type, "None") 1397 | 1398 | func_params = [] 1399 | for prop, (prop_type, current_value) in properties.items(): 1400 | qualified_type_name = _get_fully_qualified_name(prop_type).replace('builtins.', '') 1401 | default_value = get_default_value(prop_type, current_value) 1402 | func_params.append(f"{prop}: {qualified_type_name}={default_value}") 1403 | 1404 | def_str = f"def {setter_name}(" 1405 | join_str = ",\n" + " " * len(def_str) 1406 | func_params_str = join_str.join(func_params) 1407 | 1408 | # Generate import statements 1409 | import_statements = [ 1410 | "import typing", 1411 | "import importlib", 1412 | "from easy_nodes import NumberInput, StringInput", 1413 | # "import example.example_nodes", 1414 | ] 1415 | 1416 | for module_name in all_type_names: 1417 | if module_name.startswith("builtins."): 1418 | continue 1419 | package_name, type_name = module_name.rsplit(".", 1) 1420 | import_statements.append(f"import {package_name}") 1421 | 1422 | # Alphabetize them and make unique 1423 | import_statements = sorted(list(dict.fromkeys(import_statements))) 1424 | 1425 | func_body_lines = [ 1426 | f"cls = getattr(module, '{func_name}')", 1427 | "new_obj = cls()", 1428 | ] 1429 | for prop in properties.keys(): 1430 | func_body_lines.append(f"if {prop} is not None: setattr(new_obj, '{prop}', {prop})") 1431 | func_body_lines.append("return new_obj") 1432 | func_body_lines = [f" {line}" for line in func_body_lines] 1433 | 1434 | func_lines = import_statements + 
[f"module = importlib.import_module('{cls.__module__}')", 1435 | f"{def_str}{func_params_str}) -> module.{cls.__name__}:"] + func_body_lines 1436 | func_code = "\n".join(func_lines) 1437 | 1438 | if debug: 1439 | logging.info(f"Creating dynamic setter with code: '{func_code}'") 1440 | 1441 | globals_dict = { 1442 | "typing": typing, 1443 | "importlib": importlib, 1444 | "NumberInput": NumberInput, 1445 | "StringInput": StringInput, 1446 | "module": importlib.import_module(cls.__module__), 1447 | } 1448 | locals_dict = {} 1449 | 1450 | # Update the global namespace with the module names 1451 | # for module_name in module_names: 1452 | # globals_dict[module_name] = importlib.import_module(module_name) 1453 | 1454 | # Execute the function code 1455 | exec(func_code, globals_dict, locals_dict) 1456 | 1457 | # Get the function object from the local namespace 1458 | func = locals_dict[setter_name] 1459 | 1460 | return func 1461 | --------------------------------------------------------------------------------