├── levelpixel.json
├── levelpixel.default.json
├── requirements.txt
├── .github
│   └── workflows
│       └── publish.yml
├── __init__.py
├── pyproject.toml
├── web
│   └── js
│       ├── showText.js
│       ├── showcontrol.js
│       └── widgets.js
├── nodes
│   ├── unloaders
│   │   ├── override_device_LP.py
│   │   └── model_unloaders_LP.py
│   ├── io
│   │   ├── text_outputs_LP.py
│   │   ├── numbers_utils_LP.py
│   │   ├── lora_tag_loader_LP.py
│   │   ├── text_inputs_LP.py
│   │   ├── image_outputs_LP.py
│   │   ├── folder_workers_LP.py
│   │   ├── image_loaders_LP.py
│   │   └── iterators_LP.py
│   ├── utils
│   │   └── utils_LP.py
│   ├── convert
│   │   └── convert_LP.py
│   ├── text
│   │   └── text_utils_LP.py
│   ├── tags
│   │   └── tags_utils_LP.py
│   └── image
│       └── image_utils_LP.py
├── .gitignore
├── install_init.py
├── node_list.json
└── README.md

/levelpixel.json:
--------------------------------------------------------------------------------
1 | {
2 |     "name": "LevelPixel",
3 |     "logging": false
4 | }
5 | 
--------------------------------------------------------------------------------
/levelpixel.default.json:
--------------------------------------------------------------------------------
1 | {
2 |     "name": "LevelPixel",
3 |     "logging": false
4 | }
5 | 
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | torch>=2.0.1
2 | numpy
3 | matplotlib
4 | langdetect
5 | deep_translator>=1.11.4
6 | wordninja
--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | name: Publish to Comfy registry
2 | on:
3 |   workflow_dispatch:
4 |   push:
5 |     branches:
6 |       - main
7 |     paths:
8 |       - "pyproject.toml"
9 | 
10 | permissions:
11 |   issues: write
12 | 
13 | jobs:
14 |   publish-node:
15 |     name: Publish Custom Node to registry
16 |     runs-on: ubuntu-latest
17 |     if: ${{ github.repository_owner == 'LevelPixel' }}
18 |     steps:
19 |       - name: Check out code
20 |         uses: actions/checkout@v4
21 |       - name: Publish Custom Node
22 |         uses: Comfy-Org/publish-node-action@v1
23 |         with:
24 |           ## Add your own personal access token to your Github Repository secrets and reference it here.
25 |           personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }}
26 | 
--------------------------------------------------------------------------------
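[Editor's note] The workflow above publishes on manual dispatch or on any push to main that touches pyproject.toml, so bumping version in pyproject.toml (below) is what effectively cuts a registry release. REGISTRY_ACCESS_TOKEN is assumed to be a Comfy Registry publisher token stored as a GitHub Actions repository secret, as the inline comment indicates.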
/__init__.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | from .install_init import init
3 | 
4 | init()
5 | 
6 | node_list = [
7 |     "convert.convert_LP",
8 |     "image.image_utils_LP",
9 |     "image.inpaint_crop_stitch_LP",
10 |     "io.iterators_LP",
11 |     "io.folder_workers_LP",
12 |     "io.image_loaders_LP",
13 |     "io.image_outputs_LP",
14 |     "io.lora_tag_loader_LP",
15 |     "io.numbers_utils_LP",
16 |     "io.text_inputs_LP",
17 |     "io.text_outputs_LP",
18 |     "tags.tags_utils_LP",
19 |     "text.text_utils_LP",
20 |     "unloaders.model_unloaders_LP",
21 |     "unloaders.override_device_LP",
22 |     "utils.utils_LP",
23 | ]
24 | 
25 | NODE_CLASS_MAPPINGS = {}
26 | NODE_DISPLAY_NAME_MAPPINGS = {}
27 | 
28 | for module_name in node_list:
29 |     imported_module = importlib.import_module(f".nodes.{module_name}", __name__)
30 | 
31 |     NODE_CLASS_MAPPINGS = {**NODE_CLASS_MAPPINGS, **imported_module.NODE_CLASS_MAPPINGS}
32 |     NODE_DISPLAY_NAME_MAPPINGS = {**NODE_DISPLAY_NAME_MAPPINGS, **imported_module.NODE_DISPLAY_NAME_MAPPINGS}
33 | 
34 | WEB_DIRECTORY = "./web"
35 | __all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS", "WEB_DIRECTORY"]
--------------------------------------------------------------------------------
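[Editor's note] __init__.py defines the contract every module listed in node_list must honor: expose module-level NODE_CLASS_MAPPINGS and NODE_DISPLAY_NAME_MAPPINGS dicts, which the importlib loop merges into the package-level ones. A minimal sketch of a conforming module follows; the node name MyNode|LP and its behavior are hypothetical, not part of this repository:

    class MyNode:
        @classmethod
        def INPUT_TYPES(s):
            # Same input-spec shape the nodes in this package use.
            return {"required": {"text": ("STRING", {"default": ""})}}

        RETURN_TYPES = ("STRING",)
        FUNCTION = "run"
        CATEGORY = "LevelPixel/IO"

        def run(self, text):
            # ComfyUI expects a tuple matching RETURN_TYPES.
            return (text,)

    # Picked up by the importlib loop in __init__.py above.
    NODE_CLASS_MAPPINGS = {"MyNode|LP": MyNode}
    NODE_DISPLAY_NAME_MAPPINGS = {"MyNode|LP": "My Node [LP]"}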
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "comfyui-levelpixel"
3 | description = "Main nodes of the Level Pixel company (levelpixel, LP). In this package you will find easy-to-use atomic nodes that can help automate various processes - memory cleaning automation (RAM free, VRAM free), Model unload, Lora loading like in Automatic1111, powerful inpaint crop and stitch, pipeline, declaring standard variables, converting variables, google translate, filtering tags by categories, working with file system, counting files in file system, counting objects, counting images. The names of the key nodes in this package are: Inpaint-CropAndStitch, Inpaint Crop, Inpaint Stitch, Resize Image, Resize Mask, Model Unloader, Free memory, Tag Filters, Tag Category Filters, Tag Choice Parser, File counter, Image Loader From Path (with counters), Google Translator, Language Translator, Override Device, Delay, Lora Tag Loader, Pipeline, Resize Masks, Resize Image, Iterators. Licensed under GNU GPLv3. Repository: https://github.com/LevelPixel/ComfyUI-LevelPixel"
4 | version = "1.3.3"
5 | license = { file = "LICENSE.txt" }
6 | dependencies = [
7 |     "torch>=2.0.1",
8 |     "numpy",
9 |     "matplotlib",
10 |     "langdetect",
11 |     "deep_translator>=1.11.4",
12 |     "wordninja" ]
13 | 
14 | [project.urls]
15 | Repository = "https://github.com/LevelPixel/ComfyUI-LevelPixel"
16 | 
17 | [tool.comfy]
18 | PublisherId = "levelpixel"
19 | DisplayName = "ComfyUI Level Pixel"
20 | Icon = "https://avatars.githubusercontent.com/u/175361036"
--------------------------------------------------------------------------------
/web/js/showText.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../../scripts/app.js";
2 | import { ComfyWidgets } from "../../../scripts/widgets.js";
3 | 
4 | // Displays input text on a node
5 | app.registerExtension({
6 |     name: "levelpixel.ShowText",
7 |     async beforeRegisterNodeDef(nodeType, nodeData, app) {
8 |         if (nodeData.name === "ShowText|LP") {
9 |             function populate(text) {
10 |                 if (this.widgets) {
11 |                     const isConvertedWidget = +!!this.inputs?.[0].widget;
12 |                     for (let i = isConvertedWidget; i < this.widgets.length; i++) {
13 |                         this.widgets[i].onRemove?.();
14 |                     }
15 |                     this.widgets.length = isConvertedWidget;
16 |                 }
17 | 
18 |                 const v = [...text];
19 |                 if (!v[0]) {
20 |                     v.shift();
21 |                 }
22 |                 for (let list of v) {
23 |                     if (!(list instanceof Array)) list = [list];
24 |                     for (const l of list) {
25 |                         const w = ComfyWidgets["STRING"](this, "text_" + (this.widgets?.length ?? 0), ["STRING", { multiline: true }], app).widget;
26 |                         w.inputEl.readOnly = true;
27 |                         w.inputEl.style.opacity = 0.6;
28 |                         w.value = l;
29 |                     }
30 |                 }
31 | 
32 |                 requestAnimationFrame(() => {
33 |                     const sz = this.computeSize();
34 |                     if (sz[0] < this.size[0]) {
35 |                         sz[0] = this.size[0];
36 |                     }
37 |                     if (sz[1] < this.size[1]) {
38 |                         sz[1] = this.size[1];
39 |                     }
40 |                     this.onResize?.(sz);
41 |                     app.graph.setDirtyCanvas(true, false);
42 |                 });
43 |             }
44 | 
45 |             // When the node is executed we will be sent the input text, display this in the widget
46 |             const onExecuted = nodeType.prototype.onExecuted;
47 |             nodeType.prototype.onExecuted = function (message) {
48 |                 onExecuted?.apply(this, arguments);
49 |                 populate.call(this, message.text);
50 |             };
51 | 
52 |             const VALUES = Symbol();
53 |             const configure = nodeType.prototype.configure;
54 |             nodeType.prototype.configure = function () {
55 |                 this[VALUES] = arguments[0]?.widgets_values;
56 |                 return configure?.apply(this, arguments);
57 |             };
58 | 
59 |             const onConfigure = nodeType.prototype.onConfigure;
60 |             nodeType.prototype.onConfigure = function () {
61 |                 onConfigure?.apply(this, arguments);
62 |                 const widgets_values = this[VALUES];
63 |                 if (widgets_values?.length) {
64 |                     requestAnimationFrame(() => {
65 |                         populate.call(this, widgets_values.slice(+(widgets_values.length > 1 && this.inputs?.[0].widget)));
66 |                     });
67 |                 }
68 |             };
69 |         }
70 |     },
71 | });
--------------------------------------------------------------------------------
/nodes/unloaders/override_device_LP.py:
--------------------------------------------------------------------------------
1 | import types
2 | import torch
3 | 
4 | class OverrideDevice:
5 |     @classmethod
6 |     def INPUT_TYPES(s):
7 |         devices = ["auto","cpu",]
8 |         for k in range(0, torch.cuda.device_count()):
9 |             devices.append(f"cuda:{k}")
10 | 
11 |         return {
12 |             "required": {
13 |                 "device": (devices, {"default":"cpu"}),
14 |             }
15 |         }
16 | 
17 |     FUNCTION = "patch"
18 |     CATEGORY = "LevelPixel/Unloaders"
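    # [Editor's note] The override() below pins a model to one device in three
    # steps: (1) rewrite every device attribute the patcher consults
    # ("device", "load_device", "offload_device", "current_device",
    # "output_device"), (2) restore the genuine nn.Module.to() and move the
    # wrapped module once, and (3) replace .to() with a no-op so ComfyUI's
    # model management cannot migrate the module back later.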
19 | 20 | def override(self, model, model_attr, device): 21 | if device == "auto": 22 | return (model,) 23 | 24 | torch.device(device) 25 | 26 | model.device = device 27 | patcher = getattr(model, "patcher", model) 28 | for name in ["device", "load_device", "offload_device", "current_device", "output_device"]: 29 | setattr(patcher, name, device) 30 | 31 | py_model = getattr(model, model_attr) 32 | py_model.to = types.MethodType(torch.nn.Module.to, py_model) 33 | py_model.to(device) 34 | 35 | def to(*args, **kwargs): 36 | pass 37 | py_model.to = types.MethodType(to, py_model) 38 | return (model,) 39 | 40 | def patch(self, *args, **kwargs): 41 | raise NotImplementedError 42 | 43 | class OverrideCLIPDevice(OverrideDevice): 44 | @classmethod 45 | def INPUT_TYPES(s): 46 | k = super().INPUT_TYPES() 47 | k["required"]["clip"] = ("CLIP",) 48 | return k 49 | 50 | RETURN_TYPES = ("CLIP",) 51 | CATEGORY = "LevelPixel/Unloaders" 52 | 53 | def patch(self, clip, device): 54 | return self.override(clip, "cond_stage_model", device) 55 | 56 | class OverrideVAEDevice(OverrideDevice): 57 | @classmethod 58 | def INPUT_TYPES(s): 59 | k = super().INPUT_TYPES() 60 | k["required"]["vae"] = ("VAE",) 61 | return k 62 | 63 | RETURN_TYPES = ("VAE",) 64 | CATEGORY = "LevelPixel/Unloaders" 65 | 66 | def patch(self, vae, device): 67 | return self.override(vae, "first_stage_model", device) 68 | 69 | class OverrideCLIPVisionDevice(OverrideDevice): 70 | @classmethod 71 | def INPUT_TYPES(s): 72 | k = super().INPUT_TYPES() 73 | k["required"]["clip_vision"] = ("CLIP_VISION",) 74 | return k 75 | 76 | RETURN_TYPES = ("CLIP_VISION",) 77 | CATEGORY = "LevelPixel/Unloaders" 78 | 79 | def patch(self, clip_vision, device): 80 | return self.override(clip_vision, "model", device) 81 | 82 | NODE_CLASS_MAPPINGS = { 83 | "OverrideCLIPDevice|LP": OverrideCLIPDevice, 84 | "OverrideVAEDevice|LP": OverrideVAEDevice, 85 | "OverrideCLIPVisionDevice|LP": OverrideCLIPVisionDevice, 86 | } 87 | 88 | NODE_DISPLAY_NAME_MAPPINGS = { 89 | "OverrideCLIPDevice|LP": "Override CLIP Device [LP]", 90 | "OverrideVAEDevice|LP": "Override VAE Device [LP]", 91 | "OverrideCLIPVisionDevice|LP": "Override CLIP Vision Device [LP]", 92 | } -------------------------------------------------------------------------------- /nodes/io/text_outputs_LP.py: -------------------------------------------------------------------------------- 1 | class ShowText: 2 | @classmethod 3 | def INPUT_TYPES(s): 4 | return { 5 | "required": { 6 | "text": ("STRING", {"forceInput": True}), 7 | }, 8 | "hidden": { 9 | "unique_id": "UNIQUE_ID", 10 | "extra_pnginfo": "EXTRA_PNGINFO", 11 | }, 12 | } 13 | 14 | INPUT_IS_LIST = True 15 | RETURN_TYPES = ("STRING",) 16 | FUNCTION = "show_text" 17 | OUTPUT_NODE = True 18 | OUTPUT_IS_LIST = (True,) 19 | 20 | CATEGORY = "LevelPixel/IO" 21 | 22 | def show_text(self, text, unique_id=None, extra_pnginfo=None): 23 | if unique_id is not None and extra_pnginfo is not None: 24 | if not isinstance(extra_pnginfo, list): 25 | print("Error: extra_pnginfo is not a list") 26 | elif ( 27 | not isinstance(extra_pnginfo[0], dict) 28 | or "workflow" not in extra_pnginfo[0] 29 | ): 30 | print("Error: extra_pnginfo[0] is not a dict or missing 'workflow' key") 31 | else: 32 | workflow = extra_pnginfo[0]["workflow"] 33 | node = next( 34 | (x for x in workflow["nodes"] if str(x["id"]) == str(unique_id[0])), 35 | None, 36 | ) 37 | if node: 38 | node["widgets_values"] = [text] 39 | 40 | return {"ui": {"text": text}, "result": (text,)} 41 | 42 | class ShowTextBridge: 43 | 
@classmethod 44 | def INPUT_TYPES(s): 45 | return { 46 | "required": { 47 | "text": ("STRING", {"forceInput": True}), 48 | }, 49 | "hidden": { 50 | "unique_id": "UNIQUE_ID", 51 | "extra_pnginfo": "EXTRA_PNGINFO", 52 | }, 53 | } 54 | 55 | INPUT_IS_LIST = True 56 | RETURN_TYPES = ("STRING",) 57 | FUNCTION = "show_text_bridge" 58 | OUTPUT_NODE = False 59 | OUTPUT_IS_LIST = (True,) 60 | 61 | CATEGORY = "LevelPixel/IO" 62 | 63 | def show_text_bridge(self, text, unique_id=None, extra_pnginfo=None): 64 | if unique_id is not None and extra_pnginfo is not None: 65 | if not isinstance(extra_pnginfo, list): 66 | print("Error: extra_pnginfo is not a list") 67 | elif ( 68 | not isinstance(extra_pnginfo[0], dict) 69 | or "workflow" not in extra_pnginfo[0] 70 | ): 71 | print("Error: extra_pnginfo[0] is not a dict or missing 'workflow' key") 72 | else: 73 | workflow = extra_pnginfo[0]["workflow"] 74 | node = next( 75 | (x for x in workflow["nodes"] if str(x["id"]) == str(unique_id[0])), 76 | None, 77 | ) 78 | if node: 79 | node["widgets_values"] = [text] 80 | 81 | return {"ui": {"text": text}, "result": (text,)} 82 | 83 | NODE_CLASS_MAPPINGS = { 84 | "ShowText|LP": ShowText, 85 | "ShowTextBridge|LP": ShowTextBridge, 86 | } 87 | 88 | NODE_DISPLAY_NAME_MAPPINGS = { 89 | "ShowText|LP": "Show Text [LP]", 90 | "ShowTextBridge|LP": "Show Text Bridge [LP]", 91 | } -------------------------------------------------------------------------------- /nodes/io/numbers_utils_LP.py: -------------------------------------------------------------------------------- 1 | class FloatSlider: 2 | @classmethod 3 | def INPUT_TYPES(s): 4 | return {"required": { 5 | "number":("FLOAT", { 6 | "default": 0, 7 | "min": 0.000000, 8 | "max": 1.000000, 9 | "step": 0.000001, 10 | "display": "slider" 11 | }), 12 | }, 13 | } 14 | 15 | RETURN_TYPES = ("FLOAT",) 16 | RETURN_NAMES = ('FLOAT',) 17 | FUNCTION = "run" 18 | 19 | CATEGORY = "LevelPixel/IO" 20 | 21 | INPUT_IS_LIST = False 22 | OUTPUT_IS_LIST = (False,) 23 | 24 | def run(self, number): 25 | if number < 0.000000: 26 | number = 0.000000 27 | elif number > 1.000000: 28 | number = 1.000000 29 | return (number,) 30 | 31 | class TenthsFloatSlider: 32 | @classmethod 33 | def INPUT_TYPES(s): 34 | return {"required": { 35 | "number":("FLOAT", { 36 | "default": 0, 37 | "min": 0.0, 38 | "max": 1.0, 39 | "step": 0.1, 40 | "display": "slider" 41 | }), 42 | }, 43 | } 44 | 45 | RETURN_TYPES = ("FLOAT",) 46 | RETURN_NAMES = ('FLOAT',) 47 | FUNCTION = "tenthsFloatSlider" 48 | 49 | CATEGORY = "LevelPixel/IO" 50 | 51 | INPUT_IS_LIST = False 52 | OUTPUT_IS_LIST = (False,) 53 | 54 | def tenthsFloatSlider(self, number): 55 | if number < 0.0: 56 | number = 0.0 57 | elif number > 1.0: 58 | number = 1.0 59 | return (number,) 60 | 61 | class HundredthsFloatSlider: 62 | @classmethod 63 | def INPUT_TYPES(s): 64 | return {"required": { 65 | "number":("FLOAT", { 66 | "default": 0, 67 | "min": 0.00, 68 | "max": 1.00, 69 | "step": 0.01, 70 | "display": "slider" 71 | }), 72 | }, 73 | } 74 | 75 | RETURN_TYPES = ("FLOAT",) 76 | RETURN_NAMES = ('FLOAT',) 77 | FUNCTION = "hundredthsFloatSlider" 78 | 79 | CATEGORY = "LevelPixel/IO" 80 | 81 | INPUT_IS_LIST = False 82 | OUTPUT_IS_LIST = (False,) 83 | 84 | def hundredthsFloatSlider(self, number): 85 | if number < 0.00: 86 | number = 0.00 87 | elif number > 1.00: 88 | number = 1.00 89 | return (number,) 90 | 91 | class Seed: 92 | @classmethod 93 | def INPUT_TYPES(cls): 94 | return {"required": {"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff})}} 95 | 96 | 
RETURN_TYPES = ("INT", ) 97 | RETURN_NAMES = ("seed INT", ) 98 | FUNCTION = "seedint" 99 | OUTPUT_NODE = True 100 | CATEGORY = "LevelPixel/IO" 101 | 102 | @staticmethod 103 | def seedint(seed): 104 | return (seed,) 105 | 106 | NODE_CLASS_MAPPINGS = { 107 | "SimpleFloatSlider|LP": FloatSlider, 108 | "TenthsSimpleFloatSlider|LP": TenthsFloatSlider, 109 | "HundredthsSimpleFloatSlider|LP": HundredthsFloatSlider, 110 | "Seed|LP": Seed, 111 | } 112 | 113 | NODE_DISPLAY_NAME_MAPPINGS = { 114 | "SimpleFloatSlider|LP": "Simple Float Slider [LP]", 115 | "TenthsSimpleFloatSlider|LP": "Simple Float Slider - Tenths Step [LP]", 116 | "HundredthsSimpleFloatSlider|LP": "Simple Float Slider - Hundredths Step [LP]", 117 | "Seed|LP": "Seed [LP]", 118 | } -------------------------------------------------------------------------------- /nodes/io/lora_tag_loader_LP.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | import folder_paths 3 | import re 4 | 5 | # Import ComfyUI files 6 | import comfy.sd 7 | import comfy.utils 8 | 9 | class LoraTagLoader: 10 | def __init__(self): 11 | self.loaded_lora = None 12 | self.tag_pattern = r"\<[0-9a-zA-Z\:\_\-\.\s\/\(\)\\\\]+\>" 13 | 14 | @classmethod 15 | def INPUT_TYPES(s): 16 | return {"required": { "model": ("MODEL",), 17 | "clip": ("CLIP", ), 18 | "text": ("STRING", {"multiline": True}), 19 | }} 20 | RETURN_TYPES = ("MODEL", "CLIP", "STRING") 21 | RETURN_NAMES = ("MODEL", "CLIP", "STRING") 22 | FUNCTION = "load_lora" 23 | OUTPUT_NODE = False 24 | 25 | CATEGORY = "LevelPixel/IO" 26 | 27 | def load_lora(self, model, clip, text): 28 | # print(f"\nLoraTagLoader input text: { text }") 29 | 30 | founds = re.findall(self.tag_pattern, text) 31 | # print(f"\nfoound lora tags: { founds }") 32 | 33 | if len(founds) < 1: 34 | return (model, clip, text) 35 | 36 | model_lora = model 37 | clip_lora = clip 38 | 39 | log = [] 40 | log.append("") 41 | 42 | lora_files = folder_paths.get_filename_list("loras") 43 | for f in founds: 44 | tag = f[1:-1] 45 | pak = tag.split(":") 46 | type = pak[0] 47 | if type != 'lora': 48 | continue 49 | name = None 50 | if len(pak) > 1 and len(pak[1]) > 0: 51 | name = pak[1] 52 | else: 53 | continue 54 | wModel = wClip = 0 55 | try: 56 | if len(pak) > 2 and len(pak[2]) > 0: 57 | wModel = float(pak[2]) 58 | wClip = wModel 59 | if len(pak) > 3 and len(pak[3]) > 0: 60 | wClip = float(pak[3]) 61 | except ValueError: 62 | continue 63 | if name == None: 64 | continue 65 | lora_name = None 66 | for lora_file in lora_files: 67 | if Path(lora_file).name.startswith(name) or lora_file.startswith(name): 68 | lora_name = lora_file 69 | break 70 | if lora_name == None: 71 | log[0] = log[0] + f"NOT found LoRA '{name}' \n" 72 | print(f"bypassed lora tag: { (type, name, wModel, wClip) } >> { lora_name }") 73 | continue 74 | print(f"detected lora tag: { (type, name, wModel, wClip) } >> { lora_name }") 75 | 76 | lora_path = folder_paths.get_full_path("loras", lora_name) 77 | lora = None 78 | if self.loaded_lora is not None: 79 | if self.loaded_lora[0] == lora_path: 80 | lora = self.loaded_lora[1] 81 | else: 82 | temp = self.loaded_lora 83 | self.loaded_lora = None 84 | del temp 85 | 86 | if lora is None: 87 | lora = comfy.utils.load_torch_file(lora_path, safe_load=True) 88 | self.loaded_lora = (lora_path, lora) 89 | 90 | model_lora, clip_lora = comfy.sd.load_lora_for_models(model_lora, clip_lora, lora, wModel, wClip) 91 | 92 | plain_prompt = re.sub(self.tag_pattern, "", text) 93 | return {"ui": {"log": log}, 
"result": (model_lora, clip_lora, plain_prompt)} 94 | 95 | NODE_CLASS_MAPPINGS = { 96 | "LoraTagLoader|LP": LoraTagLoader, 97 | } 98 | 99 | NODE_DISPLAY_NAME_MAPPINGS = { 100 | "LoraTagLoader|LP": "Load LoRA Tag [LP]", 101 | } -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__ 3 | __pycache__/ 4 | *.py[cod] 5 | *$py.class 6 | 7 | # C extensions 8 | *.so 9 | 10 | # Distribution / packaging 11 | .Python 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | cover/ 54 | 55 | # Translations 56 | *.mo 57 | *.pot 58 | 59 | # Django stuff: 60 | *.log 61 | local_settings.py 62 | db.sqlite3 63 | db.sqlite3-journal 64 | 65 | # Flask stuff: 66 | instance/ 67 | .webassets-cache 68 | 69 | # Scrapy stuff: 70 | .scrapy 71 | 72 | # Sphinx documentation 73 | docs/_build/ 74 | 75 | # PyBuilder 76 | .pybuilder/ 77 | target/ 78 | 79 | # Jupyter Notebook 80 | .ipynb_checkpoints 81 | 82 | # IPython 83 | profile_default/ 84 | ipython_config.py 85 | 86 | # pyenv 87 | # For a library or package, you might want to ignore these files since the code is 88 | # intended to run in multiple environments; otherwise, check them in: 89 | # .python-version 90 | 91 | # pipenv 92 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 93 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 94 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 95 | # install all needed dependencies. 96 | #Pipfile.lock 97 | 98 | # poetry 99 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 100 | # This is especially recommended for binary packages to ensure reproducibility, and is more 101 | # commonly ignored for libraries. 102 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 103 | #poetry.lock 104 | 105 | # pdm 106 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 107 | #pdm.lock 108 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 109 | # in version control. 110 | # https://pdm.fming.dev/#use-with-ide 111 | .pdm.toml 112 | 113 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 114 | __pypackages__/ 115 | 116 | # Celery stuff 117 | celerybeat-schedule 118 | celerybeat.pid 119 | 120 | # SageMath parsed files 121 | *.sage.py 122 | 123 | # Environments 124 | .env 125 | .venv 126 | env/ 127 | venv/ 128 | ENV/ 129 | env.bak/ 130 | venv.bak/ 131 | 132 | # Spyder project settings 133 | .spyderproject 134 | .spyproject 135 | 136 | # Rope project settings 137 | .ropeproject 138 | 139 | # mkdocs documentation 140 | /site 141 | 142 | # mypy 143 | .mypy_cache/ 144 | .dmypy.json 145 | dmypy.json 146 | 147 | # Pyre type checker 148 | .pyre/ 149 | 150 | # pytype static type analyzer 151 | .pytype/ 152 | 153 | # Cython debug symbols 154 | cython_debug/ 155 | 156 | # PyCharm 157 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 158 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 159 | # and can be added to the global gitignore or merged into this file. For a more nuclear 160 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 161 | #.idea/ 162 | -------------------------------------------------------------------------------- /nodes/io/text_inputs_LP.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | class AnyType(str): 4 | def __ne__(self, __value: object) -> bool: 5 | return False 6 | 7 | any = AnyType("*") 8 | 9 | class Text: 10 | 11 | def __init__(self): 12 | pass 13 | 14 | @classmethod 15 | def INPUT_TYPES(s): 16 | return {"required": { 17 | "TEXT": ("STRING", {"default": "", "multiline": True, "placeholder": "Text"}),} 18 | } 19 | 20 | RETURN_TYPES = ("STRING",) 21 | RETURN_NAMES = ("TEXT",) 22 | FUNCTION = "text" 23 | 24 | CATEGORY = "LevelPixel/IO" 25 | 26 | @staticmethod 27 | def text(TEXT): 28 | return TEXT, 29 | 30 | class String: 31 | 32 | def __init__(self): 33 | pass 34 | 35 | @classmethod 36 | def INPUT_TYPES(s): 37 | return {"required": { 38 | "STRING": ("STRING", {"default": "", "multiline": False, "placeholder": "String"}),} 39 | } 40 | 41 | RETURN_TYPES = ("STRING",) 42 | RETURN_NAMES = ("STRING",) 43 | FUNCTION = "string" 44 | 45 | CATEGORY = "LevelPixel/IO" 46 | 47 | @staticmethod 48 | def string(STRING): 49 | return STRING, 50 | 51 | class FindValueFromFile: 52 | 53 | def __init__(self): 54 | pass 55 | 56 | @classmethod 57 | def INPUT_TYPES(s): 58 | 59 | return {"required": { 60 | "key": ("STRING", {"default": '', "multiline": False}), 61 | "input_path": ("STRING", {"default": '', "multiline": False}) 62 | } 63 | } 64 | 65 | RETURN_TYPES = ("STRING", "BOOLEAN") 66 | RETURN_NAMES = ("Value STRING", "Value received BOOL") 67 | 68 | FUNCTION = "find_value_from_file" 69 | CATEGORY = "LevelPixel/IO" 70 | 71 | def find_value_from_file(self, key, input_path=None): 72 | valueString = "" 73 | boolResult = True 74 | log = [] 75 | log.append("") 76 | try: 77 | with open(os.path.normpath(input_path), 'r', encoding='utf-8') as file: 78 | for line in file: 79 | line = line.strip() 80 | 81 | if "-->" in line: 82 | keyLine, valueLine = line.split("-->", 1) 83 | if keyLine.strip() == key: 84 | valueString = valueLine.strip() 85 | break 86 | except FileNotFoundError: 87 | log[0] = log[0] + f"Error: File not found at {input_path}" 88 | boolResult = False 89 | except Exception as e: 90 | log[0] = log[0] + f"Error: {e}" 91 | boolResult = False 92 | 93 | if valueString == "": 94 | boolResult = False 95 | 96 | return {"ui": {"text": 
valueString, "log": log,}, "result": (valueString, boolResult,)} 97 | 98 | class StringCycler: 99 | 100 | @classmethod 101 | def INPUT_TYPES(s): 102 | return {"required": { 103 | "text": ("STRING", {"multiline": True, "default": ""}), 104 | "repeats": ("INT", {"default": 1, "min": 1, "max": 99999}), 105 | "loops": ("INT", {"default": 1, "min": 1, "max": 99999}), 106 | } 107 | } 108 | 109 | RETURN_TYPES = (any,) 110 | RETURN_NAMES = ("STRING",) 111 | OUTPUT_IS_LIST = (True,) 112 | FUNCTION = "string_cycle" 113 | CATEGORY = "LevelPixel/IO" 114 | 115 | def string_cycle(self, text, repeats, loops=1): 116 | 117 | lines = text.split('\n') 118 | list_out = [] 119 | 120 | for i in range(loops): 121 | for text_item in lines: 122 | for _ in range(repeats): 123 | list_out.append(text_item) 124 | 125 | return (list_out, ) 126 | 127 | NODE_CLASS_MAPPINGS = { 128 | "Text|LP": Text, 129 | "String|LP": String, 130 | "FindValueFromFile|LP": FindValueFromFile, 131 | "StringCycler|LP": StringCycler, 132 | } 133 | 134 | NODE_DISPLAY_NAME_MAPPINGS = { 135 | "Text|LP": "Text [LP]", 136 | "String|LP": "String [LP]", 137 | "FindValueFromFile|LP": "Find Value From File [LP]", 138 | "StringCycler|LP": "String Cycler [LP]", 139 | } -------------------------------------------------------------------------------- /install_init.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import shutil 4 | import inspect 5 | from server import PromptServer 6 | 7 | config = None 8 | 9 | def is_logging_enabled(): 10 | config = get_extension_config() 11 | if "logging" not in config: 12 | return False 13 | return config["logging"] 14 | 15 | def log(message, type=None, always=False, name=None): 16 | if not always and not is_logging_enabled(): 17 | return 18 | 19 | if type is not None: 20 | message = f"[{type}] {message}" 21 | 22 | if name is None: 23 | name = get_extension_config()["name"] 24 | 25 | print(f"(levelpixel-nodes:{name}) {message}") 26 | 27 | def link_js(src, dst): 28 | src = os.path.abspath(src) 29 | dst = os.path.abspath(dst) 30 | if os.name == "nt": 31 | try: 32 | import _winapi 33 | _winapi.CreateJunction(src, dst) 34 | return True 35 | except: 36 | pass 37 | try: 38 | os.symlink(src, dst) 39 | return True 40 | except: 41 | import logging 42 | logging.exception('') 43 | return False 44 | 45 | def get_ext_dir(subpath=None, mkdir=False): 46 | dir = os.path.dirname(__file__) 47 | if subpath is not None: 48 | dir = os.path.join(dir, subpath) 49 | 50 | dir = os.path.abspath(dir) 51 | 52 | if mkdir and not os.path.exists(dir): 53 | os.makedirs(dir) 54 | return dir 55 | 56 | def get_extension_config(reload=False): 57 | global config 58 | if reload == False and config is not None: 59 | return config 60 | 61 | config_path = get_ext_dir("levelpixel.json") 62 | default_config_path = get_ext_dir("levelpixel.default.json") 63 | if not os.path.exists(config_path): 64 | if os.path.exists(default_config_path): 65 | shutil.copy(default_config_path, config_path) 66 | if not os.path.exists(config_path): 67 | log(f"Failed to create config at {config_path}", type="ERROR", always=True, name="???") 68 | print(f"Extension path: {get_ext_dir()}") 69 | return {"name": "Unknown", "version": -1} 70 | 71 | else: 72 | log("Missing levelpixel.default.json, this extension may not work correctly. 
Please reinstall the extension.", 73 | type="ERROR", always=True, name="???") 74 | print(f"Extension path: {get_ext_dir()}") 75 | return {"name": "Unknown", "version": -1} 76 | 77 | with open(config_path, "r") as f: 78 | config = json.loads(f.read()) 79 | return config 80 | 81 | def get_comfy_dir(subpath=None, mkdir=False): 82 | dir = os.path.dirname(inspect.getfile(PromptServer)) 83 | if subpath is not None: 84 | dir = os.path.join(dir, subpath) 85 | 86 | dir = os.path.abspath(dir) 87 | 88 | if mkdir and not os.path.exists(dir): 89 | os.makedirs(dir) 90 | return dir 91 | 92 | def get_web_ext_dir(): 93 | config = get_extension_config() 94 | name = config["name"] 95 | dir = get_comfy_dir("web/extensions/levelpixel") 96 | if not os.path.exists(dir): 97 | os.makedirs(dir) 98 | dir = os.path.join(dir, name) 99 | return dir 100 | 101 | def is_junction(path): 102 | if os.name != "nt": 103 | return False 104 | try: 105 | return bool(os.readlink(path)) 106 | except OSError: 107 | return False 108 | 109 | def should_install_js(): 110 | return not hasattr(PromptServer.instance, "supports") or "custom_nodes_from_web" not in PromptServer.instance.supports 111 | 112 | def install_js(): 113 | src_dir = get_ext_dir("web/js") 114 | if not os.path.exists(src_dir): 115 | log("No JS") 116 | return 117 | 118 | should_install = should_install_js() 119 | if should_install: 120 | log("it looks like you're running an old version of ComfyUI that requires manual setup of web files, it is recommended you update your installation.", "warning", True) 121 | dst_dir = get_web_ext_dir() 122 | linked = os.path.islink(dst_dir) or is_junction(dst_dir) 123 | if linked or os.path.exists(dst_dir): 124 | if linked: 125 | if should_install: 126 | log("JS already linked") 127 | else: 128 | os.unlink(dst_dir) 129 | log("JS unlinked, PromptServer will serve extension") 130 | elif not should_install: 131 | shutil.rmtree(dst_dir) 132 | log("JS deleted, PromptServer will serve extension") 133 | return 134 | 135 | if not should_install: 136 | log("JS skipped, PromptServer will serve extension") 137 | return 138 | 139 | if link_js(src_dir, dst_dir): 140 | log("JS linked") 141 | return 142 | 143 | log("Copying JS files") 144 | shutil.copytree(src_dir, dst_dir, dirs_exist_ok=True) 145 | 146 | def init(check_imports=None): 147 | log("Init") 148 | 149 | if check_imports is not None: 150 | import importlib.util 151 | for imp in check_imports: 152 | spec = importlib.util.find_spec(imp) 153 | if spec is None: 154 | log(f"{imp} is required, please check requirements are installed.", 155 | type="ERROR", always=True) 156 | return False 157 | 158 | install_js() 159 | return True 160 | -------------------------------------------------------------------------------- /nodes/io/image_outputs_LP.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | import sys 4 | from PIL import Image 5 | from PIL.PngImagePlugin import PngInfo 6 | import folder_paths 7 | import random 8 | import json 9 | sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy")) 10 | from comfy.cli_args import args 11 | 12 | class SaveImage: 13 | def __init__(self): 14 | self.output_dir = folder_paths.get_output_directory() 15 | self.type = "output" 16 | self.prefix_append = "" 17 | self.compress_level = 4 18 | self.downscale_preview = False 19 | self.size = 512 20 | self.downscale_mode = True 21 | 22 | @classmethod 23 | def INPUT_TYPES(s): 24 | return { 25 | "required": { 26 | "images": 
("IMAGE", {"tooltip": "The images to save."}), 27 | "filename_prefix": ("STRING", {"default": "ComfyUI", "tooltip": "The prefix for the file to save. This may include formatting information such as %date:yyyy-MM-dd% or %Empty Latent Image.width% to include values from nodes."}), 28 | "downscale_preview": ("BOOLEAN", {"default": True, "label_on": "On", "label_off": "Off"}), 29 | "size": ("INT", {"default": 512, "min": 1, "max": 8192, "step": 1}), 30 | "downscale_mode": ("BOOLEAN", {"default": True, "label_on": "max", "label_off": "min"}), 31 | "compress_level": ("INT", {"default": 1, "min": 0, "max": 9, "step": 1}), 32 | }, 33 | "hidden": { 34 | "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO" 35 | }, 36 | } 37 | 38 | RETURN_TYPES = ("IMAGE",) 39 | RETURN_NAMES = ("original IMAGE",) 40 | FUNCTION = "save_images" 41 | 42 | OUTPUT_NODE = False 43 | 44 | CATEGORY = "LevelPixel/IO" 45 | DESCRIPTION = "Saves the input images to your ComfyUI output directory." 46 | 47 | def save_images(self, images, filename_prefix="ComfyUI", downscale_preview=False, size=512, downscale_mode=True, compress_level=1, prompt=None, extra_pnginfo=None): 48 | self.downscale_preview = downscale_preview 49 | self.size = size 50 | self.downscale_mode = downscale_mode 51 | self.compress_level = compress_level 52 | filename_prefix += self.prefix_append 53 | full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0]) 54 | results = list() 55 | for (batch_number, image) in enumerate(images): 56 | i = 255. * image.cpu().numpy() 57 | img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8)) 58 | 59 | if self.downscale_preview == True: 60 | w, h = img.size 61 | target = max(w, h) if self.downscale_mode else min(w, h) 62 | if target > self.size: 63 | scale = self.size / target 64 | new_size = (round(w * scale), round(h * scale)) 65 | img = img.resize(new_size, Image.Resampling.LANCZOS) 66 | 67 | metadata = None 68 | if not args.disable_metadata: 69 | metadata = PngInfo() 70 | if prompt is not None: 71 | metadata.add_text("prompt", json.dumps(prompt)) 72 | if extra_pnginfo is not None: 73 | for x in extra_pnginfo: 74 | metadata.add_text(x, json.dumps(extra_pnginfo[x])) 75 | 76 | filename_with_batch_num = filename.replace("%batch_num%", str(batch_number)) 77 | file = f"{filename_with_batch_num}_{counter:05}_.png" 78 | img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=self.compress_level) 79 | results.append({ 80 | "filename": file, 81 | "subfolder": subfolder, 82 | "type": self.type 83 | }) 84 | counter += 1 85 | 86 | return { "ui": { "images": results }, "result": (images,) } 87 | 88 | class PreviewImageForConditions(SaveImage): 89 | def __init__(self): 90 | self.output_dir = folder_paths.get_temp_directory() 91 | self.type = "temp" 92 | self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz") for x in range(5)) 93 | self.compress_level = 0 94 | self.downscale_preview = True 95 | 96 | @classmethod 97 | def INPUT_TYPES(s): 98 | return {"required":{ 99 | "images": ("IMAGE", ), 100 | "size": ("INT", {"default": 512, "min": 1, "max": 8192, "step": 1}), 101 | "downscale_preview": ("BOOLEAN", {"default": True, "label_on": "On", "label_off": "Off"}), 102 | "size": ("INT", {"default": 512, "min": 1, "max": 8192, "step": 1}), 103 | "downscale_mode": ("BOOLEAN", {"default": True, "label_on": "max", "label_off": "min"}), 104 | "compress_level": ("INT", {"default": 
0, "min": 0, "max": 9, "step": 1}), 105 | }, 106 | "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, 107 | } 108 | 109 | NODE_CLASS_MAPPINGS = { 110 | "PreviewImageForConditions|LP": PreviewImageForConditions 111 | } 112 | 113 | NODE_DISPLAY_NAME_MAPPINGS = { 114 | "PreviewImageForConditions|LP": "Preview Image Bridge [LP]" 115 | } 116 | 117 | -------------------------------------------------------------------------------- /web/js/showcontrol.js: -------------------------------------------------------------------------------- 1 | import { app } from "../../../scripts/app.js"; 2 | 3 | // Some fragments of this code are from https://github.com/LucianoCirino/efficiency-nodes-comfyui 4 | 5 | function inpaintCropStitchHandler(node) { 6 | if (node.comfyClass == "InpaintCrop|LP") { 7 | toggleWidget(node, findWidgetByName(node, "target_size")); 8 | toggleWidget(node, findWidgetByName(node, "aspect_ratio_limit")); 9 | toggleWidget(node, findWidgetByName(node, "force_width")); 10 | toggleWidget(node, findWidgetByName(node, "force_height")); 11 | if (findWidgetByName(node, "mode").value == "none") { 12 | toggleWidget(node, findWidgetByName(node, "target_size"), false); 13 | toggleWidget(node, findWidgetByName(node, "aspect_ratio_limit"), false); 14 | toggleWidget(node, findWidgetByName(node, "force_width"), false); 15 | toggleWidget(node, findWidgetByName(node, "force_height"), false); 16 | } 17 | else if (findWidgetByName(node, "mode").value == "input size parameters") { 18 | toggleWidget(node, findWidgetByName(node, "target_size"), false); 19 | toggleWidget(node, findWidgetByName(node, "aspect_ratio_limit"), false); 20 | toggleWidget(node, findWidgetByName(node, "force_width"), false); 21 | toggleWidget(node, findWidgetByName(node, "force_height"), false); 22 | } 23 | else if (findWidgetByName(node, "mode").value == "aspect size") { 24 | toggleWidget(node, findWidgetByName(node, "target_size"), true); 25 | toggleWidget(node, findWidgetByName(node, "aspect_ratio_limit"), true); 26 | toggleWidget(node, findWidgetByName(node, "force_width"), false); 27 | toggleWidget(node, findWidgetByName(node, "force_height"), false); 28 | } 29 | else if (findWidgetByName(node, "mode").value == "forced size") { 30 | toggleWidget(node, findWidgetByName(node, "target_size"), false); 31 | toggleWidget(node, findWidgetByName(node, "aspect_ratio_limit"), false); 32 | toggleWidget(node, findWidgetByName(node, "force_width"), true); 33 | toggleWidget(node, findWidgetByName(node, "force_height"), true); 34 | } 35 | } 36 | return; 37 | } 38 | 39 | function resizeImageAndMasksHandler(node) { 40 | if (node.comfyClass == "ResizeImageAndMasks|LP") { 41 | toggleWidget(node, findWidgetByName(node, "preresize_min_width")); 42 | toggleWidget(node, findWidgetByName(node, "preresize_min_height")); 43 | toggleWidget(node, findWidgetByName(node, "preresize_max_width")); 44 | toggleWidget(node, findWidgetByName(node, "preresize_max_height")); 45 | if (findWidgetByName(node, "preresize_mode").value == "ensure minimum resolution") { 46 | toggleWidget(node, findWidgetByName(node, "preresize_min_width"), true); 47 | toggleWidget(node, findWidgetByName(node, "preresize_min_height"), true); 48 | toggleWidget(node, findWidgetByName(node, "preresize_max_width"), false); 49 | toggleWidget(node, findWidgetByName(node, "preresize_max_height"), false); 50 | } 51 | else if (findWidgetByName(node, "preresize_mode").value == "ensure minimum and maximum resolution") { 52 | toggleWidget(node, findWidgetByName(node, "preresize_min_width"), 
53 |             toggleWidget(node, findWidgetByName(node, "preresize_min_height"), true);
54 |             toggleWidget(node, findWidgetByName(node, "preresize_max_width"), true);
55 |             toggleWidget(node, findWidgetByName(node, "preresize_max_height"), true);
56 |         }
57 |         else if (findWidgetByName(node, "preresize_mode").value == "ensure maximum resolution") {
58 |             toggleWidget(node, findWidgetByName(node, "preresize_min_width"), false);
59 |             toggleWidget(node, findWidgetByName(node, "preresize_min_height"), false);
60 |             toggleWidget(node, findWidgetByName(node, "preresize_max_width"), true);
61 |             toggleWidget(node, findWidgetByName(node, "preresize_max_height"), true);
62 |         }
63 |     }
64 |     return;
65 | }
66 | 
67 | const findWidgetByName = (node, name) => {
68 |     return node.widgets ? node.widgets.find((w) => w.name === name) : null;
69 | };
70 | 
71 | function toggleWidget(node, widget, show = false, suffix = "") {
72 |     if (!widget) return;
73 |     widget.disabled = !show;
74 |     widget.linkedWidgets?.forEach(w => toggleWidget(node, w, show, ":" + widget.name)); // recurse with the same visibility; suffix is currently unused
75 | }
76 | 
77 | app.registerExtension({
78 |     name: "levelpixel.showcontrol",
79 |     nodeCreated(node) {
80 |         if (!node.comfyClass.startsWith("Inpaint") && !node.comfyClass.startsWith("ResizeImageAndMasks")) {
81 |             return;
82 |         }
83 | 
84 |         inpaintCropStitchHandler(node);
85 |         resizeImageAndMasksHandler(node);
86 |         for (const w of node.widgets || []) {
87 |             let widgetValue = w.value;
88 |             let originalDescriptor = Object.getOwnPropertyDescriptor(w, 'value') ||
89 |                 Object.getOwnPropertyDescriptor(Object.getPrototypeOf(w), 'value');
90 |             if (!originalDescriptor) {
91 |                 originalDescriptor = Object.getOwnPropertyDescriptor(w.constructor.prototype, 'value');
92 |             }
93 | 
94 |             Object.defineProperty(w, 'value', {
95 |                 get() {
96 |                     let valueToReturn = originalDescriptor && originalDescriptor.get
97 |                         ? 
originalDescriptor.get.call(w) 98 | : widgetValue; 99 | 100 | return valueToReturn; 101 | }, 102 | set(newVal) { 103 | if (originalDescriptor && originalDescriptor.set) { 104 | originalDescriptor.set.call(w, newVal); 105 | } else { 106 | widgetValue = newVal; 107 | } 108 | 109 | inpaintCropStitchHandler(node); 110 | resizeImageAndMasksHandler(node); 111 | } 112 | }); 113 | } 114 | } 115 | }); 116 | -------------------------------------------------------------------------------- /nodes/io/folder_workers_LP.py: -------------------------------------------------------------------------------- 1 | import folder_paths 2 | import glob 3 | import os 4 | 5 | class FileCounter: 6 | @classmethod 7 | def INPUT_TYPES(cls): 8 | return { 9 | "required": { 10 | "directory_path": ("STRING", {"default": '', "multiline": False}), 11 | "patterns": ("STRING", {"default": '*.jpg|*.png|*.jpeg', "multiline": False}), 12 | "rescan_each_queue": ("BOOLEAN", {"default": True}), 13 | }, 14 | } 15 | 16 | RETURN_TYPES = ("INT","STRING") 17 | RETURN_NAMES = ("Total INT","Total STRING") 18 | FUNCTION = "file_counter" 19 | 20 | OUTPUT_NODE = True 21 | CATEGORY = "LevelPixel/IO" 22 | 23 | @classmethod 24 | def IS_CHANGED(cls, directory_path, patterns, rescan_each_queue, *v): 25 | if rescan_each_queue == True: 26 | return float("NaN") 27 | else: 28 | return False 29 | 30 | def file_counter(self, directory_path, patterns, rescan_each_queue): 31 | if not os.path.isdir(directory_path): 32 | return (0,) 33 | total_int = 0 34 | for pattern in patterns.split("|"): 35 | files = list(glob.glob(pattern, root_dir=directory_path)) 36 | total_int += len(files) 37 | total_string = str(total_int) 38 | print("total " + str(total_int)) 39 | return (total_int, total_string) 40 | 41 | class GetComfyUIFolderPath: 42 | @classmethod 43 | def INPUT_TYPES(cls): 44 | try: 45 | folder_names = list(folder_paths.folder_names_and_paths.keys()) 46 | except Exception: 47 | folder_names = [] 48 | standard_folders = ["base", "model", "output", "temp", "input"] 49 | for name in standard_folders: 50 | if name not in folder_names: 51 | folder_names.append(name) 52 | return { 53 | "required": { 54 | "folder_name": (folder_names, {"default": folder_names[0] if folder_names else "input"}), 55 | }, 56 | } 57 | 58 | RETURN_TYPES = ("STRING",) 59 | RETURN_NAMES = ("folder_path",) 60 | 61 | CATEGORY = "LevelPixel/IO" 62 | FUNCTION = "get_comfyui_folder_path" 63 | 64 | def get_comfyui_folder_path(self, folder_name): 65 | try: 66 | if folder_name == "base": 67 | return (str(folder_paths.base_path),) 68 | elif folder_name == "model": 69 | return (str(folder_paths.models_dir),) 70 | elif folder_name == "output": 71 | return (str(folder_paths.get_output_directory()),) 72 | elif folder_name == "temp": 73 | return (str(folder_paths.get_temp_directory()),) 74 | elif folder_name == "input": 75 | return (str(folder_paths.get_input_directory()),) 76 | path = folder_paths.get_folder_paths(folder_name) 77 | if isinstance(path, list): 78 | return (path[0],) 79 | return (str(path),) 80 | except Exception as e: 81 | return (f"Error: {e}",) 82 | 83 | class GetComfyUIHttpFolderPath: 84 | @classmethod 85 | def INPUT_TYPES(cls): 86 | folder_names = ["output", "temp", "input"] 87 | return { 88 | "required": { 89 | "folder_name": (folder_names, {"default": folder_names[0]}), 90 | }, 91 | } 92 | 93 | RETURN_TYPES = ("STRING",) 94 | RETURN_NAMES = ("folder_path",) 95 | 96 | CATEGORY = "LevelPixel/IO" 97 | FUNCTION = "get_comfyui_http_folder_path" 98 | 99 | def 
get_comfyui_http_folder_path(self, folder_name): 100 | try: 101 | path = folder_paths.get_directory_by_type(folder_name) 102 | if path is not None: 103 | return (str(path),) 104 | return (f"Not allowed or unknown folder: {folder_name}",) 105 | except Exception as e: 106 | return (f"Error: {e}",) 107 | 108 | 109 | class GetFilenameByIndexInFolder: 110 | @classmethod 111 | def INPUT_TYPES(cls): 112 | return { 113 | "required": { 114 | "folder_path": ("STRING", {"default": "", "multiline": False}), 115 | "file_index": ("INT", {"default": 0, "min": 0, "step": 1}), 116 | "patterns": ("STRING", {"default": "*", "multiline": False}), 117 | }, 118 | } 119 | 120 | RETURN_TYPES = ("STRING", "STRING", "STRING") 121 | RETURN_NAMES = ("full_filename", "filename", "extension") 122 | 123 | CATEGORY = "LevelPixel/IO" 124 | FUNCTION = "get_filename_by_index_in_folder" 125 | 126 | def get_filename_by_index_in_folder(self, folder_path, file_index, patterns): 127 | if not os.path.isdir(folder_path): 128 | return ("Error: not a directory", "", "") 129 | files = [] 130 | for pattern in patterns.split("|"): 131 | files.extend(glob.glob(os.path.join(folder_path, pattern))) 132 | if not files: 133 | return ("Error: no files found", "", "") 134 | files.sort(key=lambda x: os.path.getctime(x)) 135 | if file_index < 0 or file_index >= len(files): 136 | return (f"Error: index {file_index} out of range (found {len(files)} files)", "", "") 137 | full_filename = os.path.basename(files[file_index]) 138 | filename, extension = os.path.splitext(full_filename) 139 | extension = extension[1:] if extension.startswith(".") else extension 140 | return (full_filename, filename, extension) 141 | 142 | NODE_CLASS_MAPPINGS = { 143 | "FileCounter|LP": FileCounter, 144 | "GetComfyUIFolderPath|LP": GetComfyUIFolderPath, 145 | "GetComfyUIHttpFolderPath|LP": GetComfyUIHttpFolderPath, 146 | "GetFilenameByIndexInFolder|LP": GetFilenameByIndexInFolder, 147 | } 148 | 149 | NODE_DISPLAY_NAME_MAPPINGS = { 150 | "FileCounter|LP": "File Counter [LP]", 151 | "GetComfyUIFolderPath|LP": "Get ComfyUI Folder Path [LP]", 152 | "GetComfyUIHttpFolderPath|LP": "Get ComfyUI HTTP Folder Path [LP]", 153 | "GetFilenameByIndexInFolder|LP": "Get Filename By Index In Folder [LP]", 154 | } 155 | 156 | -------------------------------------------------------------------------------- /nodes/unloaders/model_unloaders_LP.py: -------------------------------------------------------------------------------- 1 | from comfy import model_management 2 | import gc 3 | import torch 4 | import requests 5 | from comfy.cli_args import args as comfy_args 6 | 7 | class AnyType(str): 8 | def __ne__(self, __value: object) -> bool: 9 | return False 10 | 11 | any = AnyType("*") 12 | 13 | class ModelUnloader: 14 | def __init__(self): 15 | pass 16 | 17 | @classmethod 18 | def INPUT_TYPES(cls): 19 | return { 20 | "required": {"source": (any, )}, 21 | "optional": {"model for unload": (any, )}, 22 | } 23 | 24 | @classmethod 25 | def VALIDATE_INPUTS(s, **kwargs): 26 | return True 27 | 28 | RETURN_TYPES = (any, ) 29 | FUNCTION = "unload_model" 30 | CATEGORY = "LevelPixel/Unloaders" 31 | OUTPUT_NODE = True 32 | 33 | def unload_model(self, **kwargs): 34 | loaded_models = model_management.loaded_models() 35 | if kwargs.get("source") in loaded_models: 36 | loaded_models.remove(kwargs.get("model for unload")) 37 | else: 38 | model = kwargs.get("model for unload") 39 | if type(model) == dict: 40 | keys = [(key, type(value).__name__) for key, value in model.items()] 41 | for key, model_type in keys: 
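                    # [Editor's note] `keys` is a snapshot of (key, type-name) pairs taken
                    # above, so the dict can safely be mutated during iteration; only the
                    # 'model' entry is deleted, dropping the reference so the
                    # free_memory() call below can reclaim its weights.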
42 | if key == 'model': 43 | print(f"Unloading model of type {model_type}") 44 | del model[key] 45 | model_management.free_memory(1e30, model_management.get_torch_device(), loaded_models) 46 | model_management.soft_empty_cache(True) 47 | try: 48 | gc.collect() 49 | torch.cuda.empty_cache() 50 | torch.cuda.ipc_collect() 51 | except: 52 | print("Unable to clear cache") 53 | return (kwargs.get("source"),) 54 | 55 | class SoftModelUnloader: 56 | def __init__(self): 57 | pass 58 | 59 | @classmethod 60 | def INPUT_TYPES(s): 61 | return { 62 | "required": { 63 | "source": (any, {}), 64 | }, 65 | } 66 | 67 | RETURN_TYPES = (any,) 68 | FUNCTION = "soft_unload_model" 69 | OUTPUT_NODE = True 70 | CATEGORY = "LevelPixel/Unloaders" 71 | 72 | def soft_unload_model(self, **kwargs): 73 | model_management.soft_empty_cache() 74 | gc.collect() 75 | torch.cuda.empty_cache() 76 | return (kwargs["source"],) 77 | 78 | class HardModelUnloader: 79 | def __init__(self): 80 | pass 81 | 82 | @classmethod 83 | def INPUT_TYPES(cls): 84 | return { 85 | "required": {"source": (any, )}, 86 | } 87 | 88 | @classmethod 89 | def VALIDATE_INPUTS(s, **kwargs): 90 | return True 91 | 92 | RETURN_TYPES = (any, ) 93 | FUNCTION = "hard_unload_model" 94 | CATEGORY = "LevelPixel/Unloaders" 95 | OUTPUT_NODE = True 96 | 97 | def hard_unload_model(self, **kwargs): 98 | print("Unload Models") 99 | loadedmodels = model_management.current_loaded_models 100 | for i in range(len(loadedmodels) -1, -1, -1): 101 | m = loadedmodels.pop(i) 102 | m.model_unload() 103 | del m 104 | 105 | try: 106 | gc.collect() 107 | torch.cuda.empty_cache() 108 | torch.cuda.ipc_collect() 109 | except: 110 | print("Unable to clear cache") 111 | 112 | model_management.unload_all_models() 113 | model_management.soft_empty_cache(True) 114 | 115 | try: 116 | gc.collect() 117 | torch.cuda.empty_cache() 118 | torch.cuda.ipc_collect() 119 | except: 120 | print("Unable to clear cache") 121 | return (kwargs.get("source"),) 122 | 123 | def _server_base_url(): 124 | tls = bool(getattr(comfy_args, "tls_keyfile", None) and getattr(comfy_args, "tls_certfile", None)) 125 | scheme = "https" if tls else "http" 126 | 127 | port = getattr(comfy_args, "port", 8188) 128 | listen = str(getattr(comfy_args, "listen", "127.0.0.1") or "127.0.0.1") 129 | 130 | hosts = [h.strip() for h in listen.split(",") if h.strip()] 131 | def is_wildcard(h): return h in ("0.0.0.0", "::") 132 | host = next((h for h in hosts if not is_wildcard(h)), None) or "127.0.0.1" 133 | 134 | if ":" in host and not host.startswith("["): 135 | host = f"[{host}]" 136 | 137 | return f"{scheme}://{host}:{port}" 138 | 139 | class SoftFullCleanRAMAndVRAM: 140 | @classmethod 141 | def INPUT_TYPES(cls): 142 | return { 143 | "required": { 144 | "source": (any, {}), 145 | "free_execution_cache": ("BOOLEAN", {"default": True}), 146 | } 147 | } 148 | 149 | RETURN_TYPES = (any,) 150 | FUNCTION = "soft_free_models" 151 | CATEGORY = "LevelPixel/Unloaders" 152 | OUTPUT_NODE = True 153 | 154 | def soft_free_models(self, free_execution_cache, **kwargs): 155 | 156 | url = f"{_server_base_url()}/free" 157 | if free_execution_cache: 158 | payload = {"unload_models": True, "free_memory": True} 159 | else: 160 | payload = {"unload_models": True} 161 | 162 | res = requests.post(url, json=payload) 163 | if res.status_code == 200: 164 | print("Models unloaded (and execution cache cleared)" if free_execution_cache else " Models unloaded") 165 | else: 166 | print("Failed to unload models. 
Maybe outdated ComfyUI version.") 167 | return (kwargs["source"],) 168 | 169 | NODE_CLASS_MAPPINGS = { 170 | "ModelUnloader|LP": ModelUnloader, 171 | "SoftModelUnloader|LP": SoftModelUnloader, 172 | "HardModelUnloader|LP": HardModelUnloader, 173 | "SoftFullCleanRAMAndVRAM|LP": SoftFullCleanRAMAndVRAM, 174 | } 175 | 176 | NODE_DISPLAY_NAME_MAPPINGS = { 177 | "ModelUnloader|LP": "Unload Model [LP]", 178 | "SoftModelUnloader|LP": "Soft Unload Models Data [LP]", 179 | "HardModelUnloader|LP": "Hard Unload All Models [LP]", 180 | "SoftFullCleanRAMAndVRAM|LP": "Soft Full Clean RAM and VRAM [LP]", 181 | } 182 | 183 | -------------------------------------------------------------------------------- /nodes/io/image_loaders_LP.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | from PIL import Image, ImageOps, ImageSequence 4 | import folder_paths 5 | import re 6 | import torch 7 | import node_helpers 8 | import hashlib 9 | 10 | class ImageLoaderFromPath: 11 | 12 | @classmethod 13 | def INPUT_TYPES(s): 14 | 15 | input_dir = folder_paths.input_directory 16 | image_folder = [name for name in os.listdir(input_dir) if os.path.isdir(os.path.join(input_dir,name))] 17 | image_folder.append("") 18 | 19 | return {"required": {"input_folder": (sorted(image_folder), ), 20 | "start_index": ("INT", {"default": 0, "min": 0, "max": 9999}), 21 | "max_images": ("INT", {"default": 1, "min": 1, "max": 9999}), 22 | "white_bg": (["disable","enable"],), 23 | "patterns": ("STRING", {"default": '*.jpg|*.png|*.jpeg', "multiline": False}), 24 | }, 25 | "optional": {"input_path": ("STRING", {"default": '', "multiline": False}), 26 | } 27 | } 28 | 29 | RETURN_TYPES = ("IMAGE", "MASK", "STRING" ) 30 | RETURN_NAMES = ("IMAGE", "MASK", "Filename STRING" ) 31 | OUTPUT_IS_LIST = (True, True, True) 32 | FUNCTION = "loader_images" 33 | CATEGORY = "LevelPixel/IO" 34 | 35 | def loader_images(self, start_index, max_images, white_bg, patterns, input_folder="", input_path=None): 36 | 37 | if input_path != '' and input_path is not None: 38 | if not os.path.exists(input_path): 39 | print(f"[Warning] Image Loader From Path: The input_path `{input_path}` does not exist") 40 | return ("",) 41 | in_path = input_path 42 | else: 43 | input_dir = folder_paths.input_directory 44 | in_path = os.path.join(input_dir, input_folder) 45 | 46 | if not os.listdir(in_path): 47 | print(f"[Warning] Image Loader From Path: The folder `{in_path}` is empty") 48 | return None 49 | 50 | file_list = sorted(os.listdir(in_path), key=lambda s: sum(((s, int(n)) for s, n in re.findall(r'(\D+)(\d+)', 'a%s0' % s)), ())) 51 | extensions = tuple(patterns.replace('*', '').split('|')) 52 | file_list = [f for f in file_list if f.lower().endswith(extensions)] 53 | 54 | image_list = [] 55 | mask_list = [] 56 | filename_list = [] 57 | 58 | start_index = max(0, start_index) 59 | end_index = min(start_index + max_images, len(file_list)) 60 | 61 | for num in range(start_index, end_index): 62 | img = Image.open(os.path.join(in_path, file_list[num])) 63 | 64 | image = img.convert("RGB") 65 | image = np.array(image).astype(np.float32) / 255.0 66 | image = torch.from_numpy(image)[None,] 67 | if 'A' in img.getbands(): 68 | mask = np.array(img.getchannel('A')).astype(np.float32) / 255.0 69 | mask = 1. 
- torch.from_numpy(mask) 70 | if white_bg=="enable": 71 | nw = mask.unsqueeze(0).unsqueeze(-1).repeat(1, 1, 1, 3) 72 | image[nw == 1] = 1.0 73 | else: 74 | mask = torch.zeros((64,64), dtype=torch.float32, device="cpu") 75 | 76 | image_list.append(image) 77 | mask_list.append(mask) 78 | filename_list.append(file_list[num]) 79 | 80 | if not image_list: 81 | print("Image Loader From Path: No images found.") 82 | return None 83 | 84 | return (image_list, mask_list, filename_list) 85 | 86 | class LoadImage: 87 | @classmethod 88 | def INPUT_TYPES(s): 89 | input_dir = folder_paths.get_input_directory() 90 | files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))] 91 | return {"required": 92 | {"image": (sorted(files), {"image_upload": True})}, 93 | } 94 | 95 | RETURN_TYPES = ("IMAGE", "MASK", "STRING" ) 96 | RETURN_NAMES = ("IMAGE", "MASK", "Filename STRING") 97 | 98 | FUNCTION = "load_image" 99 | 100 | CATEGORY = "LevelPixel/IO" 101 | 102 | def load_image(self, image): 103 | image_path = folder_paths.get_annotated_filepath(image) 104 | file_name = os.path.basename(image_path) 105 | 106 | img = node_helpers.pillow(Image.open, image_path) 107 | 108 | output_images = [] 109 | output_masks = [] 110 | output_filenames = [] 111 | w, h = None, None 112 | 113 | excluded_formats = ['MPO'] 114 | 115 | for i in ImageSequence.Iterator(img): 116 | i = node_helpers.pillow(ImageOps.exif_transpose, i) 117 | 118 | if i.mode == 'I': 119 | i = i.point(lambda i: i * (1 / 255)) 120 | image = i.convert("RGB") 121 | 122 | if len(output_images) == 0: 123 | w = image.size[0] 124 | h = image.size[1] 125 | 126 | if image.size[0] != w or image.size[1] != h: 127 | continue 128 | 129 | image = np.array(image).astype(np.float32) / 255.0 130 | image = torch.from_numpy(image)[None,] 131 | if 'A' in i.getbands(): 132 | mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0 133 | mask = 1. 
- torch.from_numpy(mask) 134 | else: 135 | mask = torch.zeros((64,64), dtype=torch.float32, device="cpu") 136 | output_images.append(image) 137 | output_masks.append(mask.unsqueeze(0)) 138 | output_filenames.append(file_name) 139 | 140 | if len(output_images) > 1 and img.format not in excluded_formats: 141 | output_image = torch.cat(output_images, dim=0) 142 | output_mask = torch.cat(output_masks, dim=0) 143 | else: 144 | output_image = output_images[0] 145 | output_mask = output_masks[0] 146 | output_filename = output_filenames[0] 147 | 148 | return (output_image, output_mask, output_filename) 149 | 150 | @classmethod 151 | def IS_CHANGED(s, image): 152 | image_path = folder_paths.get_annotated_filepath(image) 153 | m = hashlib.sha256() 154 | with open(image_path, 'rb') as f: 155 | m.update(f.read()) 156 | return m.digest().hex() 157 | 158 | @classmethod 159 | def VALIDATE_INPUTS(s, image): 160 | if not folder_paths.exists_annotated_filepath(image): 161 | return "Invalid image file: {}".format(image) 162 | 163 | return True 164 | 165 | NODE_CLASS_MAPPINGS = { 166 | "ImageLoaderFromPath|LP": ImageLoaderFromPath, 167 | "LoadImage|LP": LoadImage, 168 | } 169 | 170 | NODE_DISPLAY_NAME_MAPPINGS = { 171 | "ImageLoaderFromPath|LP": "Image Loader From Path [LP]", 172 | "LoadImage|LP": "Load Image [LP]", 173 | } 174 | 175 | -------------------------------------------------------------------------------- /nodes/io/iterators_LP.py: -------------------------------------------------------------------------------- 1 | import os 2 | import glob 3 | import cv2 4 | 5 | class GetIteratorDataVideos: 6 | @classmethod 7 | def INPUT_TYPES(cls): 8 | return { 9 | "required": { 10 | "directory_path": ("STRING", {"default": '', "multiline": False}), 11 | "patterns": ("STRING", {"default": '*.mp4|*.avi|*.mov|*.mkv', "multiline": False}), 12 | "rescan_each_queue": ("BOOLEAN", {"default": True}), 13 | }, 14 | } 15 | 16 | RETURN_TYPES = ("ITERATOR_DATA", "INT", "INT") 17 | RETURN_NAMES = ("iterator_data", "video_count", "frame_count") 18 | FUNCTION = "prepare_iterator_data_from_videos" 19 | CATEGORY = "LevelPixel/Iterators" 20 | 21 | @classmethod 22 | def IS_CHANGED(cls, directory_path, patterns, rescan_each_queue, *v): 23 | if rescan_each_queue == True: 24 | return float("NaN") 25 | else: 26 | return False 27 | 28 | def prepare_iterator_data_from_videos(self, directory_path, patterns, rescan_each_queue): 29 | if not os.path.isdir(directory_path): 30 | return ([], 0, 0) 31 | iterator_data = [] 32 | video_files = [] 33 | for pattern in patterns.split("|"): 34 | video_files.extend(glob.glob(os.path.join(directory_path, pattern))) 35 | video_files = sorted(video_files) 36 | total_frames = 0 37 | for idx, video_path in enumerate(video_files): 38 | name = os.path.basename(video_path) 39 | try: 40 | cap = cv2.VideoCapture(video_path) 41 | count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) 42 | cap.release() 43 | except Exception: 44 | count = 0 45 | iterator_data.append({"index": idx, "name": name, "count": count}) 46 | total_frames += count 47 | return (iterator_data, len(video_files), total_frames) 48 | 49 | 50 | class GetIteratorDataImageFolders: 51 | @classmethod 52 | def INPUT_TYPES(cls): 53 | return { 54 | "required": { 55 | "directory_path": ("STRING", {"default": '', "multiline": False}), 56 | "patterns": ("STRING", {"default": '*.jpg|*.png|*.jpeg', "multiline": False}), 57 | "subfolder": ("STRING", {"default": '', "multiline": False}), 58 | "rescan_each_queue": ("BOOLEAN", {"default": True}), 59 | }, 60 | } 61 | 62 | 
RETURN_TYPES = ("ITERATOR_DATA", "INT", "INT") 63 | RETURN_NAMES = ("iterator_data", "folder_count", "image_count") 64 | FUNCTION = "prepare_iterator_data_from_image_folders" 65 | CATEGORY = "LevelPixel/Iterators" 66 | 67 | @classmethod 68 | def IS_CHANGED(cls, directory_path, patterns, subfolder, rescan_each_queue, *v): 69 | if rescan_each_queue == True: 70 | return float("NaN") 71 | else: 72 | return False 73 | 74 | def prepare_iterator_data_from_image_folders(self, directory_path, patterns, subfolder, rescan_each_queue): 75 | if not os.path.isdir(directory_path): 76 | return ([], 0, 0) 77 | iterator_data = [] 78 | folders = [f for f in os.listdir(directory_path) if os.path.isdir(os.path.join(directory_path, f))] 79 | total_images = 0 80 | for idx, folder in enumerate(sorted(folders)): 81 | if subfolder: 82 | search_path = os.path.join(directory_path, folder, subfolder) 83 | else: 84 | search_path = os.path.join(directory_path, folder) 85 | count = 0 86 | if os.path.isdir(search_path): 87 | for pattern in patterns.split("|"): 88 | count += len(glob.glob(os.path.join(search_path, pattern))) 89 | iterator_data.append({"index": idx, "name": folder, "count": count}) 90 | total_images += count 91 | return (iterator_data, len(folders), total_images) 92 | 93 | class ImageDataIterator: 94 | @classmethod 95 | def INPUT_TYPES(cls): 96 | return { 97 | "required": { 98 | "iterator_data": ("ITERATOR_DATA",), 99 | "global_index": ("INT", {"default": 0, "min": 0, "control_after_generate": True}), 100 | } 101 | } 102 | 103 | RETURN_TYPES = ("INT", "INT", "STRING", "INT") 104 | RETURN_NAMES = ("item_index", "set_index", "set_name", "global_index") 105 | FUNCTION = "iterate" 106 | CATEGORY = "LevelPixel/Iterators" 107 | 108 | def iterate(self, iterator_data, global_index): 109 | total = 0 110 | total_frames = sum(item["count"] for item in iterator_data) 111 | if global_index >= total_frames: 112 | raise RuntimeError("Image iteration finished") 113 | for i, item in enumerate(iterator_data): 114 | count = item["count"] 115 | if global_index < total + count: 116 | item_index = global_index - total 117 | set_index = item["index"] 118 | set_name = item["name"] 119 | return (item_index, set_index, set_name, global_index) 120 | total += count 121 | return (0, 0, '', global_index) 122 | 123 | class Iterator: 124 | @classmethod 125 | def INPUT_TYPES(cls): 126 | return { 127 | "required": { 128 | "index": ("INT", {"default": 0, "min": 0, "max": 999999, "control_after_generate": True}), 129 | "limit": ("INT", {"default": 0, "min": 0, "max": 999999}), 130 | "mode": ([ 131 | "Greater (index > limit)", 132 | "Less (index < limit)", 133 | "Equal (index == limit)", 134 | "Greater or Equal (index >= limit)", 135 | "Less or Equal (index <= limit)" 136 | ], {"default": "Greater (index > limit)"}), 137 | }, 138 | } 139 | 140 | RETURN_TYPES = ("INT",) 141 | RETURN_NAMES = ("index",) 142 | FUNCTION = "indexate" 143 | CATEGORY = "LevelPixel/Iterators" 144 | 145 | def indexate(self, index, limit, mode): 146 | if mode == "Greater (index > limit)": 147 | if index > limit: 148 | raise RuntimeError("Iteration finished (index > limit)") 149 | elif mode == "Less (index < limit)": 150 | if index < limit: 151 | raise RuntimeError("Iteration finished (index < limit)") 152 | elif mode == "Equal (index == limit)": 153 | if index == limit: 154 | raise RuntimeError("Iteration finished (index == limit)") 155 | elif mode == "Greater or Equal (index >= limit)": 156 | if index >= limit: 157 | raise RuntimeError("Iteration finished (index >= 
limit)") 158 | elif mode == "Less or Equal (index <= limit)": 159 | if index <= limit: 160 | raise RuntimeError("Iteration finished (index <= limit)") 161 | return (index,) 162 | 163 | NODE_CLASS_MAPPINGS = { 164 | "GetIteratorDataVideos|LP": GetIteratorDataVideos, 165 | "GetIteratorDataImageFolders|LP": GetIteratorDataImageFolders, 166 | "ImageDataIterator|LP": ImageDataIterator, 167 | "Iterator|LP": Iterator, 168 | } 169 | 170 | NODE_DISPLAY_NAME_MAPPINGS = { 171 | "GetIteratorDataVideos|LP": "Get Iterator Data From Videos [LP]", 172 | "GetIteratorDataImageFolders|LP": "Get Iterator Data From Image Folders [LP]", 173 | "ImageDataIterator|LP": "Image Data Iterator [LP]", 174 | "Iterator|LP": "Iterator [LP]", 175 | } 176 | -------------------------------------------------------------------------------- /web/js/widgets.js: -------------------------------------------------------------------------------- 1 | import { app } from "../../../scripts/app.js"; 2 | import { ComfyWidgets } from "../../../scripts/widgets.js"; 3 | import { api } from "../../../scripts/api.js"; 4 | 5 | class LevelPixel { 6 | constructor() { 7 | if (!window.__levelpixel__) { 8 | window.__levelpixel__ = Symbol("__levelpixel__"); 9 | } 10 | this.symbol = window.__levelpixel__; 11 | } 12 | 13 | getState(node) { 14 | return node[this.symbol] || {}; 15 | } 16 | 17 | setState(node, state) { 18 | node[this.symbol] = state; 19 | app.canvas.setDirty(true); 20 | } 21 | 22 | addStatusTagHandler(nodeType) { 23 | if (nodeType[this.symbol]?.statusTagHandler) { 24 | return; 25 | } 26 | if (!nodeType[this.symbol]) { 27 | nodeType[this.symbol] = {}; 28 | } 29 | nodeType[this.symbol] = { 30 | statusTagHandler: true, 31 | }; 32 | 33 | api.addEventListener("levelpixel/update_status", ({ detail }) => { 34 | let { node, progress, text } = detail; 35 | const n = app.graph.getNodeById(+(node || app.runningNodeId)); 36 | if (!n) return; 37 | const state = this.getState(n); 38 | state.status = Object.assign(state.status || {}, { progress: text ? 
progress : null, text: text || null }); 39 | this.setState(n, state); 40 | }); 41 | 42 | const self = this; 43 | const onDrawForeground = nodeType.prototype.onDrawForeground; 44 | nodeType.prototype.onDrawForeground = function (ctx) { 45 | const r = onDrawForeground?.apply?.(this, arguments); 46 | const state = self.getState(this); 47 | if (!state?.status?.text) { 48 | return r; 49 | } 50 | 51 | const { fgColor, bgColor, text, progress, progressColor } = { ...state.status }; 52 | 53 | ctx.save(); 54 | ctx.font = "12px sans-serif"; 55 | const sz = ctx.measureText(text); 56 | ctx.fillStyle = bgColor || "dodgerblue"; 57 | ctx.beginPath(); 58 | ctx.roundRect(0, -LiteGraph.NODE_TITLE_HEIGHT - 20, sz.width + 12, 20, 5); 59 | ctx.fill(); 60 | 61 | if (progress) { 62 | ctx.fillStyle = progressColor || "green"; 63 | ctx.beginPath(); 64 | ctx.roundRect(0, -LiteGraph.NODE_TITLE_HEIGHT - 20, (sz.width + 12) * progress, 20, 5); 65 | ctx.fill(); 66 | } 67 | 68 | ctx.fillStyle = fgColor || "#fff"; 69 | ctx.fillText(text, 6, -LiteGraph.NODE_TITLE_HEIGHT - 6); 70 | ctx.restore(); 71 | return r; 72 | }; 73 | } 74 | } 75 | 76 | const levelpixel = new LevelPixel(); 77 | 78 | app.registerExtension({ 79 | name: "levelpixel.Autotagger", 80 | async beforeRegisterNodeDef(nodeType, nodeData, app) { 81 | levelpixel.addStatusTagHandler(nodeType); 82 | 83 | if (nodeData.name === "Autotagger|LP") { 84 | const onExecuted = nodeType.prototype.onExecuted; 85 | nodeType.prototype.onExecuted = function (message) { 86 | const r = onExecuted?.apply?.(this, arguments); 87 | 88 | const pos = this.widgets.findIndex((w) => w.name === "tags"); 89 | if (pos !== -1) { 90 | for (let i = pos; i < this.widgets.length; i++) { 91 | this.widgets[i].onRemove?.(); 92 | } 93 | this.widgets.length = pos; 94 | } 95 | 96 | for (const list of message.tags) { 97 | const w = ComfyWidgets["STRING"](this, "tags", ["STRING", { multiline: true }], app).widget; 98 | w.inputEl.readOnly = true; 99 | w.inputEl.style.opacity = 0.6; 100 | w.value = list; 101 | } 102 | 103 | this.onResize?.(this.size); 104 | 105 | return r; 106 | }; 107 | } else { 108 | const getExtraMenuOptions = nodeType.prototype.getExtraMenuOptions; 109 | nodeType.prototype.getExtraMenuOptions = function (_, options) { 110 | const r = getExtraMenuOptions?.apply?.(this, arguments); 111 | let img; 112 | if (this.imageIndex != null) { 113 | // An image is selected so select that 114 | img = this.imgs[this.imageIndex]; 115 | } else if (this.overIndex != null) { 116 | // No image is selected but one is hovered 117 | img = this.imgs[this.overIndex]; 118 | } 119 | if (img) { 120 | let pos = options.findIndex((o) => o.content === "Save Image"); 121 | if (pos === -1) { 122 | pos = 0; 123 | } else { 124 | pos++; 125 | } 126 | options.splice(pos, 0, { 127 | content: "Autotagger", 128 | callback: async () => { 129 | let src = img.src; 130 | src = src.replace("/view?", `/levelpixel/autotagger/tag?node=${this.id}&clientId=${api.clientId}&`); 131 | const res = await (await fetch(src)).json(); 132 | alert(res); 133 | }, 134 | }); 135 | } 136 | 137 | return r; 138 | }; 139 | } 140 | }, 141 | }); 142 | 143 | 144 | app.registerExtension({ 145 | name: "levelpixel.LoraTagLoader", 146 | async beforeRegisterNodeDef(nodeType, nodeData, app) { 147 | levelpixel.addStatusTagHandler(nodeType); 148 | 149 | if (nodeData.name === "LoraTagLoader|LP") { 150 | const onExecuted = nodeType.prototype.onExecuted; 151 | nodeType.prototype.onExecuted = function (message) { 152 | const r = onExecuted?.apply?.(this, arguments); 
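// The block below repeats the widget-refresh pattern used by the Autotagger handler
// above: find the first widget named "log", dispose of it and every widget after it,
// then append one fresh read-only multiline STRING widget per entry in message.log.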
153 | 154 | const pos = this.widgets.findIndex((w) => w.name === "log"); 155 | if (pos !== -1) { 156 | for (let i = pos; i < this.widgets.length; i++) { 157 | this.widgets[i].onRemove?.(); 158 | } 159 | this.widgets.length = pos; 160 | } 161 | 162 | for (const list of message.log) { 163 | const w = ComfyWidgets["STRING"](this, "log", ["STRING", { multiline: true }], app).widget; 164 | w.inputEl.readOnly = true; 165 | w.inputEl.style.opacity = 0.6; 166 | w.value = list; 167 | } 168 | 169 | this.onResize?.(this.size); 170 | 171 | return r; 172 | }; 173 | } else { 174 | const getExtraMenuOptions = nodeType.prototype.getExtraMenuOptions; 175 | nodeType.prototype.getExtraMenuOptions = function (_, options) { 176 | const r = getExtraMenuOptions?.apply?.(this, arguments); 177 | let img; 178 | if (this.imageIndex != null) { 179 | // An image is selected so select that 180 | img = this.imgs[this.imageIndex]; 181 | } else if (this.overIndex != null) { 182 | // No image is selected but one is hovered 183 | img = this.imgs[this.overIndex]; 184 | } 185 | if (img) { 186 | let pos = options.findIndex((o) => o.content === "Save Image"); 187 | if (pos === -1) { 188 | pos = 0; 189 | } else { 190 | pos++; 191 | } 192 | options.splice(pos, 0, { 193 | content: "LoraTagLoader", 194 | callback: async () => { 195 | let src = img.src; 196 | src = src.replace("/view?", `/levelpixel/loratagloader/tag?node=${this.id}&clientId=${api.clientId}&`); 197 | const res = await (await fetch(src)).json(); 198 | alert(res); 199 | }, 200 | }); 201 | } 202 | 203 | return r; 204 | }; 205 | } 206 | }, 207 | }); 208 | 209 | 210 | app.registerExtension({ 211 | name: "levelpixel.FindValueFromFile", 212 | async beforeRegisterNodeDef(nodeType, nodeData, app) { 213 | levelpixel.addStatusTagHandler(nodeType); 214 | 215 | if (nodeData.name === "FindValueFromFile|LP") { 216 | const onExecuted = nodeType.prototype.onExecuted; 217 | nodeType.prototype.onExecuted = function (message) { 218 | const r = onExecuted?.apply?.(this, arguments); 219 | 220 | const pos = this.widgets.findIndex((w) => w.name === "log"); 221 | if (pos !== -1) { 222 | for (let i = pos; i < this.widgets.length; i++) { 223 | this.widgets[i].onRemove?.(); 224 | } 225 | this.widgets.length = pos; 226 | } 227 | 228 | for (const list of message.log) { 229 | const w = ComfyWidgets["STRING"](this, "log", ["STRING", { multiline: true }], app).widget; 230 | w.inputEl.readOnly = true; 231 | w.inputEl.style.opacity = 0.6; 232 | w.value = list; 233 | } 234 | 235 | this.onResize?.(this.size); 236 | 237 | return r; 238 | }; 239 | } 240 | }, 241 | }); -------------------------------------------------------------------------------- /node_list.json: -------------------------------------------------------------------------------- 1 | { 2 | "Convert String To Int [LP]":"Convert String To Int", 3 | "Convert String To Float [LP]":"Convert String To Float", 4 | "Convert String To Bool [LP]":"Convert String To Bool", 5 | "Convert String To Number [LP]":"Convert String To Number", 6 | "Convert String To Combo [LP]":"Convert String To Combo", 7 | "Convert Int To String [LP]":"Convert Int To String", 8 | "Convert Float To String [LP]":"Convert Float To String", 9 | "Convert Bool To String [LP]":"Convert Bool To String", 10 | "Convert Float To Int [LP]":"Convert Float To Int", 11 | "Convert Int To Float [LP]":"Convert Int To Float", 12 | "Convert Int To Bool [LP]":"Convert Int To Bool", 13 | "Convert Bool To Int [LP]":"Convert Bool To Int", 14 | "Convert Combo To Text [LP]":"Convert Combo To Text", 
15 | "Convert Any To Text [LP]":"Convert Any To Text", 16 | "Image Overlay [LP]":"Image Overlay", 17 | "Fast Checker Pattern [LP]":"Fast Checker Pattern", 18 | "File Counter [LP]":"Counts the number of files in the specified folder", 19 | "Image Loader From Path [LP]":"Loading multiple images from specific folder", 20 | "Load Image [LP]":"Load Image by filepath", 21 | "Preview Image Bridge [LP]":"Preview Image Bridge is needed for conditions, cycles and other cases when it is necessary to output an image with an output connection. It has an output pin, unlike the usual Preview Image.", 22 | "Load LoRA Tag [LP]":"Loads lores by text tags from a text field. An implementation that exactly copies the behavior of Automatic1111 or ForgeWebUI.", 23 | "Simple Float Slider [LP]":"A simple slider that has no precision or upper/lower values settings. From 0.00000 to 1.00000", 24 | "Simple Float Slider - Tenths Step [LP]":"A simple slider that has no precision or upper/lower values settings. From 0.0 to 1.0", 25 | "Simple Float Slider - Hundredths Step [LP]":"A simple slider that has no precision or upper/lower values settings. From 0.00 to 1.00", 26 | "Text [LP]":"Input fields for multiline text", 27 | "String [LP]":"Input fields for single-line text (string)", 28 | "Find Value From File [LP]":"Finding a value by key from a text file", 29 | "Show Text [LP]":"Show Text node", 30 | "Show Text Bridge [LP]":"Show Text node for conditions and cycles", 31 | "Tag Category [LP]":"Defines categories for tags that are passed as input. By default, a list with categories placed on custom_nodes/ComfyUI-LevelPixel/nodes/tags/tag_category.json", 32 | "Tag Category Filter [LP]":"Filters tags to exclude or keep only those that match the specified categories.", 33 | "Tag Category Keeper [LP]":"Filters tags to keep only those that match the specified categories.", 34 | "Tag Category Remover [LP]":"Filters tags to exclude only those that match the specified categories.", 35 | "Tag Switcher [LP]":"Tag Switcher", 36 | "Tag Merger [LP]":"Tag Merger", 37 | "Tag Replace [LP]":"Tag Replace", 38 | "Tag Remover [LP]":"Removes the specified tags from the text", 39 | "Resorting Tags [LP]":"Merge and re-sort input arrays of tags using a rating system, producing sorted and combined text (tags). The node is needed to create the most relevant tags by combining several tag variants from different neural models (from LLM, VLM, and others) and selecting the best and most frequently occurring ones. Use with the Text To List node. Input tags from priority_texts have a higher rating and are all used in the output set. Input tags from inclusive_texts have a medium rating and are all used in the output set. Input tags from auxiliary_texts have a lower rating, and tags that are not in priority_texts and inclusive_texts will not be added to the output tag set.", 40 | "Remove Duplicate Tags [LP]":"Removes duplicate tags, leaving only one tag in the text.", 41 | "Keep Only English Tags [LP]":"Removes all tags with symbols that do not correspond to English characters. The entire tag is removed (from comma to comma), regardless of whether it contains English characters along with other characters. The node is needed to exclude any words and characters from tags except English.", 42 | "Remove Banned Tags From Tags [LP]":"Removes from the text those tags that are prohibited for use by the user. 
By default, a list with names of people and brands protected by copyright is used, located at custom_nodes/ComfyUI-LevelPixel/nodes/tags/example_banned_tags.txt.", 43 | "Remove Banned Tags From Text [LP]":"Removes from the tag list those tags that are prohibited for use by the user. By default, a list with names of people and brands protected by copyright is used, located at custom_nodes/ComfyUI-LevelPixel/nodes/tags/example_banned_tags.txt.", 44 | "Text Choice Parser [LP]":"Randomly (or by the specified number) selects the text that is specified in curly brackets with the separator |. You can also set variables in the text using square brackets and replace them in the second field using the syntax variable=value. Convenient for mass generation and controlled substitution of text in the general prompt.", 45 | "CLIP Text Encode Translate [LP]":"Translates text from any language to English", 46 | "Text Translate [LP]":"Translates text from any language to English", 47 | "Text To List [LP]":"Converts multiple texts to a list of texts", 48 | "Split Compound Text [LP]":"Split Compound Text", 49 | "Keep Only English Words [LP]":"Removes all words with symbols that do not correspond to English symbols. The entire word is removed (from space to space), regardless of whether it contains English symbols along with other symbols. The node is necessary for excluding from the text any words and symbols except English ones.", 50 | "Unload Model [LP]":"Unloads a specific model from VRAM. The node can be used anywhere in the workflow where you need to unload a specific model.", 51 | "Soft Unload Models Data [LP]":"Unloads from VRAM those files and caches that are not currently in use. Does not unload models.", 52 | "Hard Unload All Models [LP]":"Force-unloads from VRAM all neural models that are in the model manager. Does not unload models that are loaded into VRAM bypassing the ComfyUI model manager.", 53 | "Soft Full Clean RAM and VRAM [LP]":"Completely clears all models and cached data from RAM and VRAM upon completion of the current workflow in the assigned queue task. Can be placed anywhere in the workflow, but will only work after the current workflow in the current queue task has completed. This node is useful when workflows and their nodes leave a lot of garbage in RAM and VRAM during operation, disrupting the operation of subsequent workflows in the queued tasks.", 54 | "Override CLIP Device [LP]":"Changes the main computing processor and the RAM used for CLIP. You can specify any video card or processor that is available in the system. ATTENTION! When using this node, manual cleaning of models from memory using nodes that clean models from VRAM/RAM memory stops working!", 55 | "Override VAE Device [LP]":"Changes the main computing processor and the RAM used for VAE. You can specify any video card or processor that is available in the system. ATTENTION! When using this node, manual cleaning of models from memory using nodes that clean models from VRAM/RAM memory stops working!", 56 | "Override CLIP Vision Device [LP]":"Changes the main computing processor and the RAM used for CLIP Vision. You can specify any video card or processor that is available in the system. ATTENTION!
When using this node, manual cleaning of models from memory using nodes that clean models from VRAM/RAM memory stops working!", 57 | "Delay [LP]":"Delay", 58 | "Seed [LP]":"Seed", 59 | "String Cycler [LP]":"String Cycler", 60 | "Text Replace [LP]":"Text Replace", 61 | "Pipe [LP]":"Pipe", 62 | "Pipe In [LP]":"Pipe In", 63 | "Pipe Out [LP]":"Pipe Out", 64 | "Count Objects [LP]":"Count Objects - counts the number of objects that were fed to the input (this can be a list or one single object). Accepts any type of input.", 65 | "Resize Image To Target Size [LP]":"Resize Image To Target Size", 66 | "Calculate Target Size By Mask [LP]":"Calculate Target Size By Mask", 67 | "Resize Image and Masks [LP]":"Resize Image and Masks", 68 | "Inpaint Crop [LP]":"Inpaint Crop by masks", 69 | "Inpaint Stitch [LP]":"Inpaint Stitch", 70 | "Extend Factor Parameters [LP]":"Extend Factor Parameters for Inpaint Crop", 71 | "Cropped Aspect Size Parameters [LP]":"Cropped Aspect Size Parameters for Inpaint Crop", 72 | "Cropped Forsed Size Parameters [LP]":"Cropped Forsed Size Parameters for Inpaint Crop", 73 | "Cropped Ranged Size Parameters [LP]":"Cropped Ranged Size Parameters for Inpaint Crop", 74 | "Cropped Free Size Parameters [LP]":"Cropped Free Size Parameter for Inpaint Crops", 75 | "Get Iterator Data From Videos [LP]": "Get Iterator Data From Videos", 76 | "Get Iterator Data From Image Folders [LP]": "Get Iterator Data From Image Folders", 77 | "Image Data Iterator [LP]": "Image Data Iterator", 78 | "Iterator [LP]": "Iterator", 79 | "Get ComfyUI Folder Path [LP]": "Get ComfyUI Folder Path", 80 | "Get ComfyUI HTTP Folder Path [LP]": "Get ComfyUI HTTP Folder Path", 81 | "Get Filename By Index In Folder [LP]": "Get Filename By Index In Folder" 82 | } -------------------------------------------------------------------------------- /nodes/utils/utils_LP.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | class AnyType(str): 4 | def __ne__(self, __value: object) -> bool: 5 | return False 6 | 7 | any = AnyType("*") 8 | 9 | class Delay: 10 | @classmethod 11 | def INPUT_TYPES(cls): 12 | return { 13 | "required": { 14 | "input": (any, {"defaultInput": True}), 15 | "delay_seconds": ("FLOAT", { 16 | "default": 1.0, 17 | "min": 0.0, 18 | "step": 0.1 19 | }), 20 | }, 21 | } 22 | 23 | RETURN_TYPES = (any,) 24 | RETURN_NAMES = ("output",) 25 | FUNCTION = "add_delay" 26 | CATEGORY = "LevelPixel/Utils" 27 | 28 | def add_delay(self, input, delay_seconds): 29 | delay_text = f"{delay_seconds:.1f} second{'s' if delay_seconds != 1 else ''}" 30 | print(f"[Delay Node] Starting delay of {delay_text}") 31 | time.sleep(delay_seconds) 32 | print(f"[Delay Node] Delay of {delay_text} completed") 33 | return (input,) 34 | 35 | class PipeOut: 36 | @classmethod 37 | def INPUT_TYPES(s): 38 | return { 39 | "required": {"pipe": ("PIPE_LINE",)}, 40 | } 41 | 42 | RETURN_TYPES = ("PIPE_LINE", "MODEL", "CONDITIONING", "CONDITIONING", "LATENT", "VAE", "CLIP", "CONTROL_NET", "IMAGE", "INT", any, any, any, any, any,) 43 | RETURN_NAMES = ("pipe", "model", "pos", "neg", "latent", "vae", "clip", "controlnet", "image", "seed", "any1", "any2", "any3", "any4", "any5",) 44 | FUNCTION = "pipe_out" 45 | CATEGORY = "LevelPixel/Utils" 46 | 47 | def pipe_out(self, pipe): 48 | model, pos, neg, latent, vae, clip, controlnet, image, seed, any1, any2, any3, any4, any5 = pipe 49 | return (pipe, model, pos, neg, latent, vae, clip, controlnet, image, seed, any1, any2, any3, any4, any5, ) 50 | 51 | class PipeIn: 
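# PIPE_LINE is a plain 14-tuple with the fixed slot order
# (model, pos, neg, latent, vae, clip, controlnet, image, seed, any1..any5).
# PipeOut above unpacks it, PipeIn below packs/updates it from individual
# optional inputs, and Pipe further down does both in a single node.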
52 | @classmethod 53 | def INPUT_TYPES(s): 54 | return { 55 | "optional": { 56 | "pipe": ("PIPE_LINE",), 57 | "model": ("MODEL",), 58 | "pos": ("CONDITIONING",), 59 | "neg": ("CONDITIONING",), 60 | "latent": ("LATENT",), 61 | "vae": ("VAE",), 62 | "clip": ("CLIP",), 63 | "controlnet": ("CONTROL_NET",), 64 | "image": ("IMAGE",), 65 | "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), 66 | "any1": (any, {"defaultInput": True}), 67 | "any2": (any, {"defaultInput": True}), 68 | "any3": (any, {"defaultInput": True}), 69 | "any4": (any, {"defaultInput": True}), 70 | "any5": (any, {"defaultInput": True}), 71 | }, 72 | } 73 | 74 | RETURN_TYPES = ("PIPE_LINE",) 75 | RETURN_NAMES = ("pipe",) 76 | FUNCTION = "pipe_in" 77 | CATEGORY = "LevelPixel/Utils" 78 | 79 | def pipe_in(self, pipe=None, model=None, pos=None, neg=None, latent=None, vae=None, clip=None, controlnet=None, image=None, seed=None, any1=None, any2=None, any3=None, any4=None, any5=None,): 80 | 81 | new_model = None 82 | new_pos = None 83 | new_neg = None 84 | new_latent = None 85 | new_vae = None 86 | new_clip = None 87 | new_controlnet = None 88 | new_image = None 89 | new_seed = None 90 | new_any1 = None 91 | new_any2 = None 92 | new_any3 = None 93 | new_any4 = None 94 | new_any5 = None 95 | 96 | if pipe is not None: 97 | new_model, new_pos, new_neg, new_latent, new_vae, new_clip, new_controlnet, new_image, new_seed, new_any1, new_any2, new_any3, new_any4, new_any5 = pipe 98 | 99 | if model is not None: 100 | new_model = model 101 | 102 | if pos is not None: 103 | new_pos = pos 104 | 105 | if neg is not None: 106 | new_neg = neg 107 | 108 | if latent is not None: 109 | new_latent = latent 110 | 111 | if vae is not None: 112 | new_vae = vae 113 | 114 | if clip is not None: 115 | new_clip = clip 116 | 117 | if controlnet is not None: 118 | new_controlnet = controlnet 119 | 120 | if image is not None: 121 | new_image = image 122 | 123 | if seed is not None: 124 | new_seed = seed 125 | 126 | if any1 is not None: 127 | new_any1 = any1 128 | 129 | if any2 is not None: 130 | new_any2 = any2 131 | 132 | if any3 is not None: 133 | new_any3 = any3 134 | 135 | if any4 is not None: 136 | new_any4 = any4 137 | 138 | if any5 is not None: 139 | new_any5 = any5 140 | 141 | pipe = new_model, new_pos, new_neg, new_latent, new_vae, new_clip, new_controlnet, new_image, new_seed, new_any1, new_any2, new_any3, new_any4, new_any5 142 | 143 | return (pipe, ) 144 | 145 | class Pipe: 146 | @classmethod 147 | def INPUT_TYPES(s): 148 | return { 149 | "optional": { 150 | "pipe": ("PIPE_LINE",), 151 | "model": ("MODEL",), 152 | "pos": ("CONDITIONING",), 153 | "neg": ("CONDITIONING",), 154 | "latent": ("LATENT",), 155 | "vae": ("VAE",), 156 | "clip": ("CLIP",), 157 | "controlnet": ("CONTROL_NET",), 158 | "image": ("IMAGE",), 159 | "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), 160 | "any1": (any, {"defaultInput": True}), 161 | "any2": (any, {"defaultInput": True}), 162 | "any3": (any, {"defaultInput": True}), 163 | "any4": (any, {"defaultInput": True}), 164 | "any5": (any, {"defaultInput": True}), 165 | }, 166 | } 167 | 168 | RETURN_TYPES = ("PIPE_LINE", "MODEL", "CONDITIONING", "CONDITIONING", "LATENT", "VAE", "CLIP", "CONTROL_NET", "IMAGE", "INT", any, any, any, any, any,) 169 | RETURN_NAMES = ("pipe", "model", "pos", "neg", "latent", "vae", "clip", "controlnet", "image", "seed", "any1", "any2", "any3", "any4", "any5",) 170 | FUNCTION = "pipe" 171 | CATEGORY = "LevelPixel/Utils" 172 | 173 | def pipe(self, pipe=None, 
model=None, pos=None, neg=None, latent=None, vae=None, clip=None, controlnet=None, image=None, seed=None, any1=None, any2=None, any3=None, any4=None, any5=None,): 174 | 175 | new_model = None 176 | new_pos = None 177 | new_neg = None 178 | new_latent = None 179 | new_vae = None 180 | new_clip = None 181 | new_controlnet = None 182 | new_image = None 183 | new_seed = None 184 | new_any1 = None 185 | new_any2 = None 186 | new_any3 = None 187 | new_any4 = None 188 | new_any5 = None 189 | 190 | if pipe is not None: 191 | new_model, new_pos, new_neg, new_latent, new_vae, new_clip, new_controlnet, new_image, new_seed, new_any1, new_any2, new_any3, new_any4, new_any5 = pipe 192 | 193 | if model is not None: 194 | new_model = model 195 | 196 | if pos is not None: 197 | new_pos = pos 198 | 199 | if neg is not None: 200 | new_neg = neg 201 | 202 | if latent is not None: 203 | new_latent = latent 204 | 205 | if vae is not None: 206 | new_vae = vae 207 | 208 | if clip is not None: 209 | new_clip = clip 210 | 211 | if controlnet is not None: 212 | new_controlnet = controlnet 213 | 214 | if image is not None: 215 | new_image = image 216 | 217 | if seed is not None: 218 | new_seed = seed 219 | 220 | if any1 is not None: 221 | new_any1 = any1 222 | 223 | if any2 is not None: 224 | new_any2 = any2 225 | 226 | if any3 is not None: 227 | new_any3 = any3 228 | 229 | if any4 is not None: 230 | new_any4 = any4 231 | 232 | if any5 is not None: 233 | new_any5 = any5 234 | 235 | pipe = new_model, new_pos, new_neg, new_latent, new_vae, new_clip, new_controlnet, new_image, new_seed, new_any1, new_any2, new_any3, new_any4, new_any5 236 | 237 | return (pipe, new_model, new_pos, new_neg, new_latent, new_vae, new_clip, new_controlnet, new_image, new_seed, new_any1, new_any2, new_any3, new_any4, new_any5,) 238 | 239 | class CountObjects: 240 | @classmethod 241 | def INPUT_TYPES(s): 242 | return { 243 | "required": { 244 | "input": (any,), 245 | "base_value": ("INT", {"default": 0, "min": 0}), 246 | } 247 | } 248 | 249 | RETURN_TYPES = ("INT",) 250 | INPUT_IS_LIST = True 251 | FUNCTION = "count_objects" 252 | CATEGORY = "LevelPixel/Image" 253 | 254 | def count_objects(self, input, base_value): 255 | if input is None: 256 | count = 0 257 | elif isinstance(input, list): 258 | count = len(input) 259 | else: 260 | count = 1 261 | 262 | return (base_value[0] + count,) 263 | 264 | NODE_CLASS_MAPPINGS = { 265 | "Delay|LP": Delay, 266 | "PipeOut|LP": PipeOut, 267 | "PipeIn|LP": PipeIn, 268 | "Pipe|LP": Pipe, 269 | "CountObjects|LP": CountObjects 270 | } 271 | 272 | NODE_DISPLAY_NAME_MAPPINGS = { 273 | "Delay|LP": "Delay [LP]", 274 | "PipeOut|LP": "Pipe Out [LP]", 275 | "PipeIn|LP": "Pipe In [LP]", 276 | "Pipe|LP": "Pipe [LP]", 277 | "CountObjects|LP": "Count Objects [LP]" 278 | } 279 | 280 | -------------------------------------------------------------------------------- /nodes/convert/convert_LP.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | class AnyType(str): 4 | def __ne__(self, __value: object) -> bool: 5 | return False 6 | 7 | any = AnyType("*") 8 | 9 | class StringToFloat: 10 | @classmethod 11 | def INPUT_TYPES(s): 12 | return { 13 | "required": { 14 | "string": ("STRING", {"default": ""}), 15 | } 16 | } 17 | 18 | RETURN_TYPES = ("FLOAT",) 19 | FUNCTION = "string_to_float" 20 | CATEGORY = "LevelPixel/Conversion" 21 | 22 | def string_to_float(self, string): 23 | return (float(string),) 24 | 25 | class StringToInt: 26 | @classmethod 27 | def INPUT_TYPES(s): 28 | 
return { 29 | "required": { 30 | "string": ("STRING", {"default": ""}), 31 | } 32 | } 33 | 34 | RETURN_TYPES = ("INT",) 35 | FUNCTION = "string_to_int" 36 | CATEGORY = "LevelPixel/Conversion" 37 | 38 | def string_to_int(self, string): 39 | return (int(string),) 40 | 41 | class StringToBool: 42 | @classmethod 43 | def INPUT_TYPES(cls): 44 | return { 45 | "required": { 46 | "string": ("STRING", {"multiline": False, "default": ""}), 47 | }, 48 | } 49 | 50 | RETURN_TYPES = ("BOOLEAN",) 51 | RETURN_NAMES = ("BOOLEAN",) 52 | FUNCTION = "string_to_bool" 53 | CATEGORY = "LevelPixel/Conversion" 54 | 55 | def string_to_bool(self, string): 56 | 57 | if string == "True" or string == "true" or string == "yes" or string == "1": 58 | boolean_out = True 59 | if string == "False" or string == "false" or string == "no" or string == "0": 60 | boolean_out = False 61 | else: 62 | if string.startswith('-') and string[1:].replace('.','',1).isdigit(): 63 | float_out = -float(string[1:]) 64 | if float_out > 0: 65 | boolean_out = True 66 | if float_out <= 0: 67 | boolean_out = False 68 | else: 69 | if string.replace('.','',1).isdigit(): 70 | float_out = float(string) 71 | if float_out > 0: 72 | boolean_out = True 73 | if float_out <= 0: 74 | boolean_out = False 75 | else: 76 | pass 77 | 78 | return (boolean_out,) 79 | 80 | class StringToNumber: 81 | @classmethod 82 | def INPUT_TYPES(s): 83 | return {"required": {"string": ("STRING", {"multiline": False, "default": ""}), 84 | "round_integer": (["round", "round down","round up"],), 85 | }, 86 | } 87 | 88 | RETURN_TYPES = ("INT", "FLOAT",) 89 | RETURN_NAMES = ("INT", "FLOAT",) 90 | FUNCTION = "string_to_number" 91 | CATEGORY = "LevelPixel/Conversion" 92 | 93 | def string_to_number(self, string, round_integer): 94 | if string.startswith('-') and string[1:].replace('.','',1).isdigit(): 95 | float_out = -float(string[1:]) 96 | else: 97 | if string.replace('.','',1).isdigit(): 98 | float_out = float(string) 99 | else: 100 | print(f"[Error] String To Number. 
Not a number.") 101 | return {} 102 | 103 | if round_integer == "round up": 104 | if string.startswith('-'): 105 | int_out = int(float_out) 106 | else: 107 | int_out = int(float_out) + 1 108 | elif round_integer == "round down": 109 | if string.startswith('-'): 110 | int_out = int(float_out) - 1 111 | else: 112 | int_out = int(float_out) 113 | else: 114 | int_out = round(float_out) 115 | 116 | return (int_out, float_out,) 117 | 118 | class StringToCombo: 119 | @classmethod 120 | def INPUT_TYPES(cls): 121 | return { 122 | "required": { 123 | "string": ("STRING", {"multiline": False, "default": ""}), 124 | }, 125 | } 126 | 127 | RETURN_TYPES = (any,) 128 | RETURN_NAMES = ("any",) 129 | FUNCTION = "string_to_combo" 130 | CATEGORY = "LevelPixel/Conversion" 131 | 132 | def string_to_combo(self, string): 133 | text_list = list() 134 | if string != "": 135 | values = string.split(',') 136 | text_list = values[0] 137 | print(text_list) 138 | 139 | return (text_list,) 140 | 141 | class IntToString: 142 | @classmethod 143 | def INPUT_TYPES(s): 144 | return {"required": {"int": ("INT", {"default": 0, "min": -0xffffffffffffffff, "max": 0xffffffffffffffff, }), 145 | } 146 | } 147 | 148 | RETURN_TYPES = ("STRING",) 149 | RETURN_NAMES = ("STRING",) 150 | FUNCTION = 'int_to_string' 151 | CATEGORY = "LevelPixel/Conversion" 152 | 153 | def int_to_string(self, int): 154 | return (f'{int}', ) 155 | 156 | class FloatToString: 157 | @classmethod 158 | def INPUT_TYPES(s): 159 | return {"required": {"float": ("FLOAT", {"default": 0.0, "min": -0xffffffffffffffff, "max": 0xffffffffffffffff, }), 160 | } 161 | } 162 | 163 | RETURN_TYPES = ('STRING', ) 164 | RETURN_NAMES = ('STRING', ) 165 | FUNCTION = 'float_to_string' 166 | CATEGORY = "LevelPixel/Conversion" 167 | 168 | def float_to_string(self, float): 169 | return (f'{float}', ) 170 | 171 | class BoolToString: 172 | @classmethod 173 | def INPUT_TYPES(s): 174 | return {"required": {"bool": ("BOOLEAN", {"default": False,}), 175 | } 176 | } 177 | 178 | RETURN_TYPES = ('STRING', ) 179 | RETURN_NAMES = ('STRING', ) 180 | FUNCTION = 'bool_to_string' 181 | CATEGORY = "LevelPixel/Conversion" 182 | 183 | def bool_to_string(self, bool): 184 | return (f'{bool}', ) 185 | 186 | class FloatToInt: 187 | @classmethod 188 | def INPUT_TYPES(cls): 189 | return {"required": {"float": ("FLOAT", {"default": 0.0, "min": -0xffffffffffffffff, "max": 0xffffffffffffffff, }), 190 | "round_integer": (["round", "round down","round up"],), 191 | } 192 | } 193 | 194 | RETURN_TYPES = ("INT",) 195 | RETURN_NAMES = ("INT",) 196 | FUNCTION = "float_to_int" 197 | CATEGORY = "LevelPixel/Conversion" 198 | 199 | def float_to_int(self, float, round_integer): 200 | if round_integer == "round up": 201 | if float < 0.0: 202 | int_out = int(float) 203 | else: 204 | int_out = int(float) + 1 205 | elif round_integer == "round down": 206 | if float < 0.0: 207 | int_out = int(float) - 1 208 | else: 209 | int_out = int(float) 210 | else: 211 | int_out = round(float) 212 | return (int_out,) 213 | 214 | class IntToFloat: 215 | @classmethod 216 | def INPUT_TYPES(cls): 217 | return {"required": {"int": ("INT", {"default": 0, "min": -0xffffffffffffffff, "max": 0xffffffffffffffff, }), 218 | } 219 | } 220 | 221 | RETURN_TYPES = ("FLOAT",) 222 | RETURN_NAMES = ("FLOAT",) 223 | FUNCTION = "int_to_float" 224 | CATEGORY = "LevelPixel/Conversion" 225 | 226 | def int_to_float(self, int): 227 | return (float(int),) 228 | 229 | class IntToBool: 230 | @classmethod 231 | def INPUT_TYPES(cls): 232 | return { 233 | "required": { 
234 | "int": ("INT", {"default": 0, "min": -0xffffffffffffffff, "max": 0xffffffffffffffff, }), 235 | }, 236 | } 237 | 238 | RETURN_TYPES = ("BOOLEAN",) 239 | RETURN_NAMES = ("BOOLEAN",) 240 | FUNCTION = "int_to_bool" 241 | CATEGORY = "LevelPixel/Conversion" 242 | 243 | def int_to_bool(self, int): 244 | if int > 0: 245 | boolean_out = True 246 | if int < 1: 247 | boolean_out = False 248 | else: 249 | pass 250 | 251 | return (boolean_out,) 252 | 253 | class BoolToInt: 254 | @classmethod 255 | def INPUT_TYPES(cls): 256 | return { 257 | "required": { 258 | "bool": ("BOOLEAN", {"multiline": False, "default": False}), 259 | }, 260 | } 261 | 262 | RETURN_TYPES = ("INT",) 263 | RETURN_NAMES = ("INT",) 264 | FUNCTION = "bool_to_int" 265 | CATEGORY = "LevelPixel/Conversion" 266 | 267 | def bool_to_int(self, bool): 268 | if bool == True: 269 | int_out = 1 270 | if bool == False: 271 | int_out = 0 272 | else: 273 | pass 274 | 275 | return (int_out,) 276 | 277 | class ComboToText: 278 | @classmethod 279 | def INPUT_TYPES(cls): 280 | return { 281 | "required": { 282 | "combo_list": ("COMBO",), 283 | "delimiter": ("STRING", {"default": ", ", "multiline": False}), 284 | "as_json": ("BOOLEAN", {"default": False}), 285 | } 286 | } 287 | 288 | RETURN_TYPES = ("STRING",) 289 | RETURN_NAMES = ("text",) 290 | FUNCTION = "combo_to_text" 291 | CATEGORY = "LevelPixel/Conversion" 292 | 293 | def combo_to_text(self, combo_list, delimiter, as_json): 294 | import json 295 | if as_json: 296 | try: 297 | text = json.dumps(combo_list, ensure_ascii=False, indent=2) 298 | except Exception as e: 299 | text = f"Error: {e}" 300 | else: 301 | lines = [] 302 | for item in combo_list: 303 | if isinstance(item, dict): 304 | line = delimiter.join(f"{k}: {v}" for k, v in item.items()) 305 | else: 306 | line = str(item) 307 | lines.append(line) 308 | text = "\n".join(lines) 309 | return (text,) 310 | 311 | class AnyToText: 312 | @classmethod 313 | def INPUT_TYPES(cls): 314 | return { 315 | "required": { 316 | "any_value": (any, {"defaultInput": True}), 317 | "as_json": ("BOOLEAN", {"default": False}), 318 | } 319 | } 320 | 321 | RETURN_TYPES = ("STRING",) 322 | RETURN_NAMES = ("text",) 323 | FUNCTION = "any_to_text" 324 | CATEGORY = "LevelPixel/Conversion" 325 | 326 | def any_to_text(self, any_value, as_json): 327 | import json 328 | if as_json: 329 | try: 330 | text = json.dumps(any_value, ensure_ascii=False, indent=2) 331 | except Exception as e: 332 | text = f"Error: {e}" 333 | else: 334 | text = str(any_value) 335 | return (text,) 336 | 337 | 338 | NODE_CLASS_MAPPINGS = { 339 | "StringToInt|LP": StringToInt, 340 | "StringToFloat|LP": StringToFloat, 341 | "StringToBool|LP": StringToBool, 342 | "StringToNumber|LP": StringToNumber, 343 | "StringToCombo|LP": StringToCombo, 344 | "IntToString|LP": IntToString, 345 | "FloatToString|LP": FloatToString, 346 | "BoolToString|LP": BoolToString, 347 | "FloatToInt|LP": FloatToInt, 348 | "IntToFloat|LP": IntToFloat, 349 | "IntToBool|LP": IntToBool, 350 | "BoolToInt|LP": BoolToInt, 351 | "ComboToText|LP": ComboToText, 352 | "AnyToText|LP": AnyToText, 353 | } 354 | 355 | NODE_DISPLAY_NAME_MAPPINGS = { 356 | "StringToInt|LP": "Convert String To Int [LP]", 357 | "StringToFloat|LP": "Convert String To Float [LP]", 358 | "StringToBool|LP": "Convert String To Bool [LP]", 359 | "StringToNumber|LP": "Convert String To Number [LP]", 360 | "StringToCombo|LP": "Convert String To Combo [LP]", 361 | "IntToString|LP": "Convert Int To String [LP]", 362 | "FloatToString|LP": "Convert Float To String [LP]", 
363 | "BoolToString|LP": "Convert Bool To String [LP]", 364 | "FloatToInt|LP": "Convert Float To Int [LP]", 365 | "IntToFloat|LP": "Convert Int To Float [LP]", 366 | "IntToBool|LP": "Convert Int To Bool [LP]", 367 | "BoolToInt|LP": "Convert Bool To Int [LP]", 368 | "ComboToText|LP": "Convert Combo To Text [LP]", 369 | "AnyToText|LP": "Convert Any To Text [LP]", 370 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Level Pixel nodes for ComfyUI 2 | 3 | ![banner_LevelPixel_with_logo](https://github.com/user-attachments/assets/ef79f2c9-04fb-485f-aba5-6cd00cb14d8c) 4 | 5 | The purpose of this package is to collect the most necessary and atomic nodes for working with any tasks, adapted for use in cycles and conditions. The package of nodes is aimed at those users who need all the basic things to create multitasking complex workflows using multimodal neural models and software solutions. 6 | 7 | *[Our dream is to see the possibilities for convenient creation of full automation in ComfyUI workflows. We will try to get closer to it.](https://www.patreon.com/LevelPixel)* 8 | 9 | **In this Level Pixel node pack you will find:** 10 | Inpaint Crop and Stitch, Pipeline, Tag Category Filter nodes, Model Unloader nodes, File Counter, Object Counter, Image Loader From Path, Load Image, Fast Checker Pattern, Float Slider, Load LoRA Tag, Image Overlay, Conversion nodes. 11 | 12 | Recommend that you install the advanced node package from Level Pixel Advanced for Multimodal Generators, Qwen2.5-VL gguf, LLM, VLM, RAM, Autotaggers, RemBG nodes:\ 13 | [https://github.com/LevelPixel/ComfyUI-LevelPixel-Advanced](https://github.com/LevelPixel/ComfyUI-LevelPixel-Advanced) 14 | 15 | The official repository of the current node package is located at this link:\ 16 | [https://github.com/LevelPixel/ComfyUI-LevelPixel](https://github.com/LevelPixel/ComfyUI-LevelPixel) 17 | 18 | **Like our nodes? Then we'd be happy to see your star on our GitHub repository!** 19 | 20 | ## Contacts, services and support 21 | 22 | Our official Patreon page:\ 23 | [https://www.patreon.com/LevelPixel](https://www.patreon.com/LevelPixel) 24 | 25 | On Patreon you can get services from us on issues related to ComfyUI, Forge, programming and AI tools. 26 | You can also support our project and support the development of our node packages. 27 | 28 | For cooperation, suggestions and ideas you can write to email: 29 | 30 | 31 | ## Installation 32 | 33 | ### Installation package using ComfyUI Manager (recommended) 34 | 35 | Install [ComfyUI Manager](https://github.com/ltdrdata/ComfyUI-Manager) and do steps introduced there to install this repo 'ComfyUI-LevelPixel'. 36 | The nodes of the current package will be updated automatically when you click "Update ALL" in ComfyUI Manager. 37 | 38 | ### Alternative installation package 39 | 40 | Clone the repository: 41 | `git clone https://github.com/LevelPixel/ComfyUI-LevelPixel.git` 42 | to your ComfyUI `custom_nodes` directory 43 | 44 | The script will then automatically install all custom scripts and nodes. 45 | It will attempt to use symlinks and junctions to prevent having to copy files and keep them up to date. 46 | 47 | - For uninstallation: 48 | - Delete the cloned repo in `custom_nodes` 49 | - Ensure `web/extensions/levelpixel` has also been removed 50 | - For manual update: 51 | - Navigate to the cloned repo e.g. 
`custom_nodes/ComfyUI-LevelPixel` 52 | - `git pull` 53 | 54 | ## Features 55 | 56 | All Level Pixel nodes in this package: 57 | 58 | ![level-pixel-nodes_1](https://github.com/user-attachments/assets/10fa67be-766c-4936-9fbd-fdf3830cd290) 59 | ![level-pixel-nodes_2](https://github.com/user-attachments/assets/f9c1e2ab-1bfc-4f4d-9fd5-b4c8c00e4423) 60 | 61 | ### Inpaint Crop and Stitch 62 | 63 | Inpaint Crop and Inpaint Stitch nodes allow you to work effectively with image fragments. 64 | Inpaint Crop cuts a fragment out of the image using a given mask and scales it up or down to the target size. Then, after any third-party modifications to the fragment (regeneration, manual correction, repainting, etc.), Inpaint Stitch stitches the modified fragment back into the main image (first scaling it back to the size it had when it was cut out). 65 | 66 | This is incredibly convenient for inpainting, and especially convenient for automation. 67 | 68 | A peculiarity of these Inpaint Crop and Stitch nodes is that they have a special pin that accepts different parameters for the target size of the cut-out fragment. One of the key ones is "Cropped Aspect Size Parameters" (or the "aspect size" value embedded in Inpaint Crop in the mode widget), which allows you to set the target resolution for the cut fragment while the node itself calculates the required dimensions - incredibly useful for automating the regeneration of an image fragment. 69 | 70 | ![level-pixel-nodes_inpaint_1](https://github.com/user-attachments/assets/c726cc87-06aa-4e7d-a45b-aa1221156591) 71 | 72 | In addition, Inpaint Crop and Stitch can easily work with batch images, masks and context masks. This can be very useful in scenarios where you need to mass-cut images by mask and then mass-return them to the main images. The only limitation is that the target size of the masks must be the same for batch images. 73 | The image below shows an example of using the batch mode. 74 | ![level-pixel-nodes_inpaint_2](https://github.com/user-attachments/assets/bead11da-7100-4ae2-a78f-db5e9a7d4364) 75 | 76 | ### Resize Image and Masks 77 | 78 | Resize Image and Masks is a node that does one great thing: it brings an image/mask/context mask, or a batch of images/masks/context masks, to a single target size. 79 | 80 | This can be incredibly useful when you need to bring images to one target size along with their masks, without any errors and in one go. 81 | In addition, you can also use this node for special cases of adjusting the size of an image (or mask, or context mask) to the target. 82 | 83 | An example of how you can use this node is below (and also in the previous image in Inpaint Crop and Stitch). 84 | ![level-pixel-nodes_inpaint_3](https://github.com/user-attachments/assets/2c89bcc9-3f86-453c-80af-fe2771471f8f) 85 | 86 | The core functionality is taken from [ComfyUI-Inpaint-CropAndStitch](https://github.com/lquesada/ComfyUI-Inpaint-CropAndStitch) and belongs to its authors. 87 | 88 | ### Tag Category Filter nodes 89 | 90 | A set of nodes that allow you to filter tags by category. There is an option to remove or keep certain categories of tags, a function for determining the categories of all tags, and a function for removing specific tags. 91 | 92 | The nodes are very convenient because you can use them to remove unnecessary tags by category, for example, to clean up tags and prepare them for use.
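As a rough illustration of the idea (a minimal sketch, not the nodes' actual implementation - the real nodes read their category list from `custom_nodes/ComfyUI-LevelPixel/nodes/tags/tag_category.json`), keeping only tags whose category is whitelisted looks roughly like this:

```python
# Minimal sketch of category-based tag filtering; TAG_CATEGORIES is an assumed
# toy map, not the actual category list shipped with the nodes.
TAG_CATEGORIES = {"1girl": "person", "smile": "person", "forest": "background", "sky": "background"}

def keep_categories(tags: str, categories: set) -> str:
    # Keep only the tags whose category is in the requested set.
    kept = [t.strip() for t in tags.split(",") if TAG_CATEGORIES.get(t.strip()) in categories]
    return ", ".join(kept)

print(keep_categories("1girl, smile, forest, sky", {"background"}))  # -> forest, sky
```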
You can use this to get specific prompts from an image (for example, if you need a description of only the background of an image, you can keep that category of tags by setting the "background" category in Tag Category Keeper). 93 | 94 | The core functionality is taken from [comfyui_tag_fillter](https://github.com/sugarkwork/comfyui_tag_fillter) and belongs to its authors. 95 | 96 | ### Load LoRA Tag 97 | 98 | A LoRA loader from text in the style of Automatic1111 and Forge WebUI. For this version of the loader, text output for LoRA loading errors has been added as a widget on the node. 99 | 100 | The core functionality is taken from [comfyui_lora_tag_loader](https://github.com/badjeff/comfyui_lora_tag_loader) and belongs to its authors. 101 | 102 | ### Model Unloader nodes 103 | 104 | A node that automatically unloads all models from memory. It must be added to a sequential chain of nodes in the workflow. There are four versions of this node: Hard (complete unloading of all models from memory), Soft (no unloading of models from memory, just a soft cleaning of memory garbage), Model Unload (unloading of a specified model from memory), and "Soft Full Clean RAM and VRAM" for the case when it is necessary to unload all models from memory and clean memory of garbage after the workflow is completed (the task in the queue is finished). 105 | 106 | ### File Counter 107 | 108 | A simple counter of files in a given folder. Convenient when you need to count the number of files of a certain format (for example, for subsequent use in loops or conditions). 109 | 110 | ### Image Loader From Path 111 | 112 | Loads images from a specific folder or path. It is convenient because you can specify both absolute paths and local paths (relative to the input folder), and you can sequentially feed images into the workflow by number at each step of a cycle. In addition, you can load images in batches with a set number of images per batch. And the cherry on the cake: you can get a list of image file names at the output. 113 | 114 | ### Load Image 115 | 116 | This is a new image loading node that can retrieve the names of the files you load into your workflow. 117 | 118 | ### Image Overlay 119 | 120 | A node that allows you to overlay one image on another, with the option to specify a mask. In this package, Image Overlay has an extended range of sizes for the final image, as well as an additional standard image size. 121 | 122 | The core functionality is taken from [efficiency-nodes-comfyui](https://github.com/jags111/efficiency-nodes-comfyui) and belongs to its authors. 123 | 124 | ### Fast Checker Pattern 125 | 126 | Quickly creates a background image with a checkerboard pattern according to the specified parameters, for subsequent testing of images with a transparent background. You need to combine the resulting background image with your transparent-background image in other ComfyUI nodes (at the moment there is no universal node for this, but perhaps we will make one in the future). 127 | 128 | ### Simple Float Slider 129 | 130 | Simple Float Slider is a handy slider from 0.0 to 1.0 for conveniently managing variables in your workflow. The min and max values cannot be changed in the interface (but you can change these values inside Python if you really need to).
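For reference, here is a minimal sketch of what such a fixed-range float widget looks like in a custom node definition (assumed names, not the node's exact source); editing `min`/`max` in a definition like this is what changing the values inside Python means:

```python
class SimpleFloatSliderSketch:  # hypothetical class name, for illustration only
    @classmethod
    def INPUT_TYPES(cls):
        # The shipped node pins the range to 0.0..1.0; widening the slider
        # would mean editing "min"/"max" here.
        return {"required": {"value": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.00001, "display": "slider"})}}

    RETURN_TYPES = ("FLOAT",)
    FUNCTION = "get_value"
    CATEGORY = "LevelPixel/Utils"

    def get_value(self, value):
        return (value,)
```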
131 | The pack contains two additional sliders - "Simple Float Slider - Tenths Step" and "Simple Float Slider - Hundredths Step" for working with more precisely defined values in tenths and hundredths (work correctly only if you have not changed the value of "Float widget rounding decimal places" in the ComfyUI settings. If you have changed it, then return the value to 0). 132 | 133 | The core functionality is taken from [comfyui-mixlab-nodes](https://github.com/shadowcz007/comfyui-mixlab-nodes) and belongs to its authors. 134 | 135 | ### Other nodes 136 | 137 | There are a few more nodes in this package that have some unusual uses: 138 | 139 | - Google Translate 140 | - Count Objects - counts the number of objects that were fed to the input (this can be a list or one single object). Accepts any type of input. 141 | - Preview Image Bridge - only output an image to the screen if there is a connection to the output node. Useful in loops and conditions where the execution of this node is not required due to current conditions (variables). 142 | - Show Text Bridge - only output text to the screen if there is a connection to the output node. Useful in loops and conditions where the execution of this node is not required due to current conditions (variables). 143 | - Show Text - output text to the screen with mandatory execution. The node is executed in any case, whether the output is connected or not. 144 | - Text - a simple node for entering multi-line text (similar to Prompt from other node packages). 145 | - String - a simple node for entering single-line text (similar to String from other node packages). 146 | - Conversion nodes - a variety of different nodes that allow you to transform different types of variables into other variables. The big difference from other current node packages is that they cover a larger number of variable types. Conversion nodes: StringToFloat, StringToInt, StringToBool, StringToNumber, StringToCombo, IntToString, FloatToString, BoolToString, FloatToInt, IntToFloat, IntToBool, BoolToInt. 147 | - Pipe - extremely useful and extremely easy to use node for building a beautiful pipeline. One Pipe node is both an input and an output, so I recommend using it where it is absolutely necessary. In addition, there are standard Pipe In and Pipe Out, if you want aesthetics. 148 | 149 | ### About LLM, LLaVa, VLM, Autotagger, RAM nodes 150 | 151 | All LLM nodes have been moved to a separate ComfyUI Level Pixel Advanced node package, as such nodes require the skill of configuring programs, drivers and libraries for correct use, as well as due to constant changes and other frequent changes that may affect all other functionality of the current node package. In addition, some technologies based on neural networks tend to quickly become obsolete (currently in 1-2 years), so they will be in a separate ComfyUI Level Pixel Advanced package. 
152 | 153 | Link to Level Pixel Advanced nodes with LLM nodes: 154 | [https://github.com/LevelPixel/ComfyUI-LevelPixel-Advanced](https://github.com/LevelPixel/ComfyUI-LevelPixel-Advanced) 155 | 156 | ## Update History 157 | 158 | v1.3.0 - 08-06-2025 - Added powerful Inpaint Crop and Inpaint Stitch nodes, as well as Resize Image and Masks 159 | 160 | v1.2.0 - 27-05-2025 - The node package is divided into two independent packages - a package with logical nodes [ComfyUI-LevelPixel](https://github.com/LevelPixel/ComfyUI-LevelPixel) and a package with wrapper-nodes for neural models [ComfyUI-LevelPixel-Advanced](https://github.com/LevelPixel/ComfyUI-LevelPixel-Advanced) 161 | 162 | The license for this package has been changed from Apache 2.0 to GNU GPLv3 163 | 164 | ## Credits 165 | 166 | ComfyUI/[ComfyUI](https://github.com/comfyanonymous/ComfyUI) - A powerful and modular stable diffusion GUI. 167 | 168 | Tag Filter nodes for ComfyUI/[comfyui_tag_fillter](https://github.com/sugarkwork/comfyui_tag_fillter) - Best tag filter by category nodes for ComfyUI. 169 | 170 | Load LoRA Tag node for ComfyUI/[comfyui_lora_tag_loader](https://github.com/badjeff/comfyui_lora_tag_loader) - Thanks to the author for this great node for LoRAs! 171 | 172 | Efficiency-nodes-comfyui/[efficiency-nodes-comfyui](https://github.com/jags111/efficiency-nodes-comfyui) - Thanks for Image Overlay! 173 | 174 | ComfyUI-Inpaint-CropAndStitch/[ComfyUI-Inpaint-CropAndStitch](https://github.com/lquesada/ComfyUI-Inpaint-CropAndStitch) - Thanks for Inpaint Crop and Inpaint Stitch! 175 | 176 | ## License 177 | 178 | Copyright (c) 2024-present [Level Pixel](https://github.com/LevelPixel) 179 | 180 | Licensed under GNU GPLv3 181 | -------------------------------------------------------------------------------- /nodes/text/text_utils_LP.py: -------------------------------------------------------------------------------- 1 | import re 2 | import random 3 | import time 4 | import wordninja 5 | import string 6 | from deep_translator import GoogleTranslator, MyMemoryTranslator 7 | from langdetect import detect 8 | 9 | translators = ['GoogleTranslator', 'MyMemoryTranslator'] 10 | 11 | class AnyType(str): 12 | def __ne__(self, __value: object) -> bool: 13 | return False 14 | 15 | any = AnyType("*") 16 | 17 | def split_text(text, limit=499): 18 | if len(text) <= limit: 19 | return [text] 20 | 21 | parts = [] 22 | 23 | if '\n' in text: 24 | for line in text.split('\n'): 25 | if not line: 26 | parts.append('') 27 | elif len(line) <= limit: 28 | parts.append(line) 29 | else: 30 | parts.extend(split_text(line, limit)) 31 | return parts 32 | 33 | sentences = re.findall(r'.*?[\.!?]|.+$', text) 34 | 35 | chunk = '' 36 | for s in (s.strip() for s in sentences if s.strip()): 37 | if not chunk: 38 | chunk = s 39 | elif len(chunk) + 1 + len(s) <= limit: 40 | chunk += ' ' + s 41 | else: 42 | parts.append(chunk) 43 | chunk = s 44 | if chunk: 45 | parts.append(chunk) 46 | 47 | return parts 48 | 49 | def join_text(parts): 50 | return '\n\n'.join(parts) 51 | 52 | class TextChoiceParser: 53 | @classmethod 54 | def INPUT_TYPES(s): 55 | return { 56 | "required": { 57 | "text": ("STRING", {"multiline": True, "dynamicPrompts": False}), 58 | }, 59 | "optional": { 60 | "variables": ("STRING", {"multiline": True, "dynamicPrompts": False}), 61 | "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), 62 | }, 63 | } 64 | 65 | RETURN_TYPES = ("STRING",) 66 | RETURN_NAMES = ("text",) 67 | FUNCTION = "text_choice_parser" 68 | OUTPUT_NODE = True 69 | 
CATEGORY = "LevelPixel/Text" 70 | 71 | def text_choice_parser(self, text, variables="", seed=None): 72 | if len(text) > 10000: 73 | return ("Text too large to process at once",) 74 | 75 | if seed is None or seed == 0: 76 | seed = int(time.time() * 1000) 77 | random.seed(seed) 78 | 79 | var_dict = {} 80 | for line in variables.split('\n'): 81 | if '=' in line: 82 | key, value = line.split('=', 1) 83 | var_dict[key.strip()] = value.strip() 84 | 85 | for key, value in var_dict.items(): 86 | text = text.replace(f"[{key}]", value) 87 | 88 | pattern = r'\{([^}]+)\}' 89 | 90 | def replace_random(match): 91 | return random.choice(match.group(1).split('|')) 92 | 93 | result = re.sub(pattern, replace_random, text) 94 | 95 | return (result,) 96 | 97 | @classmethod 98 | def IS_CHANGED(s, text, variables="", seed=None): 99 | return (text, variables, seed) 100 | 101 | class CLIPTextEncodeTranslate: 102 | @classmethod 103 | def INPUT_TYPES(s): 104 | return {"required": {"text": ("STRING", {"multiline": True, "dynamicPrompts": True}), "clip": ("CLIP", )}} 105 | RETURN_TYPES = ("CONDITIONING",) 106 | FUNCTION = "clip_text_encode_translate" 107 | CATEGORY = "LevelPixel/Text" 108 | 109 | def clip_text_encode_translate(self, clip, text): 110 | if text.strip(): 111 | detected_lang = detect(text) 112 | if detected_lang != 'en': 113 | try: 114 | translator = GoogleTranslator(source='auto', target='en') 115 | text = translator.translate(text) 116 | except Exception as e: 117 | print(f"Translation error: {e}") 118 | tokens = clip.tokenize(text) 119 | cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True) 120 | return ([[cond, {"pooled_output": pooled}]], ) 121 | 122 | class TextTranslate: 123 | @classmethod 124 | def INPUT_TYPES(s): 125 | return {"required": {"text": ("STRING", {"default": "text", "multiline": True}), 126 | "translator": (translators, {"default":"GoogleTranslator"}) 127 | } 128 | } 129 | 130 | RETURN_TYPES = ("STRING",) 131 | RETURN_NAMES = ("english TEXT",) 132 | FUNCTION = "text_translate" 133 | CATEGORY = "LevelPixel/Text" 134 | 135 | def text_translate(self, text, translator='GoogleTranslator'): 136 | if text.strip(): 137 | detected_lang = detect(text) 138 | if detected_lang != 'en': 139 | try: 140 | if translator == 'MyMemoryTranslator': 141 | sentences = split_text(text, 499) 142 | translator = MyMemoryTranslator(source='auto', target='en-US') 143 | texts = translator.translate_batch(sentences) 144 | text = join_text(texts) 145 | else: 146 | sentences = split_text(text, 4999) 147 | translator = GoogleTranslator(source='auto', target='en') 148 | texts = translator.translate_batch(sentences) 149 | text = join_text(texts) 150 | except Exception as e: 151 | print(f"Translation error: {e}") 152 | return (text,) 153 | 154 | class TextTranslateManualAll: 155 | @classmethod 156 | def INPUT_TYPES(s): 157 | source_language_codes = [ 158 | 'auto', 'af', 'am', 'ar', 'az', 'be', 'bg', 'bn', 'bs', 'ca', 'ceb', 'co', 159 | 'cs', 'cy', 'da', 'de', 'el', 'en', 'eo', 'es', 'et', 'eu', 'fa', 'fi', 160 | 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'haw', 'he', 'hi', 'hmn', 'hr', 161 | 'ht', 'hu', 'hy', 'id', 'ig', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 162 | 'kn', 'ko', 'ku', 'ky', 'la', 'lb', 'lo', 'lt', 'lv', 'mg', 'mi', 'mk', 163 | 'ml', 'mn', 'mr', 'ms', 'mt', 'my', 'ne', 'nl', 'no', 'ny', 'pa', 'pl', 164 | 'ps', 'pt', 'ro', 'ru', 'rw', 'sd', 'si', 'sk', 'sl', 'sm', 'sn', 'so', 165 | 'sq', 'sr', 'st', 'su', 'sv', 'sw', 'ta', 'te', 'tg', 'th', 'tk', 'tl', 'tr', 166 | 'tt', 'ug', 'uk', 'ur', 
'uz', 'vi', 'xh', 'yi', 'yo', 'zh-CN', 'zh-TW', 'zu' 167 | ] 168 | target_language_codes = [code for code in source_language_codes if code != 'auto'] 169 | return { 170 | "required": {"text": ("STRING", {"default": "text", "multiline": True}), 171 | "source_lang": (source_language_codes, {"default":"auto"}), 172 | "target_lang": (target_language_codes, {"default":"en"}), 173 | "translator": (translators, {"default":"GoogleTranslator"}) 174 | } 175 | } 176 | RETURN_TYPES = ("STRING",) 177 | RETURN_NAMES = ("text",) 178 | FUNCTION = "text_translate_manual_all" 179 | CATEGORY = "LevelPixel/Text" 180 | 181 | def text_translate_manual_all(self, text, source_lang='auto', target_lang='en', translator='GoogleTranslator'): 182 | if text.strip(): 183 | try: 184 | sentences = split_text(text, 4999) 185 | translator = GoogleTranslator(source=source_lang, target=target_lang) 186 | texts = translator.translate_batch(sentences) 187 | text = join_text(texts) 188 | except Exception as e: 189 | print(f"Translation error: {e}") 190 | return (text,) 191 | 192 | class TextTranslateManual: 193 | @classmethod 194 | def INPUT_TYPES(s): 195 | source_language_codes = [ 196 | 'auto', 197 | 'English', 198 | 'Русский', 199 | 'Deutsch', 200 | 'Français', 201 | 'Italiano', 202 | 'Polski', 203 | 'Українська', 204 | 'Nederlands', 205 | 'Español', 206 | '简体中文', 207 | '繁體中文', 208 | '日本語', 209 | 'हिन्दी', 210 | 'العربية', 211 | 'Português', 212 | 'বাংলা' 213 | ] 214 | target_language_codes = [code for code in source_language_codes if code != 'auto'] 215 | return { 216 | "required": {"text": ("STRING", {"default": "text", "multiline": True}), 217 | "source_lang": (source_language_codes, {"default":"auto"}), 218 | "target_lang": (target_language_codes, {"default":"English"}), 219 | "translator": (translators, {"default":"GoogleTranslator"}) 220 | } 221 | } 222 | RETURN_TYPES = ("STRING",) 223 | RETURN_NAMES = ("text",) 224 | FUNCTION = "text_translate_manual" 225 | CATEGORY = "LevelPixel/Text" 226 | 227 | def text_translate_manual(self, text, source_lang='auto', target_lang='English', translator='GoogleTranslator'): 228 | google_language_name_to_code = { 229 | 'auto': 'auto', 230 | 'English': 'en', 231 | 'Русский': 'ru', 232 | 'Deutsch': 'de', 233 | 'Français': 'fr', 234 | 'Italiano': 'it', 235 | 'Polski': 'pl', 236 | 'Українська': 'uk', 237 | 'Nederlands': 'nl', 238 | 'Español': 'es', 239 | '简体中文': 'zh-CN', 240 | '繁體中文': 'zh-TW', 241 | '日本語': 'ja', 242 | 'हिन्दी': 'hi', 243 | 'العربية': 'ar', 244 | 'Português': 'pt', 245 | 'বাংলা': 'bn', 246 | } 247 | 248 | mymemory_language_name_to_code = { 249 | 'auto': 'auto', 250 | 'English': 'en-US', 251 | 'Русский': 'ru-RU', 252 | 'Deutsch': 'de-DE', 253 | 'Français': 'fr-FR', 254 | 'Italiano': 'it-IT', 255 | 'Polski': 'pl-PL', 256 | 'Українська': 'uk-UA', 257 | 'Nederlands': 'nl-NL', 258 | 'Español': 'es-ES', 259 | '简体中文': 'zh-CN', 260 | '繁體中文': 'zh-TW', 261 | '日本語': 'ja-JP', 262 | 'हिन्दी': 'hi-IN', 263 | 'العربية': 'ar-SA', 264 | 'Português': 'pt-PT', 265 | 'বাংলা': 'bn-IN', 266 | } 267 | 268 | if text.strip(): 269 | try: 270 | if translator == 'MyMemoryTranslator': 271 | source_lang_code = mymemory_language_name_to_code.get(source_lang) 272 | target_lang_code = mymemory_language_name_to_code.get(target_lang) 273 | sentences = split_text(text, 499) 274 | translator = MyMemoryTranslator(source=source_lang_code, target=target_lang_code) 275 | texts = translator.translate_batch(sentences) 276 | text = join_text(texts) 277 | else: 278 | source_lang_code = 
google_language_name_to_code.get(source_lang) 279 | target_lang_code = google_language_name_to_code.get(target_lang) 280 | sentences = split_text(text, 4999) 281 | translator = GoogleTranslator(source=source_lang_code, target=target_lang_code) 282 | texts = translator.translate_batch(sentences) 283 | text = join_text(texts) 284 | except Exception as e: 285 | print(f"Translation error: {e}") 286 | return (text,) 287 | 288 | class TextToList: 289 | def __init__(self): 290 | pass 291 | 292 | @classmethod 293 | def INPUT_TYPES(cls): 294 | return { 295 | "required": { 296 | }, 297 | "optional": { 298 | "delimiter": ("STRING", {"default": " "}), 299 | "text_1": ("STRING", {"multiline": True, "default": "", "forceInput": True}), 300 | "text_2": ("STRING", {"multiline": True, "default": "", "forceInput": True}), 301 | "text_3": ("STRING", {"multiline": True, "default": "", "forceInput": True}), 302 | "text_4": ("STRING", {"multiline": True, "default": "", "forceInput": True}), 303 | "text_5": ("STRING", {"multiline": True, "default": "", "forceInput": True}), 304 | "text_6": ("STRING", {"multiline": True, "default": "", "forceInput": True}), 305 | "text_7": ("STRING", {"multiline": True, "default": "", "forceInput": True}), 306 | "text_8": ("STRING", {"multiline": True, "default": "", "forceInput": True}), 307 | "text_9": ("STRING", {"multiline": True, "default": "", "forceInput": True}), 308 | "text_10": ("STRING", {"multiline": True, "default": "", "forceInput": True}), 309 | } 310 | } 311 | 312 | RETURN_TYPES = ("STRING", "LIST",) 313 | RETURN_NAMES = ("concatenated STRING", "text LIST") 314 | OUTPUT_IS_LIST = (False, True, ) 315 | FUNCTION = "text_to_list" 316 | 317 | CATEGORY = "LevelPixel/Text" 318 | 319 | def text_to_list(self, 320 | delimiter="", 321 | text_1=None, 322 | text_2=None, 323 | text_3=None, 324 | text_4=None, 325 | text_5=None, 326 | text_6=None, 327 | text_7=None, 328 | text_8=None, 329 | text_9=None, 330 | text_10=None): 331 | 332 | list_str = [] 333 | 334 | if text_1 is not None and text_1 != "": 335 | list_str.append(text_1) 336 | if text_2 is not None and text_2 != "": 337 | list_str.append(text_2) 338 | if text_3 is not None and text_3 != "": 339 | list_str.append(text_3) 340 | if text_4 is not None and text_4 != "": 341 | list_str.append(text_4) 342 | if text_5 is not None and text_5 != "": 343 | list_str.append(text_5) 344 | if text_6 is not None and text_6 != "": 345 | list_str.append(text_6) 346 | if text_7 is not None and text_7 != "": 347 | list_str.append(text_7) 348 | if text_8 is not None and text_8 != "": 349 | list_str.append(text_8) 350 | if text_9 is not None and text_9 != "": 351 | list_str.append(text_9) 352 | if text_10 is not None and text_10 != "": 353 | list_str.append(text_10) 354 | 355 | return delimiter.join(list_str), [list_str] 356 | 357 | class SplitCompoundText: 358 | def __init__(self): 359 | pass 360 | 361 | @classmethod 362 | def INPUT_TYPES(cls): 363 | return { 364 | "required": { 365 | "text": ("STRING", {"multiline": True, "default": "", "forceInput": True}) 366 | } 367 | } 368 | 369 | RETURN_TYPES = ("STRING",) 370 | RETURN_NAMES = ("text TEXT",) 371 | 372 | FUNCTION = "split_compound_text" 373 | 374 | CATEGORY = "LevelPixel/Text" 375 | 376 | def split_compound_text(self, text): 377 | has_trailing_comma = text.rstrip().endswith(',') 378 | 379 | tokens = text.split(',') 380 | result_tokens = [] 381 | 382 | for token in tokens: 383 | token = token.strip() 384 | if token: 385 | splitted_words = wordninja.split(token) 386 | 
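# wordninja segments a run-together token into separate words using English word-frequency
# statistics, e.g. "darkforest" -> ["dark", "forest"]; the pieces are re-joined with spaces below.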
result_tokens.append(" ".join(splitted_words)) 387 | 388 | result = ", ".join(result_tokens) 389 | if has_trailing_comma: 390 | result += "," 391 | 392 | return (result,) 393 | 394 | def is_english_core(core: str) -> bool: 395 | for ch in core: 396 | if ch.isalpha() and ch not in string.ascii_letters: 397 | return False 398 | return True 399 | 400 | def process_token(token: str) -> str: 401 | core = token.strip(string.punctuation) 402 | if core == "": 403 | return token 404 | if is_english_core(core): 405 | return token 406 | else: 407 | return "" 408 | 409 | class KeepOnlyEnglishWords: 410 | def __init__(self): 411 | pass 412 | 413 | @classmethod 414 | def INPUT_TYPES(cls): 415 | return { 416 | "required": { 417 | "text": ("STRING", {"multiline": True, "default": "", "forceInput": True}) 418 | } 419 | } 420 | 421 | RETURN_TYPES = ("STRING",) 422 | RETURN_NAMES = ("text TEXT",) 423 | 424 | FUNCTION = "keep_only_english_words" 425 | 426 | CATEGORY = "LevelPixel/Text" 427 | 428 | def keep_only_english_words(self, text): 429 | tokens = text.split() 430 | processed_tokens = [] 431 | for token in tokens: 432 | filtered = process_token(token) 433 | if filtered: 434 | processed_tokens.append(filtered) 435 | result = " ".join(processed_tokens) 436 | 437 | result = re.sub(r'\s+([,.?!:;])', r'\1', result) 438 | 439 | text = text.rstrip() 440 | if text and text[-1] in ".!?" and (not result or result[-1] not in ".!?"): 441 | result += text[-1] 442 | 443 | return (result,) 444 | 445 | class TextReplace: 446 | 447 | @ classmethod 448 | def INPUT_TYPES(cls): 449 | return { 450 | "required": { 451 | "text": ("STRING", {"multiline": True, "default": "", "forceInput": True}), 452 | }, 453 | "optional": { 454 | "find1": ("STRING", {"multiline": False, "default": ""}), 455 | "replace1": ("STRING", {"multiline": False, "default": ""}), 456 | "find2": ("STRING", {"multiline": False, "default": ""}), 457 | "replace2": ("STRING", {"multiline": False, "default": ""}), 458 | "find3": ("STRING", {"multiline": False, "default": ""}), 459 | "replace3": ("STRING", {"multiline": False, "default": ""}), 460 | "find4": ("STRING", {"multiline": False, "default": ""}), 461 | "replace4": ("STRING", {"multiline": False, "default": ""}), 462 | "find5": ("STRING", {"multiline": False, "default": ""}), 463 | "replace5": ("STRING", {"multiline": False, "default": ""}), 464 | "find6": ("STRING", {"multiline": False, "default": ""}), 465 | "replace6": ("STRING", {"multiline": False, "default": ""}), 466 | "find7": ("STRING", {"multiline": False, "default": ""}), 467 | "replace7": ("STRING", {"multiline": False, "default": ""}), 468 | "find8": ("STRING", {"multiline": False, "default": ""}), 469 | "replace8": ("STRING", {"multiline": False, "default": ""}), 470 | "find9": ("STRING", {"multiline": False, "default": ""}), 471 | "replace9": ("STRING", {"multiline": False, "default": ""}), 472 | }, 473 | } 474 | 475 | RETURN_TYPES = (any, ) 476 | RETURN_NAMES = ("text TEXT", ) 477 | FUNCTION = "replace_text" 478 | CATEGORY = "LevelPixel/Text" 479 | 480 | def replace_text(self, text, find1="", replace1="", find2="", replace2="", find3="", replace3="", find4="", replace4="", find5="", replace5="", find6="", replace6="", find7="", replace7="", find8="", replace8="", find9="", replace9=""): 481 | 482 | text = text.replace(find1, replace1) 483 | text = text.replace(find2, replace2) 484 | text = text.replace(find3, replace3) 485 | text = text.replace(find4, replace4) 486 | text = text.replace(find5, replace5) 487 | text = text.replace(find6, 
replace6) 488 | text = text.replace(find7, replace7) 489 | text = text.replace(find8, replace8) 490 | text = text.replace(find9, replace9) 491 | 492 | return (text,) 493 | 494 | NODE_CLASS_MAPPINGS = { 495 | "TextChoiceParser|LP": TextChoiceParser, 496 | "CLIPTextEncodeTranslate|LP": CLIPTextEncodeTranslate, 497 | "TextTranslate|LP": TextTranslate, 498 | "TextTranslateManualAll|LP": TextTranslateManualAll, 499 | "TextTranslateManual|LP": TextTranslateManual, 500 | "TextToList|LP": TextToList, 501 | "SplitCompoundText|LP": SplitCompoundText, 502 | "KeepOnlyEnglishWords|LP": KeepOnlyEnglishWords, 503 | "TextReplace|LP": TextReplace, 504 | } 505 | 506 | NODE_DISPLAY_NAME_MAPPINGS = { 507 | "TextChoiceParser|LP": "Text Choice Parser [LP]", 508 | "CLIPTextEncodeTranslate|LP": "CLIP Text Encode Translate [LP]", 509 | "TextTranslate|LP": "Text Translate [LP]", 510 | "TextTranslateManualAll|LP": "Text Translate Manual (All langs) [LP]", 511 | "TextTranslateManual|LP": "Text Translate Manual [LP]", 512 | "TextToList|LP": "Text To List [LP]", 513 | "SplitCompoundText|LP": "Split Compound Text [LP]", 514 | "KeepOnlyEnglishWords|LP": "Keep Only English Words [LP]", 515 | "TextReplace|LP": "Text Replace [LP]", 516 | } -------------------------------------------------------------------------------- /nodes/tags/tags_utils_LP.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import string 4 | import re 5 | 6 | tag_category = json.load(open(os.path.join(os.path.dirname(os.path.realpath(__file__)),"tag_category.json"))) 7 | banned_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),"banned_tags.txt") 8 | 9 | def category_for_tags(tags): 10 | tags = [tag.strip() for tag in tags.replace(".", ",").replace("\n", ",").split(",")] 11 | tags2 = [tag.replace(" ", "_").lower() for tag in tags] 12 | 13 | result = [] 14 | for i, tag2 in enumerate(tags2): 15 | if tag2 in tag_category: 16 | category_list = tag_category.get(tag2, []) 17 | for category in category_list: 18 | if category not in result: 19 | result.append(category) 20 | break 21 | 22 | return ", ".join(result) 23 | 24 | class TagCategoryFilter: 25 | def __init__(self): 26 | pass 27 | 28 | @classmethod 29 | def INPUT_TYPES(s): 30 | return { 31 | "required": { 32 | "tags": ("STRING", {"default": ""}), 33 | "include_categories": ("STRING", {"default": ""}), 34 | "exclude_categories": ("STRING", {"default": ""}), 35 | }, 36 | } 37 | 38 | RETURN_TYPES = ("STRING",) 39 | RETURN_NAMES = ("result",) 40 | 41 | FUNCTION = "tag_category" 42 | 43 | CATEGORY = "LevelPixel/Tags" 44 | 45 | OUTPUT_NODE = True 46 | 47 | def tag_category(self, tags, include_categories="", exclude_categories=""): 48 | targets = [] 49 | exclude_targets = [] 50 | 51 | if not include_categories: 52 | print("categories - ", category_for_tags(tags)) 53 | include_categories = category_for_tags(tags) 54 | 55 | include_categories = include_categories.strip() 56 | if include_categories: 57 | targets += [category.strip() for category in include_categories.replace("\n",",").split(",")] 58 | 59 | if exclude_categories: 60 | exclude_targets = [category.strip() for category in exclude_categories.replace("\n",",").split(",")] 61 | targets = [target for target in targets if target not in exclude_targets] 62 | 63 | print("targets", targets) 64 | 65 | tags = [tag.strip() for tag in tags.replace(".", ",").replace("\n", ",").split(",")] 66 | tags2 = [tag.replace(" ", "_").lower() for tag in tags] 67 | 68 | result = [] 69 | for i, 
tag2 in enumerate(tags2): 70 | if tag2 in tag_category: 71 | category_list = tag_category.get(tag2, []) 72 | 73 | for category in category_list: 74 | if category in exclude_targets: 75 | break 76 | else: 77 | for category in category_list: 78 | if '*' in include_categories or ( category in targets and tags[i] not in result ): 79 | result.append(tags[i]) 80 | break 81 | 82 | return (", ".join(result),) 83 | 84 | class TagCategoryKeeper: 85 | def __init__(self): 86 | pass 87 | 88 | @classmethod 89 | def INPUT_TYPES(s): 90 | return { 91 | "required": { 92 | "tags": ("STRING", {"default": ""}), 93 | "include_categories": ("STRING", {"default": ""}), 94 | }, 95 | } 96 | 97 | RETURN_TYPES = ("STRING",) 98 | RETURN_NAMES = ("result",) 99 | 100 | FUNCTION = "tag_keeper" 101 | 102 | CATEGORY = "LevelPixel/Tags" 103 | 104 | OUTPUT_NODE = True 105 | 106 | def tag_keeper(self, tags, include_categories=""): 107 | 108 | targets = [] 109 | 110 | include_categories = include_categories.strip() 111 | if include_categories: 112 | targets += [category.strip() for category in include_categories.replace("\n",",").split(",")] 113 | 114 | print("targets", targets) 115 | 116 | tags = [tag.strip() for tag in tags.replace(".", ",").replace("\n", ",").split(",")] 117 | tags2 = [tag.replace(" ", "_").lower() for tag in tags] 118 | 119 | result = [] 120 | for i, tag2 in enumerate(tags2): 121 | if tag2 in tag_category: 122 | category_list = tag_category.get(tag2, []) 123 | for category in category_list: 124 | if '*' in include_categories or ( category in targets and tags[i] not in result ): 125 | result.append(tags[i]) 126 | break 127 | 128 | return (", ".join(result),) 129 | 130 | class TagCategoryRemover: 131 | def __init__(self): 132 | pass 133 | 134 | @classmethod 135 | def INPUT_TYPES(s): 136 | return { 137 | "required": { 138 | "tags": ("STRING", {"default": ""}), 139 | "exclude_categories": ("STRING", {"default": ""}), 140 | }, 141 | } 142 | 143 | RETURN_TYPES = ("STRING",) 144 | RETURN_NAMES = ("result",) 145 | 146 | FUNCTION = "tag_remover" 147 | 148 | CATEGORY = "LevelPixel/Tags" 149 | 150 | OUTPUT_NODE = True 151 | 152 | def tag_remover(self, tags, exclude_categories=""): 153 | 154 | targets = [] 155 | exclude_targets = [] 156 | 157 | if exclude_categories: 158 | exclude_targets = [category.strip() for category in exclude_categories.replace("\n",",").split(",")] 159 | targets = [target for target in targets if target not in exclude_targets] 160 | 161 | print("targets", targets) 162 | 163 | tags = [tag.strip() for tag in tags.replace(".", ",").replace("\n", ",").split(",")] 164 | tags2 = [tag.replace(" ", "_").lower() for tag in tags] 165 | 166 | result = [] 167 | for i, tag2 in enumerate(tags2): 168 | if tag2 in tag_category: 169 | category_list = tag_category.get(tag2, []) 170 | 171 | for category in category_list: 172 | if category in exclude_targets: 173 | break 174 | else: 175 | if tags[i] not in result: 176 | result.append(tags[i]) 177 | break 178 | 179 | return (", ".join(result),) 180 | 181 | class TagSwitcher: 182 | def __init__(self): 183 | pass 184 | 185 | @classmethod 186 | def INPUT_TYPES(s): 187 | return { 188 | "required": { 189 | "input_tags": ("STRING", {"default": ""}), 190 | "default_image": ("IMAGE", {"default": ""}), 191 | "tags1": ("STRING", {"default": ""}), 192 | "image1": ("IMAGE", {"default": ""}), 193 | "any1": ("BOOLEAN", {"default": True}), 194 | }, 195 | "optional": { 196 | "tags2": ("STRING", {"default": ""}), 197 | "image2": ("IMAGE", {"default": ""}), 198 | "any2": ("BOOLEAN", 
{"default": True}), 199 | "tags3": ("STRING", {"default": ""}), 200 | "image3": ("IMAGE", {"default": ""}), 201 | "any3": ("BOOLEAN", {"default": True}), 202 | "tags4": ("STRING", {"default": ""}), 203 | "image4": ("IMAGE", {"default": ""}), 204 | "any4": ("BOOLEAN", {"default": True}), 205 | } 206 | } 207 | 208 | RETURN_TYPES = ("IMAGE",) 209 | RETURN_NAMES = ("image",) 210 | 211 | FUNCTION = "tag_switcher" 212 | 213 | CATEGORY = "LevelPixel/Tags" 214 | 215 | OUTPUT_NODE = True 216 | 217 | def _tag_split(self, tags: str) -> list: 218 | return [tag.strip().replace("_", " ").lower().strip() for tag in tags.replace(".",",").replace("\n",",").split(",")] 219 | 220 | def tag_switcher(self, input_tags="", default_image=None, tags1="", image1=None, any1=True, tags2="", image2=None, any2=True, tags3="", image3=None, any3=True, tags4="", image4=None, any4=True): 221 | input_tags = self._tag_split(input_tags) 222 | 223 | target_tags = [] 224 | tags1 = set(self._tag_split(tags1)) 225 | target_tags.append((tags1, image1, any1)) 226 | 227 | tags2 = set(self._tag_split(tags2)) 228 | target_tags.append((tags2, image2, any2)) 229 | 230 | tags3 = set(self._tag_split(tags3)) 231 | target_tags.append((tags3, image3, any3)) 232 | 233 | tags4 = set(self._tag_split(tags4)) 234 | target_tags.append((tags4, image4, any4)) 235 | 236 | for tags, image, any_flag in target_tags: 237 | if any_flag: 238 | if any(tag in tags for tag in input_tags): 239 | return (image,) 240 | else: 241 | if all(tag in input_tags for tag in tags): 242 | return (image,) 243 | 244 | return (default_image,) 245 | 246 | 247 | class TagMerger: 248 | def __init__(self): 249 | pass 250 | 251 | @classmethod 252 | def INPUT_TYPES(s): 253 | return { 254 | "required": { 255 | "tags1": ("STRING", {"default": ""}), 256 | "tags2": ("STRING", {"default": ""}), 257 | "under_score": ("BOOLEAN", {"default": True}), 258 | }, 259 | } 260 | 261 | RETURN_TYPES = ("STRING",) 262 | RETURN_NAMES = ("result",) 263 | 264 | FUNCTION = "tag_merger" 265 | 266 | CATEGORY = "LevelPixel/Tags" 267 | 268 | OUTPUT_NODE = True 269 | 270 | def tag_merger(self, tags1:str, tags2:str, under_score=True): 271 | tags1 = [tag.strip().replace(" ", "_").lower() for tag in tags1.replace(".",",").replace("\n",",").split(",")] 272 | tags2 = [tag.strip().replace(" ", "_").lower() for tag in tags2.replace(".",",").replace("\n",",").split(",")] 273 | 274 | tags = tags1 + list(set(tags2) - set(tags1)) 275 | 276 | tags = [tag for tag in tags if tag] 277 | 278 | if not under_score: 279 | tags = [tag.replace("_", " ") for tag in tags] 280 | 281 | return (", ".join(tags),) 282 | 283 | 284 | class TagRemover: 285 | def __init__(self): 286 | pass 287 | 288 | @classmethod 289 | def INPUT_TYPES(s): 290 | return { 291 | "required": { 292 | "tags": ("STRING", {"default": ""}), 293 | "exclude_tags": ("STRING", {"default": ""}), 294 | }, 295 | } 296 | 297 | RETURN_TYPES = ("STRING",) 298 | RETURN_NAMES = ("result",) 299 | 300 | FUNCTION = "tag_remover" 301 | 302 | CATEGORY = "LevelPixel/Tags" 303 | 304 | OUTPUT_NODE = True 305 | 306 | def tag_remover(self, tags:str, exclude_tags:str=""): 307 | tags = [tag.strip() for tag in tags.replace("\n",",").split(",")] 308 | tags2 = [tag.replace(" ", "_").lower().strip() for tag in tags] 309 | 310 | exclude_tags = [tag.strip() for tag in exclude_tags.replace("\n",",").split(",")] 311 | exclude_tags2 = [tag.replace(" ", "_").lower().strip() for tag in exclude_tags] 312 | 313 | result = [] 314 | for i, tag2 in enumerate(tags2): 315 | if tag2 not in exclude_tags2: 
316 | result.append(tags[i]) 317 | 318 | return (", ".join(result),) 319 | 320 | 321 | class TagReplace: 322 | def __init__(self): 323 | pass 324 | 325 | @classmethod 326 | def INPUT_TYPES(s): 327 | return { 328 | "required": { 329 | "tags": ("STRING", {"default": ""}), 330 | "replace_tags": ("STRING", {"default": ""}), 331 | "match": ("FLOAT", {"default": 0.3}), 332 | }, 333 | } 334 | 335 | RETURN_TYPES = ("STRING",) 336 | RETURN_NAMES = ("result",) 337 | 338 | FUNCTION = "tag_replace" 339 | 340 | CATEGORY = "LevelPixel/Tags" 341 | 342 | OUTPUT_NODE = True 343 | 344 | def _get_categories(self, tag: str) -> set: 345 | return set(tag_category.get(tag, [])) 346 | 347 | def _category_match_percentage(self, categories1: set, categories2: set) -> float: 348 | if not categories1 or not categories2: 349 | return 0 350 | intersection = categories1.intersection(categories2) 351 | union = categories1.union(categories2) 352 | return len(intersection) / len(union) 353 | 354 | def tag_replace(self, tags:str, replace_tags:str="", match:float=0.3): 355 | tags = [tag.strip() for tag in tags.replace("\n",",").split(",")] 356 | tags_normalized = [tag.replace(" ", "_").lower().strip() for tag in tags] 357 | 358 | replace_tags = [tag.strip() for tag in replace_tags.replace("\n",",").split(",")] 359 | replace_tags_normalized = [tag.replace(" ", "_").lower().strip() for tag in replace_tags] 360 | replace_tags_used = {tag:False for tag in replace_tags_normalized} 361 | 362 | result = [] 363 | for i, tag in enumerate(tags_normalized): 364 | tag_categories = self._get_categories(tag) 365 | best_match_tag = None 366 | best_match_tag_id = None 367 | best_match_percentage = 0 368 | 369 | for k, replace_tag in enumerate(replace_tags_normalized): 370 | replace_categories = self._get_categories(replace_tag) 371 | match_percentage = self._category_match_percentage(tag_categories, replace_categories) 372 | 373 | if match_percentage and match_percentage > best_match_percentage: 374 | best_match_percentage = match_percentage 375 | best_match_tag = replace_tag 376 | best_match_tag_id = k 377 | 378 | if best_match_tag and best_match_percentage >= match: 379 | # Mark a replacement as used only when it is actually applied, so unapplied candidates still reach extra_tags below 380 | replace_tags_used[best_match_tag] = True 381 | result.append(replace_tags[best_match_tag_id]) 382 | else: 383 | result.append(tags[i]) 384 | 385 | extra_tags = [replace_tag for replace_tag, used in replace_tags_used.items() if not used] 386 | result.extend(extra_tags) 387 | 388 | return (", ".join(result),) 389 | 390 | class TagCategory: 391 | def __init__(self): 392 | pass 393 | 394 | @classmethod 395 | def INPUT_TYPES(s): 396 | return { 397 | "required": { 398 | "tags": ("STRING", {"default": ""}), 399 | }, 400 | } 401 | 402 | RETURN_TYPES = ("STRING",) 403 | RETURN_NAMES = ("result",) 404 | 405 | FUNCTION = "tag_category_info" 406 | 407 | CATEGORY = "LevelPixel/Tags" 408 | 409 | OUTPUT_NODE = True 410 | 411 | def tag_category_info(self, tags): 412 | tags = [tag.strip() for tag in tags.replace(".", ",").replace("\n", ",").split(",")] 413 | tags2 = [tag.replace(" ", "_").lower() for tag in tags] 414 | 415 | result = [] 416 | for i, tag2 in enumerate(tags2): 417 | if tag2 in tag_category: 418 | category_list = tag_category.get(tag2, []) 419 | for category in category_list: 420 | if category not in result: 421 | result.append(category) 422 | break 423 | 424 | return (", ".join(result),) 425 | 426 | def parse_tags(text): 427 | return [tag.strip() for tag in text.split(",") if tag.strip() != ""] 428 | 429 | def update_scores(scores, text, base, bonus_first, bonus_4_10, 
include_new=True): 430 | tags = parse_tags(text) 431 | for i, tag in enumerate(tags): 432 | if i < 3: 433 | points = base + bonus_first 434 | elif i < 10: 435 | points = base + bonus_4_10 436 | else: 437 | points = base 438 | 439 | if len(tag.split()) == 1: 440 | points += 1 441 | 442 | if tag in scores: 443 | scores[tag] += points 444 | else: 445 | if include_new: 446 | scores[tag] = points 447 | 448 | class ResortingTags: 449 | def __init__(self): 450 | pass 451 | 452 | @classmethod 453 | def INPUT_TYPES(cls): 454 | return { 455 | "required": { 456 | "priority_texts": ("LIST", {"forceInput": True}), 457 | "inclusive_texts": ("LIST", {"forceInput": True}), 458 | "auxiliary_texts": ("LIST", {"forceInput": True}), 459 | } 460 | } 461 | 462 | RETURN_TYPES = ("STRING", "STRING",) 463 | RETURN_NAMES = ("tags TEXT", "tags_with_rating TEXT",) 464 | 465 | FUNCTION = "resorting_tags" 466 | 467 | CATEGORY = "LevelPixel/Tags" 468 | 469 | def resorting_tags(self, priority_texts, inclusive_texts, auxiliary_texts): 470 | scores = {} 471 | for text in priority_texts: 472 | update_scores(scores, text, base=3, bonus_first=2, bonus_4_10=1, include_new=True) 473 | 474 | for text in inclusive_texts: 475 | update_scores(scores, text, base=2, bonus_first=2, bonus_4_10=1, include_new=True) 476 | 477 | for text in auxiliary_texts: 478 | update_scores(scores, text, base=1, bonus_first=1, bonus_4_10=0, include_new=False) 479 | 480 | sorted_tags = sorted(scores.items(), key=lambda item: (-item[1], item[0])) 481 | result = ", ".join(tag for tag, _ in sorted_tags) 482 | rating_text = ", ".join(f"{tag}:{points}" for tag, points in sorted_tags) 483 | 484 | print("Result tags:") 485 | print(result) 486 | print("\nRating result tags:") 487 | print(rating_text) 488 | 489 | return (result, rating_text,) 490 | 491 | class RemoveDuplicateTags: 492 | def __init__(self): 493 | pass 494 | 495 | @classmethod 496 | def INPUT_TYPES(cls): 497 | return { 498 | "required": { 499 | "text": ("STRING", {"multiline": True, "default": "", "forceInput": True}) 500 | } 501 | } 502 | 503 | RETURN_TYPES = ("STRING",) 504 | RETURN_NAMES = ("tags TEXT",) 505 | 506 | FUNCTION = "remove_duplicate_tags" 507 | 508 | CATEGORY = "LevelPixel/Tags" 509 | 510 | def remove_duplicate_tags(self, text): 511 | tags = text.split(',') 512 | seen = set() 513 | unique_tags = [] 514 | 515 | for tag in tags: 516 | tag_clean = tag.strip() 517 | if tag_clean and tag_clean not in seen: 518 | seen.add(tag_clean) 519 | unique_tags.append(tag_clean) 520 | 521 | result = ", ".join(unique_tags) + "," 522 | 523 | return (result,) 524 | 525 | def is_english_core(core: str) -> bool: 526 | for ch in core: 527 | if ch.isalpha() and ch not in string.ascii_letters: 528 | return False 529 | return True 530 | 531 | class KeepOnlyEnglishTags: 532 | def __init__(self): 533 | pass 534 | 535 | @classmethod 536 | def INPUT_TYPES(cls): 537 | return { 538 | "required": { 539 | "text": ("STRING", {"multiline": True, "default": "", "forceInput": True}) 540 | } 541 | } 542 | 543 | RETURN_TYPES = ("STRING",) 544 | RETURN_NAMES = ("tags TEXT",) 545 | 546 | FUNCTION = "keep_only_english_tags" 547 | 548 | CATEGORY = "LevelPixel/Tags" 549 | 550 | def keep_only_english_tags(self, text): 551 | tags = [tag.strip() for tag in text.split(',') if tag.strip()] 552 | 553 | filtered_tags = [tag for tag in tags if is_english_core(tag)] 554 | 555 | result = ', '.join(filtered_tags) 556 | if text.strip() and text.strip()[-1] == ',': 557 | result += ',' 558 | 559 | return (result,) 560 | 561 | class RemoveBannedTagsFromTags: 
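# Drops every comma-separated tag that appears in banned_tags.txt (one tag per line, matched case-insensitively).
# Wrapper characters such as quotes, brackets and guillemets are peeled off a token before the comparison,
# so "tag", 'tag', (tag) and «tag» are all treated as the banned tag itself.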
562 | def __init__(self): 563 | pass 564 | 565 | @classmethod 566 | def INPUT_TYPES(cls): 567 | return { 568 | "required": { 569 | "text": ("STRING", {"multiline": True, "default": "", "forceInput": True}) 570 | } 571 | } 572 | 573 | RETURN_TYPES = ("STRING",) 574 | RETURN_NAMES = ("tags TEXT",) 575 | 576 | FUNCTION = "remove_banned_tags_from_tags" 577 | 578 | CATEGORY = "LevelPixel/Tags" 579 | 580 | def remove_banned_tags_from_tags(self, text): 581 | with open(banned_file, "r", encoding="utf-8") as f: 582 | banned_tags = {line.strip().lower() for line in f if line.strip()} 583 | 584 | text_lower = text.lower() 585 | tokens = text_lower.split(',') 586 | wrapper_chars = '\"\'“”‘’()[]{}<>«»' 587 | 588 | kept_tokens = [] 589 | for token in tokens: 590 | token_for_check = token.strip() 591 | while token_for_check and token_for_check[0] in wrapper_chars: 592 | token_for_check = token_for_check[1:] 593 | while token_for_check and token_for_check[-1] in wrapper_chars: 594 | token_for_check = token_for_check[:-1] 595 | 596 | if token_for_check in banned_tags: 597 | continue 598 | else: 599 | kept_tokens.append(token) 600 | 601 | result = ",".join(kept_tokens) 602 | 603 | return (result,) 604 | 605 | class RemoveBannedTagsFromText: 606 | def __init__(self): 607 | pass 608 | 609 | @classmethod 610 | def INPUT_TYPES(cls): 611 | return { 612 | "required": { 613 | "text": ("STRING", {"multiline": True, "default": "", "forceInput": True}) 614 | } 615 | } 616 | 617 | RETURN_TYPES = ("STRING",) 618 | RETURN_NAMES = ("tags TEXT",) 619 | 620 | FUNCTION = "remove_banned_tags_from_text" 621 | 622 | CATEGORY = "LevelPixel/Tags" 623 | 624 | def remove_banned_tags_from_text(self, text): 625 | with open(banned_file, "r", encoding="utf-8") as f: 626 | banned_tags = [line.strip() for line in f if line.strip()] 627 | 628 | encl_chars = r'\"\'‘’“”«»\(\)\[\]\{\}<>' 629 | 630 | for tag in banned_tags: 631 | pattern = re.compile( 632 | r'(?= preresize_min_width and current_height >= preresize_min_height: 91 | return image, mask, optional_context_mask 92 | 93 | scale_factor_min_width = preresize_min_width / current_width 94 | scale_factor_min_height = preresize_min_height / current_height 95 | 96 | scale_factor = max(scale_factor_min_width, scale_factor_min_height) 97 | 98 | target_width = int(current_width * scale_factor) 99 | target_height = int(current_height * scale_factor) 100 | 101 | if image is not None: image = rescale_i(image, target_width, target_height, upscale_algorithm) 102 | if mask is not None: mask = rescale_m(mask, target_width, target_height, 'bilinear') 103 | if optional_context_mask is not None: optional_context_mask = rescale_m(optional_context_mask, target_width, target_height, 'bilinear') 104 | 105 | assert target_width >= preresize_min_width and target_height >= preresize_min_height, \ 106 | f"Internal error: After resizing, target size {target_width}x{target_height} is smaller than min size {preresize_min_width}x{preresize_min_height}" 107 | 108 | elif preresize_mode == "ensure minimum and maximum resolution": 109 | if preresize_min_width <= current_width <= preresize_max_width and preresize_min_height <= current_height <= preresize_max_height: 110 | return image, mask, optional_context_mask 111 | 112 | scale_factor_min_width = preresize_min_width / current_width 113 | scale_factor_min_height = preresize_min_height / current_height 114 | scale_factor_min = max(scale_factor_min_width, scale_factor_min_height) 115 | 116 | scale_factor_max_width = preresize_max_width / current_width 117 | 
scale_factor_max_height = preresize_max_height / current_height 118 | scale_factor_max = min(scale_factor_max_width, scale_factor_max_height) 119 | 120 | if scale_factor_min > 1 and scale_factor_max < 1: 121 | assert False, "Cannot meet both minimum and maximum resolution requirements with aspect ratio preservation." 122 | 123 | if scale_factor_min > 1: # We're upscaling to meet min resolution 124 | scale_factor = scale_factor_min 125 | rescale_algorithm = upscale_algorithm # Use upscale algorithm for min resolution 126 | else: # We're downscaling to meet max resolution 127 | scale_factor = scale_factor_max 128 | rescale_algorithm = downscale_algorithm # Use downscale algorithm for max resolution 129 | 130 | target_width = int(current_width * scale_factor) 131 | target_height = int(current_height * scale_factor) 132 | 133 | if image is not None: image = rescale_i(image, target_width, target_height, rescale_algorithm) 134 | if mask is not None: mask = rescale_m(mask, target_width, target_height, 'nearest') # Always nearest for efficiency 135 | if optional_context_mask is not None: optional_context_mask = rescale_m(optional_context_mask, target_width, target_height, 'nearest') # Always nearest for efficiency 136 | 137 | assert preresize_min_width <= target_width <= preresize_max_width, \ 138 | f"Internal error: Target width {target_width} is outside the range {preresize_min_width} - {preresize_max_width}" 139 | assert preresize_min_height <= target_height <= preresize_max_height, \ 140 | f"Internal error: Target height {target_height} is outside the range {preresize_min_height} - {preresize_max_height}" 141 | 142 | elif preresize_mode == "ensure maximum resolution": 143 | if current_width <= preresize_max_width and current_height <= preresize_max_height: 144 | return image, mask, optional_context_mask 145 | 146 | scale_factor_max_width = preresize_max_width / current_width 147 | scale_factor_max_height = preresize_max_height / current_height 148 | scale_factor_max = min(scale_factor_max_width, scale_factor_max_height) 149 | 150 | target_width = int(current_width * scale_factor_max) 151 | target_height = int(current_height * scale_factor_max) 152 | 153 | if image is not None: image = rescale_i(image, target_width, target_height, downscale_algorithm) 154 | if mask is not None: mask = rescale_m(mask, target_width, target_height, 'nearest') # Always nearest for efficiency 155 | if optional_context_mask is not None: optional_context_mask = rescale_m(optional_context_mask, target_width, target_height, 'nearest') # Always nearest for efficiency 156 | 157 | assert target_width <= preresize_max_width and target_height <= preresize_max_height, \ 158 | f"Internal error: Target size {target_width}x{target_height} is greater than max size {preresize_max_width}x{preresize_max_height}" 159 | 160 | return image, mask, optional_context_mask 161 | 162 | def compute_target_size(width, height, target_resolution, aspect_ratio_limit=2): 163 | ratio = max(1 / aspect_ratio_limit, min(width / height, aspect_ratio_limit)) 164 | height_new = target_resolution * 2 / (ratio + 1) 165 | width_new = ratio * height_new 166 | target_size = { 167 | "target_height": int(round(height_new)), 168 | "target_width": int(round(width_new)), 169 | } 170 | 171 | return target_size 172 | 173 | def calculate_target_size(mask, target_resolution, aspect_ratio_limit=2): 174 | B, H, W = mask.shape 175 | mask = mask.round() 176 | 177 | for b in range(B): 178 | rows = torch.any(mask[min(b, mask.shape[0]-1)] > 0, dim=1) 179 | cols = 
torch.any(mask[min(b, mask.shape[0]-1)] > 0, dim=0) 180 | 181 | row_indices = torch.where(rows)[0] 182 | col_indices = torch.where(cols)[0] 183 | 184 | if row_indices.numel() == 0 or col_indices.numel() == 0: 185 | width, height = W, H 186 | else: 187 | y_min, y_max = row_indices[[0, -1]] 188 | x_min, x_max = col_indices[[0, -1]] 189 | width = (x_max - x_min + 1).item() 190 | height = (y_max - y_min + 1).item() 191 | 192 | return compute_target_size(width, height, target_resolution, aspect_ratio_limit) 193 | 194 | class CalculateTargetSizeByMask: 195 | 196 | @classmethod 197 | def INPUT_TYPES(cls): 198 | return { 199 | "required": { 200 | "mask": ("MASK", ), 201 | "target_size": ("INT", {"default": 1024, "min": 0, "max": nodes.MAX_RESOLUTION, "step": 1}), 202 | "aspect_ratio_limit": ("FLOAT", {"default": 2, "min": 0, "max": 100, "step": 0.01}), 203 | }, 204 | } 205 | 206 | RETURN_TYPES = ("INT", "INT",) 207 | RETURN_NAMES = ("height", "width",) 208 | FUNCTION = "calculate_target_size" 209 | CATEGORY = "LevelPixel/Image" 210 | def calculate_target_size(self, mask, target_size=1024, aspect_ratio_limit=2): 211 | target_size = calculate_target_size(mask, target_size, aspect_ratio_limit) 212 | target_height, target_width = (target_size["target_height"], target_size["target_width"]) 213 | 214 | return (target_height, target_width, ) 215 | 216 | class FastCheckerPattern: 217 | 218 | @classmethod 219 | def INPUT_TYPES(s): 220 | return {"required": { 221 | "width": ("INT", {"default": 512, "min": 64, "max": 4096}), 222 | "height": ("INT", {"default": 512, "min": 64, "max": 4096}), 223 | "color_1": (COLORS,), 224 | "color_2": (COLORS,), 225 | "grid_frequency": ("INT", {"default": 50, "min": 1, "max": 200, "step": 1}), 226 | }, 227 | "optional": { 228 | "color1_hex": ("STRING", {"multiline": False, "default": "#C0C0C0"}), 229 | "color2_hex": ("STRING", {"multiline": False, "default": "#808080"}), 230 | } 231 | } 232 | 233 | RETURN_TYPES = ("IMAGE", ) 234 | RETURN_NAMES = ("IMAGE", ) 235 | FUNCTION = "draw" 236 | CATEGORY = "LevelPixel/Image" 237 | 238 | def draw(self, width, height, color_1, color_2, 239 | grid_frequency, color1_hex='#C0C0C0', color2_hex='#808080'): 240 | 241 | if color_1 == "custom": 242 | color1_rgb = hex_to_rgb(color1_hex) 243 | else: 244 | color1_rgb = color_mapping.get(color_1, (255, 255, 255)) 245 | 246 | if color_2 == "custom": 247 | color2_rgb = hex_to_rgb(color2_hex) 248 | else: 249 | color2_rgb = color_mapping.get(color_2, (0, 0, 0)) 250 | 251 | canvas = np.zeros((height, width, 3), dtype=np.uint8) 252 | # max(1, ...) guards against a zero cell size (and division by zero below) when grid_frequency exceeds the width 253 | grid_size = max(1, width // grid_frequency) 254 | 255 | x_indices = np.arange(width) // grid_size 256 | y_indices = np.arange(height) // grid_size 257 | 258 | x_grid, y_grid = np.meshgrid(x_indices, y_indices) 259 | 260 | checker_pattern = (x_grid + y_grid) % 2 261 | 262 | canvas[checker_pattern == 0] = color1_rgb 263 | canvas[checker_pattern == 1] = color2_rgb 264 | 265 | fig, ax = plt.subplots(figsize=(width/100, height/100)) 266 | ax.imshow(canvas) 267 | 268 | plt.axis('off') 269 | plt.tight_layout(pad=0, w_pad=0, h_pad=0) 270 | plt.autoscale(tight=True) 271 | 272 | img_buf = io.BytesIO() 273 | plt.savefig(img_buf, format='png') 274 | img = Image.open(img_buf) 275 | plt.close(fig)  # release the figure so repeated runs do not leak matplotlib memory 276 | image_out = pil2tensor(img.convert("RGB")) 277 | 278 | return (image_out,) 279 | 280 | class ImageOverlay: 281 | 282 | @classmethod 283 | def INPUT_TYPES(cls): 284 | return { 285 | "required": { 286 | "base_image": ("IMAGE",), 287 | "overlay_image": ("IMAGE",), 288 | "overlay_resize": (["None", "Fit", 
"Resize by rescale_factor", "Resize to width & heigth"],), 289 | "resize_method": (["nearest-exact", "bilinear", "area"],), 290 | "rescale_factor": ("FLOAT", {"default": 1, "min": 0.01, "max": 16.0, "step": 0.1}), 291 | "width": ("INT", {"default": 1024, "min": 0, "max": 32768, "step": 64}), 292 | "height": ("INT", {"default": 1024, "min": 0, "max": 32768, "step": 64}), 293 | "x_offset": ("INT", {"default": 0, "min": -48000, "max": 48000, "step": 10}), 294 | "y_offset": ("INT", {"default": 0, "min": -48000, "max": 48000, "step": 10}), 295 | "rotation": ("INT", {"default": 0, "min": -180, "max": 180, "step": 5}), 296 | "opacity": ("FLOAT", {"default": 0, "min": 0, "max": 100, "step": 5}), 297 | }, 298 | "optional": {"optional_mask": ("MASK",),} 299 | } 300 | 301 | RETURN_TYPES = ("IMAGE",) 302 | FUNCTION = "apply_overlay_image" 303 | CATEGORY = "LevelPixel/Image" 304 | 305 | def apply_overlay_image(self, base_image, overlay_image, overlay_resize, resize_method, rescale_factor, 306 | width, height, x_offset, y_offset, rotation, opacity, optional_mask=None): 307 | 308 | # Pack tuples and assign variables 309 | size = width, height 310 | location = x_offset, y_offset 311 | mask = optional_mask 312 | 313 | # Check for different sizing options 314 | if overlay_resize != "None": 315 | #Extract overlay_image size and store in Tuple "overlay_image_size" (WxH) 316 | overlay_image_size = overlay_image.size() 317 | overlay_image_size = (overlay_image_size[2], overlay_image_size[1]) 318 | if overlay_resize == "Fit": 319 | h_ratio = base_image.size()[1] / overlay_image_size[1] 320 | w_ratio = base_image.size()[2] / overlay_image_size[0] 321 | ratio = min(h_ratio, w_ratio) 322 | overlay_image_size = tuple(round(dimension * ratio) for dimension in overlay_image_size) 323 | elif overlay_resize == "Resize by rescale_factor": 324 | overlay_image_size = tuple(int(dimension * rescale_factor) for dimension in overlay_image_size) 325 | elif overlay_resize == "Resize to width & heigth": 326 | overlay_image_size = (size[0], size[1]) 327 | 328 | samples = overlay_image.movedim(-1, 1) 329 | overlay_image = comfy.utils.common_upscale(samples, overlay_image_size[0], overlay_image_size[1], resize_method, False) 330 | overlay_image = overlay_image.movedim(1, -1) 331 | 332 | overlay_image = tensor2pil(overlay_image) 333 | 334 | # Add Alpha channel to overlay 335 | overlay_image = overlay_image.convert('RGBA') 336 | overlay_image.putalpha(Image.new("L", overlay_image.size, 255)) 337 | 338 | # If mask connected, check if the overlay_image image has an alpha channel 339 | if mask is not None: 340 | # Convert mask to pil and resize 341 | mask = tensor2pil(mask) 342 | mask = mask.resize(overlay_image.size) 343 | # Apply mask as overlay's alpha 344 | overlay_image.putalpha(ImageOps.invert(mask)) 345 | 346 | # Rotate the overlay image 347 | overlay_image = overlay_image.rotate(rotation, expand=True) 348 | 349 | # Apply opacity on overlay image 350 | r, g, b, a = overlay_image.split() 351 | a = a.point(lambda x: max(0, int(x * (1 - opacity / 100)))) 352 | overlay_image.putalpha(a) 353 | 354 | # Split the base_image tensor along the first dimension to get a list of tensors 355 | base_image_list = torch.unbind(base_image, dim=0) 356 | 357 | # Convert each tensor to a PIL image, apply the overlay, and then convert it back to a tensor 358 | processed_base_image_list = [] 359 | for tensor in base_image_list: 360 | # Convert tensor to PIL Image 361 | image = tensor2pil(tensor) 362 | 363 | # Paste the overlay image onto the base image 
364 | if mask is None: 365 | image.paste(overlay_image, location) 366 | else: 367 | image.paste(overlay_image, location, overlay_image) 368 | 369 | # Convert PIL Image back to tensor 370 | processed_tensor = pil2tensor(image) 371 | 372 | # Append to list 373 | processed_base_image_list.append(processed_tensor) 374 | 375 | # Combine the processed images back into a single tensor 376 | base_image = torch.stack([tensor.squeeze() for tensor in processed_base_image_list]) 377 | 378 | # Return the edited base image 379 | return (base_image,) 380 | 381 | def calculate_scale_factor(width, height, target_size, mode="max"): 382 | if mode == "min": 383 | base_size = min(width, height) 384 | elif mode == "max": 385 | base_size = max(width, height) 386 | elif mode == "avg": 387 | base_size = (width + height) / 2 388 | else: 389 | raise ValueError("Mode must be 'min', 'max', or 'avg'") 390 | 391 | scale_factor = target_size / base_size 392 | return scale_factor 393 | 394 | def target_scale_factor(width, height, target_size=9000): 395 | aspect_ratio = width / height if width > height else height / width 396 | 397 | if 0.9 <= aspect_ratio <= 1.1: 398 | mode = "max" 399 | elif aspect_ratio > 1.5 or aspect_ratio < 0.67: 400 | mode = "avg" 401 | else: 402 | mode = "max" 403 | 404 | scale_factor = calculate_scale_factor(width, height, target_size, mode) 405 | return scale_factor 406 | 407 | def tensor_to_pil(image_tensor): 408 | if image_tensor.ndimension() == 4: 409 | image_tensor = image_tensor.squeeze(0) 410 | if image_tensor.shape[0] in [1, 3]: 411 | image_tensor = image_tensor.permute(1, 2, 0) 412 | image_tensor = image_tensor.cpu().numpy() 413 | image_tensor = (image_tensor * 255).clip(0, 255).astype(np.uint8) 414 | return Image.fromarray(image_tensor) 415 | 416 | def pil_to_tensor(image): 417 | image = np.array(image).astype(np.float32) / 255.0 418 | image = torch.tensor(image).permute(2, 0, 1) 419 | return image 420 | 421 | class ResizeImageToTargetSize: 422 | 423 | @classmethod 424 | def INPUT_TYPES(cls): 425 | return { 426 | "required": { 427 | "image": ("IMAGE",), 428 | "resize_method": (["LANCZOS", "BICUBIC", "BILINEAR", "NEAREST"],), 429 | "target_size": ([1000, 2000, 9000],), 430 | }, 431 | } 432 | 433 | RETURN_TYPES = ("IMAGE",) 434 | FUNCTION = "resize_image_to_target_size" 435 | CATEGORY = "LevelPixel/Image" 436 | def resize_image_to_target_size(self, image, resize_method='LANCZOS', target_size=1000): 437 | size_table = { 438 | 1000: [(640, 1536), (768, 1344), (832, 1216), (896, 1152), (1024, 1024), (1152, 896), (1216, 832), (1344, 768), (1536, 640)], 439 | 2000: [(1280, 3072), (1536, 2688), (1664, 2432), (1792, 2304), (2048, 2048), (2304, 1792), (2432, 1664), (2688, 1536), (3072, 1280)], 440 | 9000: [(5000, 12500), (6000, 10500), (6000, 9000), (7000, 9000), (9000, 9000), (9000, 7000), (9000, 6000), (10500, 6000), (12500, 5000)] 441 | } 442 | 443 | interpolation_methods = { 444 | 'NEAREST': Image.NEAREST, 445 | 'BILINEAR': Image.BILINEAR, 446 | 'BICUBIC': Image.BICUBIC, 447 | 'LANCZOS': Image.LANCZOS 448 | } 449 | 450 | img = tensor2pil(image).convert("RGB") 451 | width, height = img.size 452 | 453 | scale_factor = target_scale_factor(width, height, target_size) 454 | new_width, new_height = int(width * scale_factor), int(height * scale_factor) 455 | closest_size = min(size_table[target_size], key=lambda s: abs(s[0] - new_width) + abs(s[1] - new_height)) 456 | 457 | aspect_ratio = width / height if width > height else height / width 458 | 459 | if 0.9 <= aspect_ratio <= 1.1: 460 | mode = 
"max" 461 | elif aspect_ratio > 1.5 or aspect_ratio < 0.67: 462 | mode = "avg" 463 | else: 464 | mode = "max" 465 | 466 | if mode == "avg": 467 | target_size = (closest_size[0] + closest_size[1]) / 2 468 | 469 | if target_size > 9000: 470 | target_size = 9000 471 | 472 | scale_factor = target_scale_factor(width, height, target_size) 473 | new_width, new_height = int(width * scale_factor), int(height * scale_factor) 474 | 475 | while ((new_width < closest_size[0]) | (new_height < closest_size[1])): 476 | target_size = target_size + 10 477 | scale_factor = target_scale_factor(width, height, target_size) 478 | new_width, new_height = int(width * scale_factor), int(height * scale_factor) 479 | 480 | img = img.resize((new_width, new_height), interpolation_methods[resize_method]) 481 | 482 | if new_width >= closest_size[0] and new_height >= closest_size[1]: 483 | left = (new_width - closest_size[0]) // 2 484 | top = (new_height - closest_size[1]) // 2 485 | img = img.crop((left, top, left + closest_size[0], top + closest_size[1])) 486 | 487 | return (pil2tensor(img),) 488 | 489 | class ResizeImageAndMasks: 490 | @classmethod 491 | def INPUT_TYPES(cls): 492 | return { 493 | "required": { 494 | "downscale_algorithm": (["nearest", "bilinear", "bicubic", "lanczos", "box", "hamming"], {"default": "lanczos"}), 495 | "upscale_algorithm": (["nearest", "bilinear", "bicubic", "lanczos", "box", "hamming"], {"default": "lanczos"}), 496 | "preresize_mode": (["ensure minimum resolution", "ensure maximum resolution", "ensure minimum and maximum resolution"], {"default": "ensure minimum resolution"}), 497 | "preresize_min_width": ("INT", {"default": 1024, "min": 0, "max": nodes.MAX_RESOLUTION, "step": 1}), 498 | "preresize_min_height": ("INT", {"default": 1024, "min": 0, "max": nodes.MAX_RESOLUTION, "step": 1}), 499 | "preresize_max_width": ("INT", {"default": nodes.MAX_RESOLUTION, "min": 0, "max": nodes.MAX_RESOLUTION, "step": 1}), 500 | "preresize_max_height": ("INT", {"default": nodes.MAX_RESOLUTION, "min": 0, "max": nodes.MAX_RESOLUTION, "step": 1}), 501 | }, 502 | "optional": { 503 | "image": ("IMAGE",), 504 | "mask": ("MASK",), 505 | "optional_context_mask": ("MASK",), 506 | } 507 | } 508 | 509 | FUNCTION = "resize_image_and_masks" 510 | CATEGORY = "LevelPixel/Image" 511 | DESCRIPTION = "Crops an image around a mask for inpainting, the optional context mask defines an extra area to keep for the context." 
512 | 513 | RETURN_TYPES = ("IMAGE", "MASK", "MASK") 514 | RETURN_NAMES = ("image", "mask", "optional_context_mask") 515 | 516 | def resize_image_and_masks(self, downscale_algorithm, upscale_algorithm, preresize_mode, preresize_min_width, preresize_min_height, preresize_max_width, preresize_max_height, image=None, mask=None, optional_context_mask=None): 517 | result_image = [] 518 | result_mask = [] 519 | result_optional_context_mask = [] 520 | 521 | if image is not None: 522 | batch_size_image = image.shape[0] 523 | else: 524 | batch_size_image = 0 525 | 526 | if mask is not None: 527 | batch_size_mask = mask.shape[0] 528 | else: 529 | batch_size_mask = 0 530 | 531 | if optional_context_mask is not None: 532 | batch_size_optional_context_mask = optional_context_mask.shape[0] 533 | else: 534 | batch_size_optional_context_mask = 0 535 | 536 | batch_size = max(batch_size_image, batch_size_mask, batch_size_optional_context_mask) 537 | 538 | for b in range(batch_size): 539 | if image is not None: 540 | if b >= len(image) or image[b] is None or image[b].numel() == 0: 541 | one_image = None 542 | else: 543 | one_image = image[b].unsqueeze(0) 544 | else: 545 | one_image = None 546 | 547 | if mask is not None: 548 | if b >= len(mask) or mask[b] is None or mask[b].numel() == 0: 549 | one_mask = None 550 | else: 551 | one_mask = mask[b].unsqueeze(0) 552 | else: 553 | one_mask = None 554 | 555 | if optional_context_mask is not None: 556 | if b >= len(optional_context_mask) or optional_context_mask[b] is None or optional_context_mask[b].numel() == 0: 557 | one_optional_context_mask = None 558 | else: 559 | one_optional_context_mask = optional_context_mask[b].unsqueeze(0) 560 | else: 561 | one_optional_context_mask = None 562 | 563 | new_image, new_mask, new_optional_context_mask = preresize_imm(downscale_algorithm, upscale_algorithm, preresize_mode, preresize_min_width, preresize_min_height, preresize_max_width, preresize_max_height, one_image, one_mask, one_optional_context_mask) 564 | if new_image is not None: 565 | new_image = new_image.clone().squeeze(0) 566 | result_image.append(new_image) 567 | if new_mask is not None: 568 | new_mask = new_mask.clone().squeeze(0) 569 | result_mask.append(new_mask) 570 | if new_optional_context_mask is not None: 571 | new_optional_context_mask = new_optional_context_mask.clone().squeeze(0) 572 | result_optional_context_mask.append(new_optional_context_mask) 573 | 574 | filtered_result_image = [x for x in result_image if x is not None] 575 | filtered_result_mask = [x for x in result_mask if x is not None] 576 | filtered_result_optional_context_mask = [x for x in result_optional_context_mask if x is not None] 577 | 578 | result_image = torch.stack(filtered_result_image, dim=0) if len(filtered_result_image) > 0 else torch.empty(0) 579 | result_mask = torch.stack(filtered_result_mask, dim=0) if len(filtered_result_mask) > 0 else torch.empty(0) 580 | result_optional_context_mask = torch.stack(filtered_result_optional_context_mask, dim=0) if len(filtered_result_optional_context_mask) > 0 else torch.empty(0) 581 | 582 | return (result_image, result_mask, result_optional_context_mask) 583 | 584 | NODE_CLASS_MAPPINGS = { 585 | "ImageOverlay|LP": ImageOverlay, 586 | "FastCheckerPattern|LP": FastCheckerPattern, 587 | "ResizeImageToTargetSize|LP": ResizeImageToTargetSize, 588 | "CalculateTargetSizeByMask|LP": CalculateTargetSizeByMask, 589 | "ResizeImageAndMasks|LP": ResizeImageAndMasks 590 | } 591 | 592 | NODE_DISPLAY_NAME_MAPPINGS = { 593 | "ImageOverlay|LP": "Image 
Overlay [LP]", 594 | "FastCheckerPattern|LP": "Fast Checker Pattern [LP]", 595 | "ResizeImageToTargetSize|LP": "Resize Image To Target Size [LP]", 596 | "CalculateTargetSizeByMask|LP": "Calculate Target Size By Mask [LP]", 597 | "ResizeImageAndMasks|LP": "Resize Image and Masks [LP]" 598 | } 599 | --------------------------------------------------------------------------------