├── nodes
│   ├── cache_data.py
│   ├── python_nodes.py
│   ├── __init__.py
│   ├── logger.py
│   ├── cacheable_nodes.py
│   ├── image_merger.py
│   ├── utils.py
│   ├── image_nodes.py
│   ├── komojini_nodes.py
│   └── video_loaders.py
├── requirements.txt
├── video_formats
│   ├── ProRes.json
│   ├── webm.json
│   ├── h264-mp4.json
│   ├── h265-mp4.json
│   └── av1-webm.json
├── js
│   ├── komojini.chain.js
│   ├── extension_template.js
│   ├── videoinfo.js
│   ├── status_viewer.js
│   ├── widgethider.js
│   ├── utils.js
│   ├── comfy_shared.js
│   └── komojini_widgets.js
├── __init__.py
├── .gitignore
├── README.md
├── komojini_server.py
└── example_workflows
    └── image_merger_example.json
/nodes/cache_data.py:
--------------------------------------------------------------------------------
1 | # Module-level map used to share cached values between nodes.
2 |
3 | CACHED_MAP = {}
4 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | pytube==15.0.0
2 | opencv-python
3 | numpy
4 | torch
5 | pillow
6 | psutil
7 | gputil
--------------------------------------------------------------------------------
/video_formats/ProRes.json:
--------------------------------------------------------------------------------
1 | {
2 | "main_pass":
3 | [
4 | "-n", "-c:v", "prores_ks",
5 | "-profile:v","3",
6 | "-pix_fmt", "yuv422p10"
7 | ],
8 | "audio_pass": ["-c:a", "pcm_s16le"],
9 | "extension": "mov"
10 | }
11 |
--------------------------------------------------------------------------------
/nodes/python_nodes.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | class PythonNode:
4 |     @classmethod
5 |     def INPUT_TYPES(s):
6 |         return {
7 |             "required": {
8 |                 "python_code": ("STRING", {"default": "", "multiline": True})
9 |             },
10 |             "optional": {
11 |
12 |             },
13 |         }
14 |
15 |     RETURN_TYPES = ("STRING",)
16 |     RETURN_NAMES = ("result",)
17 |     FUNCTION = "run"
18 |     CATEGORY = "komojini"
19 |
20 |     def run(self, python_code, **kwargs):
21 |         # Assumed behavior for this stub: run the code, return what it binds to `result`.
22 |         scope = {}
23 |         exec(python_code, scope)
24 |         return (str(scope.get("result", "")),)
--------------------------------------------------------------------------------
/video_formats/webm.json:
--------------------------------------------------------------------------------
1 | {
2 | "main_pass":
3 | [
4 |         "-n", "-c:v", "libvpx-vp9",
5 | "-pix_fmt", "yuv420p",
6 | "-crf", ["crf","INT", {"default": 20, "min": 0, "max": 100, "step": 1}],
7 | "-b:v", "0"
8 | ],
9 | "audio_pass": ["-c:a", "libvorbis"],
10 | "save_metadata": ["save_metadata", "BOOLEAN", {"default": true}],
11 | "extension": "webm"
12 | }
13 |
--------------------------------------------------------------------------------
/video_formats/h264-mp4.json:
--------------------------------------------------------------------------------
1 | {
2 | "main_pass":
3 | [
4 | "-n", "-c:v", "libx264",
5 | "-pix_fmt", ["pix_fmt", ["yuv420p", "yuv420p10le"]],
6 | "-crf", ["crf","INT", {"default": 19, "min": 0, "max": 100, "step": 1}]
7 | ],
8 | "audio_pass": ["-c:a", "aac"],
9 | "save_metadata": ["save_metadata", "BOOLEAN", {"default": true}],
10 | "extension": "mp4"
11 | }
12 |
--------------------------------------------------------------------------------
/video_formats/h265-mp4.json:
--------------------------------------------------------------------------------
1 | {
2 | "main_pass":
3 | [
4 | "-n", "-c:v", "libx265",
5 | "-pix_fmt", ["pix_fmt", ["yuv420p10le", "yuv420p"]],
6 | "-crf", ["crf","INT", {"default": 22, "min": 0, "max": 100, "step": 1}],
7 | "-preset", "medium",
8 | "-x265-params", "log-level=quiet"
9 | ],
10 | "audio_pass": ["-c:a", "aac"],
11 | "save_metadata": ["save_metadata", "BOOLEAN", {"default": true}],
12 | "extension": "mp4"
13 | }
14 |
--------------------------------------------------------------------------------
/video_formats/av1-webm.json:
--------------------------------------------------------------------------------
1 | {
2 | "main_pass":
3 | [
4 | "-n", "-c:v", "libsvtav1",
5 | "-pix_fmt", ["pix_fmt", ["yuv420p10le", "yuv420p"]],
6 | "-crf", ["crf","INT", {"default": 23, "min": 0, "max": 100, "step": 1}]
7 | ],
8 | "audio_pass": ["-c:a", "libopus"],
9 | "input_color_depth": ["input_color_depth", ["8bit", "16bit"]],
10 | "save_metadata": ["save_metadata", "BOOLEAN", {"default": true}],
11 | "extension": "webm",
12 | "environment": {"SVT_LOG": "1"}
13 | }
14 |
--------------------------------------------------------------------------------
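
Note on the format files: a plain string in "main_pass" is passed to ffmpeg verbatim, while a list such as ["crf", "INT", {...}] or ["pix_fmt", [choices]] declares a user-facing widget whose value is substituted into the argument list at encode time (the VideoHelperSuite-style convention these files follow). A minimal sketch of that substitution, assuming widget values arrive as a plain dict:

    def expand_main_pass(main_pass, widget_values):
        """Replace [name, ...] widget placeholders with the user's values."""
        args = []
        for item in main_pass:
            if isinstance(item, list):
                name = item[0]
                # [name, "INT", {options}] carries a default; [name, [choices]] defaults to the first choice.
                default = item[2]["default"] if len(item) > 2 else item[1][0]
                args.append(str(widget_values.get(name, default)))
            else:
                args.append(item)
        return args

    # h264-mp4.json's main_pass with crf=19 and the default pix_fmt yields:
    # ['-n', '-c:v', 'libx264', '-pix_fmt', 'yuv420p', '-crf', '19']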
/nodes/__init__.py:
--------------------------------------------------------------------------------
1 | from .video_loaders import YouTubeVideoLoader, UltimateVideoLoader
2 | from .image_merger import ImageMerger
3 | from .cacheable_nodes import (
4 | KSamplerCacheable,
5 | KSamplerAdvancedCacheable,
6 | )
7 | from .image_nodes import *
8 | from .komojini_nodes import *
9 |
10 | __all__ = [
11 | "YouTubeVideoLoader",
12 | "ImageMerger",
13 | "UltimateVideoLoader",
14 | "KSamplerCacheable",
15 | "KSamplerAdvancedCacheable",
16 | "From",
17 | "To",
18 | "ImageGetter",
19 | "FlowBuilder",
20 | "FlowBuilderSetter",
21 | "CachedGetter",
22 | "DragNUWAImageCanvas",
23 | "ImageCropByRatio",
24 | "ImageCropByRatioAndResize",
25 | "ImagesCropByRatioAndResizeBatch",
26 | "BatchCreativeInterpolationNodeDynamicSettings",
27 | ]
28 |
--------------------------------------------------------------------------------
/nodes/logger.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import copy
3 | import logging
4 |
5 |
6 | class ColoredFormatter(logging.Formatter):
7 | COLORS = {
8 | "DEBUG": "\033[0;36m", # CYAN
9 | "INFO": "\033[0;32m", # GREEN
10 | "WARNING": "\033[0;33m", # YELLOW
11 | "ERROR": "\033[0;31m", # RED
12 | "CRITICAL": "\033[0;37;41m", # WHITE ON RED
13 | "RESET": "\033[0m", # RESET COLOR
14 | }
15 |
16 | def format(self, record):
17 | colored_record = copy.copy(record)
18 | levelname = colored_record.levelname
19 | seq = self.COLORS.get(levelname, self.COLORS["RESET"])
20 | colored_record.levelname = f"{seq}{levelname}{self.COLORS['RESET']}"
21 | return super().format(colored_record)
22 |
23 |
24 | # Create a new logger
25 | logger = logging.getLogger("KomojiniCustomNodes")
26 | logger.propagate = False
27 |
28 | # Add handler if we don't have one.
29 | if not logger.handlers:
30 | handler = logging.StreamHandler(sys.stdout)
31 | handler.setFormatter(ColoredFormatter("[%(name)s] - %(levelname)s - %(message)s"))
32 | logger.addHandler(handler)
33 |
34 | # Configure logger
35 | loglevel = logging.DEBUG
36 | logger.setLevel(loglevel)
37 |
--------------------------------------------------------------------------------
/js/komojini.chain.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../scripts/app.js";
2 | import { api } from '../../scripts/api.js';
3 | import { findWidgetByName, findWidgetsByType, enableOnlyRelatedNodes, waitForPromptId, DEBUG_STRING } from "./utils.js";
4 | import * as shared from "./comfy_shared.js";
5 |
6 |
7 | async function waitForQueueEnd(promptId) {
8 | while (true) {
9 | const { queue_running, queue_pending } = await fetch("/queue").then(re => re.json());
10 | const notFinishedIds = [
11 | ...queue_running.map(el => el[1]),
12 | ...queue_pending.map(el => el[1])
13 | ];
14 | if (!notFinishedIds.includes(promptId)) return;
15 | await new Promise(re => setTimeout(re, 1000));
16 | }
17 | }
18 |
19 |
20 | export async function executeAndWaitForTargetNode(app, targetNode) {
21 | shared.log("executeAndWaitForTargetNode started");
22 | const notAlreadyMutedBlacklist = enableOnlyRelatedNodes(targetNode);
23 | const promptIdPromise = waitForPromptId();
24 | try {
25 | await app.queuePrompt(0, 1);
26 |
27 | const promptId = await promptIdPromise;
28 |
29 | for (const node of notAlreadyMutedBlacklist) node.mode = 0;
30 | shared.log(`new prompt id: ${promptId}`);
31 | await waitForQueueEnd(promptId);
32 | }
33 |     catch (e) {
34 |         console.error("Error while running flowbuilder queue", e);
35 |     }
36 | }
37 |
38 |
--------------------------------------------------------------------------------
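
Note: waitForQueueEnd polls ComfyUI's /queue endpoint until the prompt id leaves both the running and pending lists. The same pattern works from Python; a minimal sketch (the `requests` dependency and base URL are assumptions, not part of this repo):

    import time
    import requests

    def wait_for_queue_end(prompt_id, base_url="http://127.0.0.1:8188"):
        """Block until prompt_id is no longer running or pending."""
        while True:
            queue = requests.get(f"{base_url}/queue").json()
            # Each queue entry is a list whose second element is the prompt id,
            # mirroring `el[1]` in komojini.chain.js above.
            unfinished = [el[1] for el in queue["queue_running"]] \
                       + [el[1] for el in queue["queue_pending"]]
            if prompt_id not in unfinished:
                return
            time.sleep(1.0)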
/__init__.py:
--------------------------------------------------------------------------------
1 | from .nodes import *
2 | import folder_paths
3 | from .komojini_server import server
4 |
5 | WEB_DIRECTORY = "js"
6 |
7 | END_EMOJI = "🔥"
8 |
9 | NODE_CLASS_MAPPINGS = {
10 | "YouTubeVideoLoader": YouTubeVideoLoader,
11 | "ImageMerger": ImageMerger,
12 | "UltimateVideoLoader": UltimateVideoLoader,
13 | "UltimateVideoLoader (simple)": UltimateVideoLoader,
14 | "KSamplerCacheable": KSamplerCacheable,
15 | "KSamplerAdvancedCacheable": KSamplerAdvancedCacheable,
16 | "Setter": To,
17 | "Getter": From,
18 | "ImageGetter": ImageGetter,
19 | "FlowBuilder": FlowBuilder,
20 | "FlowBuilder (advanced)": FlowBuilder,
21 | "FlowBuilder (adv)": FlowBuilder,
22 |
23 | "FlowBuilderSetter": FlowBuilderSetter,
24 | "FlowBuilder (advanced) Setter": FlowBuilderSetter,
25 | "FlowBuilderSetter (adv)": FlowBuilderSetter,
26 |
27 | "CachedGetter": CachedGetter,
28 | "DragNUWAImageCanvas": DragNUWAImageCanvas,
29 |
30 | "ImageCropByRatio": ImageCropByRatio,
31 | "ImageCropByRatioAndResize": ImageCropByRatioAndResize,
32 | "ImagesCropByRatioAndResizeBatch": ImagesCropByRatioAndResizeBatch,
33 | "BatchCreativeInterpolationNodeDynamicSettings": BatchCreativeInterpolationNodeDynamicSettings,
34 | }
35 |
36 | NODE_DISPLAY_NAME_MAPPINGS = {
37 | "YouTubeVideoLoader": "YouTube Video Loader",
38 | "ImageMerger": "Image Merger",
39 | "UltimateVideoLoader": "🎥Ultimate Video Loader🎥",
40 | "UltimateVideoLoader (simple)": "🎥Ultimate Video Loader (simple)🎥",
41 | "KSamplerCacheable": "KSampler (cacheable)",
42 | "KSamplerAdvancedCacheable": "KSamplerAdvanced (cacheable)",
43 | "Setter": "Setter",
44 | "Getter": "Getter",
45 | "CachedGetter": "CachedGetter",
46 | "ImageGetter": "ImageGetter",
47 | "FlowBuilder": END_EMOJI + " FlowBuilder",
48 | "FlowBuilder (advanced)": END_EMOJI + "(adv) FlowBuilder",
49 | # "FlowBuilder (adv)": END_EMOJI + "(adv) FlowBuilder",
50 |
51 | "FlowBuilderSetter": END_EMOJI + "FlowBuilderSetter",
52 | "FlowBuilder (advanced) Setter": END_EMOJI + "(adv) FlowBuilderSetter",
53 | # "FlowBuilderSetter (adv)": END_EMOJI + "(adv) FlowBuilder",
54 |
55 | "DragNUWAImageCanvas": "DragNUWAImageCanvas",
56 | "ImageCropByRatio": "ImageCropByRatio",
57 | "ImageCropByRatioAndResize": "ImageCropByRatioAndResize",
58 | "ImagesCropByRatioAndResizeBatch": "ImagesCropByRatioAndResizeBatch",
59 | "BatchCreativeInterpolationNodeDynamicSettings": "BatchCreativeInterpolationNodeDynamicSettings",
60 | }
61 |
62 |
63 | __all__ = [
64 | "NODE_CLASS_MAPPINGS",
65 | "NODE_DISPLAY_NAME_MAPPINGS",
66 | ]
67 |
--------------------------------------------------------------------------------
/js/extension_template.js:
--------------------------------------------------------------------------------
1 | import { ComfyWidgets } from "../../scripts/widgets.js";
2 | import { app } from "../../scripts/app.js";
3 | import * as shared from "./comfy_shared.js";
4 |
5 |
6 | class DragNUWAImageCanvas extends LiteGraph.LGraphNode {
7 | title = "DragNUWACanvas"
8 | category = "komojini/image"
9 |
10 | color = LGraphCanvas.node_colors.yellow.color
11 | bgcolor = LGraphCanvas.node_colors.yellow.bgcolor
12 | groupcolor = LGraphCanvas.node_colors.yellow.groupcolor
13 |
14 | constructor() {
15 | super()
16 | this.uuid = shared.makeUUID()
17 |
18 | shared.log(`Constructing DRAGNUWACanvas instance`)
19 |
20 | this.collapsable = true
21 | this.isVirtualNode = true
22 | this.shape = LiteGraph.BOX_SHAPE
23 | this.serialize_widgets = true
24 |
25 | const inner = document.createElement("div")
26 | inner.style.margin = "0"
27 | inner.style.padding = "0"
28 | inner.style.pointerEvents = "none"
29 |
30 | this.calculatedHeight = 0
31 |
32 | this.htmlWidget = this.addDOMWidget("HTML", "html", inner, {
33 | setValue: (val) => {
34 | this._raw_html = val
35 | },
36 | getValue: () => this._raw_html,
37 | getMinHeight: () => this.calculatedHeight,
38 | hideOnZoom: false,
39 | })
40 |
41 | this.setupDialog()
42 | }
43 |
44 | setupDialog() {
45 | this.dialog = new app.ui.dialog.constructor()
46 | this.dialog.element.classList.add('comfy-settings')
47 |
48 | const closeButton = this.dialog.element.querySelector('button')
49 | closeButton.textContent = 'CANCEL'
50 | const saveButton = document.createElement('button')
51 | saveButton.textContent = 'SAVE'
52 | saveButton.onclick = () => {
53 | this.closeEditorDialog(true)
54 | }
55 | closeButton.onclick = () => {
56 | this.closeEditorDialog(false)
57 | }
58 | closeButton.before(saveButton)
59 | }
60 |
61 | openEditorDialog() {
62 | const container = document.createElement("div")
63 |
64 | Object.assign(container.style, {
65 | display: 'flex',
66 | gap: '10px',
67 | flexDirection: 'column',
68 | })
69 |
70 | const editorsContainer = document.createElement('div')
71 | Object.assign(editorsContainer.style, {
72 | display: 'flex',
73 | gap: '10px',
74 | flexDirection: 'row',
75 | })
76 |
77 | container.append(editorsContainer)
78 |
79 | this.dialog.show('')
80 | this.dialog.textElement.append(container)
81 | }
82 |
83 | onCreate() {}
84 | onNodeCreated() {}
85 | onRemoved() {}
86 | getExtraMenuOptions() {}
87 | setMode(mode) {}
88 |
89 | }
90 |
91 | const komojiniCanvas = {
92 | name: "komojini.image",
93 | init: async () => {},
94 | setup: () => {},
95 | async beforeRegisterNodeDef(nodeType, nodeData, app) {},
96 |
97 | registerCustomNodes() {
98 |
99 | LiteGraph.registerNodeType("DragNUWAImageCanvas", DragNUWAImageCanvas)
100 |
101 | DragNUWAImageCanvas.title_mode = LiteGraph.NO_TITLE
102 |
103 |         DragNUWAImageCanvas.category = "komojini.canvas";
104 | },
105 | }
106 |
107 | // app.registerExtension(komojiniCanvas)
108 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .js/*
2 | json_data/*
3 | js/test.*
4 |
5 | # Byte-compiled / optimized / DLL files
6 | __pycache__/
7 | *.py[cod]
8 | *$py.class
9 |
10 | # C extensions
11 | *.so
12 |
13 | # Distribution / packaging
14 | .Python
15 | build/
16 | develop-eggs/
17 | dist/
18 | downloads/
19 | eggs/
20 | .eggs/
21 | lib/
22 | lib64/
23 | parts/
24 | sdist/
25 | var/
26 | wheels/
27 | share/python-wheels/
28 | *.egg-info/
29 | .installed.cfg
30 | *.egg
31 | MANIFEST
32 |
33 | # PyInstaller
34 | # Usually these files are written by a python script from a template
35 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
36 | *.manifest
37 | *.spec
38 |
39 | # Installer logs
40 | pip-log.txt
41 | pip-delete-this-directory.txt
42 |
43 | # Unit test / coverage reports
44 | htmlcov/
45 | .tox/
46 | .nox/
47 | .coverage
48 | .coverage.*
49 | .cache
50 | nosetests.xml
51 | coverage.xml
52 | *.cover
53 | *.py,cover
54 | .hypothesis/
55 | .pytest_cache/
56 | cover/
57 |
58 | # Translations
59 | *.mo
60 | *.pot
61 |
62 | # Django stuff:
63 | *.log
64 | local_settings.py
65 | db.sqlite3
66 | db.sqlite3-journal
67 |
68 | # Flask stuff:
69 | instance/
70 | .webassets-cache
71 |
72 | # Scrapy stuff:
73 | .scrapy
74 |
75 | # Sphinx documentation
76 | docs/_build/
77 |
78 | # PyBuilder
79 | .pybuilder/
80 | target/
81 |
82 | # Jupyter Notebook
83 | .ipynb_checkpoints
84 |
85 | # IPython
86 | profile_default/
87 | ipython_config.py
88 |
89 | # pyenv
90 | # For a library or package, you might want to ignore these files since the code is
91 | # intended to run in multiple environments; otherwise, check them in:
92 | # .python-version
93 |
94 | # pipenv
95 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
96 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
97 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
98 | # install all needed dependencies.
99 | #Pipfile.lock
100 |
101 | # poetry
102 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
103 | # This is especially recommended for binary packages to ensure reproducibility, and is more
104 | # commonly ignored for libraries.
105 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
106 | #poetry.lock
107 |
108 | # pdm
109 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
110 | #pdm.lock
111 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
112 | # in version control.
113 | # https://pdm.fming.dev/#use-with-ide
114 | .pdm.toml
115 |
116 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
117 | __pypackages__/
118 |
119 | # Celery stuff
120 | celerybeat-schedule
121 | celerybeat.pid
122 |
123 | # SageMath parsed files
124 | *.sage.py
125 |
126 | # Environments
127 | .env
128 | .venv
129 | env/
130 | venv/
131 | ENV/
132 | env.bak/
133 | venv.bak/
134 |
135 | # Spyder project settings
136 | .spyderproject
137 | .spyproject
138 |
139 | # Rope project settings
140 | .ropeproject
141 |
142 | # mkdocs documentation
143 | /site
144 |
145 | # mypy
146 | .mypy_cache/
147 | .dmypy.json
148 | dmypy.json
149 |
150 | # Pyre type checker
151 | .pyre/
152 |
153 | # pytype static type analyzer
154 | .pytype/
155 |
156 | # Cython debug symbols
157 | cython_debug/
158 |
159 | # PyCharm
160 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
161 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
162 | # and can be added to the global gitignore or merged into this file. For a more nuclear
163 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
164 | #.idea/
165 |
--------------------------------------------------------------------------------
/nodes/cacheable_nodes.py:
--------------------------------------------------------------------------------
1 | import os
2 | import functools
3 |
4 | import comfy
5 | import folder_paths
6 | from nodes import common_ksampler, KSampler
7 | from .logger import logger
8 | from .utils import to_hashable, hashable_to_dict
9 |
10 |
11 | CACHE_MAX_SIZE = 4
12 |
13 | def to_cacheable_function(func: callable, maxsize=CACHE_MAX_SIZE, typed=False):
14 |
15 | @functools.lru_cache(maxsize=maxsize, typed=typed)
16 | def cacheable_function(kwargs):
17 | kwargs = hashable_to_dict(kwargs)
18 | return func(**kwargs)
19 |
20 | return cacheable_function
21 |
22 |
23 | class KSamplerCacheable(KSampler):
24 | def __init__(self):
25 | super().__init__()
26 | self.call = to_cacheable_function(super().sample)
27 |
28 | FUNCTION = "cache_call"
29 | def cache_call(self, **kwargs):
30 | kwargs = to_hashable(kwargs)
31 | return self.call(kwargs)
32 |
33 |
34 | class KSamplerAdvancedCacheable:
35 | def __init__(self):
36 | self.call = to_cacheable_function(common_ksampler)
37 |
38 | @classmethod
39 | def INPUT_TYPES(s):
40 | return {
41 | "required": {
42 | "model": ("MODEL",),
43 | "add_noise": (["enable", "disable"],),
44 | "noise_seed": (
45 | "INT",
46 | {"default": 0, "min": 0, "max": 0xFFFFFFFFFFFFFFFF},
47 | ),
48 | "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
49 | "cfg": (
50 | "FLOAT",
51 | {
52 | "default": 8.0,
53 | "min": 0.0,
54 | "max": 100.0,
55 | "step": 0.1,
56 | "round": 0.01,
57 | },
58 | ),
59 | "sampler_name": (comfy.samplers.KSampler.SAMPLERS,),
60 | "scheduler": (comfy.samplers.KSampler.SCHEDULERS,),
61 | "positive": ("CONDITIONING",),
62 | "negative": ("CONDITIONING",),
63 | "latent_image": ("LATENT",),
64 | "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
65 | "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}),
66 | "return_with_leftover_noise": (["disable", "enable"],),
67 | }
68 | }
69 |
70 | RETURN_TYPES = ("LATENT",)
71 | FUNCTION = "sample"
72 |
73 | CATEGORY = "komojini/sampling"
74 |
75 | def sample(
76 | self,
77 | model,
78 | add_noise,
79 | noise_seed,
80 | steps,
81 | cfg,
82 | sampler_name,
83 | scheduler,
84 | positive,
85 | negative,
86 | latent_image,
87 | start_at_step,
88 | end_at_step,
89 | return_with_leftover_noise,
90 | denoise=1.0,
91 | ):
92 | force_full_denoise = True
93 | if return_with_leftover_noise == "enable":
94 | force_full_denoise = False
95 | disable_noise = False
96 | if add_noise == "disable":
97 | disable_noise = True
98 |
99 | kwargs = {
100 | "model": model,
101 | "seed": noise_seed,
102 | "steps": steps,
103 | "cfg": cfg,
104 | "sampler_name": sampler_name,
105 | "scheduler": scheduler,
106 | "positive": positive,
107 | "negative": negative,
108 | "latent": latent_image,
109 | "denoise": denoise,
110 | "disable_noise": disable_noise,
111 | "start_step": start_at_step,
112 | "last_step": end_at_step,
113 | "force_full_denoise": force_full_denoise,
114 | }
115 | kwargs = to_hashable(kwargs)
116 |
117 | return self.call(kwargs)
118 |
119 |
120 | CACHED_STRINGS = {}
121 |
122 | class TextCacheable:
123 | @classmethod
124 | def INPUT_TYPES(cls):
125 | return {
126 | "required": {
127 | "text": ("STRING", {"default": "", "multiline": True}),
128 | },
129 | "hidden": {"unique_id": "UNIQUE_ID"},
130 | }
131 |
132 | FUNCTION = "call"
133 | RETURN_TYPES = ("STRING", )
134 | RETURN_NAMES = ("text", )
135 | def call(self, text, unique_id=None):
136 | if unique_id in CACHED_STRINGS:
137 | CACHED_STRINGS[unique_id].append(text)
138 | else:
139 | CACHED_STRINGS[unique_id] = [text]
140 |
141 | return (text, )
--------------------------------------------------------------------------------
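
Note: the caching trick above is to freeze the kwargs dict with to_hashable so functools.lru_cache can key on it, then thaw it back inside the wrapped function. A standalone sketch of that round trip (the import path is illustrative; a toy function stands in for the real sampler):

    import functools

    from nodes.utils import to_hashable, hashable_to_dict

    calls = {"count": 0}

    @functools.lru_cache(maxsize=4)
    def cached_sample(frozen_kwargs):
        kwargs = hashable_to_dict(frozen_kwargs)
        calls["count"] += 1
        return kwargs["steps"] * kwargs["cfg"]  # stand-in for an expensive sample() call

    args = to_hashable({"steps": 20, "cfg": 8.0})
    assert cached_sample(args) == cached_sample(args)  # second call hits the cache
    assert calls["count"] == 1

One trade-off to keep in mind: cached entries hold references to their inputs and outputs (including tensors) until evicted, so CACHE_MAX_SIZE bounds how much extra memory stays alive.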
/js/videoinfo.js:
--------------------------------------------------------------------------------
1 | import { app } from '../../scripts/app.js'
2 |
3 |
4 | function getVideoMetadata(file) {
5 | return new Promise((r) => {
6 | const reader = new FileReader();
7 | reader.onload = (event) => {
8 | const videoData = new Uint8Array(event.target.result);
9 | const dataView = new DataView(videoData.buffer);
10 |
11 | let decoder = new TextDecoder();
12 | // Check for known valid magic strings
13 | if (dataView.getUint32(0) == 0x1A45DFA3) {
14 | //webm
15 | //see http://wiki.webmproject.org/webm-metadata/global-metadata
16 | //and https://www.matroska.org/technical/elements.html
17 | //contrary to specs, tag seems consistently at start
18 | //COMMENT + 0x4487 + packed length?
19 | //length 0x8d8 becomes 0x48d8
20 | //
21 | //description for variable length ints https://github.com/ietf-wg-cellar/ebml-specification/blob/master/specification.markdown
22 | let offset = 4 + 8; //COMMENT is 7 chars + 1 to realign
23 | while(offset < videoData.length-16) {
24 | //Check for text tags
25 | if (dataView.getUint16(offset) == 0x4487) {
26 | //check that name of tag is COMMENT
27 | const name = String.fromCharCode(...videoData.slice(offset-7,offset));
28 | if (name === "COMMENT") {
29 | let vint = dataView.getUint32(offset+2);
30 | let n_octets = Math.clz32(vint)+1;
31 | if (n_octets < 4) {//250MB sanity cutoff
32 | let length = (vint >> (8*(4-n_octets))) & ~(1 << (7*n_octets));
33 | const content = decoder.decode(videoData.slice(offset+2+n_octets, offset+2+n_octets+length));
34 | const json = JSON.parse(content);
35 | r(json);
36 | return;
37 | }
38 | }
39 | }
40 | offset+=1;
41 | }
42 | } else if (dataView.getUint32(4) == 0x66747970 && dataView.getUint32(8) == 0x69736F6D) {
43 | //mp4
44 | //see https://developer.apple.com/documentation/quicktime-file-format
45 | //Seems to make no guarantee for alignment
46 | let offset = videoData.length-4;
47 | while (offset > 16) {//rough safe guess
48 | if (dataView.getUint32(offset) == 0x64617461) {//any data tag
49 | if (dataView.getUint32(offset - 8) == 0xa9636d74) {//cmt data tag
50 | let type = dataView.getUint32(offset+4); //seemingly 1
51 | let locale = dataView.getUint32(offset+8); //seemingly 0
52 | let size = dataView.getUint32(offset-4) - 4*4;
53 | const content = decoder.decode(videoData.slice(offset+12, offset+12+size));
54 | const json = JSON.parse(content);
55 | r(json);
56 | return;
57 | }
58 | }
59 |
60 | offset-=1;
61 | }
62 | } else {
63 | console.error("Unknown magic: " + dataView.getUint32(0))
64 | r();
65 | return;
66 | }
67 |
68 | };
69 |
70 | reader.readAsArrayBuffer(file);
71 | });
72 | }
73 | function isVideoFile(file) {
74 | if (file?.name?.endsWith(".webm")) {
75 | return true;
76 | }
77 | if (file?.name?.endsWith(".mp4")) {
78 | return true;
79 | }
80 |
81 | return false;
82 | }
83 |
84 | let originalHandleFile = app.handleFile;
85 | app.handleFile = handleFile;
86 | async function handleFile(file) {
87 | if (file?.type?.startsWith("video/") || isVideoFile(file)) {
88 | const videoInfo = await getVideoMetadata(file);
89 | if (videoInfo) {
90 | if (videoInfo.workflow) {
91 |
92 | app.loadGraphData(videoInfo.workflow);
93 | }
94 | //Potentially check for/parse A1111 metadata here.
95 | }
96 | } else {
97 | return await originalHandleFile.apply(this, arguments);
98 | }
99 | }
100 |
101 | //hijack comfy-file-input to allow webm/mp4
102 | document.getElementById("comfy-file-input").accept += ",video/webm,video/mp4";
103 |
--------------------------------------------------------------------------------
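
Note: getVideoMetadata above recovers workflow JSON from a COMMENT tag (webm) or a ©cmt data atom (mp4). Writing that metadata is the save path's job; a hedged sketch of producing a compatible mp4 with ffmpeg's -metadata flag (file names and the workflow dict are placeholders):

    import json
    import subprocess

    workflow = {"nodes": []}  # placeholder workflow graph
    comment = json.dumps({"workflow": workflow})

    # ffmpeg maps "-metadata comment=..." to the mp4 ©cmt atom,
    # which is the tag the parser in videoinfo.js scans for.
    subprocess.run([
        "ffmpeg", "-n", "-i", "in.mp4",
        "-c", "copy",
        "-metadata", f"comment={comment}",
        "out.mp4",
    ], check=True)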
/README.md:
--------------------------------------------------------------------------------
1 | # komojini-comfyui-nodes
2 | Custom ComfyUI Nodes for video generation
3 |
4 | - [DragNUWA Image Canvas](#dragnuwa-image-canvas)
5 | - [Flow Nodes](#flow-nodes)
6 | - [Getter & Setter Nodes](#getter--setter-nodes)
7 | - [Video Loading Nodes](#video-loading-nodes)
8 |   - [Ultimate Video Loader](#ultimate-video-loader)
9 |   - [YouTube Video Loader](#youtube-video-loader)
10 |
11 |
12 |
13 | ## DragNUWA Image Canvas
14 | 
15 | 
16 |
17 | Used for the DragNUWA nodes, which are from: [https://github.com/chaojie/ComfyUI-DragNUWA](https://github.com/chaojie/ComfyUI-DragNUWA)
18 |
19 | DragNUWA main repo: https://github.com/ProjectNUWA/DragNUWA
20 |
21 |
22 | 
23 |
24 |
25 | ## Flow Nodes
26 | Flow nodes make it possible to run only a part of the entire workflow.
27 | By using them, you can generate images or videos step by step.
28 | Add a "FlowBuilder" node right before an output node (PreviewImage, SaveImage, VideoCombine, etc.), and it will automatically execute only the nodes needed to generate that output.
29 |
30 | ### FlowBuilder
31 | 
32 |
33 | ### FlowBuilderSetter
34 |
35 | ### (advanced) Flowbuilder Nodes
36 |
37 |
38 | ## Getter & Setter Nodes
39 | 
40 |
41 | Getter & Setter nodes ensure execution order by being connected automatically when the prompt starts.
42 |
43 |
44 | ## Video Loading Nodes
45 |
46 | ### Ultimate Video Loader
47 | Loads video from several sources (file path, file upload, YouTube, or an empty video).
48 | 4 source types are available:
49 | - file path
50 | - file upload
51 | - youtube
52 | - empty video
53 |
54 | 
55 |
56 |
57 | Common Args:
58 | - start_sec: float
59 | - end_sec: float (0.0 -> end of the video)
60 | - max_fps: int (0 or -1 to disable)
61 | - force_size
62 | - frame_load_cap: the maximum number of frames to return; the fps is adjusted automatically based on the duration and frame count. This never increases the frame count (or fps) beyond that of the original video.
63 |
64 | Videos downloaded from YouTube are saved to "path-to-comfyui/output/youtube/" (this location may change in a future release).
65 |
66 |
67 | ### Ultimate Video Loader (simple)
68 | Same as above but without preview.
69 |
70 |
71 | ### YouTube Video Loader
72 |
73 | Loads and extracts video from YouTube.
74 |
75 | Args:
76 | - Common Args Above...
77 | - output_dir (optional): defaults to "path-to-comfyui/output/youtube/"
78 |
79 | ## Others
80 | ### Image Merger
81 | Merges 2 images or videos side by side.
82 | Useful for comparing the results of img2img or vid2vid.
83 |
84 | divide_points: 2 points that define the dividing line.
85 | Each point is written as (x, y), and the points are separated by ";".
86 | "x" and "y" can be given in pixels (int) or as percentages with "%".
87 | e.g.
88 | - (50%, 0);(50%, 100%) -> split by a vertical line through the center
89 | - (0%, 50%);(100%, 50%) -> split by a horizontal line through the center
90 | - (40%, 0);(70%, 100%) -> split by a slanted line running from 40% of the width at the top to 70% at the bottom
91 |
92 |
93 |
94 |
95 |
96 |
97 | ## System Current Status Viewer
98 | Shows the current status of the GPU, CPU, and memory, refreshed every 500 ms.
99 |
100 |
101 |
102 |
103 |
104 |
105 | - Current GPU memory, usage percentage, temperature
106 | - Current CPU usage
107 | - Current RAM usage
108 |
109 | Go to settings and check "🔥 Show System Status" to enable it.
110 |
111 |
--------------------------------------------------------------------------------
/nodes/image_merger.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import os
3 | import numpy as np
4 | import torch
5 | from typing import Tuple, Optional
6 |
7 |
8 | def tensor_to_int(tensor, bits):
9 | #TODO: investigate benefit of rounding by adding 0.5 before clip/cast
10 | tensor = tensor.cpu().numpy() * (2**bits-1)
11 | return np.clip(tensor, 0, (2**bits-1))
12 | def tensor_to_shorts(tensor):
13 | return tensor_to_int(tensor, 16).astype(np.uint16)
14 | def tensor_to_bytes(tensor):
15 | return tensor_to_int(tensor, 8).astype(np.uint8)
16 |
17 | def line_equation(x1, y1, x2, y2, x, y):
18 | return (x - x1) * (y2 - y1) - (y - y1) * (x2 - x1)
19 |
20 | def line_mask_equation(x1, y1, x2, y2, x, y, size):
21 | distance = np.abs((x - x1) * (y2 - y1) - (y - y1) * (x2 - x1)) / np.sqrt((y2 - y1)**2 + (x2 - x1)**2)
22 | return distance <= size / 2
23 |
24 | def merge_images(images1, images2, x1, y1, x2, y2, line_thickness):
25 | batch_size, height, width, channels = images1.shape
26 |
27 | # Create 2D grid of (x, y) coordinates
28 |     y_coords, x_coords = torch.meshgrid(torch.arange(height), torch.arange(width), indexing="ij")
29 | coords = torch.stack([x_coords, y_coords], dim=-1)
30 |
31 | # Calculate line equation for each point in the grid
32 | line_values = line_equation(x1, y1, x2, y2, coords[..., 0], coords[..., 1])
33 |
34 | # Create a mask based on the line equation
35 | mask = line_values > 0
36 |
37 | # Broadcast the mask to the shape of the images
38 | mask = mask.unsqueeze(0).unsqueeze(3).expand(batch_size, height, width, channels)
39 |
40 | # Combine the corresponding regions from each image
41 | merged_images = images1 * mask.float() + images2 * (~mask).float()
42 |
43 | if line_thickness:
44 | try:
45 | line_mask_values = line_mask_equation(x1, y1, x2, y2, coords[..., 0], coords[..., 1], line_thickness)
46 | line_mask_values = line_mask_values.unsqueeze(0).unsqueeze(3).expand(batch_size, height, width, channels)
47 | merged_images = merged_images * (~line_mask_values).float() + line_mask_values.float()
48 | except Exception as e:
49 | print(e)
50 |
51 | return merged_images
52 |
53 |
54 | class ImageMerger:
55 | @classmethod
56 | def INPUT_TYPES(s):
57 |
58 | return {
59 | "required": {
60 | "images_1": ("IMAGE",),
61 | "images_2": ("IMAGE",),
62 | "divide_points": ("STRING", {"default": "(50%, 0);(50%, 100%)"}),
63 | "line_thickness": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1}),
64 | },
65 | }
66 |
67 | FUNCTION = "merge_video"
68 | CATEGORY = "komojini/Image"
69 | RETURN_NAMES = ("images", "num_images",)
70 | RETURN_TYPES = ("IMAGE", "INT",)
71 |
72 | def merge_video(self, images_1, images_2, divide_points, line_thickness):
73 | # image.shape = (num_imgs, height, width, channels)
74 | num_images, height, width, _ = images_1.shape
75 | print(f"start merge images, images_1.shape: {images_1.shape}")
76 | marks = []
77 | for mark_string in divide_points.split(";"):
78 | xy = self.get_xy(mark_string, height, width)
79 | if not xy:
80 | continue
81 | marks.append(xy)
82 |
83 | # TODO: implement using more than 2 marks.
84 | if len(marks) != 2:
85 |             raise NotImplementedError("currently only 2 marks are available.")
86 |
87 | else:
88 | x1, y1 = marks[0]
89 | x2, y2 = marks[1]
90 | merged_images = merge_images(
91 | images1=images_1,
92 | images2=images_2,
93 | x1=x1, y1=y1, x2=x2, y2=y2,
94 | line_thickness=line_thickness,
95 | )
96 |
97 | print(f"merged_images.shape: {merged_images.shape}")
98 | return (merged_images, len(merged_images))
99 |
100 |
101 | @staticmethod
102 | def get_xy(mark_string: str, height: int, width: int) -> Optional[Tuple[int, int]]:
103 | mark_string = mark_string.strip()
104 | if not mark_string.startswith("(") or not mark_string.endswith(")"):
105 | print(f"mark_string is not appropriate, mark_string: {mark_string}")
106 | return None
107 | mark_string = mark_string[1:-1]
108 | x, y = mark_string.split(",")
109 | x, y = x.strip(), y.strip()
110 | if x.endswith("%"):
111 | x = x[:-1]
112 | x = int(x)
113 | x = int(width * x / 100)
114 | else:
115 | x = int(x)
116 |
117 | if y.endswith("%"):
118 | y = y[:-1]
119 | y = int(y)
120 | y = int(height * y / 100)
121 | else:
122 | y = int(y)
123 |
124 | return x, y
125 |
--------------------------------------------------------------------------------
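
Note: a quick usage sketch of ImageMerger outside of a ComfyUI graph (the import path is illustrative; random tensors stand in for real (batch, height, width, channels) image batches):

    import torch
    from nodes.image_merger import ImageMerger

    images_a = torch.rand(2, 256, 256, 3)   # two frames, NHWC, values in [0, 1]
    images_b = torch.zeros(2, 256, 256, 3)

    merged, count = ImageMerger().merge_video(
        images_1=images_a,
        images_2=images_b,
        divide_points="(50%, 0);(50%, 100%)",  # vertical split down the middle
        line_thickness=2,                      # 2px white divider line
    )
    assert merged.shape == (2, 256, 256, 3) and count == 2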
/nodes/utils.py:
--------------------------------------------------------------------------------
1 | import hashlib
2 | import os
3 | from typing import Iterable
4 | import shutil
5 | import subprocess
6 |
7 | from .logger import logger
8 |
9 |
10 | def ffmpeg_suitability(path):
11 | try:
12 | version = subprocess.run([path, "-version"], check=True,
13 | capture_output=True).stdout.decode("utf-8")
14 | except:
15 | return 0
16 | score = 0
17 | #rough layout of the importance of various features
18 | simple_criterion = [("libvpx", 20),("264",10), ("265",3),
19 | ("svtav1",5),("libopus", 1)]
20 | for criterion in simple_criterion:
21 | if version.find(criterion[0]) >= 0:
22 | score += criterion[1]
23 | #obtain rough compile year from copyright information
24 | copyright_index = version.find('2000-2')
25 | if copyright_index >= 0:
26 | copyright_year = version[copyright_index+6:copyright_index+9]
27 | if copyright_year.isnumeric():
28 | score += int(copyright_year)
29 | return score
30 |
31 | if "VHS_FORCE_FFMPEG_PATH" in os.environ:
32 |     ffmpeg_path = os.environ["VHS_FORCE_FFMPEG_PATH"]
33 | else:
34 | ffmpeg_paths = []
35 | try:
36 | from imageio_ffmpeg import get_ffmpeg_exe
37 | imageio_ffmpeg_path = get_ffmpeg_exe()
38 | ffmpeg_paths.append(imageio_ffmpeg_path)
39 | except:
40 | if "VHS_USE_IMAGEIO_FFMPEG" in os.environ:
41 | raise
42 |         logger.warning("Failed to import imageio_ffmpeg")
43 | if "VHS_USE_IMAGEIO_FFMPEG" in os.environ:
44 | ffmpeg_path = imageio_ffmpeg_path
45 | else:
46 | system_ffmpeg = shutil.which("ffmpeg")
47 | if system_ffmpeg is not None:
48 | ffmpeg_paths.append(system_ffmpeg)
49 | if len(ffmpeg_paths) == 0:
50 | logger.error("No valid ffmpeg found.")
51 | ffmpeg_path = None
52 | else:
53 | ffmpeg_path = max(ffmpeg_paths, key=ffmpeg_suitability)
54 |
55 |
56 | def get_sorted_dir_files_from_directory(directory: str, skip_first_images: int=0, select_every_nth: int=1, extensions: Iterable=None):
57 | directory = directory.strip()
58 | dir_files = os.listdir(directory)
59 | dir_files = sorted(dir_files)
60 | dir_files = [os.path.join(directory, x) for x in dir_files]
61 | dir_files = list(filter(lambda filepath: os.path.isfile(filepath), dir_files))
62 | # filter by extension, if needed
63 | if extensions is not None:
64 | extensions = list(extensions)
65 | new_dir_files = []
66 | for filepath in dir_files:
67 | ext = "." + filepath.split(".")[-1]
68 | if ext.lower() in extensions:
69 | new_dir_files.append(filepath)
70 | dir_files = new_dir_files
71 | # start at skip_first_images
72 | dir_files = dir_files[skip_first_images:]
73 | dir_files = dir_files[0::select_every_nth]
74 | return dir_files
75 |
76 |
77 | # modified from https://stackoverflow.com/questions/22058048/hashing-a-file-in-python
78 | def calculate_file_hash(filename: str, hash_every_n: int = 1):
79 | h = hashlib.sha256()
80 | b = bytearray(10*1024*1024) # read 10 megabytes at a time
81 | mv = memoryview(b)
82 | with open(filename, 'rb', buffering=0) as f:
83 | i = 0
84 | # don't hash entire file, only portions of it if requested
85 | while n := f.readinto(mv):
86 | if i%hash_every_n == 0:
87 | h.update(mv[:n])
88 | i += 1
89 | return h.hexdigest()
90 |
91 |
92 | def get_audio(file, start_time=0, duration=0):
93 | args = [ffmpeg_path, "-v", "error", "-i", file]
94 | if start_time > 0:
95 | args += ["-ss", str(start_time)]
96 | if duration > 0:
97 | args += ["-t", str(duration)]
98 | return subprocess.run(args + ["-f", "wav", "-"],
99 | stdout=subprocess.PIPE, check=True).stdout
100 |
101 |
102 | def lazy_eval(func):
103 | class Cache:
104 | def __init__(self, func):
105 | self.res = None
106 | self.func = func
107 | def get(self):
108 | if self.res is None:
109 | self.res = self.func()
110 | return self.res
111 | cache = Cache(func)
112 | return lambda : cache.get()
113 |
114 |
115 | def is_url(url):
116 | return url.split("://")[0] in ["http", "https"]
117 |
118 |
119 | def hash_path(path):
120 | if path is None:
121 | return "input"
122 | if is_url(path):
123 | return "url"
124 | return calculate_file_hash(path.strip("\""))
125 |
126 |
127 | def validate_path(path, allow_none=False, allow_url=True):
128 | if path is None:
129 | return allow_none
130 | if is_url(path):
131 | #Probably not feasible to check if url resolves here
132 | return True if allow_url else "URLs are unsupported for this path"
133 | if not os.path.isfile(path.strip("\"")):
134 | return "Invalid file path: {}".format(path)
135 | return True
136 |
137 |
138 |
139 | def to_hashable(inputs):
140 | if isinstance(inputs, dict):
141 | # Convert each key-value pair in the dictionary
142 | hashable_dict = {key: to_hashable(value) for key, value in inputs.items()}
143 | return frozenset(hashable_dict.items())
144 | elif isinstance(inputs, list):
145 | # Convert each element in the list
146 | return tuple(to_hashable(item) for item in inputs)
147 | else:
148 | # Base case: if it's not a dictionary or list, return the element itself
149 | return inputs
150 |
151 |
152 | def hashable_to_dict(hashable_representation):
153 | if isinstance(hashable_representation, frozenset):
154 | # Convert each key-value pair back to a dictionary
155 | original_dict = {key: hashable_to_dict(value) for key, value in hashable_representation}
156 | return original_dict
157 | elif isinstance(hashable_representation, tuple):
158 | # Convert each element in the tuple back to a list
159 | original_list = [hashable_to_dict(item) for item in hashable_representation]
160 | return original_list
161 | else:
162 | # Base case: if it's not a frozenset or tuple, return the element itself
163 | return hashable_representation
164 |
--------------------------------------------------------------------------------
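
Note: lazy_eval above defers a function call until its value is first needed, then reuses the result. A small usage sketch (the import path is illustrative); one design caveat is that a function legitimately returning None would be re-evaluated on every call, since the cache tests `res is None`:

    from nodes.utils import lazy_eval

    def load_model():
        print("loading...")  # the expensive work runs only once
        return 42

    get_model = lazy_eval(load_model)
    assert get_model() == 42  # prints "loading..." and caches the result
    assert get_model() == 42  # served from the cache; nothing is printed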
/js/status_viewer.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../scripts/app.js";
2 | import { api } from "../../scripts/api.js"
3 | import { ComfyDialog, $el } from "../../scripts/ui.js";
4 | import * as shared from "./comfy_shared.js";
5 |
6 |
7 | app.ui.settings.addSetting({
8 | id: "komojini.ShowSystemStatus",
9 | name: "🔥 Show System Status",
10 | type: "boolean",
11 | defaultValue: false,
12 | })
13 |
14 | app.registerExtension({
15 | name: "komojini.statusviewer",
16 | init() {
17 |
18 | },
19 | async setup() {
20 |
21 | if (!app.ui.settings.getSettingValue("komojini.ShowSystemStatus", false)) {
22 | return;
23 | }
24 |
25 | const menu = document.querySelector(".comfy-menu");
26 |
27 | const separator = document.createElement("hr");
28 |
29 | separator.style.margin = "10px 0";
30 | separator.style.width = "100%";
31 |
32 | const systemStatus = document.createElement("div");
33 | systemStatus.id = "systemStatus";
34 | systemStatus.style.width = "100%";
35 |         systemStatus.style.height = "300px";
36 | systemStatus.style.textAlign = "left";
37 | systemStatus.style.backgroundColor = "black";
38 | systemStatus.style.padding = "0 6px";
39 | systemStatus.style.margin = "0 10px";
40 | systemStatus.style.borderRadius = "8px";
41 |
42 | const cpuInfoEl = document.createElement("div");
43 | cpuInfoEl.id = "cpuInfo";
44 | cpuInfoEl.style.width = "100%";
45 | cpuInfoEl.style.margin = "8px 0 0 0";
46 |
47 | const gpuInfoEl = document.createElement("div");
48 | gpuInfoEl.id = "gpuInfo";
49 | gpuInfoEl.style.width = "100%";
50 | // gpuInfoEl.style.textAlign = "left";
51 |
52 | const memoryInfoEl = document.createElement("div");
53 | memoryInfoEl.id = "memoryInfo";
54 | memoryInfoEl.style.width = "100%";
55 | memoryInfoEl.style.margin = "8px 0";
56 |
57 | systemStatus.appendChild(gpuInfoEl);
58 | systemStatus.appendChild(cpuInfoEl);
59 | systemStatus.appendChild(memoryInfoEl);
60 |
61 |
62 | function getStyledText(text, style) {
63 | var styleString = "";
64 | if (style) {
65 | for (var styleProp in style) {
66 | styleString += `${styleProp}: ${style[styleProp]};`;
67 | }
68 | } else {
69 | return text;
70 | }
71 |
72 | return `${text} `
73 | }
74 |
75 | function addTitleEl(title, parent) {
76 | const titleEl = document.createElement("div");
77 | titleEl.innerHTML = getStyledText(title, {color: "yellow"});
78 | titleEl.style.margin = "10px 0";
79 | parent.appendChild(titleEl);
80 | return titleEl;
81 | }
82 |
83 | const gpuTitleEl = addTitleEl("GPU", gpuInfoEl);
84 | const cpuTitleEl = addTitleEl("CPU", cpuInfoEl);
85 | const memoryTitleEl = addTitleEl("Memory", memoryInfoEl);
86 |
87 | let gpuElements = [];
88 |
89 |
90 | const gpuUsageEl = document.createElement("div");
91 | gpuUsageEl.id = "gpuUsage";
92 | gpuElements.push(gpuUsageEl)
93 |
94 | const gpuMemoryUsageEl = document.createElement("div");
95 | gpuMemoryUsageEl.id = "gpuMemoryUsage";
96 | gpuElements.push(gpuMemoryUsageEl)
97 |
98 | const gpuTemperatureEl = document.createElement("div");
99 | gpuTemperatureEl.id = "gpuTemperature";
100 | gpuElements.push(gpuTemperatureEl)
101 |
102 | for (var gpuElement of gpuElements) {
103 | gpuElement.style.margin = "4px";
104 | gpuInfoEl.appendChild(gpuElement);
105 | }
106 |
107 | const cpuUsageEl = document.createElement("div");
108 | cpuUsageEl.id = "cpuUsage";
109 | cpuUsageEl.style.margin = "4px";
110 | cpuInfoEl.appendChild(cpuUsageEl);
111 |
112 | const memoryUsageEl = document.createElement("div");
113 | memoryUsageEl.id = "memoryUsage";
114 | memoryUsageEl.style.margin = "4px";
115 | memoryInfoEl.appendChild(memoryUsageEl);
116 |
117 | const nameStyle = {
118 | display: "inline-block",
119 | width: "30%",
120 | }
121 |
122 | const updateSystemStatus = (data) => {
123 |
124 | cpuUsageEl.innerHTML = `${getStyledText("Usage", nameStyle)}: ${getStyledText(data.cpu.cpu_usage, {color: "white"})}${getStyledText("%", {color: "white"})}`;
125 | const gpuInfo = data.gpus[0];
126 | gpuTitleEl.innerHTML = getStyledText("GPU ", {color: "yellow"}) + " " + `(${getStyledText(gpuInfo.name, {"font-size": "8pt"})})`;
127 |
128 | gpuUsageEl.innerHTML = `${getStyledText("Usage", nameStyle)}: ${getStyledText(Math.round(gpuInfo.load * 100), {color: "white"})}${getStyledText("%", {color: "white"})}`;
129 |
130 | gpuMemoryUsageEl.innerHTML = `${getStyledText("VRAM", nameStyle)}:
131 | ${getStyledText(Math.round(gpuInfo.memoryTotal * gpuInfo.memoryUtil / 10) * 10 / 1000, {color: "white"})} /
132 | ${getStyledText(Math.round(gpuInfo.memoryTotal / 10) * 10 / 1000, {"font-size": "10pt"})}
133 | ${getStyledText("GB", {"font-size": "8pt"})}`;
134 | gpuTemperatureEl.innerHTML = `${getStyledText("Temp", nameStyle)}: ${getStyledText(gpuInfo.temperature, "white")}°`;
135 |
136 | memoryUsageEl.innerHTML = `${getStyledText("RAM", nameStyle)}:
137 | ${getStyledText(Math.round(data.virtual_memory.used / (10 ** 8)) * (10 ** 8) / (10 ** 9), {color: "white"})} /
138 | ${getStyledText(Math.round(data.virtual_memory.total / (10 ** 8)) * (10 ** 8) / (10 ** 9), {"font-size": "10pt"})}
139 | ${getStyledText("GB", {"font-size": "8pt"})}`;
140 |
141 | }
142 |
143 | // Function to fetch and update system status
144 | async function fetchSystemStatus() {
145 | try {
146 | const response = await fetch('/komojini/systemstatus');
147 | const data = await response.json();
148 |
149 |             if (data.cpu != null || data.gpus != null) {
150 | updateSystemStatus(data);
151 | }
152 | } catch (error) {
153 | console.error('Error fetching system status:', error);
154 | }
155 | }
156 | menu.append(separator);
157 | menu.append(systemStatus);
158 |
159 |         // Fetch system status initially and then every 500 ms
160 | fetchSystemStatus();
161 | setInterval(fetchSystemStatus, 500);
162 | },
163 | async beforeRegisterNodeDef(nodeType, nodeData, app) {
164 |
165 | },
166 | })
--------------------------------------------------------------------------------
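
Note: komojini_server.py is not shown above, but the fields read by updateSystemStatus imply the payload shape of /komojini/systemstatus. A hypothetical sketch of such an endpoint using psutil and GPUtil (both in requirements.txt); the route path matches the fetch call, everything else is an assumption:

    import psutil
    import GPUtil
    from aiohttp import web

    async def system_status(request):
        return web.json_response({
            "cpu": {"cpu_usage": psutil.cpu_percent()},
            "gpus": [
                {
                    "name": gpu.name,
                    "load": gpu.load,               # 0.0-1.0; frontend multiplies by 100
                    "memoryTotal": gpu.memoryTotal, # MB
                    "memoryUtil": gpu.memoryUtil,   # 0.0-1.0
                    "temperature": gpu.temperature,
                }
                for gpu in GPUtil.getGPUs()
            ],
            "virtual_memory": dict(psutil.virtual_memory()._asdict()),
        })

    # e.g. app.router.add_get("/komojini/systemstatus", system_status)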
/nodes/image_nodes.py:
--------------------------------------------------------------------------------
1 | import warnings
2 | warnings.filterwarnings('ignore', module="torchvision")
3 | import ast
4 | import math
5 | import random
6 | import operator as op
7 | import numpy as np
8 |
9 | import torch
10 | import torch.nn.functional as F
11 |
12 | import torchvision.transforms.v2 as T
13 |
14 | import comfy.utils
15 |
16 | from .logger import logger
17 |
18 |
19 | MAX_RESOLUTION = 8192
20 |
21 | def p(image):
22 | return image.permute([0,3,1,2])
23 | def pb(image):
24 | return image.permute([0,2,3,1])
25 |
26 |
27 | class ImageCropByRatio:
28 | @classmethod
29 | def INPUT_TYPES(s):
30 | return {
31 | "required": {
32 | "image": ("IMAGE",),
33 | "width_ratio": ("INT", {"default": 1, "min": 1, "max": MAX_RESOLUTION}),
34 | "height_ratio": (
35 | "INT",
36 | {"default": 1, "min": 1, "max": MAX_RESOLUTION},
37 | ),
38 | "position": (
39 | [
40 | "top",
41 | "right",
42 | "bottom",
43 | "left",
44 | "center",
45 | ],
46 | ),
47 | }
48 | }
49 |
50 | RETURN_TYPES = (
51 | "IMAGE",
52 | "INT",
53 | "INT",
54 | )
55 | RETURN_NAMES = (
56 | "IMAGE",
57 | "width",
58 | "height",
59 | )
60 | FUNCTION = "execute"
61 | CATEGORY = "essentials"
62 |
63 | def execute(self, image, width_ratio, height_ratio, position):
64 | _, oh, ow, _ = image.shape
65 |
66 | image_ratio = ow / oh
67 | target_ratio = width_ratio / height_ratio
68 |
69 | if image_ratio > target_ratio:
70 | height = oh
71 | width = target_ratio * height
72 | else:
73 | width = ow
74 | height = width / target_ratio
75 |
76 |
77 | x = round((ow - width) / 2)
78 | y = round((oh - height) / 2)
79 | width, height = round(width), round(height)
80 |
81 |
82 | if "top" in position:
83 | y = 0
84 | if "bottom" in position:
85 | y = oh - height
86 | if "left" in position:
87 | x = 0
88 | if "right" in position:
89 | x = ow - width
90 |
91 | x2 = x + width
92 | y2 = y + height
93 |
94 | if x2 > ow:
95 | x2 = ow
96 | if x < 0:
97 | x = 0
98 | if y2 > oh:
99 | y2 = oh
100 | if y < 0:
101 | y = 0
102 |
103 | image = image[:, y:y2, x:x2, :]
104 |
105 | return (
106 | image,
107 | width,
108 | height,
109 | )
110 |
111 |
112 |
113 | class ImageCropByRatioAndResize:
114 | @classmethod
115 | def INPUT_TYPES(s):
116 | return {
117 | "required": {
118 | "image": ("IMAGE",),
119 | "width_ratio_size": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION}),
120 | "height_ratio_size": (
121 | "INT",
122 | {"default": 512, "min": 1, "max": MAX_RESOLUTION},
123 | ),
124 | "position": (
125 | [
126 | "center",
127 | "top",
128 | "right",
129 | "bottom",
130 | "left",
131 | ],
132 | ),
133 | "interpolation": (["nearest", "bilinear", "bicubic", "area", "nearest-exact", "lanczos"],),
134 |
135 | }
136 | }
137 |
138 | RETURN_TYPES = (
139 | "IMAGE",
140 | "INT",
141 | "INT",
142 | )
143 | RETURN_NAMES = (
144 | "IMAGE",
145 | "width",
146 | "height",
147 | )
148 | FUNCTION = "execute"
149 | CATEGORY = "essentials"
150 |
151 | def execute(self, image, width_ratio_size, height_ratio_size, position, interpolation):
152 | _, oh, ow, _ = image.shape
153 |
154 | image_ratio = ow / oh
155 | target_ratio = width_ratio_size / height_ratio_size
156 |
157 |
158 | if image_ratio > target_ratio:
159 | height = oh
160 | width = target_ratio * height
161 | else:
162 | width = ow
163 | height = width / target_ratio
164 |
165 |
166 | x = round((ow - width) / 2)
167 | y = round((oh - height) / 2)
168 | width, height = round(width), round(height)
169 |
170 | if "top" in position:
171 | y = 0
172 | if "bottom" in position:
173 | y = oh - height
174 | if "left" in position:
175 | x = 0
176 | if "right" in position:
177 | x = ow - width
178 |
179 | x2 = x + width
180 | y2 = y + height
181 |
182 | if x2 > ow:
183 | x2 = ow
184 | if x < 0:
185 | x = 0
186 | if y2 > oh:
187 | y2 = oh
188 | if y < 0:
189 | y = 0
190 |
191 | image = image[:, y:y2, x:x2, :]
192 |
193 | width = width_ratio_size
194 | height = height_ratio_size
195 |
196 | outputs = p(image)
197 | if interpolation == "lanczos":
198 | outputs = comfy.utils.lanczos(outputs, width, height)
199 | else:
200 | outputs = F.interpolate(outputs, size=(height, width), mode=interpolation)
201 | outputs = pb(outputs)
202 |
203 |         return (outputs, outputs.shape[2], outputs.shape[1],)
204 |
205 |
206 |
207 | class ImagesCropByRatioAndResizeBatch(ImageCropByRatioAndResize):
208 |
209 |
210 | FUNCTION = "list_execute"
211 | INPUT_IS_LIST = True
212 | OUTPUT_IS_LIST = (False, False, False,)
213 |
214 | def list_execute(self, image, **kwargs):
215 | logger.debug(f"{len(image)}, {kwargs}")
216 |
217 | output_images = []
218 | new_kwargs = {}
219 | for k, v in kwargs.items():
220 | if isinstance(v, list):
221 | new_kwargs[k] = v[0]
222 |
223 | width, height = new_kwargs["width_ratio_size"], new_kwargs["height_ratio_size"]
224 |
225 | for img in image:
226 | output_img, width, height = super().execute(img, **new_kwargs)
227 | output_images.append(output_img)
228 |
229 | if len(output_images) <= 1:
230 | return (output_images[0], width, height,)
231 |
232 | output_images = torch.cat(output_images, dim=0)
233 |
234 | print(f"image crop by ratio and resize image shape: {output_images.shape}")
235 |
236 | return (output_images, width, height, )
237 |
238 |
239 | __all__ = [
240 | "ImageCropByRatio",
241 | "ImageCropByRatioAndResize",
242 | "ImagesCropByRatioAndResizeBatch",
243 | ]
--------------------------------------------------------------------------------
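
Note: a worked example of the aspect-ratio crop above (import path illustrative). For a 1920x1080 input and a 1:1 target, image_ratio (1.78) exceeds target_ratio (1.0), so the full height is kept and the width shrinks to 1080:

    import torch
    from nodes.image_nodes import ImageCropByRatio

    image = torch.rand(1, 1080, 1920, 3)  # one 1920x1080 frame, NHWC
    cropped, w, h = ImageCropByRatio().execute(
        image, width_ratio=1, height_ratio=1, position="center"
    )
    assert (w, h) == (1080, 1080) and cropped.shape == (1, 1080, 1080, 3)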
/js/widgethider.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../scripts/app.js";
2 | import { findWidgetByName, doesInputWithNameExist } from "./utils.js";
3 |
4 | let origProps = {};
5 | let initialized = false;
6 |
7 | const HIDDEN_TAG = "komojinihide";
8 |
9 | const WIDGET_HEIGHT = 24;
10 |
11 | // Toggle Widget + change size
12 | function toggleWidget(node, widget, show = false, suffix = "") {
13 | if (!widget || doesInputWithNameExist(node, widget.name)) return;
14 |
15 | // Store the original properties of the widget if not already stored
16 | if (!origProps[widget.name]) {
17 | origProps[widget.name] = { origType: widget.type, origComputeSize: widget.computeSize };
18 | }
19 |
20 | const origSize = node.size;
21 |
22 | // Set the widget type and computeSize based on the show flag
23 | widget.type = show ? origProps[widget.name].origType : HIDDEN_TAG + suffix;
24 | widget.computeSize = show ? origProps[widget.name].origComputeSize : () => [0, -4];
25 |
26 | // Recursively handle linked widgets if they exist
27 |     widget.linkedWidgets?.forEach(w => toggleWidget(node, w, show, ":" + widget.name));
28 |
29 | // Calculate the new height for the node based on its computeSize method
30 | const newHeight = node.computeSize()[1];
31 | node.setSize([node.size[0], newHeight]);
32 | }
33 |
34 |
35 | // Use for Multiline Widget Nodes (aka Efficient Loaders)
36 | function toggleWidget_2(node, widget, show = false, suffix = "") {
37 | if (!widget || doesInputWithNameExist(node, widget.name)) return;
38 |
39 | const isCurrentlyVisible = widget.type !== HIDDEN_TAG + suffix;
40 | if (isCurrentlyVisible === show) return; // Early exit if widget is already in the desired state
41 |
42 | if (!origProps[widget.name]) {
43 | origProps[widget.name] = { origType: widget.type, origComputeSize: widget.computeSize };
44 | }
45 |
46 | widget.type = show ? origProps[widget.name].origType : HIDDEN_TAG + suffix;
47 | widget.computeSize = show ? origProps[widget.name].origComputeSize : () => [0, -4];
48 |
49 | if (initialized){
50 | const adjustment = show ? WIDGET_HEIGHT : -WIDGET_HEIGHT;
51 | node.setSize([node.size[0], node.size[1] + adjustment]);
52 | }
53 | }
54 |
55 | const commonLoaderInputs = ["start_sec", "end_sec", "max_fps", "force_size", "frame_load_cap"];
56 | const emptyVideoInputs = ["width", "height", "frame_count", "fps"];
57 |
58 | function allSourceInputsExept(source_name) {
59 | const allSourceInputs = ["video", "upload", "youtube_url"];
60 | let sources = [];
61 | for (const source of allSourceInputs) {
62 | if (source !== source_name) {
63 | sources.push(source);
64 | }
65 | }
66 | return sources;
67 | }
68 |
69 | // New function to handle widget visibility based on input_mode
70 | function handleInputModeWidgetsVisibility(node, inputModeValue) {
71 | const videoLoaderInputs = ["video", "youtube_url", "upload", ...commonLoaderInputs];
72 |
73 | let nodeVisibilityMap = {
74 | "UltimateVideoLoader": {
75 | "filepath": [...allSourceInputsExept("video"), ...emptyVideoInputs],
76 | "YouTube": [...allSourceInputsExept("youtube_url"), ...emptyVideoInputs],
77 | "fileupload": [...allSourceInputsExept("upload"), ...emptyVideoInputs],
78 | "emptyvideo": [...allSourceInputsExept(""), ...commonLoaderInputs],
79 | },
80 | };
81 |
82 | nodeVisibilityMap["UltimateVideoLoader (simple)"] = nodeVisibilityMap["UltimateVideoLoader"];
83 |
84 | const inputModeVisibilityMap = nodeVisibilityMap[node.comfyClass];
85 |
86 | if (!inputModeVisibilityMap || !inputModeVisibilityMap[inputModeValue]) return;
87 |
88 | // Reset all widgets to visible
89 | for (const key in inputModeVisibilityMap) {
90 | for (const widgetName of inputModeVisibilityMap[key]) {
91 | const widget = findWidgetByName(node, widgetName);
92 | toggleWidget(node, widget, true);
93 | }
94 | }
95 |
96 | // Hide the specific widgets for the current input_mode value
97 | for (const widgetName of inputModeVisibilityMap[inputModeValue]) {
98 | const widget = findWidgetByName(node, widgetName);
99 | toggleWidget(node, widget, false);
100 | }
101 | }
102 |
103 |
104 | // Create a map of node titles to their respective widget handlers
105 | const nodeWidgetHandlers = {
106 | "UltimateVideoLoader": {
107 | "source": handleUltimateVideoLoaderSource,
108 | },
109 | "UltimateVideoLoader (simple)": {
110 | "source": handleUltimateVideoLoaderSource,
111 | },
112 | "BatchCreativeInterpolationNodeDynamicSettings": {
113 | "image_count": handleBatchCreativeInterpolationNodeDynamicSettingsVisibility,
114 | }
115 | };
116 |
117 | // In the main function where widgetLogic is called
118 | function widgetLogic(node, widget) {
119 | // Retrieve the handler for the current node title and widget name
120 | const handler = nodeWidgetHandlers[node.comfyClass]?.[widget.name];
121 | if (handler) {
122 | handler(node, widget);
123 | }
124 | }
125 |
126 |
127 | function handleUltimateVideoLoaderVisibility(node, source) {
128 | const baseNamesMap = {
129 | "YouTube": ["youtube_url", ...commonLoaderInputs],
130 | "filepath": ["video", ...commonLoaderInputs],
131 | "fileupload": ["fileupload", ...commonLoaderInputs],
132 | "emptyvideo": [...emptyVideoInputs],
133 | };
134 |
135 | for (var key in baseNamesMap) {
136 | var toggle;
137 | if (key === source) {
138 | toggle = true;
139 | } else {
140 | toggle = false;
141 | }
142 | var baseNames = baseNamesMap[key];
143 |
144 |         for (const baseName of baseNames) {
145 |             var widget = findWidgetByName(node, baseName);
146 | toggleWidget(node, widget, toggle);
147 | }
148 | }
149 | }
150 |
151 | function handleBatchCreativeInterpolationNodeDynamicSettingsVisibility(node, widget) {
152 | handleVisibility(node, widget.value, "BatchCreativeInterpolationNodeDynamicSettings")
153 | }
154 |
155 | const MAX_COUNT_VALUE = 50
156 |
157 | function handleUltimateVideoLoaderSource(node, widget) {
158 | handleInputModeWidgetsVisibility(node, widget.value);
159 | handleUltimateVideoLoaderVisibility(node, widget.value);
160 | }
161 |
162 | function handleVisibility(node, countValue, nodeType) {
163 | const baseNamesMap = {
164 | "BatchCreativeInterpolationNodeDynamicSettings": [
165 | "frame_distribution",
166 | "key_frame_influence",
167 | "min_strength_value",
168 | "max_strength_value",
169 | ],
170 | }
171 | const baseNames = baseNamesMap[nodeType]
172 |
173 |
174 | for (let i=1; i <= MAX_COUNT_VALUE; i++) {
175 | const widgets = baseNames.map((n) => findWidgetByName(node, `${n}_${i}`))
176 |
177 | if (i <= countValue) {
178 | widgets?.forEach((w) => {
179 |
180 | toggleWidget(node, w, true)}
181 | )
182 | } else {
183 | widgets?.forEach((w) => toggleWidget(node, w, false))
184 | }
185 | }
186 | }
187 |
188 | app.registerExtension({
189 | name: "komojini.widgethider",
190 | nodeCreated(node) {
191 | for (const w of node.widgets || []) {
192 | let widgetValue = w.value;
193 |
194 | // Store the original descriptor if it exists
195 | let originalDescriptor = Object.getOwnPropertyDescriptor(w, 'value');
196 |
197 | widgetLogic(node, w);
198 |
199 | Object.defineProperty(w, 'value', {
200 | get() {
201 | // If there's an original getter, use it. Otherwise, return widgetValue.
202 | let valueToReturn = originalDescriptor && originalDescriptor.get
203 | ? originalDescriptor.get.call(w)
204 | : widgetValue;
205 |
206 | return valueToReturn;
207 | },
208 | set(newVal) {
209 |
210 | // If there's an original setter, use it. Otherwise, set widgetValue.
211 | if (originalDescriptor && originalDescriptor.set) {
212 | originalDescriptor.set.call(w, newVal);
213 | } else {
214 | widgetValue = newVal;
215 | }
216 |
217 | widgetLogic(node, w);
218 | }
219 | });
220 | }
221 | setTimeout(() => {initialized = true;}, 500);
222 | }
223 | });
224 |
225 |
--------------------------------------------------------------------------------
/js/utils.js:
--------------------------------------------------------------------------------
1 | import { api } from '/scripts/api.js'
2 | import * as shared from './comfy_shared.js'
3 | import { app } from '/scripts/app.js'
4 |
5 |
6 |
7 | export const findWidgetByName = (node, name) => {
8 | return node.widgets ? node.widgets.find((w) => w.name === name) : null;
9 | }
10 |
11 | export const doesInputWithNameExist = (node, name) => {
12 | return node.inputs ? node.inputs.some((input) => input.name === name) : false;
13 | }
14 |
15 | export const findWidgetsByType = (node, type) => {
16 |     // .filter() expresses the intent directly; the previous version used
17 |     // .map() purely for its side effects and discarded the result.
18 |     if (!node.widgets) {
19 |         return [];
20 |     }
21 |     return node.widgets.filter((widget) => widget.type === type);
22 | }
23 | 
24 |
25 | export const getNodeByLink = (linkId, type) => app.graph.getNodeById(app.graph.links[linkId][type == "input" ? "origin_id" : "target_id"]);
26 |
27 | // node.title is visual title
28 |
29 | export function isGetter(node) {
30 | return node.type === "GetNode" || node.type?.includes?.("Getter");
31 | }
32 |
33 | export function isSetter(node) {
34 | return node.type === 'SetNode' || node.type?.includes?.("Setter");
35 | }
36 |
37 | export const isSetNode = (node) => node.type === "SetNode";
38 | export const isGetNode = (node) => node.type === "GetNode";
39 |
40 | function findSetterNode(key) {
41 | return app.graph._nodes.find((node) => isSetter(node) && findWidgetByName(node, "key").value === key);
42 | }
43 |
44 | function findGetterNode(key) {
45 | return app.graph._nodes.find((node) => isGetter(node) && findWidgetByName(node, "key").value === key);
46 | }
47 |
48 | function findSetNode(key) {
49 |     // widgets_values is an array; the key sits in slot 0, so compare that slot
50 |     return app.graph._nodes.find((node) => isSetNode(node) && node.widgets_values?.[0] === key);
51 | }
52 | function findGetNode(key) {
53 |     return app.graph._nodes.find((node) => isGetNode(node) && node.widgets_values?.[0] === key);
54 | }
55 |
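   | // Mutes (mode = 2) every node outside targetNode's dependency chain, walking
   | // links in both directions and jumping across Getter/Setter pairs by key.
   | // Returns the nodes it newly muted so callers can restore them afterwards.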
56 | export function enableOnlyRelatedNodes(targetNode) {
57 | let whitelist = {};
58 |
59 | function travelBackward(node) {
60 | whitelist[node.id] = node;
61 | if (!node.inputs) return;
62 |
63 | if (isGetter(node)) {
64 | const key = findWidgetByName(node, "key").value;
65 | const setterNode = findSetterNode(key);
66 |
67 | if (!setterNode) {
68 |                 shared.errorLogger('No Setter node found for key:', key);
69 | } else {
70 | shared.log("Connecting Getter & Setter", node?.widgets_values);
71 | travelBackward(setterNode);
72 | }
73 |
74 | } else if (isGetNode(node)) {
75 | const key = findWidgetByName(node, "Constant").value;
76 | const setNode = findSetNode(key);
77 |
78 | if (!setNode) {
79 |             shared.errorLogger('No SetNode found for Constant:', key);
80 | } else {
81 | shared.log("Connecting GetNode & SetNode", node?.widgets_values);
82 | travelBackward(setNode);
83 | }
84 | } else {
85 | for (const input of node.inputs) {
86 | if (!input.link) continue
87 | travelBackward(getNodeByLink(input.link, "input"));
88 | }
89 | }
90 | }
91 |
92 | function travelForward(node) {
93 | whitelist[node.id] = node;
94 | travelBackward(node);
95 | if (!node.outputs) return;
96 |
97 | for (const output of node.outputs) {
98 | if (!output.links) continue;
99 | for (const link of output.links) {
100 | travelForward(getNodeByLink(link, "output"));
101 | }
102 | }
103 | }
104 |
105 | travelForward(targetNode);
106 |
107 | let notAlreadyMutedBlacklist = app.graph._nodes.filter(node => node.mode !== 2 && !whitelist[node.id]);
108 | for (const node of notAlreadyMutedBlacklist) node.mode = 2;
109 | return notAlreadyMutedBlacklist;
110 | }
111 |
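   | // Monkey-patches window.fetch just long enough to capture prompt_id from the
   | // next request to /prompt, then restores the original fetch.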
112 | export function waitForPromptId() {
113 | const originalFetch = window.fetch;
114 | return new Promise(resolve => {
115 | window.fetch = async (...args) => {
116 | let [url, config] = args;
117 | const response = await originalFetch(url, config);
118 | if (url === "/prompt") {
119 | response.clone().json().then(data => resolve(data.prompt_id));
120 | window.fetch = originalFetch;
121 | }
122 | return response;
123 | };
124 | })
125 | }
126 |
127 | //https://github.com/melMass/comfy_mtb/blob/main/web/mtb_widgets.js#L309
128 | //Thanks for cool text box.
129 | export const DEBUG_STRING = (name, val) => {
130 | const fontSize = 16
131 | const w = {
132 | name,
133 | type: 'debug_text',
134 |
135 | draw: function (ctx, node, widgetWidth, widgetY, height) {
136 | // const [cw, ch] = this.computeSize(widgetWidth)
137 | shared.offsetDOMWidget(this, ctx, node, widgetWidth, widgetY, height)
138 | },
139 | computeSize: function (width) {
140 | const value = this.inputEl.innerHTML
141 | if (!value) {
142 | return [32, 32]
143 | }
144 | if (!width) {
145 |                 shared.log(`No width ${this.parent.size}`) // bare log() is not defined in this module
146 | }
147 |
148 | const oldFont = app.ctx.font
149 | app.ctx.font = `${fontSize}px monospace`
150 |
151 | const words = value.split(' ')
152 | const lines = []
153 | let currentLine = ''
154 | for (const word of words) {
155 | const testLine =
156 | currentLine.length === 0 ? word : `${currentLine} ${word}`
157 |
158 | const testWidth = app.ctx.measureText(testLine).width
159 |
160 | if (testWidth > width) {
161 | lines.push(currentLine)
162 | currentLine = word
163 | } else {
164 | currentLine = testLine
165 | }
166 | }
167 | app.ctx.font = oldFont
168 | if (lines.length === 0) lines.push(currentLine)
169 |
170 | const textHeight = (lines.length + 1) * fontSize
171 |
172 | const maxLineWidth = lines.reduce(
173 | (maxWidth, line) =>
174 | Math.max(maxWidth, app.ctx.measureText(line).width),
175 | 0
176 | )
177 | const widgetWidth = Math.max(width || this.width || 32, maxLineWidth)
178 | const widgetHeight = textHeight * 1.5
179 | return [widgetWidth, widgetHeight]
180 | },
181 | onRemoved: function () {
182 | if (this.inputEl) {
183 | this.inputEl.remove()
184 | }
185 | },
186 | }
187 |
188 | Object.defineProperty(w, 'value', {
189 | get() {
190 | return this.inputEl.innerHTML
191 | },
192 | set(value) {
193 | this.inputEl.innerHTML = value
194 | this.parent?.setSize?.(this.parent?.computeSize())
195 | },
196 | })
197 |
198 | w.inputEl = document.createElement('p')
199 | w.inputEl.style.textAlign = 'center'
200 | w.inputEl.style.fontSize = `${fontSize}px`
201 | w.inputEl.style.color = 'var(--input-text)'
202 | w.inputEl.style.lineHeight = 0
203 |
204 | w.inputEl.style.fontFamily = 'monospace'
205 | w.value = val
206 | document.body.appendChild(w.inputEl)
207 |
208 | return w
209 | }
210 |
211 | export function setColorAndBgColor(type) {
212 |     const colorMap = {
213 |         "MODEL": LGraphCanvas.node_colors.blue,
214 |         "LATENT": LGraphCanvas.node_colors.purple,
215 |         "VAE": LGraphCanvas.node_colors.red,
216 |         "IMAGE": LGraphCanvas.node_colors.pale_blue,
217 |         "FLOAT": LGraphCanvas.node_colors.green,
218 |         "MASK": LGraphCanvas.node_colors.cyan,
219 |         "INT": { color: "#1b4669", bgcolor: "#29699c"},
220 |         // CONDITIONING and CLIP were defined twice; with duplicate object keys
221 |         // only the last entry wins, so keep the explicit color objects.
222 |         "CONDITIONING": { color: "#4F200D", bgcolor: "#FF8400"},
223 |         "CLIP": { color: "#898121", bgcolor: "#E7B10A"},
224 | 
225 |         "*": { color: "#d4a828", bgcolor: "#756d58"},
226 |     };
227 |
228 |     const nameColorMap = {
229 |         "ksampler": { color: "#820300", bgcolor: "#B80000"},   // canvas colors need the leading "#"
230 |         "controlnet": { color: "#FF9800", bgcolor: "#5F8670"},
231 |         "ipadapter": { color: "#3E3232", bgcolor: "#503C3C"},
232 |         "checkpoint": { color: "#2D3250", bgcolor: "#424769"},
233 |         "lora": { color: "#C499F3", bgcolor: "#7360DF"},
234 |     }
235 |
236 | const colors = colorMap[type];
237 | if (colors) {
238 | this.color = colors.color;
239 | this.bgcolor = colors.bgcolor;
240 | } else {
241 | // Handle the default case if needed
242 | const name = this.type.toLowerCase?.();
243 |
244 | if (!name) {
245 | return;
246 | }
247 | for (let [key, value] of Object.entries(nameColorMap)) {
248 | if (name.includes(key)) {
249 | this.color = value.color;
250 | this.bgcolor = value.bgcolor
251 | console.log(name, key);
252 | return;
253 | }
254 | }
255 | }
256 | }
257 |
--------------------------------------------------------------------------------
/nodes/komojini_nodes.py:
--------------------------------------------------------------------------------
1 |
2 | from server import PromptServer
3 | import os
4 |
5 | from .logger import logger
6 |
7 |
8 | # wildcard trick is taken from pythongossss's
9 | class AnyType(str):
10 | def __ne__(self, __value: object) -> bool:
11 | return False
12 |
13 | any_typ = AnyType("*")
14 |
15 |
16 | HIDDEN_ARGS = {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "unique_id": "UNIQUE_ID"}
17 |
18 |
19 | def get_file_item(base_type, path):
20 | path_type = base_type
21 |
22 |     if path.endswith("[output]"):  # annotated paths end with " [output]" etc.; an equality test never matched
23 |         path_type = "output"
24 |         path = path[:-9]
25 |     elif path.endswith("[input]"):
26 |         path_type = "input"
27 |         path = path[:-8]
28 |     elif path.endswith("[temp]"):
29 |         path_type = "temp"
30 |         path = path[:-7]
31 |
32 | subfolder = os.path.dirname(path)
33 | filename = os.path.basename(path)
34 |
35 | return {
36 | "filename": filename,
37 | "subfolder": subfolder,
38 | "type": path_type
39 | }
40 |
41 |
42 | def workflow_to_map(workflow):
43 | nodes = {}
44 | links = {}
45 | for link in workflow['links']:
46 | links[link[0]] = link[1:]
47 | for node in workflow['nodes']:
48 | nodes[str(node['id'])] = node
49 |
50 | return nodes, links
51 |
52 |
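   | # Walks outgoing links through Reroute nodes (including "Reroute (rgthree)")
   | # and appends the ids of the first non-Reroute nodes reached to `res`.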
53 | def collect_non_reroute_nodes(node_map, links, res, node_id):
54 | if node_map[node_id]['type'] != 'Reroute' and node_map[node_id]['type'] != 'Reroute (rgthree)':
55 | res.append(node_id)
56 | else:
57 | for link in node_map[node_id]['outputs'][0]['links']:
58 | next_node_id = str(links[link][2])
59 | collect_non_reroute_nodes(node_map, links, res, next_node_id)
60 |
61 | from .cache_data import CACHED_MAP
62 |
63 |
64 | class To:
65 | @classmethod
66 | def INPUT_TYPES(cls):
67 | return {"required": {"key": ("STRING", {"default": ""}),
68 | },
69 | "optional": {"value": (any_typ, )}
70 | # "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "unique_id": "UNIQUE_ID"}
71 | }
72 |
73 | FUNCTION = "run"
74 | RETURN_TYPES = (any_typ, )
75 | RETURN_NAMES = ("*", )
76 |
77 | def run(self, key, **kwargs):
78 | if "*" in kwargs:
79 | value = kwargs["*"]
80 | elif "value" in kwargs:
81 | value = kwargs["value"]
82 | else:
83 | logger.warning(f"No value assigned for key: {key}, inputs: {kwargs}")
84 |             # fall back to the first input value that was provided
85 |             value = next(iter(kwargs.values()))
86 | 
87 |         CACHED_MAP[key] = value
88 | return (value, )
89 |
90 |
91 | def run_getter(key, **kwargs):
92 | if "*" in kwargs:
93 | return (kwargs["*"], )
94 | elif "value" in kwargs:
95 | return (kwargs["value"], )
96 |
97 | else:
98 | for k, v in kwargs.items():
99 | if k in HIDDEN_ARGS:
100 | continue
101 | return (v, )
102 |         logger.warning(f"No value assigned for key: {key}, inputs: {kwargs}")
103 | 
104 |     # return a tuple so callers that index the result (e.g. CachedGetter) don't crash
105 |     return (None, )
106 |
107 | class From:
108 | @classmethod
109 | def INPUT_TYPES(cls):
110 | return {"required": {"key": ("STRING", {"default": ""})},
111 | "optional" : {
112 | "value": (any_typ, )
113 | },
114 | # "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "unique_id": "UNIQUE_ID"}
115 | }
116 |
117 | FUNCTION = "run"
118 | RETURN_TYPES = (any_typ, )
119 | RETURN_NAMES = ("*", )
120 |
121 | def run(self, key, **kwargs):
122 | return run_getter(key, **kwargs)
123 |
124 |
125 | class ImageGetter:
126 | @classmethod
127 | def INPUT_TYPES(cls):
128 | return {"required": {"key": ("STRING", {"default": ""})},
129 | "optional" : {
130 | "value": ("IMAGE", )
131 | },
132 | # "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "unique_id": "UNIQUE_ID"}
133 | }
134 |
135 | FUNCTION = "run"
136 | RETURN_TYPES = ("IMAGE", )
137 | RETURN_NAMES = ("*", )
138 |
139 | def run(self, key, **kwargs):
140 | return run_getter(key, **kwargs)
141 |
142 |
143 | class CachedGetter:
144 | @classmethod
145 | def INPUT_TYPES(cls):
146 | return {"required": {"key": ("STRING", {"default": ""})},
147 | "optional" : {
148 | "value": (any_typ, )
149 | },
150 | # "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "unique_id": "UNIQUE_ID"}
151 | }
152 |
153 | FUNCTION = "run"
154 | RETURN_TYPES = (any_typ, )
155 | RETURN_NAMES = ("*", )
156 |
157 | def run(self, key, **kwargs):
158 | cached_value = CACHED_MAP.get(key)
159 | if cached_value is not None:
160 | return (cached_value,)
161 |
162 | value = run_getter(key, **kwargs)[0]
163 | logger.info(f"There is no cached data for {key}. Caching new data...")
164 | CACHED_MAP[key] = value
165 | return (value, )
166 |
167 |
168 | class FlowBuilder:
169 | @classmethod
170 | def INPUT_TYPES(cls):
171 | return {
172 | "required": {
173 | "value": (any_typ, ),
174 | },
175 | "optional": {
176 | "batch_size": ("INT", {"default": 1, "min": 1, "max": 10000, "step": 1}),
177 | },
178 | "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "unique_id": "UNIQUE_ID"},
179 | }
180 |
181 | FUNCTION = "run"
182 | RETURN_TYPES = (any_typ, )
183 | RETURN_NAMES = ("value", )
184 | CATEGORY = "komojini/flow"
185 |
186 | def run(self, value, **kwargs):
187 | return (value, )
188 |
189 |
190 | class FlowBuilderSetter:
191 | @classmethod
192 | def INPUT_TYPES(cls):
193 | return {
194 | "required": {
195 | "value": (any_typ,),
196 | "key": ("STRING", {"default": ""}),
197 | },
198 | "optional": {
199 | "batch_size": ("INT", {"default": 1, "min": 1, "max": 10000, "step": 1}),
200 | },
201 | "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "unique_id": "UNIQUE_ID"},
202 | }
203 |
204 | FUNCTION = "run"
205 | RETURN_TYPES = (any_typ,)
206 | RETURN_NAMES = ("value",)
207 | CATEGORY = "komojini/flow"
208 |
209 | def run(self, **kwargs):
210 | key = kwargs.get("key")
211 |
212 | if "*" in kwargs:
213 | value = kwargs["*"]
214 | elif "value" in kwargs:
215 | value = kwargs["value"]
216 | else:
217 | logger.warning(f"No value assigned for key: {key}, inputs: {kwargs}")
218 |
219 | value = next(iter(kwargs.values()))
220 |
221 | CACHED_MAP[key] = value
222 | return (value, )
223 |
224 |
225 | from PIL import Image, ImageOps
226 | import torch
227 | import base64
228 | from io import BytesIO
229 | import numpy as np
230 |
231 |
232 |
233 | class DragNUWAImageCanvas:
234 | @classmethod
235 | def INPUT_TYPES(cls):
236 | return {
237 | "required": {
238 | "image": ("STRING", {"default": "[IMAGE DATA]"}),
239 | "tracking_points": ("STRING", {"default": "", "multiline": True})
240 | }
241 | }
242 | FUNCTION = "run"
243 | RETURN_TYPES = ("IMAGE", "STRING",)
244 | RETURN_NAMES = ("image", "tracking_points",)
245 | CATEGORY = "komojini/image"
246 |
247 | def run(self, image, tracking_points, **kwargs):
248 | logger.info(f"DragNUWA output of tracking points: {tracking_points}")
249 |
250 | # Extract the base64 string without the prefix
251 | base64_string = image.split(",")[1]
252 |
253 | # Decode base64 string to bytes
254 | i = base64.b64decode(base64_string)
255 |
256 | # Convert bytes to PIL Image
257 | i = Image.open(BytesIO(i))
258 |
259 | i = ImageOps.exif_transpose(i)
260 | image = i.convert("RGB")
261 | image = np.array(image).astype(np.float32) / 255.0
262 | image = torch.from_numpy(image)[None,]
263 | return (image, tracking_points, )
264 |
265 |
266 | MAX_IMAGE_COUNT = 50
267 |
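   | # Declares MAX_IMAGE_COUNT numbered widget groups up front; the frontend
   | # (widgethider.js) hides the groups beyond the current image_count.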
268 | class BatchCreativeInterpolationNodeDynamicSettings:
269 | @classmethod
270 | def INPUT_TYPES(s):
271 | inputs = {
272 | "required": {
273 | "image_count": ("INT", {"default": 1, "min": 1, "max": MAX_IMAGE_COUNT, "step": 1}),
274 | },
275 | }
276 |
277 |         for i in range(1, MAX_IMAGE_COUNT + 1):  # inclusive, so the last image slot gets widgets too
278 | if i == 1:
279 | inputs["required"][f"frame_distribution_{i}"] = ("INT", {"default": 4, "min": 4, "max": 64, "step": 1})
280 | else:
281 | inputs["required"][f"frame_distribution_{i}"] = ("INT", {"default": 16, "min": 4, "max": 64, "step": 1})
282 |
283 | inputs["required"][f"key_frame_influence_{i}"] = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.1})
284 | inputs["required"][f"min_strength_value_{i}"] = ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.1})
285 | inputs["required"][f"max_strength_value_{i}"] = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.1})
286 |
287 | return inputs
288 |
289 | RETURN_TYPES = ("STRING", "STRING", "STRING",)
290 | RETURN_NAMES = ("dynamic_frame_distribution_values", "dynamic_key_frame_influence_values", "dynamic_strength_values",)
291 |
292 | FUNCTION = "run"
293 |
294 | def run(self, image_count, **kwargs):
295 | dynamic_frame_distribution_values = ""
296 | dynamic_key_frame_influence_values = ""
297 | dynamic_strength_values = ""
298 |
299 | previous_frame_distribution = 0
300 |
301 | for i in range(1, image_count+1):
302 | previous_frame_distribution += kwargs.get(f"frame_distribution_{i}", 0)
303 |
304 | distribution_value = str(previous_frame_distribution) + ","
305 | influence_value = str(kwargs.get(f"key_frame_influence_{i}")) + ","
306 | strength_value = "({min},{max}),".format(min=kwargs.get(f"min_strength_value_{i}"), max=kwargs.get(f"max_strength_value_{i}"))
307 |
308 | dynamic_frame_distribution_values += distribution_value
309 | dynamic_key_frame_influence_values += influence_value
310 | dynamic_strength_values += strength_value
311 |
312 | return (dynamic_frame_distribution_values[:-1], dynamic_key_frame_influence_values[:-1], dynamic_strength_values[:-1],)
313 |
314 | __all__ = [
315 | "To",
316 | "From",
317 | "ImageGetter",
318 | "CachedGetter",
319 | "FlowBuilder",
320 | "FlowBuilderSetter",
321 | "DragNUWAImageCanvas",
322 | "BatchCreativeInterpolationNodeDynamicSettings",
323 | ]
--------------------------------------------------------------------------------
/nodes/video_loaders.py:
--------------------------------------------------------------------------------
1 | from pytube import YouTube
2 | from pytube.exceptions import VideoUnavailable
3 | import cv2
4 | import os
5 | from pathlib import Path
6 | from PIL import Image, ImageOps
7 | from typing import Tuple, Dict, List, Any, Union
8 | import numpy as np
9 |
10 | import torch
11 | import subprocess
12 | import folder_paths
13 | from comfy.utils import common_upscale
14 |
15 | from .logger import logger
16 | from .utils import calculate_file_hash, validate_path, lazy_eval, hash_path
17 |
18 |
19 | video_extensions = ['webm', 'mp4', 'mkv', 'gif']
20 | force_sizes = ["Disabled", "256x?", "?x256", "256x256", "512x?", "?x512", "512x512", "?x768", "768x?"]
21 |
22 | COMMON_REQUIRED_INPUTS = {
23 | "start_sec": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10000.0, "step": 0.1}),
24 | "end_sec": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10000.0, "step": 0.1}),
25 | "max_fps": ("INT", {"default": -1, "min": -1, "max": 30, "step": 1}),
26 | "force_size": (force_sizes,),
27 | "frame_load_cap": ("INT", {"default": 50, "min": 1, "max": 10000, "step": 1}),
28 | }
29 |
30 | EMPTY_VIDEO_INPUTS = {
31 | "width": ("INT", {"default": 512, "min": 64, "max": 8192, "step": 64}),
32 | "height": ("INT", {"default": 512, "min": 64, "max": 8192, "step": 64}),
33 | "frame_count": ("INT", {"default": 1, "min": 1, "max": 4096}),
34 | "fps": ("INT", {"default": 10, "min": 1, "max": 1000, "step": 1}),
35 | }
36 |
37 | def target_size(width, height, force_size) -> tuple[int, int]:
38 | if force_size != "Disabled":
39 | force_size = force_size.split("x")
40 | if force_size[0] == "?":
41 | width = (width*int(force_size[1]))//height
42 |             #Limit to a multiple of 8 for latent conversion
43 | #TODO: Consider instead cropping and centering to main aspect ratio
44 | width = int(width)+4 & ~7
45 | height = int(force_size[1])
46 | elif force_size[1] == "?":
47 | height = (height*int(force_size[0]))//width
48 | height = int(height)+4 & ~7
49 | width = int(force_size[0])
50 | else:
51 | width = int(force_size[0])
52 | height = int(force_size[1])
53 | return (width, height)
54 |
55 |
56 | def frame_to_tensor(frame) -> torch.Tensor:
57 | frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
58 | # convert frame to comfyui's expected format (taken from comfy's load image code)
59 | image = Image.fromarray(frame)
60 | image = ImageOps.exif_transpose(image)
61 | image = np.array(image, dtype=np.float32) / 255.0
62 | image = torch.from_numpy(image)[None,]
63 | return image
64 |
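   | # Samples frames between start_sec and end_sec with a fixed stride so at most
   | # frame_load_cap frames are returned; the effective fps is the source fps
   | # divided by that stride, optionally clamped down to max_fps.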
65 | def process_video_cap(
66 | video_cap,
67 | start_sec,
68 | end_sec,
69 | frame_load_cap,
70 | max_fps = None,
71 | ):
72 | fps = int(video_cap.get(cv2.CAP_PROP_FPS))
73 | width, height = int(video_cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(video_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
74 | frame_count = int(video_cap.get(cv2.CAP_PROP_FRAME_COUNT))
75 |
76 | if not frame_load_cap or frame_load_cap <= 0:
77 | frame_load_cap = 999999
78 |
79 | if not end_sec:
80 | end_sec = frame_count / fps
81 |
82 | # Calculate the total number of frames in the specified time range
83 | video_sec = end_sec - start_sec
84 | original_frame_length = int(video_sec * fps)
85 |
86 | step = max(original_frame_length // frame_load_cap, 1)
87 | new_fps = fps // step
88 |
89 | if max_fps and 0 < max_fps < new_fps:
90 |         if (step * new_fps) % max_fps != 0:
91 |             logger.warning(f"Warning | new_fps: {new_fps}, max_fps: {max_fps}, modified step: {int(step / max_fps * new_fps)}")
92 |         step = int(step / max_fps * new_fps)
93 | new_fps = max_fps
94 |
95 |
96 | start_frame = fps * start_sec
97 | end_frame = fps * end_sec
98 |
99 | frames_added = 0
100 | images = []
101 |
102 | curr_frame = start_frame
103 |
104 | logger.info(f"start_frame: {start_frame}\nend_frame: {end_frame}\nstep: {step}\n")
105 |
106 | while True:
107 | # Set the frame position
108 | int_curr_frame = int(curr_frame)
109 |
110 | video_cap.set(cv2.CAP_PROP_POS_FRAMES, int_curr_frame)
111 |
112 | ret, frame = video_cap.read()
113 | if not ret:
114 | break
115 |
116 | # Append the frame to the frames list
117 | image = frame_to_tensor(frame)
118 | images.append(image)
119 | frames_added += 1
120 |
121 | # if cap exists and we've reached it, stop processing frames
122 | if frame_load_cap > 0 and frames_added >= frame_load_cap:
123 | break
124 | if curr_frame >= end_frame:
125 | break
126 |
127 | curr_frame += step
128 |
129 | #Setup lambda for lazy audio capture
130 | #audio = lambda : get_audio(video, skip_first_frames * target_frame_time, frame_load_cap*target_frame_time)
131 | return (images, frames_added, new_fps, width, height)
132 |
133 |
134 | def load_video_cv(
135 | video: str,
136 | start_sec: float,
137 | end_sec: float,
138 | frame_load_cap: int = 50,
139 | output_dir = None,
140 | max_fps: int = -1,
141 | force_size = "Disabled",
142 | **kwargs,
143 | ) -> Tuple[torch.Tensor, int, int, int, int]:
144 |
145 | video_cap = None
146 | try:
147 | video_cap = cv2.VideoCapture(video)
148 | if not video_cap.isOpened():
149 | raise ValueError(f"{video} could not be loaded with cv.")
150 | images, frames_added, fps, width, height = process_video_cap(video_cap, start_sec, end_sec, frame_load_cap, max_fps)
151 |
152 | finally:
153 | if video_cap:
154 | video_cap.release()
155 | if len(images) == 0:
156 | raise RuntimeError("No frames generated")
157 | images = torch.cat(images, dim=0)
158 | if force_size != "Disabled":
159 | new_size = target_size(width, height, force_size)
160 |
161 | if new_size[0] != width or new_size[1] != height:
162 | s = images.movedim(-1,1)
163 | s = common_upscale(s, new_size[0], new_size[1], "lanczos", "center")
164 | images = s.movedim(1,-1)
165 | width, height = new_size
166 |
167 | # TODO: raise an error maybe if no frames were loaded?
168 |
169 | # Setup lambda for lazy audio capture
170 | # audio = lambda : get_audio(video, skip_first_frames * target_frame_time,
171 | # frame_load_cap*target_frame_time)
172 |
173 | return (images, frames_added, fps, width, height,)
174 |
175 |
176 | def is_gif(filename) -> bool:
177 | return str(filename).endswith("gif")
178 |
179 |
180 | def get_audio(file, start_time=0, duration=0):
181 | # TODO: set ffmpeg_path
182 | ffmpeg_path = ""
183 | args = [ffmpeg_path, "-v", "error", "-i", file]
184 | if start_time > 0:
185 | args += ["-ss", str(start_time)]
186 | if duration > 0:
187 | args += ["-t", str(duration)]
188 | return subprocess.run(args + ["-f", "wav", "-"],
189 | stdout=subprocess.PIPE, check=True).stdout
190 |
191 |
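   | # Downloads the highest-resolution progressive (muxed video+audio) mp4 stream
   | # with pytube, then samples frames from it like the local-file loader above.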
192 | def download_youtube_video(
193 | youtube_url: str,
194 | start_sec: float,
195 | end_sec: float,
196 | frame_load_cap: int = 50,
197 | output_dir = None,
198 | force_size = "Disabled",
199 | max_fps = None,
200 | **kwargs,
201 | ):
202 | if not output_dir:
203 | output_dir = os.path.join(folder_paths.output_directory, "youtube")
204 |
205 | if not os.path.exists(output_dir):
206 | os.makedirs(output_dir, exist_ok=True)
207 |
208 | cap = None
209 |
210 | try:
211 | yt = YouTube(youtube_url)
212 | stream = yt.streams.filter(progressive=True, file_extension='mp4').order_by('resolution').desc().first()
213 |
214 | video_path = stream.download(output_dir)
215 |
216 | cap = cv2.VideoCapture(video_path)
217 | images, frames_added, fps, width, height = process_video_cap(cap, start_sec, end_sec, frame_load_cap, max_fps)
218 |
219 | finally:
220 | # Release the video capture object
221 | if cap:
222 | cap.release()
223 |
224 | if len(images) == 0:
225 | raise RuntimeError("No frames generated")
226 | images = torch.cat(images, dim=0)
227 |
228 | if force_size != "Disabled":
229 | new_size = target_size(width, height, force_size)
230 | if new_size[0] != width or new_size[1] != height:
231 | s = images.movedim(-1,1)
232 | s = common_upscale(s, new_size[0], new_size[1], "lanczos", "center")
233 | images = s.movedim(1,-1)
234 | width, height = new_size
235 |
236 | #Setup lambda for lazy audio capture
237 | #audio = lambda : get_audio(video, skip_first_frames * target_frame_time, frame_load_cap*target_frame_time)
238 | return (images, frames_added, fps, width, height)
239 |
240 |
241 | class YouTubeVideoLoader:
242 | @classmethod
243 | def INPUT_TYPES(s):
244 |
245 | inputs = {
246 | "required": {"youtube_url": ("STRING", {"default": "youtube/url/here"}),},
247 | "optional": {
248 | "output_dir": ("STRING", {"default": ""}),
249 | }
250 | }
251 | inputs["required"].update(COMMON_REQUIRED_INPUTS)
252 |
253 | return inputs
254 |
255 | FUNCTION = "load_video"
256 | RETURN_TYPES = ("IMAGE", "INT", "INT", "INT", "INT",)
257 | RETURN_NAMES = ("images", "frame_count", "fps", "width", "height",)
258 | CATEGORY = "komojini/Video"
259 |
260 | def load_video(self, **kwargs):
261 | return download_youtube_video(**kwargs)
262 |
263 |
264 | class UltimateVideoLoader:
265 | source = [
266 | "fileupload",
267 | "filepath",
268 | "YouTube",
269 | "emptyvideo",
270 | ]
271 |
272 | @classmethod
273 | def INPUT_TYPES(cls):
274 | input_dir = folder_paths.get_input_directory()
275 | files = []
276 | for f in os.listdir(input_dir):
277 | if os.path.isfile(os.path.join(input_dir, f)):
278 | file_parts = f.split('.')
279 | if len(file_parts) > 1 and (file_parts[-1] in video_extensions):
280 | files.append(f)
281 |
282 | inputs = {
283 | "required": {
284 | "source": (cls.source,),
285 | "youtube_url": ("STRING", {"default": "youtube/url/here"}),
286 | "video": ("STRING", {"default": "X://insert/path/here.mp4", "path_extensions": video_extensions}),
287 | "upload": (sorted(files),),
288 | }
289 | }
290 |
291 | inputs["required"].update(COMMON_REQUIRED_INPUTS)
292 | inputs["required"].update(EMPTY_VIDEO_INPUTS)
293 |
294 | return inputs
295 |
296 | FUNCTION = "load_video"
297 | RETURN_TYPES = ("IMAGE", "INT", "INT", "INT", "INT",)
298 | RETURN_NAMES = ("images", "frame_count", "fps", "width", "height",)
299 | CATEGORY = "komojini/Video"
300 |
301 | def load_video(self, **kwargs):
302 | source = kwargs.get("source")
303 | if source == "YouTube":
304 | images, frames_count, fps, width, height = download_youtube_video(**kwargs)
305 | elif source == "filepath":
306 | images, frames_count, fps, width, height = load_video_cv(**kwargs)
307 | elif source == "fileupload":
308 | kwargs['video'] = folder_paths.get_annotated_filepath(kwargs['upload'].strip("\""))
309 | images, frames_count, fps, width, height = load_video_cv(**kwargs)
310 | elif source == "emptyvideo":
311 | frames_count = kwargs["frame_count"]
312 | width, height = kwargs["width"], kwargs["height"]
313 | fps = kwargs["fps"]
314 | images = torch.zeros([frames_count, height, width, 3])
315 |
316 |         logger.debug(f"loaded video images.shape: {images.shape}, frames_count: {frames_count}, fps: {fps}, widthxheight: {width}x{height}")
317 | return (images, frames_count, fps, width, height,)
318 |
319 | # @classmethod
320 | # def IS_CHANGED(s, upload, **kwargs):
321 | # logger.debug(f"is_changed | source: {source}")
322 |
323 | # source = kwargs.get("source")
324 | # if source == "filepath":
325 | # video = kwargs.get("video")
326 | # return hash_path(video)
327 | # elif source == "fileupload":
328 | # image_path = folder_paths.get_annotated_filepath(upload)
329 | # return calculate_file_hash(image_path)
330 | # else:
331 | # youtube_url = kwargs.get("youtube_url")
332 | # return hash_path(youtube_url)
333 |
334 | # @classmethod
335 | # def VALIDATE_INPUTS(s, video, force_size, **kwargs):
336 | # return validate_path(video, allow_none=True)
--------------------------------------------------------------------------------
/js/comfy_shared.js:
--------------------------------------------------------------------------------
1 | import { app } from '/scripts/app.js'
2 |
3 | export const log = (...args) => {
4 | if (window.MTB?.DEBUG) {
5 | console.debug(...args)
6 | }
7 | }
8 |
9 | //- WIDGET UTILS
10 | export const CONVERTED_TYPE = 'converted-widget'
11 |
12 | export function offsetDOMWidget(
13 | widget,
14 | ctx,
15 | node,
16 | widgetWidth,
17 | widgetY,
18 | height
19 | ) {
20 | const margin = 10
21 | const elRect = ctx.canvas.getBoundingClientRect()
22 | const transform = new DOMMatrix()
23 | .scaleSelf(
24 | elRect.width / ctx.canvas.width,
25 | elRect.height / ctx.canvas.height
26 | )
27 | .multiplySelf(ctx.getTransform())
28 | .translateSelf(margin, margin + widgetY)
29 |
30 | const scale = new DOMMatrix().scaleSelf(transform.a, transform.d)
31 | Object.assign(widget.inputEl.style, {
32 | transformOrigin: '0 0',
33 | transform: scale,
34 | left: `${transform.a + transform.e}px`,
35 | top: `${transform.d + transform.f}px`,
36 | width: `${widgetWidth - margin * 2}px`,
37 | // height: `${(widget.parent?.inputHeight || 32) - (margin * 2)}px`,
38 | height: `${(height || widget.parent?.inputHeight || 32) - margin * 2}px`,
39 |
40 | position: 'absolute',
41 | background: !node.color ? '' : node.color,
42 | color: !node.color ? '' : 'white',
43 | zIndex: 5, //app.graph._nodes.indexOf(node),
44 | })
45 | }
46 |
47 | /**
48 | * Extracts the type and link type from a widget config object.
49 | * @param {*} config
50 | * @returns
51 | */
52 | export function getWidgetType(config) {
53 | // Special handling for COMBO so we restrict links based on the entries
54 | let type = config?.[0]
55 | let linkType = type
56 | if (type instanceof Array) {
57 | type = 'COMBO'
58 | linkType = linkType.join(',')
59 | }
60 | return { type, linkType }
61 | }
62 |
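   | // Keeps a node's dynamic inputs tidy: removes the slot that was just
   | // disconnected (relabeling the rest sequentially) and appends a fresh slot
   | // once the last input becomes connected.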
63 | export const dynamic_connection = (
64 | node,
65 | index,
66 | connected,
67 | connectionPrefix = 'input_',
68 | connectionType = 'PSDLAYER'
69 | ) => {
70 | // remove all non connected inputs
71 | if (!connected && node.inputs.length > 1) {
72 | log(`Removing input ${index} (${node.inputs[index].name})`)
73 | if (node.widgets) {
74 | const w = node.widgets.find((w) => w.name === node.inputs[index].name)
75 | if (w) {
76 | w.onRemoved?.()
77 | node.widgets.length = node.widgets.length - 1
78 | }
79 | }
80 | node.removeInput(index)
81 |
82 | // make inputs sequential again
83 | for (let i = 0; i < node.inputs.length; i++) {
84 | node.inputs[i].label = `${connectionPrefix}${i + 1}`
85 | }
86 | }
87 |
88 | // add an extra input
89 | if (node.inputs[node.inputs.length - 1].link != undefined) {
90 | log(
91 | `Adding input ${node.inputs.length + 1} (${connectionPrefix}${
92 | node.inputs.length + 1
93 | })`
94 | )
95 |
96 | node.addInput(
97 | `${connectionPrefix}${node.inputs.length + 1}`,
98 | connectionType
99 | )
100 | }
101 | }
102 |
103 | /**
104 | * Appends a callback to the extra menu options of a given node type.
105 | * @param {*} nodeType
106 | * @param {*} cb
107 | */
108 | export function addMenuHandler(nodeType, cb) {
109 | const getOpts = nodeType.prototype.getExtraMenuOptions
110 | nodeType.prototype.getExtraMenuOptions = function () {
111 | const r = getOpts.apply(this, arguments)
112 | cb.apply(this, arguments)
113 | return r
114 | }
115 | }
116 |
117 | export function hideWidget(node, widget, suffix = '') {
118 | widget.origType = widget.type
119 | widget.hidden = true
120 | widget.origComputeSize = widget.computeSize
121 | widget.origSerializeValue = widget.serializeValue
122 | widget.computeSize = () => [0, -4] // -4 is due to the gap litegraph adds between widgets automatically
123 | widget.type = CONVERTED_TYPE + suffix
124 | widget.serializeValue = () => {
125 | // Prevent serializing the widget if we have no input linked
126 | const { link } = node.inputs.find((i) => i.widget?.name === widget.name)
127 | if (link == null) {
128 | return undefined
129 | }
130 | return widget.origSerializeValue
131 | ? widget.origSerializeValue()
132 | : widget.value
133 | }
134 |
135 | // Hide any linked widgets, e.g. seed+seedControl
136 | if (widget.linkedWidgets) {
137 | for (const w of widget.linkedWidgets) {
138 | hideWidget(node, w, ':' + widget.name)
139 | }
140 | }
141 | }
142 |
143 | export function showWidget(widget) {
144 | widget.type = widget.origType
145 | widget.computeSize = widget.origComputeSize
146 | widget.serializeValue = widget.origSerializeValue
147 |
148 | delete widget.origType
149 | delete widget.origComputeSize
150 | delete widget.origSerializeValue
151 |
152 | // Hide any linked widgets, e.g. seed+seedControl
153 | if (widget.linkedWidgets) {
154 | for (const w of widget.linkedWidgets) {
155 | showWidget(w)
156 | }
157 | }
158 | }
159 |
160 | export function convertToWidget(node, widget) {
161 | showWidget(widget)
162 | const sz = node.size
163 | node.removeInput(node.inputs.findIndex((i) => i.widget?.name === widget.name))
164 |
165 | for (const widget of node.widgets) {
166 | widget.last_y -= LiteGraph.NODE_SLOT_HEIGHT
167 | }
168 |
169 | // Restore original size but grow if needed
170 | node.setSize([Math.max(sz[0], node.size[0]), Math.max(sz[1], node.size[1])])
171 | }
172 |
173 | export function convertToInput(node, widget, config) {
174 | hideWidget(node, widget)
175 |
176 | const { linkType } = getWidgetType(config)
177 |
178 | // Add input and store widget config for creating on primitive node
179 | const sz = node.size
180 | node.addInput(widget.name, linkType, {
181 | widget: { name: widget.name, config },
182 | })
183 |
184 | for (const widget of node.widgets) {
185 | widget.last_y += LiteGraph.NODE_SLOT_HEIGHT
186 | }
187 |
188 | // Restore original size but grow if needed
189 | node.setSize([Math.max(sz[0], node.size[0]), Math.max(sz[1], node.size[1])])
190 | }
191 |
192 | export function hideWidgetForGood(node, widget, suffix = '') {
193 | widget.origType = widget.type
194 | widget.origComputeSize = widget.computeSize
195 | widget.origSerializeValue = widget.serializeValue
196 | widget.computeSize = () => [0, -4] // -4 is due to the gap litegraph adds between widgets automatically
197 | widget.type = CONVERTED_TYPE + suffix
198 | // widget.serializeValue = () => {
199 | // // Prevent serializing the widget if we have no input linked
200 | // const w = node.inputs?.find((i) => i.widget?.name === widget.name);
201 | // if (w?.link == null) {
202 | // return undefined;
203 | // }
204 | // return widget.origSerializeValue ? widget.origSerializeValue() : widget.value;
205 | // };
206 |
207 | // Hide any linked widgets, e.g. seed+seedControl
208 | if (widget.linkedWidgets) {
209 | for (const w of widget.linkedWidgets) {
210 | hideWidgetForGood(node, w, ':' + widget.name)
211 | }
212 | }
213 | }
214 |
215 | export function fixWidgets(node) {
216 | if (node.inputs) {
217 | for (const input of node.inputs) {
218 | log(input)
219 | if (input.widget || node.widgets) {
220 | // if (newTypes.includes(input.type)) {
221 | const matching_widget = node.widgets.find((w) => w.name === input.name)
222 | if (matching_widget) {
223 | // if (matching_widget.hidden) {
224 | // log(`Already hidden skipping ${matching_widget.name}`)
225 | // continue
226 | // }
227 | const w = node.widgets.find((w) => w.name === matching_widget.name)
228 | if (w && w.type != CONVERTED_TYPE) {
229 | log(w)
230 |           log(`hiding ${w.name}(${w.type}) from ${node.type}`)
231 | log(node)
232 | hideWidget(node, w)
233 | } else {
234 | log(`converting to widget ${w}`)
235 |
236 | convertToWidget(node, input)
237 | }
238 | }
239 | }
240 | }
241 | }
242 | }
243 | export function inner_value_change(widget, value, event = undefined) {
244 |   if (widget.type == 'number' || widget.type == 'BBOX') {
245 |     value = Number(value)
246 |   } else if (widget.type == 'BOOL') {
247 |     value = Boolean(value)
248 |   }
249 |   widget.value = value
250 |   // `node` and `pos` were free variables here; assume the owning node is on
251 |   // widget.parent (as this file's DOM widgets do) and take pos from the event.
252 |   const node = widget.parent
253 |   if (node && widget.options?.property &&
254 |       node.properties[widget.options.property] !== undefined) {
255 |     node.setProperty(widget.options.property, value)
256 |   }
257 |   if (widget.callback) {
258 |     widget.callback(widget.value, app.canvas, node, event?.pos, event)
259 |   }
260 | }
261 |
262 | //- COLOR UTILS
263 | export function isColorBright(rgb, threshold = 240) {
264 |   const brightness = getBrightness(rgb)
265 |   return brightness > threshold
266 | }
267 |
268 | function getBrightness(rgbObj) {
269 | return Math.round(
270 | (parseInt(rgbObj[0]) * 299 +
271 | parseInt(rgbObj[1]) * 587 +
272 | parseInt(rgbObj[2]) * 114) /
273 | 1000
274 | )
275 | }
276 |
277 | //- HTML / CSS UTILS
278 | export function defineClass(className, classStyles) {
279 | const styleSheets = document.styleSheets
280 |
281 | // Helper function to check if the class exists in a style sheet
282 | function classExistsInStyleSheet(styleSheet) {
283 | const rules = styleSheet.rules || styleSheet.cssRules
284 | for (const rule of rules) {
285 | if (rule.selectorText === `.${className}`) {
286 | return true
287 | }
288 | }
289 | return false
290 | }
291 |
292 | // Check if the class is already defined in any of the style sheets
293 | let classExists = false
294 | for (const styleSheet of styleSheets) {
295 | if (classExistsInStyleSheet(styleSheet)) {
296 | classExists = true
297 | break
298 | }
299 | }
300 |
301 | // If the class doesn't exist, add the new class definition to the first style sheet
302 | if (!classExists) {
303 | if (styleSheets[0].insertRule) {
304 | styleSheets[0].insertRule(`.${className} { ${classStyles} }`, 0)
305 | } else if (styleSheets[0].addRule) {
306 | styleSheets[0].addRule(`.${className}`, classStyles, 0)
307 | }
308 | }
309 | }
310 |
311 |
312 | // - crude uuid
313 | export function makeUUID() {
314 | let dt = new Date().getTime()
315 | const uuid = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => {
316 | const r = (dt + Math.random() * 16) % 16 | 0
317 | dt = Math.floor(dt / 16)
318 | return (c === 'x' ? r : (r & 0x3) | 0x8).toString(16)
319 | })
320 | return uuid
321 | }
322 |
323 | //- local storage manager
324 | export class LocalStorageManager {
325 | constructor(namespace) {
326 | this.namespace = namespace;
327 | }
328 |
329 | _namespacedKey(key) {
330 | return `${this.namespace}:${key}`;
331 | }
332 |
333 | set(key, value) {
334 | const serializedValue = JSON.stringify(value);
335 | localStorage.setItem(this._namespacedKey(key), serializedValue);
336 | }
337 |
338 | get(key, default_val = null) {
339 | const value = localStorage.getItem(this._namespacedKey(key));
340 | return value ? JSON.parse(value) : default_val;
341 | }
342 |
343 | remove(key) {
344 | localStorage.removeItem(this._namespacedKey(key));
345 | }
346 |
347 | clear() {
348 | Object.keys(localStorage)
349 | .filter(k => k.startsWith(this.namespace + ':'))
350 | .forEach(k => localStorage.removeItem(k));
351 | }
352 | }
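   | // Minimal usage sketch (hypothetical namespace and keys):
   | //   const store = new LocalStorageManager('komojini')
   | //   store.set('lastTab', 'video')   // stored under "komojini:lastTab"
   | //   store.get('lastTab')            // -> 'video'
   | //   store.clear()                   // removes only "komojini:*" entries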
353 |
354 |
355 | // - log utilities
356 |
357 | function createLogger(emoji, color, consoleMethod = 'log') {
358 | return function (message, ...args) {
359 | if (window.MTB?.DEBUG) {
360 | console[consoleMethod](
361 | `%c${emoji} ${message}`,
362 | `color: ${color};`,
363 | ...args
364 | )
365 | }
366 | }
367 | }
368 |
369 | export const infoLogger = createLogger('ℹ️', 'yellow')
370 | export const warnLogger = createLogger('⚠️', 'orange', 'warn')
371 | export const errorLogger = createLogger('🔥', 'red', 'error')
372 | export const successLogger = createLogger('✅', 'green')
373 |
374 |
375 | export const hasWidgets = (node) => {
376 | if (!node.widgets || !node.widgets?.[Symbol.iterator]) {
377 | return false
378 | }
379 | return true
380 | }
381 |
382 | export const cleanupNode = (node) => {
383 | if (!hasWidgets(node)) {
384 | return
385 | }
386 |
387 | for (const w of node.widgets) {
388 | if (w.canvas) {
389 | w.canvas.remove()
390 | }
391 | if (w.inputEl) {
392 | w.inputEl.remove()
393 | }
394 | // calls the widget remove callback
395 | w.onRemoved?.()
396 | }
397 | }
398 |
399 |
400 | export const setupDynamicConnections = (nodeType, prefix, inputType) => {
401 | const onNodeCreated = nodeType.prototype.onNodeCreated
402 | nodeType.prototype.onNodeCreated = function () {
403 | const r = onNodeCreated ? onNodeCreated.apply(this, arguments) : undefined
404 | this.addInput(`${prefix}_1`, inputType)
405 | return r
406 | }
407 |
408 | const onConnectionsChange = nodeType.prototype.onConnectionsChange
409 | nodeType.prototype.onConnectionsChange = function (
410 | type,
411 | index,
412 | connected,
413 | link_info
414 | ) {
415 | const r = onConnectionsChange
416 | ? onConnectionsChange.apply(this, arguments)
417 | : undefined
418 | dynamic_connection(this, index, connected, `${prefix}_`, inputType)
419 | }
420 | }
421 |
422 | export function calculateTotalChildrenHeight(parentElement) {
423 | let totalHeight = 0
424 |
425 | for (const child of parentElement.children) {
426 | const style = window.getComputedStyle(child)
427 |
428 | // Get height as an integer (without 'px')
429 | const height = parseInt(style.height, 10)
430 |
431 | // Get vertical margin as integers
432 | const marginTop = parseInt(style.marginTop, 10)
433 | const marginBottom = parseInt(style.marginBottom, 10)
434 |
435 | // Sum up height and vertical margins
436 | totalHeight += height + marginTop + marginBottom
437 | }
438 |
439 | return totalHeight
440 | }
441 |
442 |
443 | //- HTML / CSS UTILS
444 | export const loadScript = (
445 | FILE_URL,
446 | async = true,
447 | type = 'text/javascript'
448 | ) => {
449 | return new Promise((resolve, reject) => {
450 | try {
451 | // Check if the script already exists
452 | const existingScript = document.querySelector(`script[src="${FILE_URL}"]`)
453 | if (existingScript) {
454 | resolve({ status: true, message: 'Script already loaded' })
455 | return
456 | }
457 |
458 | const scriptEle = document.createElement('script')
459 | scriptEle.type = type
460 | scriptEle.async = async
461 | scriptEle.src = FILE_URL
462 |
463 | scriptEle.addEventListener('load', (ev) => {
464 | resolve({ status: true })
465 | })
466 |
467 | scriptEle.addEventListener('error', (ev) => {
468 | reject({
469 | status: false,
470 | message: `Failed to load the script ${FILE_URL}`,
471 | })
472 | })
473 |
474 | document.body.appendChild(scriptEle)
475 | } catch (error) {
476 | reject(error)
477 | }
478 | })
479 | }
480 |
481 |
--------------------------------------------------------------------------------
/komojini_server.py:
--------------------------------------------------------------------------------
1 | import server
2 | import folder_paths
3 | import os
4 | import time
5 | import psutil
6 | import GPUtil
7 | import subprocess
8 |
9 | from .nodes.utils import is_url, get_sorted_dir_files_from_directory, ffmpeg_path
10 | from comfy.k_diffusion.utils import FolderOfImages
11 | import nodes
12 |
13 | DEBUG = True
14 |
15 | from pprint import pprint
16 |
17 | def print_info(info):
18 | pprint(f"🔥 - {info}")
19 |
20 | web = server.web
21 |
22 | def is_safe(path):
23 | if "KOMOJINI_STRICT_PATHS" not in os.environ:
24 | return True
25 | basedir = os.path.abspath('.')
26 | try:
27 | common_path = os.path.commonpath([basedir, path])
28 |     except ValueError:
29 |         # commonpath raises ValueError for paths on different drives (Windows)
30 |         return False
31 | return common_path == basedir
32 |
33 |
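   | # Returns a JSON snapshot of CPU, memory, and GPU utilization; presumably
   | # polled by the frontend status viewer (js/status_viewer.js).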
34 | @server.PromptServer.instance.routes.get("/komojini/systemstatus")
35 | async def get_system_status(request):
36 | system_status = {
37 | "cpu": None,
38 | "gpus": None,
39 | "cpustats": None,
40 | "virtual_memory": dict(psutil.virtual_memory()._asdict()), # {'total': 66480500736, 'available': 61169692672, 'percent': 8.0, 'used': 4553539584, 'free': 41330143232, 'active': 13218308096, 'inactive': 10867519488, 'buffers': 374468608, 'cached': 20222349312, 'shared': 15781888, 'slab': 567083008}
41 | }
42 |
43 | # Get CPU usage
44 | cpu_usage = psutil.cpu_percent(interval=1)
45 | cpu_stats = psutil.cpu_stats() # scpustats(ctx_switches=17990329, interrupts=17614856, soft_interrupts=10633860, syscalls=0)
46 | cpu_times_percent = psutil.cpu_times_percent()
47 | cpu_count = psutil.cpu_count()
48 |
49 |
50 | # system_status["cpustats"] = cpu.__dict__
51 | system_status['cpu'] = {
52 | "cpu_usage": cpu_usage,
53 | "cpu_times_percent": cpu_times_percent,
54 | "cpu_count": cpu_count,
55 | }
56 | # Get GPU usage
57 | try:
58 |         # report every GPU rather than just the first one
59 |         gpus = GPUtil.getGPUs()
60 |         system_status["gpus"] = [gpu.__dict__ for gpu in gpus]
61 |
62 | except Exception as e:
63 | system_status['gpus'] = None # Handle the case where GPU information is not available
64 |
65 | return web.json_response(system_status)
66 |
67 |
68 | @server.PromptServer.instance.routes.get("/komojini/debug")
69 | async def get_debug(request):
70 | return web.json_response({"enabled": True})
71 |
72 |
73 | @server.PromptServer.instance.routes.get("/komojini/onqueue")
74 | async def on_queue(request):
75 |     return web.Response(status=200)  # aiohttp handlers must return a response; keep this a no-op
76 |
77 | @server.PromptServer.instance.routes.get("/viewvideo")
78 | async def view_video(request):
79 | query = request.rel_url.query
80 | if "filename" not in query:
81 | return web.Response(status=404)
82 | filename = query["filename"]
83 |
84 | #Path code misformats urls on windows and must be skipped
85 | if is_url(filename):
86 | file = filename
87 | else:
88 | filename, output_dir = folder_paths.annotated_filepath(filename)
89 |
90 | type = request.rel_url.query.get("type", "output")
91 | if type == "path":
92 | #special case for path_based nodes
93 | #NOTE: output_dir may be empty, but non-None
94 | output_dir, filename = os.path.split(filename)
95 | if output_dir is None:
96 | output_dir = folder_paths.get_directory_by_type(type)
97 |
98 | if output_dir is None:
99 | return web.Response(status=400)
100 |
101 | if not is_safe(output_dir):
102 | return web.Response(status=403)
103 |
104 | if "subfolder" in request.rel_url.query:
105 | output_dir = os.path.join(output_dir, request.rel_url.query["subfolder"])
106 |
107 | filename = os.path.basename(filename)
108 | file = os.path.join(output_dir, filename)
109 |
110 | if query.get('format', 'video') == 'folder':
111 | if not os.path.isdir(file):
112 | return web.Response(status=404)
113 | else:
114 | if not os.path.isfile(file):
115 | return web.Response(status=404)
116 |
117 | if query.get('format', 'video') == "folder":
118 |         #Check that folder contains some valid image file, get its extension
119 | #ffmpeg seems to not support list globs, so support for mixed extensions seems unfeasible
120 | os.makedirs(folder_paths.get_temp_directory(), exist_ok=True)
121 | concat_file = os.path.join(folder_paths.get_temp_directory(), "image_sequence_preview.txt")
122 | skip_first_images = int(query.get('skip_first_images', 0))
123 | select_every_nth = int(query.get('select_every_nth', 1))
124 | valid_images = get_sorted_dir_files_from_directory(file, skip_first_images, select_every_nth, FolderOfImages.IMG_EXTENSIONS)
125 | if len(valid_images) == 0:
126 | return web.Response(status=400)
127 | with open(concat_file, "w") as f:
128 | f.write("ffconcat version 1.0\n")
129 | for path in valid_images:
130 | f.write("file '" + os.path.abspath(path) + "'\n")
131 | f.write("duration 0.125\n")
132 | in_args = ["-safe", "0", "-i", concat_file]
133 | else:
134 | in_args = ["-an", "-i", file]
135 |
136 | args = [ffmpeg_path, "-v", "error"] + in_args
137 | vfilters = []
138 | if int(query.get('force_rate',0)) != 0:
139 | vfilters.append("fps=fps="+query['force_rate'] + ":round=up:start_time=0.001")
140 | if int(query.get('skip_first_frames', 0)) > 0:
141 | vfilters.append(f"select=gt(n\\,{int(query['skip_first_frames'])-1})")
142 | if int(query.get('select_every_nth', 1)) > 1:
143 | vfilters.append(f"select=not(mod(n\\,{query['select_every_nth']}))")
144 | if query.get('force_size','Disabled') != "Disabled":
145 | size = query['force_size'].split('x')
146 | if size[0] == '?' or size[1] == '?':
147 | size[0] = "-2" if size[0] == '?' else f"'min({size[0]},iw)'"
148 | size[1] = "-2" if size[1] == '?' else f"'min({size[1]},ih)'"
149 | else:
150 | #Aspect ratio is likely changed. A more complex command is required
151 | #to crop the output to the new aspect ratio
152 | ar = float(size[0])/float(size[1])
153 | vfilters.append(f"crop=if(gt({ar}\\,a)\\,iw\\,ih*{ar}):if(gt({ar}\\,a)\\,iw/{ar}\\,ih)")
154 | size = ':'.join(size)
155 | vfilters.append(f"scale={size}")
156 | vfilters.append("setpts=PTS-STARTPTS")
157 | if len(vfilters) > 0:
158 | args += ["-vf", ",".join(vfilters)]
159 | if int(query.get('frame_load_cap', 0)) > 0:
160 | args += ["-frames:v", query['frame_load_cap']]
161 | #TODO:reconsider adding high frame cap/setting default frame cap on node
162 |
163 | args += ['-c:v', 'libvpx-vp9','-deadline', 'realtime', '-cpu-used', '8', '-f', 'webm', '-']
164 |
165 | try:
166 | with subprocess.Popen(args, stdout=subprocess.PIPE) as proc:
167 | try:
168 | resp = web.StreamResponse()
169 | resp.content_type = 'video/webm'
170 | resp.headers["Content-Disposition"] = f"filename=\"{filename}\""
171 | await resp.prepare(request)
172 | while True:
173 | bytes_read = proc.stdout.read()
174 | if bytes_read is None:
175 | #TODO: check for timeout here
176 | time.sleep(.1)
177 | continue
178 | if len(bytes_read) == 0:
179 | break
180 | await resp.write(bytes_read)
181 | except ConnectionResetError as e:
182 | #Kill ffmpeg before stdout closes
183 | proc.kill()
184 | except BrokenPipeError as e:
185 | pass
186 | return resp
187 |
188 | @server.PromptServer.instance.routes.get("/getpath")
189 | async def get_path(request):
190 | query = request.rel_url.query
191 | if "path" not in query:
192 | return web.Response(status=404)
193 | path = os.path.abspath(query["path"])
194 |
195 | if not os.path.exists(path) or not is_safe(path):
196 | return web.json_response([])
197 |
198 | #Use get so None is default instead of keyerror
199 | valid_extensions = query.get("extensions")
200 | valid_items = []
201 | for item in os.scandir(path):
202 | try:
203 | if item.is_dir():
204 | valid_items.append(item.name + "/")
205 | continue
206 | if valid_extensions is None or item.name.split(".")[-1] in valid_extensions:
207 | valid_items.append(item.name)
208 | except OSError:
209 | #Broken symlinks can throw a very unhelpful "Invalid argument"
210 | pass
211 |
212 | return web.json_response(valid_items)
213 |
214 | def is_prompt_node_type_of(node_value, node_type: str) -> bool:
215 |     return node_type in node_value.get("class_type", "") or node_type in node_value.get("_meta", {}).get("title", "")
216 |
217 |
218 | def is_workflow_node_type_of(node_value, node_type: str) -> bool:
219 | return node_type in node_value.get("type", "")
220 |
221 | def test_prompt(json_data):
222 | import json
223 |
224 | try:
225 | with open(".custom_nodes/komojini-comfyui-nodes/json_data", "w") as json_file:
226 |             # dump directly; the intermediate json_str was never used
227 |             json.dump(json_data, json_file, indent=4)
228 | except Exception as e:
229 | print_info("Failed to save json data.")
230 | pass
231 |
232 | print_info("Got prompt")
233 |
234 | prompt = json_data['prompt']
235 | print(f"len(prompt): {len(prompt)}")
236 |
237 |
238 | from .nodes.cache_data import CACHED_MAP
239 |
240 | def search_setter_getter_connected_nodes(json_data):
241 | key_to_getter_node_ids = {}
242 | key_to_setter_node_id = {}
243 |
244 | prompt = json_data["prompt"]
245 | for node_id, v in prompt.items():
246 | if "class_type" in v and "inputs" in v:
247 | class_type: str = v["class_type"]
248 | inputs = v["inputs"]
249 |
250 | if is_prompt_node_type_of(v, "Get"):
251 | key = inputs.get("key")
252 | if not key:
253 | continue
254 |
255 | if class_type.endswith("CachedGetter") and CACHED_MAP.get(key, None) is not None:
256 | continue
257 |
258 | if key in key_to_getter_node_ids:
259 | key_to_getter_node_ids[key].append(node_id)
260 | else:
261 | key_to_getter_node_ids[key] = [node_id]
262 | elif is_prompt_node_type_of(v, "Set"):
263 | key = inputs.get("key")
264 | if not key:
265 | continue
266 | key_to_setter_node_id[key] = node_id
267 |
268 | return key_to_getter_node_ids, key_to_setter_node_id
269 |
270 |
271 | def search_setter_getter_from_workflow_test(json_data):
272 | key_to_getter_node_ids = {}
273 | key_to_setter_node_id = {}
274 |
275 | workflow = json_data["extra_data"]["extra_pnginfo"]["workflow"]
276 | last_node_id = workflow["last_node_id"]
277 | last_link_id = workflow["last_link_id"]
278 | nodes = workflow["nodes"]
279 | links = workflow["links"]
280 | prompt = json_data["prompt"]
281 |
282 | not_included_nodes_count = 0
283 | for node in nodes:
284 | # if node["id"] in prompt:
285 | # continue
286 |         if node["mode"] == 0 and str(node["id"]) not in prompt:  # prompt keys are strings
287 | # print_info(f"node not in prompt. node: {node}")
288 | not_included_nodes_count += 1
289 | inputs = node.get("inputs", [])
290 |         widgets_values = node.get("widgets_values")  # workflow nodes store this under "widgets_values"
291 |
292 | # {"name": "", "type": "", "link": 320}
293 | # prompt[node["id"]] = {
294 | # "inputs": {
295 |
296 | # },
297 | # "class_type": node["type"],
298 | # "_meta": {
299 | # "title": node[""],
300 | # }
301 | # }
302 | if node.get("type", "").endswith("Setter"):
303 | key = node["widgets_values"][0]
304 | elif node.get("type", "").endswith("Getter"):
305 | key = node["widgets_values"][0]
306 |
307 |
308 | """
309 | {
310 | "id": 173,
311 | "type": "JsSetter",
312 | "pos": [
313 | 6196,
314 | 9558
315 | ],
316 | "size": {
317 | "0": 210,
318 | "1": 58
319 | },
320 | "flags": {},
321 | "order": 115,
322 | "mode": 0,
323 | "inputs": [
324 | {
325 | "name": "IMAGE",
326 | "type": "IMAGE",
327 | "link": 235
328 | }
329 | ],
330 | "outputs": [
331 | {
332 | "name": "IMAGE",
333 | "type": "IMAGE",
334 | "links": [
335 | 236
336 | ],
337 | "slot_index": 0
338 | }
339 | ],
340 | "title": "Set_STEERABLE_IMAGES",
341 | "properties": {
342 | "previousName": "STEERABLE_IMAGES"
343 | },
344 | "widgets_values": [
345 | "STEERABLE_IMAGES"
346 | ],
347 | "color": "#2a363b",
348 | "bgcolor": "#3f5159"
349 | },
350 | """
351 |     print_info(f"{not_included_nodes_count} active nodes are not included in the prompt")
352 | return key_to_getter_node_ids, key_to_setter_node_id
353 |
354 | def search_setter_getter_from_workflow(json_data):
355 | key_to_getter_node_ids = {}
356 | key_to_setter_node_id = {}
357 |
358 | workflow = json_data["extra_data"]["extra_pnginfo"]["workflow"]
359 | nodes = workflow["nodes"]
360 | prompt = json_data["prompt"]
361 |
362 | not_included_nodes_count = 0
363 | for node in nodes:
364 |         if node["mode"] == 0 and str(node["id"]) not in prompt:  # prompt keys are strings
365 | not_included_nodes_count += 1
366 |
367 |     print_info(f"{not_included_nodes_count} active nodes are not included in the prompt")
368 | return key_to_getter_node_ids, key_to_setter_node_id
369 |
370 |
371 | def connect_to_from_nodes(json_data):
372 | prompt = json_data["prompt"]
373 | key_to_getter_node_ids, key_to_setter_node_id = search_setter_getter_connected_nodes(json_data)
374 | for getter_key, getter_node_ids in key_to_getter_node_ids.items():
375 | if getter_key in key_to_setter_node_id:
376 | setter_node_id = key_to_setter_node_id[getter_key]
377 |
378 |             for getter_node_id in getter_node_ids:
379 |                 # Point both possible input names of the getter at the
380 |                 # setter's first output so the executor follows the link,
381 |                 # whichever input name the getter node actually exposes.
382 |                 prompt[getter_node_id]["inputs"]["*"] = [setter_node_id, 0]
383 |                 prompt[getter_node_id]["inputs"]["value"] = [setter_node_id, 0]
384 |
385 |                 print(f"Connected getter {getter_node_id}: {prompt[getter_node_id]}")
386 |             if setter_node_id not in prompt:
387 |                 print(f"[WARN] Setter node id for key({getter_key}) not in prompt, setter_node_id: {setter_node_id}")
388 |         else:
389 |             print(f"[WARN] Komojini-ComfyUI-CustomNodes: There is no 'Setter' node in the workflow for key: {getter_key}")
390 |
391 |
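# Sketch of the rewiring above (same hypothetical ids as the example near the
# top of this section): after connect_to_from_nodes runs, the Getter entry ends
# up with
#
#     prompt["15"]["inputs"]["*"]     == ["12", 0]
#     prompt["15"]["inputs"]["value"] == ["12", 0]
#
# i.e. both candidate input slots reference output 0 of Setter node "12".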
392 |
393 | def workflow_update(json_data):
394 |     # Placeholder hook for prompt-time workflow updates; currently a no-op.
395 |     prompt = json_data["prompt"]
396 |     for v in prompt.values():
397 |         if "class_type" in v and "inputs" in v:
398 |             class_type = v["class_type"]
399 |             inputs = v["inputs"]
400 |             class_ = nodes.NODE_CLASS_MAPPINGS[class_type]
401 |             if getattr(class_, "OUTPUT_NODE", False):
402 |                 pass
403 |             if class_type == "Getter":
404 |                 key = inputs["key"]  # renamed from `id` to avoid shadowing the builtin
405 |
406 |
407 | def on_prompt_handler(json_data):
408 | try:
409 |         # Log unresolved nodes, then wire each Getter to its Setter.
410 | search_setter_getter_from_workflow(json_data)
411 | connect_to_from_nodes(json_data)
412 |
413 | except Exception as e:
414 | print_info(f"[WARN] Komojini-ComfyUI-CustomNodes: Error on prompt\n{e}")
415 | return json_data
416 |
417 | server.PromptServer.instance.add_on_prompt_handler(on_prompt_handler)
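# The PromptServer invokes every registered handler with the queued json_data
# (prompt plus extra_data) and uses the returned value as the prompt to
# validate and run, which is why on_prompt_handler always returns json_data,
# even when an error occurs.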
--------------------------------------------------------------------------------
/example_workflows/image_merger_example.json:
--------------------------------------------------------------------------------
1 | {
2 | "last_node_id": 17,
3 | "last_link_id": 21,
4 | "nodes": [
5 | {
6 | "id": 12,
7 | "type": "VAEEncode",
8 | "pos": [
9 | 1567,
10 | 620
11 | ],
12 | "size": {
13 | "0": 210,
14 | "1": 46
15 | },
16 | "flags": {},
17 | "order": 7,
18 | "mode": 0,
19 | "inputs": [
20 | {
21 | "name": "pixels",
22 | "type": "IMAGE",
23 | "link": 9
24 | },
25 | {
26 | "name": "vae",
27 | "type": "VAE",
28 | "link": 10
29 | }
30 | ],
31 | "outputs": [
32 | {
33 | "name": "LATENT",
34 | "type": "LATENT",
35 | "links": [
36 | 12
37 | ],
38 | "shape": 3,
39 | "slot_index": 0
40 | }
41 | ],
42 | "properties": {
43 | "Node name for S&R": "VAEEncode",
44 | "ttNbgOverride": {
45 | "color": "#222",
46 | "bgcolor": "#000",
47 | "groupcolor": "#444"
48 | }
49 | },
50 | "color": "#222",
51 | "bgcolor": "#000"
52 | },
53 | {
54 | "id": 6,
55 | "type": "ControlNetApply",
56 | "pos": [
57 | 1877,
58 | 841
59 | ],
60 | "size": {
61 | "0": 317.4000244140625,
62 | "1": 98
63 | },
64 | "flags": {},
65 | "order": 8,
66 | "mode": 0,
67 | "inputs": [
68 | {
69 | "name": "conditioning",
70 | "type": "CONDITIONING",
71 | "link": 7
72 | },
73 | {
74 | "name": "control_net",
75 | "type": "CONTROL_NET",
76 | "link": 4
77 | },
78 | {
79 | "name": "image",
80 | "type": "IMAGE",
81 | "link": 5
82 | }
83 | ],
84 | "outputs": [
85 | {
86 | "name": "CONDITIONING",
87 | "type": "CONDITIONING",
88 | "links": [
89 | 11
90 | ],
91 | "shape": 3,
92 | "slot_index": 0
93 | }
94 | ],
95 | "properties": {
96 | "Node name for S&R": "ControlNetApply",
97 | "ttNbgOverride": {
98 | "color": "#222",
99 | "bgcolor": "#000",
100 | "groupcolor": "#444"
101 | }
102 | },
103 | "widgets_values": [
104 | 1
105 | ],
106 | "color": "#222",
107 | "bgcolor": "#000"
108 | },
109 | {
110 | "id": 10,
111 | "type": "CLIPTextEncode",
112 | "pos": [
113 | 1437,
114 | 1258
115 | ],
116 | "size": {
117 | "0": 400,
118 | "1": 200
119 | },
120 | "flags": {},
121 | "order": 5,
122 | "mode": 0,
123 | "inputs": [
124 | {
125 | "name": "clip",
126 | "type": "CLIP",
127 | "link": 8
128 | }
129 | ],
130 | "outputs": [
131 | {
132 | "name": "CONDITIONING",
133 | "type": "CONDITIONING",
134 | "links": [
135 | 13
136 | ],
137 | "shape": 3,
138 | "slot_index": 0
139 | }
140 | ],
141 | "properties": {
142 | "Node name for S&R": "CLIPTextEncode",
143 | "ttNbgOverride": {
144 | "color": "#222",
145 | "bgcolor": "#000",
146 | "groupcolor": "#444"
147 | }
148 | },
149 | "widgets_values": [
150 | "low quality, worst quality, text, watermark"
151 | ],
152 | "color": "#222",
153 | "bgcolor": "#000"
154 | },
155 | {
156 | "id": 4,
157 | "type": "LineArtPreprocessor",
158 | "pos": [
159 | 1462,
160 | 729
161 | ],
162 | "size": {
163 | "0": 315,
164 | "1": 82
165 | },
166 | "flags": {},
167 | "order": 6,
168 | "mode": 0,
169 | "inputs": [
170 | {
171 | "name": "image",
172 | "type": "IMAGE",
173 | "link": 3
174 | }
175 | ],
176 | "outputs": [
177 | {
178 | "name": "IMAGE",
179 | "type": "IMAGE",
180 | "links": [
181 | 5
182 | ],
183 | "shape": 3,
184 | "slot_index": 0
185 | }
186 | ],
187 | "properties": {
188 | "Node name for S&R": "LineArtPreprocessor",
189 | "ttNbgOverride": {
190 | "color": "#222",
191 | "bgcolor": "#000",
192 | "groupcolor": "#444"
193 | }
194 | },
195 | "widgets_values": [
196 | "enable",
197 | 512
198 | ],
199 | "color": "#222",
200 | "bgcolor": "#000"
201 | },
202 | {
203 | "id": 9,
204 | "type": "CheckpointLoaderSimple",
205 | "pos": [
206 | 958,
207 | 934
208 | ],
209 | "size": {
210 | "0": 315,
211 | "1": 98
212 | },
213 | "flags": {},
214 | "order": 0,
215 | "mode": 0,
216 | "outputs": [
217 | {
218 | "name": "MODEL",
219 | "type": "MODEL",
220 | "links": [
221 | 14
222 | ],
223 | "shape": 3,
224 | "slot_index": 0
225 | },
226 | {
227 | "name": "CLIP",
228 | "type": "CLIP",
229 | "links": [
230 | 6,
231 | 8
232 | ],
233 | "shape": 3,
234 | "slot_index": 1
235 | },
236 | {
237 | "name": "VAE",
238 | "type": "VAE",
239 | "links": [
240 | 10,
241 | 16
242 | ],
243 | "shape": 3,
244 | "slot_index": 2
245 | }
246 | ],
247 | "properties": {
248 | "Node name for S&R": "CheckpointLoaderSimple",
249 | "ttNbgOverride": {
250 | "color": "#222",
251 | "bgcolor": "#000",
252 | "groupcolor": "#444"
253 | }
254 | },
255 | "widgets_values": [
256 | "SD1.5/chilloutmix_NiPrunedFp32Fix.safetensors"
257 | ],
258 | "color": "#222",
259 | "bgcolor": "#000"
260 | },
261 | {
262 | "id": 5,
263 | "type": "ControlNetLoader",
264 | "pos": [
265 | 1468,
266 | 873
267 | ],
268 | "size": {
269 | "0": 315,
270 | "1": 58
271 | },
272 | "flags": {},
273 | "order": 1,
274 | "mode": 0,
275 | "outputs": [
276 | {
277 | "name": "CONTROL_NET",
278 | "type": "CONTROL_NET",
279 | "links": [
280 | 4
281 | ],
282 | "shape": 3,
283 | "slot_index": 0
284 | }
285 | ],
286 | "properties": {
287 | "Node name for S&R": "ControlNetLoader",
288 | "ttNbgOverride": {
289 | "color": "#222",
290 | "bgcolor": "#000",
291 | "groupcolor": "#444"
292 | }
293 | },
294 | "widgets_values": [
295 | "ControlNet-v1-1/control_v11p_sd15_lineart.pth"
296 | ],
297 | "color": "#222",
298 | "bgcolor": "#000"
299 | },
300 | {
301 | "id": 7,
302 | "type": "CLIPTextEncode",
303 | "pos": [
304 | 1438,
305 | 985
306 | ],
307 | "size": {
308 | "0": 400,
309 | "1": 200
310 | },
311 | "flags": {},
312 | "order": 4,
313 | "mode": 0,
314 | "inputs": [
315 | {
316 | "name": "clip",
317 | "type": "CLIP",
318 | "link": 6
319 | }
320 | ],
321 | "outputs": [
322 | {
323 | "name": "CONDITIONING",
324 | "type": "CONDITIONING",
325 | "links": [
326 | 7
327 | ],
328 | "shape": 3,
329 | "slot_index": 0
330 | }
331 | ],
332 | "properties": {
333 | "Node name for S&R": "CLIPTextEncode",
334 | "ttNbgOverride": {
335 | "color": "#222",
336 | "bgcolor": "#000",
337 | "groupcolor": "#444"
338 | }
339 | },
340 | "widgets_values": [
341 | "A beautiful girl in city, night, masterpiece, 4k, dark background"
342 | ],
343 | "color": "#222",
344 | "bgcolor": "#000"
345 | },
346 | {
347 | "id": 13,
348 | "type": "KSampler",
349 | "pos": [
350 | 1887,
351 | 999
352 | ],
353 | "size": {
354 | "0": 315,
355 | "1": 262
356 | },
357 | "flags": {},
358 | "order": 9,
359 | "mode": 0,
360 | "inputs": [
361 | {
362 | "name": "model",
363 | "type": "MODEL",
364 | "link": 14
365 | },
366 | {
367 | "name": "positive",
368 | "type": "CONDITIONING",
369 | "link": 11
370 | },
371 | {
372 | "name": "negative",
373 | "type": "CONDITIONING",
374 | "link": 13
375 | },
376 | {
377 | "name": "latent_image",
378 | "type": "LATENT",
379 | "link": 12
380 | }
381 | ],
382 | "outputs": [
383 | {
384 | "name": "LATENT",
385 | "type": "LATENT",
386 | "links": [
387 | 15
388 | ],
389 | "shape": 3,
390 | "slot_index": 0
391 | }
392 | ],
393 | "properties": {
394 | "Node name for S&R": "KSampler",
395 | "ttNbgOverride": {
396 | "color": "#222",
397 | "bgcolor": "#000",
398 | "groupcolor": "#444"
399 | }
400 | },
401 | "widgets_values": [
402 | 1056212845093504,
403 | "randomize",
404 | 30,
405 | 8,
406 | "euler",
407 | "normal",
408 | 1
409 | ],
410 | "color": "#222",
411 | "bgcolor": "#000"
412 | },
413 | {
414 | "id": 17,
415 | "type": "PreviewImage",
416 | "pos": [
417 | 2535,
418 | 1095
419 | ],
420 | "size": [
421 | 405.241493286132,
422 | 388.9299367675776
423 | ],
424 | "flags": {},
425 | "order": 12,
426 | "mode": 0,
427 | "inputs": [
428 | {
429 | "name": "images",
430 | "type": "IMAGE",
431 | "link": 21
432 | }
433 | ],
434 | "properties": {
435 | "Node name for S&R": "PreviewImage",
436 | "ttNbgOverride": {
437 | "color": "#222",
438 | "bgcolor": "#000",
439 | "groupcolor": "#444"
440 | }
441 | },
442 | "color": "#222",
443 | "bgcolor": "#000"
444 | },
445 | {
446 | "id": 16,
447 | "type": "SaveImage",
448 | "pos": [
449 | 2985,
450 | 918
451 | ],
452 | "size": [
453 | 535.2378607177716,
454 | 570.2888000488274
455 | ],
456 | "flags": {},
457 | "order": 13,
458 | "mode": 0,
459 | "inputs": [
460 | {
461 | "name": "images",
462 | "type": "IMAGE",
463 | "link": 20
464 | }
465 | ],
466 | "properties": {
467 | "ttNbgOverride": {
468 | "color": "#222",
469 | "bgcolor": "#000",
470 | "groupcolor": "#444"
471 | }
472 | },
473 | "widgets_values": [
474 | "ComfyUI"
475 | ],
476 | "color": "#222",
477 | "bgcolor": "#000"
478 | },
479 | {
480 | "id": 15,
481 | "type": "Note",
482 | "pos": [
483 | 2982,
484 | 635
485 | ],
486 | "size": [
487 | 541.3411071777327,
488 | 99.29258911132774
489 | ],
490 | "flags": {},
491 | "order": 2,
492 | "mode": 0,
493 | "properties": {
494 | "text": ""
495 | },
496 | "widgets_values": [
497 | "(60%, 0);(50%, 100%) will generate line of\n\npoint 1: (50%, 0) -> (width * 0.5, 0)\npoint 2: (60%, 100%) -> (width * 0.6, height * 1.0)\n\nThe dividing line will be the connection of point 1 and point 2."
498 | ],
499 | "color": "#432",
500 | "bgcolor": "#653"
501 | },
502 | {
503 | "id": 1,
504 | "type": "ImageMerger",
505 | "pos": [
506 | 2986,
507 | 791
508 | ],
509 | "size": {
510 | "0": 315,
511 | "1": 102
512 | },
513 | "flags": {},
514 | "order": 11,
515 | "mode": 0,
516 | "inputs": [
517 | {
518 | "name": "images_1",
519 | "type": "IMAGE",
520 | "link": 19,
521 | "slot_index": 0
522 | },
523 | {
524 | "name": "images_2",
525 | "type": "IMAGE",
526 | "link": 18
527 | }
528 | ],
529 | "outputs": [
530 | {
531 | "name": "images",
532 | "type": "IMAGE",
533 | "links": [
534 | 20
535 | ],
536 | "shape": 3,
537 | "slot_index": 0
538 | },
539 | {
540 | "name": "num_images",
541 | "type": "INT",
542 | "links": null,
543 | "shape": 3
544 | }
545 | ],
546 | "properties": {
547 | "Node name for S&R": "ImageMerger",
548 | "ttNbgOverride": {
549 | "color": "#222",
550 | "bgcolor": "#000",
551 | "groupcolor": "#444"
552 | }
553 | },
554 | "widgets_values": [
555 | "(50%, 0);(60%, 100%)",
556 | 2
557 | ],
558 | "color": "#222",
559 | "bgcolor": "#000"
560 | },
561 | {
562 | "id": 3,
563 | "type": "LoadImage",
564 | "pos": [
565 | 2537,
566 | 610
567 | ],
568 | "size": {
569 | "0": 411.8054504394531,
570 | "1": 427.59283447265625
571 | },
572 | "flags": {},
573 | "order": 3,
574 | "mode": 0,
575 | "outputs": [
576 | {
577 | "name": "IMAGE",
578 | "type": "IMAGE",
579 | "links": [
580 | 3,
581 | 9,
582 | 19
583 | ],
584 | "shape": 3,
585 | "slot_index": 0
586 | },
587 | {
588 | "name": "MASK",
589 | "type": "MASK",
590 | "links": null,
591 | "shape": 3
592 | }
593 | ],
594 | "properties": {
595 | "Node name for S&R": "LoadImage",
596 | "ttNbgOverride": {
597 | "color": "#222",
598 | "bgcolor": "#000",
599 | "groupcolor": "#444"
600 | }
601 | },
602 | "widgets_values": [
603 | "1111example (1).png",
604 | "image"
605 | ],
606 | "color": "#222",
607 | "bgcolor": "#000"
608 | },
609 | {
610 | "id": 14,
611 | "type": "VAEDecode",
612 | "pos": [
613 | 2123,
614 | 704
615 | ],
616 | "size": {
617 | "0": 210,
618 | "1": 46
619 | },
620 | "flags": {},
621 | "order": 10,
622 | "mode": 0,
623 | "inputs": [
624 | {
625 | "name": "samples",
626 | "type": "LATENT",
627 | "link": 15
628 | },
629 | {
630 | "name": "vae",
631 | "type": "VAE",
632 | "link": 16
633 | }
634 | ],
635 | "outputs": [
636 | {
637 | "name": "IMAGE",
638 | "type": "IMAGE",
639 | "links": [
640 | 18,
641 | 21
642 | ],
643 | "shape": 3,
644 | "slot_index": 0
645 | }
646 | ],
647 | "properties": {
648 | "Node name for S&R": "VAEDecode",
649 | "ttNbgOverride": {
650 | "color": "#222",
651 | "bgcolor": "#000",
652 | "groupcolor": "#444"
653 | }
654 | },
655 | "color": "#222",
656 | "bgcolor": "#000"
657 | }
658 | ],
659 | "links": [
660 | [
661 | 3,
662 | 3,
663 | 0,
664 | 4,
665 | 0,
666 | "IMAGE"
667 | ],
668 | [
669 | 4,
670 | 5,
671 | 0,
672 | 6,
673 | 1,
674 | "CONTROL_NET"
675 | ],
676 | [
677 | 5,
678 | 4,
679 | 0,
680 | 6,
681 | 2,
682 | "IMAGE"
683 | ],
684 | [
685 | 6,
686 | 9,
687 | 1,
688 | 7,
689 | 0,
690 | "CLIP"
691 | ],
692 | [
693 | 7,
694 | 7,
695 | 0,
696 | 6,
697 | 0,
698 | "CONDITIONING"
699 | ],
700 | [
701 | 8,
702 | 9,
703 | 1,
704 | 10,
705 | 0,
706 | "CLIP"
707 | ],
708 | [
709 | 9,
710 | 3,
711 | 0,
712 | 12,
713 | 0,
714 | "IMAGE"
715 | ],
716 | [
717 | 10,
718 | 9,
719 | 2,
720 | 12,
721 | 1,
722 | "VAE"
723 | ],
724 | [
725 | 11,
726 | 6,
727 | 0,
728 | 13,
729 | 1,
730 | "CONDITIONING"
731 | ],
732 | [
733 | 12,
734 | 12,
735 | 0,
736 | 13,
737 | 3,
738 | "LATENT"
739 | ],
740 | [
741 | 13,
742 | 10,
743 | 0,
744 | 13,
745 | 2,
746 | "CONDITIONING"
747 | ],
748 | [
749 | 14,
750 | 9,
751 | 0,
752 | 13,
753 | 0,
754 | "MODEL"
755 | ],
756 | [
757 | 15,
758 | 13,
759 | 0,
760 | 14,
761 | 0,
762 | "LATENT"
763 | ],
764 | [
765 | 16,
766 | 9,
767 | 2,
768 | 14,
769 | 1,
770 | "VAE"
771 | ],
772 | [
773 | 18,
774 | 14,
775 | 0,
776 | 1,
777 | 1,
778 | "IMAGE"
779 | ],
780 | [
781 | 19,
782 | 3,
783 | 0,
784 | 1,
785 | 0,
786 | "IMAGE"
787 | ],
788 | [
789 | 20,
790 | 1,
791 | 0,
792 | 16,
793 | 0,
794 | "IMAGE"
795 | ],
796 | [
797 | 21,
798 | 14,
799 | 0,
800 | 17,
801 | 0,
802 | "IMAGE"
803 | ]
804 | ],
805 | "groups": [],
806 | "config": {},
807 | "extra": {},
808 | "version": 0.4
809 | }
--------------------------------------------------------------------------------
/js/komojini_widgets.js:
--------------------------------------------------------------------------------
1 |
2 |
3 | import { app } from '../../scripts/app.js'
4 | import { api } from '../../scripts/api.js'
5 |
6 | import { log } from './comfy_shared.js'
7 | import * as shared from './comfy_shared.js'
8 | import { DEBUG_STRING, findWidgetByName, isSetter, isGetter, setColorAndBgColor, enableOnlyRelatedNodes } from './utils.js'
9 | import { executeAndWaitForTargetNode } from './komojini.chain.js'
10 |
11 |
12 | const END_EMOJI = '🔥';
13 |
14 | const newTypes = ['BUTTON']
15 |
16 | const _drawImage = (node, imageNode, canvasEl, ctx) => {
17 |
18 | var x=0, y=0, w=imageNode.width, h=imageNode.height;
19 |
20 | const size = node.properties.size;
21 |
22 | canvasEl.width = size[0];
23 | canvasEl.height = size[1];
24 |
25 | canvasEl.style = `width: ${size[0]}px; height: ${size[1]}px;`
26 | canvasEl.style.border = "1px dotted gray"
27 |
28 | if (!imageNode.width) {
29 | return;
30 | }
31 |
32 | else if (imageNode.width / imageNode.height > canvasEl.width/canvasEl.height) {
33 | y = 0;
34 | h = imageNode.height
35 | w = imageNode.height * canvasEl.width / canvasEl.height
36 | x = (imageNode.width - w) / 2
37 | } else {
38 | x = 0;
39 | w = imageNode.width
40 | h = imageNode.width * canvasEl.height / canvasEl.width
41 | y = (imageNode.height - h) / 2
42 | }
43 | ctx.drawImage(imageNode, x, y, w, h, 0, 0, canvasEl.width, canvasEl.height)
44 | }
45 |
46 | function drawAllLines(node, draglines, imageNode, canvas) {
47 | var ctx
48 |
49 | ctx = canvas.getContext("2d");
50 |
51 | ctx.clearRect(0, 0, canvas.width, canvas.height)
52 |
53 | _drawImage(node, imageNode, canvas, ctx);
54 |
55 | for (const line of draglines) {
56 |         var prevX = null, prevY = null;
57 | for (const pos of line) {
58 | var newX = pos[0];
59 | var newY = pos[1];
60 |
61 | if (prevX && prevY) {
62 | drawArrow(prevX, prevY, newX, newY, ctx);
63 | } else {
64 | ctx.beginPath();
65 | ctx.arc(newX, newY, 4, 0, 2 * Math.PI);
66 | ctx.fillStyle = 'red';
67 | ctx.fill();
68 | }
69 | prevX = newX;
70 | prevY = newY;
71 | }
72 | }
73 | }
74 |
75 | function drawArrow(x1, y1, x2, y2, ctx) {
76 | // Calculate the arrow direction
77 | const direction = Math.atan2(y2 - y1, x2 - x1);
78 |
79 | if (!(x1 && y1 && x2 && y2)) {
80 | return;
81 | } else if ((x1 == x2 && y1 == y2)) {
82 | return;
83 | }
84 |
85 | // Draw a line
86 | ctx.beginPath();
87 | ctx.moveTo(x1, y1);
88 | ctx.lineTo(x2, y2);
89 | ctx.stroke();
90 |
91 | // Draw arrowhead
92 | const arrowheadSize = 14;
93 | ctx.beginPath();
94 | ctx.moveTo(x2, y2);
95 | ctx.lineTo(
96 | x2 - arrowheadSize * Math.cos(direction - Math.PI / 6),
97 | y2 - arrowheadSize * Math.sin(direction - Math.PI / 6)
98 | );
99 | ctx.lineTo(
100 | x2 - arrowheadSize * Math.cos(direction + Math.PI / 6),
101 | y2 - arrowheadSize * Math.sin(direction + Math.PI / 6)
102 | );
103 | ctx.closePath();
104 | ctx.fill();
105 |
106 | }
107 |
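// Usage sketch (illustrative values): drawing a single dragline segment on an
// existing 2D context, as done for each tracking-point preview above:
//
//     const ctx = canvasEl.getContext('2d');
//     drawArrow(40, 40, 120, 90, ctx); // line from (40, 40) plus an arrowhead at (120, 90)
//
// Missing or coincident endpoints are skipped by the early returns in drawArrow.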
108 | function getLast(array) {
109 | return array[array.length -1]
110 | }
111 |
112 |
113 | const komojini_widgets = {
114 | name: 'komojini.widgets',
115 |
116 | init: async () => {
117 | log('Registering komojini.widgets')
118 | try {
119 |             const res = await api.fetchApi('/komojini/debug');
120 |             const msg = await res.json();  // res.json() returns a Promise
121 |             if (!window.komojini) {
122 |                 window.komojini = {};
123 |             }
124 |             window.komojini.DEBUG = msg.DEBUG;
125 |         } catch (e) {
126 |             console.error('Error', e);
127 |         }
128 | },
129 |
130 | setup: () => {
131 | app.ui.settings.addSetting({
132 | id: "komojini.NodeAutoColor",
133 | name: "🔥 Auto color nodes by name & output type",
134 | type: "boolean",
135 | defaultValue: false,
136 | });
137 | },
138 | /**
139 | * @param {import("./types/comfy").NodeType} nodeType
140 | * @param {import("./types/comfy").NodeDef} nodeData
141 | * @param {import("./types/comfy").App} app
142 | */
143 | async beforeRegisterNodeDef(nodeType, nodeData, app) {
144 | const node = this;
145 |
146 | const addFlowRunButton = function(node) {
147 | const batchSizeWidget = findWidgetByName(node, "batch_size")
148 | if (batchSizeWidget) {
149 | shared.hideWidgetForGood(node, batchSizeWidget)
150 | }
151 |
152 | const run_button = node.addWidget(
153 | 'button',
154 | `Queue`,
155 | 'queue',
156 | () => {
157 | app.canvas.setDirty(true);
158 |
159 | preview.value = 'Flow running...'
160 | return (async _ => {
161 | log('FlowBuilder Queue button pressed')
162 |                     // TODO: replace with a more targeted solution than re-enabling every node
163 | app.graph._nodes.forEach((node) => {
164 | node.mode = 0;
165 | })
166 | await executeAndWaitForTargetNode(app, node);
167 | log('Queue finished')
168 | preview.value = 'Queue finished!'
169 | await new Promise(re => setTimeout(re, 1000));
170 |
171 | })();
172 | }
173 | )
174 |
175 | const preview = node.addCustomWidget(DEBUG_STRING('Preview', ''))
176 | preview.parent = node
177 |
178 |
179 | return run_button;
180 | }
181 |
182 | const addAdvancedFlowWidgets = function(node) {
183 | const batchSizeWidget = findWidgetByName(node, "batch_size")
184 |
185 | const run_button = node.addWidget(
186 | 'button',
187 | `Queue`,
188 | 'queue',
189 | () => {
190 | app.canvas.setDirty(true);
191 |
192 | return (async _ => {
193 |                         log('FlowBuilder Queue button pressed')
194 |
195 |
196 |                         preview.value = ``
197 |
198 | try {
199 |                             if (disableToggleWidget?.value) {
200 |                                 await app.queuePrompt(0, 1);
201 |                                 const promptId = await promptIdPromise; // assumed provided by the chain helpers
202 |                                 await waitForQueueEnd(promptId); // (not defined in this file)
203 |
204 | } else {
205 | const totalBatchSize = batchSizeWidget.value;
206 | var currBatchSize = 0;
207 |                                     // TODO: replace with a more targeted solution than re-enabling every node
208 | app.graph._nodes.forEach((node) => {
209 | node.mode = 0;
210 | })
211 | while (autoQueueToggleWidget.value || currBatchSize < totalBatchSize) {
212 | if (autoQueueToggleWidget.value) {
213 | preview.value = ``
214 | currBatchSize = totalBatchSize;
215 | } else {
216 | currBatchSize += 1;
217 |                                         preview.value = `${currBatchSize}/${totalBatchSize} Running...\n`
218 | }
219 | await executeAndWaitForTargetNode(app, node);
220 | log('Queue finished')
221 | await new Promise(re => setTimeout(re, 500));
222 | }
223 | }
224 | } catch (error) {
225 | console.error(`Error while running queue: ${error}`)
226 |
227 | } finally {
228 |                                 preview.value = '\n'
229 | }
230 |
231 | })();
232 | }
233 | )
234 |
235 | const preview = node.addCustomWidget(DEBUG_STRING('Preview', ''))
236 | preview.parent = node
237 |
238 | const disableToggleWidget = node.addWidget("toggle", "Disable Unrelated Nodes", false, "", { "on": 'yes', "off": 'no' });
239 |
240 | disableToggleWidget.doModeChange = (forceValue, skipOtherNodeCheck) => {
241 | console.log(`toggle changed`)
242 |
243 | const toggleValue = disableToggleWidget.value;
244 |
245 | if (toggleValue) {
246 | disableToggleWidget.notAlreadyMutedBlacklist = enableOnlyRelatedNodes(node)
247 | } else if (disableToggleWidget.notAlreadyMutedBlacklist) {
248 | for (const node of disableToggleWidget.notAlreadyMutedBlacklist) node.mode = 0;
249 | } else {
250 | app.graph._nodes.forEach((node) => {
251 | node.mode = 0;
252 | })
253 | }
254 | }
255 | disableToggleWidget.callback = () => {
256 | disableToggleWidget.doModeChange();
257 | };
258 |
259 | const autoQueueToggleWidget = node.addWidget("toggle", "Auto Queue", false, "", { "on": 'yes', "off": 'no' });
260 |
261 |
262 | node.setSize(node.computeSize());
263 |
264 |
265 | }
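        // Behaviour of the advanced widgets above: with "Auto Queue" enabled the
        // while-loop keeps re-running the flow until the toggle is switched off;
        // otherwise the flow is queued batch_size times, updating the preview
        // counter per run. "Disable Unrelated Nodes" mutes every node outside the
        // target's upstream graph via enableOnlyRelatedNodes and restores the
        // previously active nodes when toggled back off.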
266 |
267 | let has_custom = false;
268 | if (nodeData.input && nodeData.input.required) {
269 | for (const i of Object.keys(nodeData.input.required)) {
270 | const input_type = nodeData.input.required[i][0];
271 |
272 | if (newTypes.includes(input_type)) {
273 | has_custom = true
274 | break
275 | }
276 | }
277 | }
278 | if (has_custom) {
279 | const onNodeCreated = nodeType.prototype.onNodeCreated
280 | nodeType.prototype.onNodeCreated = function() {
281 | const r = onNodeCreated ? onNodeCreated.apply(this, arguments) : undefined;
282 | this.serialize_widgets = true;
283 | this.setSize?.(this.computeSize());
284 |
285 | this.onRemoved = function() {
286 | shared.cleanupNode(this);
287 | }
288 | return r;
289 | }
290 |
291 | //- Extra menus
292 | const origGetExtraMenuOptions = nodeType.prototype.getExtraMenuOptions
293 | nodeType.prototype.getExtraMenuOptions = function (_, options) {
294 | const r = origGetExtraMenuOptions
295 | ? origGetExtraMenuOptions.apply(this, arguments)
296 | : undefined
297 | if (this.widgets) {
298 | let toInput = []
299 | let toWidget = []
300 | for (const w of this.widgets) {
301 | if (w.type === shared.CONVERTED_TYPE) {
302 | //- This is already handled by widgetinputs.js
303 | // toWidget.push({
304 | // content: `Convert ${w.name} to widget`,
305 | // callback: () => shared.convertToWidget(this, w),
306 | // });
307 | } else if (newTypes.includes(w.type)) {
308 | const config = nodeData?.input?.required[w.name] ||
309 | nodeData?.input?.optional?.[w.name] || [w.type, w.options || {}]
310 |
311 | toInput.push({
312 | content: `Convert ${w.name} to input`,
313 | callback: () => shared.convertToInput(this, w, config),
314 | })
315 | }
316 | }
317 | if (toInput.length) {
318 | options.push(...toInput, null)
319 | }
320 |
321 | if (toWidget.length) {
322 | options.push(...toWidget, null)
323 | }
324 | }
325 |
326 | return r
327 | }
328 | }
329 |
330 | log("Start setting komojini extension", nodeData.name)
331 |
332 | // Extending Python Nodes
333 | if (nodeData.name.endsWith("Getter")) {
334 | const onNodeCreated = nodeType.prototype.onNodeCreated
335 | nodeType.prototype.onNodeCreated = function () {
336 | const r = onNodeCreated
337 | ? onNodeCreated.apply(this, arguments)
338 | : undefined;
339 |
340 |
341 | var nameSuffix = "";
342 | if (nodeData.name.includes("Cache")) {
343 | nameSuffix = " (cached)";
344 | }
345 |
346 | this.widgets = [];
347 |
348 | this.addWidget(
349 | "combo",
350 | "key",
351 | "",
352 | (e) => {
353 | this.onRename();
354 | },
355 | {
356 | values: () => {
357 | const setterNodes = this.graph._nodes.filter((otherNode) => isSetter(otherNode));
358 | return setterNodes.map((otherNode) => otherNode.widgets[0].value).sort();
359 | }
360 | }
361 | );
362 |
363 | this.findSetter = function(graph) {
364 | const name = this.widgets[0].value;
365 | return graph._nodes.find(otherNode => isSetter(otherNode) && otherNode.widgets[0].value === name && name !== '');
366 | }
367 |
368 | this.setName = function(name) {
369 | node.widgets[0].value = name;
370 | node.onRename();
371 | node.serialize();
372 | }
373 |
374 | this.setType = function(type) {
375 | this.outputs[0].name = type;
376 | this.outputs[0].type = type;
377 | // this.validateLinks();
378 | }
379 |
380 | this.onRename = function() {
381 | const setter = this.findSetter(this.graph);
382 | if (setter) {
383 | let linkType = (setter.inputs[0].type);
384 |
385 | this.setType(linkType);
386 | this.title = "Get_" + setter.widgets[0].value + nameSuffix;
387 |
388 | if (app.ui.settings.getSettingValue("komojini.NodeAutoColor")){
389 | setColorAndBgColor.call(this, linkType);
390 | }
391 |
392 | } else {
393 | this.setType('*');
394 | }
395 | }
396 |
397 | this.size = this.computeSize();
398 |
399 | return r;
400 |
401 | }
402 |
403 |
404 | }
405 | else if (nodeData.name.endsWith("Setter")) {
406 | const onNodeCreated = nodeType.prototype.onNodeCreated
407 | nodeType.prototype.onNodeCreated = function () {
408 | const r = onNodeCreated
409 | ? onNodeCreated.apply(this, arguments)
410 | : undefined;
411 | const keyValue = findWidgetByName(this, "key").value ?? '';
412 | const node = this;
413 |
414 | if (!this.properties) {
415 | this.properties = {
416 | "previousName": "",
417 | };
418 | }
419 |
420 |
421 | this.defaultVisibility = true;
422 | this.serialize_widgets = true;
423 | this.properties.showOutputText = true;
424 |
425 | this.widgets = [];
426 | this.inputs = [];
427 |
428 | this.addInput("value", "*");
429 |
430 | this.addWidget(
431 | "text",
432 | "key",
433 | keyValue,
434 | (s, t, u, v, x) => {
435 | // node.validateName(node.graph);
436 | if(this.widgets[0].value !== ''){
437 | var preFix = ""
438 | if (nodeData.name.includes("adv")) {
439 | preFix = "🔥(adv) "
440 | }
441 | else if (nodeData.name.includes("Flow")) {
442 | preFix = "🔥 "
443 | }
444 | this.title = preFix + "Set_" + this.widgets[0].value;
445 | }
446 | this.update();
447 | this.properties.previousName = this.widgets[0].value;
448 | },
449 | {}
450 | )
451 |
452 | if (nodeData.name.includes("FlowBuilder")) {
453 |
454 | if ( nodeData.name.includes("adv") ) {
455 | addAdvancedFlowWidgets(this);
456 | } else {
457 | addFlowRunButton(this);
458 | }
459 | }
460 |
461 | this.findGetters = function(graph, checkForPreviousName) {
462 | const name = checkForPreviousName ? this.properties.previousName : this.widgets[0].value;
463 | return graph._nodes.filter(otherNode => isGetter(otherNode) && otherNode.widgets[0].value === name && name !== '' );
464 | }
465 |
466 | this.update = function() {
467 | if (!node.graph) {
468 | return;
469 | }
470 |
471 | try {
472 | const getters = this.findGetters(node.graph);
473 | getters.forEach(getter => {
474 | if (getter.setType) {
475 | getter.setType?.(this.inputs[0].type);
476 | } else {
477 | setTypeOtherNode(getter, this.inputs[0].type);
478 | }
479 | });
480 |
481 | if (this.widgets[0].value) {
482 | const gettersWithPreviousName = this.findGetters(node.graph, true);
483 | gettersWithPreviousName.forEach(getter => {
484 |
485 | if (getter.setName ) {
486 | getter.setName(this.widgets[0].value);
487 | } else {
488 | getter.widgets[0].value = this.widgets[0].value;
489 | }
490 | });
491 | }
492 |
493 | const allGetters = node.graph._nodes.filter(otherNode => otherNode.type === "GetNode");
494 | allGetters.forEach(otherNode => {
495 | if (otherNode.setComboValues) {
496 | otherNode.setComboValues();
497 | }
498 | });
499 | } catch (error) {
500 | console.error(`Failed to update Setter: ${error}`)
501 | }
502 | }
503 |
504 | this.validateName = function(graph) {
505 | let widgetValue = node.widgets[0].value;
506 |
507 | if (widgetValue !== '') {
508 | let tries = 0;
509 | const existingValues = new Set();
510 |
511 | graph._nodes.forEach(otherNode => {
512 | if (otherNode !== this && isSetter(otherNode)) {
513 | existingValues.add(otherNode.widgets[0].value);
514 | }
515 | });
516 |
517 | while (existingValues.has(widgetValue)) {
518 | widgetValue = node.widgets[0].value + "_" + tries;
519 | tries++;
520 | }
521 |
522 | node.widgets[0].value = widgetValue;
523 | this.update();
524 | }
525 | }
526 |
527 | this.onAdded = function(graph) {
528 | this.validateName(graph);
529 | }
530 |
531 | this.onConnectionsChange = function(
532 | slotType, //1 = input, 2 = output
533 | slot,
534 | isChangeConnect,
535 | link_info,
536 | output
537 | ) {
538 | console.log(`Setter node connection`)
539 | try {
540 | //On Disconnect
541 | if (slotType == 1 && !isChangeConnect) {
542 | if(this.inputs[slot].name === ''){
543 | this.inputs[slot].type = '*';
544 | // this.inputs[slot].name = 'value';
545 | this.title = "Setter"
546 | }
547 | }
548 | if (slotType == 2 && !isChangeConnect) {
549 | this.outputs[slot].type = '*';
550 | this.outputs[slot].name = '*';
551 |
552 | }
553 | //On Connect
554 | if (link_info && node.graph && slotType == 1 && isChangeConnect) {
555 | console.log("setternode connected");
556 | const fromNode = node.graph._nodes.find((otherNode) => otherNode.id == link_info.origin_id);
557 |
558 | if (fromNode && fromNode.outputs && fromNode.outputs[link_info.origin_slot]) {
559 | const type = fromNode.outputs[link_info.origin_slot].type;
560 |
561 | if (this.title === "Setter" && nodeData.name == "Setter"){
562 | this.title = "Set_" + type;
563 | }
564 | if (this.widgets[0].value === '*'){
565 | this.widgets[0].value = type
566 | }
567 |
568 | this.validateName(node.graph);
569 | this.inputs[0].type = type;
570 | // this.inputs[0].name = type;
571 |
572 | if (app.ui.settings.getSettingValue("komojini.NodeAutoColor")){
573 | setColorAndBgColor.call(this, type);
574 | }
575 | } else {
576 | alert("Error: Set node input undefined. Most likely you're missing custom nodes");
577 | }
578 | }
579 | if (link_info && node.graph && slotType == 2 && isChangeConnect) {
580 | const fromNode = node.graph._nodes.find((otherNode) => otherNode.id == link_info.origin_id);
581 |
582 | if (fromNode && fromNode.inputs && fromNode.inputs[link_info.origin_slot]) {
583 | const type = fromNode.inputs[link_info.origin_slot].type;
584 |
585 | this.outputs[0].type = type;
586 | // this.outputs[0].name = type;
587 | } else {
588 | alert("Error: Get Set node output undefined. Most likely you're missing custom nodes");
589 | }
590 | }
591 | }
592 | catch (error) {
593 | console.error(`Error onConnectionChange in Setter ${error}`)
594 | }
595 | //Update either way
596 | // this.update();
597 | }
598 |
599 | this.clone = function () {
600 | const cloned = nodeType.prototype.clone.apply(this);
601 | cloned.inputs[0].name = 'value';
602 | cloned.inputs[0].type = '*';
603 | cloned.value = '';
604 | cloned.properties.previousName = '';
605 | cloned.size = cloned.computeSize();
606 | return cloned;
607 | };
608 |
609 | this.onRemoved = () => {
610 | const allGetters = this.graph._nodes.filter((otherNode) => isGetter(otherNode));
611 | allGetters.forEach((otherNode) => {
612 | if (otherNode.setComboValues) {
613 | otherNode.setComboValues([this]);
614 | }
615 | })
616 | shared.cleanupNode(this)
617 | }
618 | this.inputs[0].name = "value";
619 |
620 | this.size = this.computeSize();
621 |
622 | return r;
623 | }
624 |         } else if (nodeData.name.startsWith('FlowBuilder') || nodeData.name.endsWith('FlowBuilder')) {
625 | const onNodeCreated = nodeType.prototype.onNodeCreated
626 | nodeType.prototype.onNodeCreated = function () {
627 | const r = onNodeCreated
628 | ? onNodeCreated.apply(this, arguments)
629 | : undefined
630 |
631 | this.changeMode(LiteGraph.ALWAYS);
632 |
633 | if ( nodeData.name.includes("adv")) {
634 | console.log(`Advanced Flowbuilder added.`)
635 | addAdvancedFlowWidgets(this);
636 | } else {
637 | console.log(`Flowbuilder added.`)
638 | addFlowRunButton(this);
639 | }
640 |
641 | this.onRemoved = () => {
642 | shared.cleanupNode(this)
643 | app.canvas.setDirty(true)
644 | }
645 |
646 | return r;
647 | }
648 | }
649 | },
650 | nodeCreated(node, app) {
651 | if (node.comfyClass == "DragNUWAImageCanvas") {
652 | if (!node.properties) {
653 | node.properties = {}
654 | }
655 |
656 | const sizes = [
657 | "576x320",
658 | "320x576",
659 | "512x512",
660 | ];
661 |
662 | console.log(`DragNUWAImageCanvas Created`);
663 | const w = findWidgetByName(node, "image");
664 | const dragTextWidget = findWidgetByName(node, "tracking_points")
665 |
666 | shared.hideWidgetForGood(node, w)
667 |
668 | node.addWidget("button", "Get Drag Values", "", () => {
669 | openEditorDialog(node)
670 | })
671 |
672 | console.log(node)
673 |
674 | Object.defineProperty(node.properties, "draglines", {
675 | set(v) {
676 | const newDraglines = [];
677 |
678 | for (var i = 0; i < v.length; i++) {
679 | if (i < v.length - 1 && v[i].length > 1) {
680 | newDraglines.push(v[i])
681 | } else if (i === v.length - 1) {
682 | newDraglines.push(v[i])
683 | }
684 | }
685 | node.properties._draglines = newDraglines;
686 | },
687 | get() {
688 | return node.properties._draglines ?? [];
689 | }
690 | });
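            // The setter above prunes every completed dragline that holds only a
            // single point, while always keeping the last (possibly in-progress)
            // line, so stray clicks do not accumulate one-point lines in
            // tracking_points.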
691 |
692 | Object.defineProperty(w, 'value', {
693 | set(v) {
694 | if(v != '[IMAGE DATA]' && v != "") {
695 | const img = new Image();
696 | img.onload = function() {
697 | console.log(`Set Image value of size(${img.width}x${img.height})`)
698 | }
699 | img.src = v;
700 | w._value = v;
701 | }
702 | },
703 | get() {
704 | const stackTrace = new Error().stack;
705 | if(!stackTrace.includes('draw') && !stackTrace.includes('graphToPrompt') && stackTrace.includes('app.js')) {
706 | return "[IMAGE DATA]";
707 | }
708 | else {
709 | return w._value;
710 | }
711 | },
712 | });
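            // The property above acts as a serialization guard: the canvas snapshot
            // is a large base64 string, so draw/graphToPrompt callers receive the
            // real value while other reads from app.js (e.g. workflow serialization)
            // only see the short "[IMAGE DATA]" placeholder.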
713 |
714 | Object.defineProperty(node.properties, "size", {
715 | set(v) {
716 | node.properties._size = v;
717 | },
718 | get() {
719 | if (node.properties._size) {
720 | return node.properties._size;
721 | } else {
722 | return ["576", "320"]
723 | }
724 | }
725 | })
726 |
727 | let set_img_act = (v) => {
728 | console.log(`set_img_act`)
729 |
730 | node._img = v;
731 |
732 | };
733 |
734 | Object.defineProperty(node, "imgs", {
735 | set(v) {
736 | if (!v[0].complete) {
737 | let orig_onload = v[0].onload;
738 | v[0].onload = function(v2) {
739 | if(orig_onload)
740 | orig_onload();
741 | set_img_act(v);
742 | };
743 | }
744 | else {
745 | set_img_act(v);
746 | }
747 | },
748 | get() {
749 | if(node._img == undefined && w.value != '') {
750 | node._img = [new Image()];
751 | if(w.value && w.value != '[IMAGE DATA]')
752 | node._img[0].src = w.value;
753 | }
754 |
755 | return node._img;
756 | }
757 | });
758 |
759 |
760 | node.closeEditorDialog = function(accept) {
761 | node.properties.dialogOpened = false;
762 |
763 | if (accept) {
764 |
765 | }
766 | node.dialog.close()
767 | }
768 |
769 | const openEditorDialog = function(node) {
770 | node.properties.dialogOpened = true;
771 | node.dialog = new app.ui.dialog.constructor()
772 |
773 | // node.dialog.element.style.height = "90%";
774 | // node.dialog.element.style.width = "90%";
775 | // node.dialog.element.style.display = "block";
776 |
777 | console.log(`Setup dialog size: ${node.dialog.element.width}, ${node.dialog.element.height}`)
778 |
779 | function setTrackingPoints() {
780 | console.log('setTrackingPoints')
781 | draglineTextEl.value = JSON.stringify(node.properties.draglines, null, 0)
782 | }
783 |
784 | function setTrackingPointsWidget() {
785 | console.log('setTrackingPointsWidget')
786 | dragTextWidget.value = JSON.stringify(node.properties.draglines, null, 0)
787 | }
788 |
789 | // node.dialog.element.classList.add('comfy-settings')
790 | const closeButton = node.dialog.element.querySelector('button')
791 | closeButton.textContent = 'CANCEL'
792 | const saveButton = document.createElement('button')
793 | saveButton.textContent = 'SAVE'
794 | saveButton.onclick = () => {
795 | node.closeEditorDialog(true)
796 | // _refreshCanvas()
797 |
798 | node.imgs = [imageNode];
799 |
800 | setTrackingPoints();
801 | setTrackingPointsWidget();
802 |
803 | if (canvasEl) {
804 | const ctx = canvasEl.getContext('2d');
805 | _drawImage(node, imageNode, canvasEl, ctx);
806 | const base64Img = canvasEl.toDataURL('image/png');
807 | w.value = base64Img;
808 | }
809 | }
810 | closeButton.onclick = () => {
811 | node.closeEditorDialog(false)
812 | }
813 | closeButton.before(saveButton)
814 |
815 | node.properties.newline = true
816 |
817 | const container = document.createElement("div")
818 |
819 | container.id = "drag-image-container";
820 | // container.style = "display: flex; flex-wrap: wrap; gap: 10px; justify-content: space-around;"
821 |
822 | Object.assign(container.style, {
823 | display: 'flex',
824 | gap: '10px',
825 | // flexWrap: 'wrap',
826 | flexDirection: 'row',
827 | // justifyContent: 'space-around',
828 | })
829 |
830 |
831 | // Object.assign(container.style, {
832 | // display: 'flex',
833 | // gap: '10px',
834 | // flexDirection: 'column',
835 | // })
836 |
837 | const imageNode = document.createElement("img")
838 | if (node.imgs) {
839 | imageNode.src = node.imgs[0].src
840 | imageNode.width = node.imgs[0].width
841 | imageNode.height = node.imgs[0].height
842 | }
843 | imageNode.id = "canvasImage"
844 |
845 |
846 | const canvasEl = document.createElement("canvas")
847 | canvasEl.id = "imageCanvas"
848 |
849 | Object.assign(canvasEl, {
850 | height: `${node.properties.size[1]}px`,
851 | width: `${node.properties.size[0]}px`,
852 | style: "border: 1px dotted gray;",
853 | })
854 |
855 | node.properties.canvas = canvasEl;
856 | container.append(canvasEl)
857 |
858 |
859 | const _refreshCanvas = () => {
860 |
861 | shared.infoLogger(`Update Dialog Canvas`)
862 |
863 | node.properties.newline = true;
864 |
865 | var ctx
866 | // const canvasEl = document.getElementById("imageCanvas")
867 | // const imageNode = document.getElementById("canvasImage")
868 |
869 | if (canvasEl.getContext) {
870 | ctx = canvasEl.getContext("2d")
871 | }
872 |
873 | var x=0, y=0, w=imageNode.width, h=imageNode.height;
874 | node.properties.size = sizeSelectorEl.value.split("x");
875 | // node.properties.size = document.getElementById("sizeSelector").value.split("x");
876 | const size = node.properties.size;
877 |
878 | console.log(`Setting canvas size: ${node.properties.size}`)
879 |
880 | canvasEl.width = size[0];
881 | canvasEl.height = size[1];
882 |
883 | canvasEl.style = `width: ${size[0]}px; height: ${size[1]}px;`
884 | canvasEl.style.border = "1px dotted gray"
885 |
886 | if (!imageNode.width) {
887 | console.warn(`No Image node for updating canvas.`)
888 | }
889 |
890 | else if (imageNode.width / imageNode.height > canvasEl.width/canvasEl.height) {
891 | y = 0;
892 | h = imageNode.height
893 | w = imageNode.height * canvasEl.width / canvasEl.height
894 | x = (imageNode.width - w) / 2
895 | } else {
896 | x = 0;
897 | w = imageNode.width
898 | h = imageNode.width * canvasEl.height / canvasEl.width
899 | y = (imageNode.height - h) / 2
900 | }
901 | ctx.drawImage(imageNode, x, y, w, h, 0, 0, canvasEl.width, canvasEl.height)
902 |
903 | node.properties.draglines = [];
904 | console.log('canvas updated', canvasEl)
905 | }
906 |
907 |
908 | const draglineTextEl = document.createElement("textarea")
909 | draglineTextEl.id = "draglinetext"
910 | // draglineTextEl.style.height = draglineTextEl.scrollHeight + 'px'; // Set the height to the scrollHeight
911 | draglineTextEl.readOnly = true;
912 |
913 | function _undo() {
914 | const newDraglines = [...node.properties.draglines];
915 |
916 | const lastLine = getLast(newDraglines);
917 | lastLine.pop();
918 | if (lastLine.length === 0) {
919 | newDraglines.pop();
920 | }
921 | node.properties.draglines = [...newDraglines];
922 | drawAllLines(node, node.properties.draglines, imageNode, canvasEl);
923 | setTrackingPoints();
924 | }
925 |
926 | function handleKeydown(e) {
927 |
928 | if (!node.properties.dialogOpened) {
929 | return;
930 | }
931 |
932 | else if (e.key === 'Enter') {
933 | setNewline();
934 | drawAllLines(node, node.properties.draglines, imageNode, canvasEl);
935 | }
936 | else if (e.key === 'Escape') {
937 | node.closeEditorDialog(false);
938 | }
939 |
940 | console.log(e);
941 | }
942 | document.addEventListener('keydown', handleKeydown)
943 |
944 | canvasEl.addEventListener('mousedown', handleMouseDown)
945 | canvasEl.addEventListener('mousemove', handleMouseMove)
946 | canvasEl.addEventListener('mouseout', handleMouseOut)
947 |
948 | function handleMouseOut(e) {
949 | console.log("on mouseout");
950 | drawAllLines(node, node.properties.draglines, imageNode, canvasEl);
951 | }
952 |
953 | function handleMouseMove(e) {
954 | const rect = canvasEl.getBoundingClientRect();
955 | const x = Math.round(e.clientX - rect.left);
956 | const y = Math.round(e.clientY - rect.top);
957 |
958 | var currentDraglines;
959 |
960 | if (node.properties.newline) {
961 | currentDraglines = [...node.properties.draglines, [[x, y]]]
962 | } else {
963 | let prevDragline = getLast(node.properties.draglines) ?? [];
964 | currentDraglines = [...node.properties.draglines];
965 | currentDraglines[currentDraglines.length -1] = [...prevDragline, [x, y]]
966 | }
967 |
968 | drawAllLines(node, currentDraglines, imageNode, canvasEl);
969 |
970 | }
971 |
972 | function handleMouseDown(e) {
973 | // Get the mouse coordinates relative to the canvas
974 | console.log("mousedown")
975 | const rect = canvasEl.getBoundingClientRect();
976 |
977 | const x = Math.round(e.clientX - rect.left);
978 | const y = Math.round(e.clientY - rect.top);
979 | // console.log(`${e.clientX} - ${rect.left}, ${e.clientY} - ${rect.top}`)
980 | // Now, you have the x, y position relative to the canvas
981 | console.log('Mouse Down at:', x, y);
982 |
983 | // Optionally, you can pass x and y to another function
984 | // Do something with x and y, e.g., draw on the canvas
985 | // const canvasEl = document.getElementById("imageCanvas")
986 | // const imageNode = document.getElementById("canvasImage")
987 |
988 | var ctx
989 |
990 | if (canvasEl.getContext) {
991 | ctx = canvasEl.getContext("2d")
992 | }
993 |
994 | if (node.properties.newline) {
995 | node.properties.draglines = [...node.properties.draglines, [[x, y]]]
996 | node.properties.newline = false;
997 |
998 | } else {
999 |
1000 | const prevDragLine = getLast(node.properties.draglines);
1001 |
1002 | if (prevDragLine) {
1003 | prevDragLine.push([x, y])
1004 | } else {
1005 | node.properties.draglines = [...node.properties.draglines, [[x, y]]]
1006 | }
1007 | }
1008 |
1009 | setTrackingPoints();
1010 | drawAllLines(node, node.properties.draglines, imageNode, canvasEl)
1011 | // draglineTextEl.value = JSON.stringify(node.properties.draglines, null, 0)
1012 | }
1013 |
1014 | const inputContainer = document.createElement("div")
1015 | Object.assign(inputContainer.style, {
1016 | display: 'flex',
1017 | gap: '10px',
1018 | flexDirection: 'column',
1019 | })
1020 | const sizeSelectorEl = document.createElement("select")
1021 | sizeSelectorEl.id = "sizeSelector"
1022 | let sizeOptions = "";
1023 | sizes.forEach((size) => {
1024 | const nodeSize = `${node.properties.size[0]}x${node.properties.size[1]}`;
1025 | if (nodeSize == size) {
1026 |                         // build the <option> entries for the size <select>
1027 |                         sizeOptions += `<option value="${size}" selected>${size}</option>`
1028 |                     } else {
1029 |                         sizeOptions += `<option value="${size}">${size}</option>`
1030 |                     }
1031 |                 })
1032 |
1033 | sizeSelectorEl.insertAdjacentHTML("beforeend", sizeOptions)
1034 |
1035 | sizeSelectorEl.onchange = _refreshCanvas
1036 |
1037 | const imageInputEl = document.createElement("input")
1038 | Object.assign(imageInputEl, {
1039 | type: "file",
1040 | id: "inputFile",
1041 | accept: "image/*",
1042 | })
1043 | node.properties.imageNode = imageNode;
1044 |
1045 | imageInputEl.onchange = function(e) {
1046 | shared.infoLogger(`Image chosen`)
1047 | var file = e.target.files[0];
1048 | var reader = new FileReader();
1049 | reader.onload = function(e) {
1050 | shared.infoLogger(`Image onload 1`)
1051 | // const imageNode = document.getElementById("canvasImage")
1052 |
1053 | var img = new Image();
1054 |
1055 | img.onload = function() {
1056 | console.log(`Got image of size ${img.width}x${img.height}`)
1057 | imageNode.width = img.width;
1058 | imageNode.height = img.height;
1059 | var ctx;
1060 |
1061 | if (canvasEl.getContext) {
1062 | ctx = canvasEl.getContext("2d")
1063 | }
1064 |
1065 | imageNode.src = e.target.result;
1066 | imageNode.onload = function () {
1067 | shared.infoLogger(`Image onload 2`)
1068 |
1069 |                         // refresh() below recomputes the crop, so only the canvas size is set here
1070 |                         const size = sizeSelectorEl.value.split('x');
1071 |                         canvasEl.width = size[0];
1072 |                         canvasEl.height = size[1];
1073 |
1074 | refresh();
1075 | };
1076 | };
1077 | img.src = e.target.result;
1078 | };
1079 | file && reader.readAsDataURL(file);
1080 | }
1081 |
1082 | const refresh = () => {
1083 | node.properties.newline = true;
1084 | node.properties.draglines = []
1085 | draglineTextEl.value = JSON.stringify(node.properties.draglines, null, 0)
1086 |
1087 | _refreshCanvas()
1088 | }
1089 | const refreshButton = document.createElement("button");
1090 | refreshButton.textContent = "Refresh"
1091 | refreshButton.style.margin = "5px 10px"
1092 | refreshButton.onclick = refresh;
1093 |
1094 | function setNewline() {
1095 | node.properties.newline = true;
1096 | }
1097 |
1098 | const undoButton = document.createElement("button");
1099 | undoButton.textContent = "Undo"
1100 | undoButton.style.margin = "5px 10px"
1101 | undoButton.onclick = _undo;
1102 |
1103 | const newlineButton = document.createElement("button");
1104 | newlineButton.textContent = "New Line (Enter)"
1105 | newlineButton.style.margin = "5px 10px"
1106 | newlineButton.onclick = setNewline;
1107 | newlineButton.width = 100;
1108 |
1109 | const controlContainer = document.createElement("div")
1110 | Object.assign(controlContainer.style, {
1111 | display: "flex",
1112 | flexDirection: "column",
1113 | })
1114 |
1115 | const inputStyle = {
1116 | padding: '5px',
1117 | margin: '10px'
1118 | };
1119 |
1120 | controlContainer.append(sizeSelectorEl)
1121 | Object.assign(sizeSelectorEl.style, inputStyle)
1122 |
1123 | controlContainer.append(imageInputEl)
1124 | Object.assign(imageInputEl.style, inputStyle)
1125 |
1126 | controlContainer.append(newlineButton)
1127 | controlContainer.append(undoButton)
1128 | controlContainer.append(refreshButton)
1129 |
1130 | container.append(controlContainer)
1131 | // container.append(inputContainer)
1132 |
1133 | node.dialog.show('')
1134 | node.dialog.textElement.append(container)
1135 |
1136 | Object.assign(draglineTextEl.style, {
1137 | flex: 1,
1138 | margin: "20px",
1139 | })
1140 | controlContainer.append(draglineTextEl)
1141 |
1142 | _refreshCanvas()
1143 |             node.properties.draglines = JSON.parse(dragTextWidget.value || "[]") ?? [];
1144 | setTrackingPoints();
1145 | }
1146 |
1147 |
1148 | shared.log(`Setup dialog`)
1149 |
1150 | }
1151 | }
1152 | }
1153 |
1154 |
1155 | app.registerExtension(komojini_widgets);
--------------------------------------------------------------------------------