├── .gitattributes
├── .github
│   └── workflows
│       └── publish_action.yml
├── .gitignore
├── LICENSE
├── NOTICE
├── README.md
├── __init__.py
├── core
│   ├── __init__.py
│   ├── adjust.py
│   ├── anim.py
│   ├── calc.py
│   ├── color.py
│   ├── compose.py
│   ├── create.py
│   ├── trans.py
│   ├── utility
│   │   ├── __init__.py
│   │   ├── batch.py
│   │   ├── info.py
│   │   └── io.py
│   └── vars.py
├── node_list.json
├── pyproject.toml
├── requirements.txt
├── res
│   ├── aud
│   │   ├── bread.mp3
│   │   └── bread.wav
│   ├── img
│   │   ├── anim
│   │   │   ├── anim (1).png
│   │   │   ├── anim (2).png
│   │   │   ├── anim (3).png
│   │   │   ├── anim (4).png
│   │   │   ├── anim (5).png
│   │   │   ├── anim (6).png
│   │   │   ├── anim (7).png
│   │   │   └── anim (8).png
│   │   ├── color-a.png
│   │   ├── color-b.png
│   │   ├── color-c.png
│   │   ├── color-d.png
│   │   ├── color-e.png
│   │   ├── color-f.png
│   │   ├── color-g.png
│   │   ├── depth-a.png
│   │   ├── depth-b.png
│   │   ├── depth-c.png
│   │   ├── mask-a.png
│   │   ├── mask-b.png
│   │   ├── mask-c.png
│   │   ├── mask-e.png
│   │   ├── shape-a.png
│   │   ├── shape-b.png
│   │   ├── shape-c.png
│   │   ├── shape-d.png
│   │   ├── test-a.png
│   │   ├── test-b.png
│   │   ├── test-c.png
│   │   ├── test-d.jpg
│   │   ├── tile-a.png
│   │   ├── tile-b.png
│   │   ├── tile-c.png
│   │   └── tile-d.png
│   └── wiki
│       ├── YouTube.svg
│       └── help_002.png
└── web
    ├── core.js
    ├── fun.js
    ├── nodes
    │   ├── akashic.js
    │   ├── array.js
    │   ├── delay.js
    │   ├── flatten.js
    │   ├── graph.js
    │   ├── lerp.js
    │   ├── op_binary.js
    │   ├── op_unary.js
    │   ├── queue.js
    │   ├── route.js
    │   ├── stack.js
    │   ├── stringer.js
    │   └── value.js
    ├── util.js
    └── widget_vector.js
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
3 |
--------------------------------------------------------------------------------
/.github/workflows/publish_action.yml:
--------------------------------------------------------------------------------
1 | name: Publish to Comfy registry
2 | on:
3 | workflow_dispatch:
4 | push:
5 | branches:
6 | - main
7 | paths:
8 | - "pyproject.toml"
9 |
10 | permissions:
11 | issues: write
12 |
13 | jobs:
14 | publish-node:
15 | name: Publish Custom Node to registry
16 | runs-on: ubuntu-latest
17 | if: ${{ github.repository_owner == 'Amorano' }}
18 | steps:
19 | - name: Check out code
20 | uses: actions/checkout@v4
21 | - name: Publish Custom Node
22 | uses: Comfy-Org/publish-node-action@v1
23 | with:
24 | personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }}
25 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__
2 | *.py[cod]
3 | *$py.class
4 | _*/
5 | glsl/*
6 | *.code-workspace
7 | .vscode
8 | config.json
9 | ignore.txt
10 | .env
11 | .venv
12 | .DS_Store
13 | *.egg-info
14 | *.bak
15 | checkpoints
16 | results
17 | backup
18 | node_modules
19 | *-lock.json
20 | *.config.mjs
21 | package.json
22 | _TODO*.*
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Alexander G. Morano
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
23 | GO NUTS; JUST TRY NOT TO DO IT IN YOUR HEAD.
24 |
--------------------------------------------------------------------------------
/NOTICE:
--------------------------------------------------------------------------------
1 | This project includes code concepts from the MTB Nodes project (MIT)
2 | https://github.com/melMass/comfy_mtb
3 |
4 | This project includes code concepts from the ComfyUI-Custom-Scripts project (MIT)
5 | https://github.com/pythongosssss/ComfyUI-Custom-Scripts
6 |
7 | This project includes code concepts from the KJNodes for ComfyUI project (GPL 3.0)
8 | https://github.com/kijai/ComfyUI-KJNodes
9 |
10 | This project includes code concepts from the UE Nodes project (Apache 2.0)
11 | https://github.com/chrisgoringe/cg-use-everywhere
12 |
13 | This project includes code concepts from the WAS Node Suite project (MIT)
14 | https://github.com/WASasquatch/was-node-suite-comfyui
15 |
16 | This project includes code concepts from the rgthree-comfy project (MIT)
17 | https://github.com/rgthree/rgthree-comfy
18 |
19 | This project includes code concepts from the FizzNodes project (MIT)
20 | https://github.com/FizzleDorf/ComfyUI_FizzNodes
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
5 |
If you are using a virtual environment (venv), make sure it is activated before installation. Then install the requirements with the command:
275 | ```
276 | pip install -r requirements.txt
277 | ```
278 | # WHERE TO FIND ME
279 |
280 | You can find me on [Discord](https://discord.gg/62TJaZ3Z5r).
281 |
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | ██ ██████ ██ ██ ██ ███ ███ ███████ ████████ ██████ ██ ██ ██
3 | ██ ██ ██ ██ ██ ██ ████ ████ ██ ██ ██ ██ ██ ██ ██
4 | ██ ██ ██ ██ ██ ██ ██ ████ ██ █████ ██ ██████ ██ ███
5 | ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██
6 | █████ ██████ ████ ██ ██ ██ ███████ ██ ██ ██ ██ ██ ██
7 |
8 | Animation, Image Compositing & Procedural Creation
9 |
10 | @title: Jovimetrix
11 | @author: Alexander G. Morano
12 | @category: Compositing
13 | @reference: https://github.com/Amorano/Jovimetrix
14 | @tags: adjust, animate, compose, compositing, composition, device, flow, video,
15 | mask, shape, animation, logic
16 | @description: Animation via tick. Parameter manipulation with wave generator.
17 | Unary and Binary math support. Value convert int/float/bool, VectorN and Image,
18 | Mask types. Shape mask generator. Stack images, do channel ops, split, merge
19 | and randomize arrays and batches. Load images & video from anywhere. Dynamic
20 | bus routing. Save output anywhere! Flatten, crop, transform; check
21 | colorblindness or linear interpolate values.
22 | @node list:
23 | TickNode, TickSimpleNode, WaveGeneratorNode
24 | BitSplitNode, ComparisonNode, LerpNode, OPUnaryNode, OPBinaryNode, StringerNode, SwizzleNode,
25 | ColorBlindNode, ColorMatchNode, ColorKMeansNode, ColorTheoryNode, GradientMapNode,
26 | AdjustNode, BlendNode, FilterMaskNode, PixelMergeNode, PixelSplitNode, PixelSwapNode, ThresholdNode,
27 | ConstantNode, ShapeNode, TextNode,
28 | CropNode, FlattenNode, StackNode, TransformNode,
29 |
30 | ArrayNode, QueueNode, QueueTooNode,
31 | AkashicNode, GraphNode, ImageInfoNode,
32 | DelayNode, ExportNode, RouteNode, SaveOutputNode
33 |
34 | ValueNode, Vector2Node, Vector3Node, Vector4Node,
35 | """
36 |
37 | __author__ = "Alexander G. Morano"
38 | __email__ = "amorano@gmail.com"
39 |
40 | from pathlib import Path
41 |
42 | from cozy_comfyui import \
43 | logger
44 |
45 | from cozy_comfyui.node import \
46 | loader
47 |
48 | JOV_DOCKERENV = False
49 | try:
50 | with open('/proc/1/cgroup', 'rt') as f:
51 | content = f.read()
52 | JOV_DOCKERENV = any(x in content for x in ['docker', 'kubepods', 'containerd'])
53 | except FileNotFoundError:
54 | pass
55 |
56 | if JOV_DOCKERENV:
57 |     logger.info("RUNNING IN A DOCKER CONTAINER")
58 |
59 | # ==============================================================================
60 | # === GLOBAL ===
61 | # ==============================================================================
62 |
63 | PACKAGE = "JOVIMETRIX"
64 | WEB_DIRECTORY = "./web"
65 | ROOT = Path(__file__).resolve().parent
66 | NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS = loader(ROOT,
67 | PACKAGE,
68 | "core",
69 | f"{PACKAGE} 🔺🟩🔵",
70 | False)
71 |
--------------------------------------------------------------------------------
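
The `loader` call above is what wires the package into ComfyUI: it must return the `NODE_CLASS_MAPPINGS` and `NODE_DISPLAY_NAME_MAPPINGS` dictionaries ComfyUI reads from every custom-node package. A minimal sketch of that pattern, assuming nodes advertise themselves through a `NAME` attribute the way the classes in `core/` do (illustrative only; this is not cozy_comfyui's actual implementation, and `build_mappings` is a hypothetical name):

```python
import importlib
import inspect
from pathlib import Path

def build_mappings(root: Path, module: str) -> tuple[dict, dict]:
    """Scan `<root>/<module>` for node classes and build the two registries
    ComfyUI expects. Hypothetical stand-in for cozy_comfyui.node.loader."""
    node_class, node_display = {}, {}
    for py in sorted((root / module).glob("*.py")):
        if py.stem.startswith("_"):
            continue  # skip __init__ and private helpers
        # relative import; assumes this runs inside the installed package
        mod = importlib.import_module(f".{module}.{py.stem}", __package__)
        for _, cls in inspect.getmembers(mod, inspect.isclass):
            name = getattr(cls, "NAME", None)
            if name is None:
                continue  # only classes that declare NAME are nodes
            node_class[name] = cls     # maps registry name -> node class
            node_display[name] = name  # suffix/emoji decoration would go here
    return node_class, node_display
```
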
/core/__init__.py:
--------------------------------------------------------------------------------
1 |
2 | from enum import Enum
3 |
4 | class EnumFillOperation(Enum):
5 | DEFAULT = 0
6 | FILL_ZERO = 20
7 | FILL_ALL = 10
8 |
--------------------------------------------------------------------------------
/core/adjust.py:
--------------------------------------------------------------------------------
1 | """ Jovimetrix - Adjust """
2 |
3 | import sys
4 | from enum import Enum
5 | from typing import Any, List
6 |
7 | from comfy.utils import ProgressBar
8 |
9 | from cozy_comfyui import \
10 | InputType, RGBAMaskType, EnumConvertType, \
11 | deep_merge, parse_param, zip_longest_fill
12 |
13 | from cozy_comfyui.lexicon import \
14 | Lexicon
15 |
16 | from cozy_comfyui.node import \
17 | COZY_TYPE_IMAGE, \
18 | CozyImageNode
19 |
20 | from cozy_comfyui.image.adjust import \
21 | EnumAdjustBlur, EnumAdjustColor, EnumAdjustEdge, EnumAdjustMorpho, \
22 | image_contrast, image_brightness, image_equalize, image_gamma, \
23 | image_exposure, image_hsv, image_invert, image_pixelate, image_pixelscale, \
24 | image_posterize, image_quantize, image_sharpen, image_morphology, \
25 | image_emboss, image_blur, image_edge, image_color
26 |
27 | from cozy_comfyui.image.channel import \
28 | channel_solid
29 |
30 | from cozy_comfyui.image.compose import \
31 | image_levels
32 |
33 | from cozy_comfyui.image.convert import \
34 | tensor_to_cv, cv_to_tensor_full, image_mask, image_mask_add
35 |
36 | from cozy_comfyui.image.misc import \
37 | image_stack
38 |
39 | # ==============================================================================
40 | # === GLOBAL ===
41 | # ==============================================================================
42 |
43 | JOV_CATEGORY = "ADJUST"
44 |
45 | # ==============================================================================
46 | # === ENUMERATION ===
47 | # ==============================================================================
48 |
49 | class EnumAdjustLight(Enum):
50 | EXPOSURE = 10
51 | GAMMA = 20
52 | BRIGHTNESS = 30
53 | CONTRAST = 40
54 | EQUALIZE = 50
55 |
56 | class EnumAdjustPixel(Enum):
57 | PIXELATE = 10
58 | PIXELSCALE = 20
59 | QUANTIZE = 30
60 | POSTERIZE = 40
61 |
62 | # ==============================================================================
63 | # === CLASS ===
64 | # ==============================================================================
65 |
66 | class AdjustBlurNode(CozyImageNode):
67 | NAME = "ADJUST: BLUR (JOV)"
68 | CATEGORY = JOV_CATEGORY
69 | DESCRIPTION = """
70 | Enhance and modify images with various blur effects.
71 | """
72 |
73 | @classmethod
74 | def INPUT_TYPES(cls) -> InputType:
75 | d = super().INPUT_TYPES()
76 | d = deep_merge(d, {
77 | "optional": {
78 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}),
79 | Lexicon.FUNCTION: (EnumAdjustBlur._member_names_, {
80 | "default": EnumAdjustBlur.BLUR.name,}),
81 | Lexicon.RADIUS: ("INT", {
82 | "default": 3, "min": 3}),
83 | }
84 | })
85 | return Lexicon._parse(d)
86 |
87 | def run(self, **kw) -> RGBAMaskType:
88 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None)
89 | op = parse_param(kw, Lexicon.FUNCTION, EnumAdjustBlur, EnumAdjustBlur.BLUR.name)
90 | radius = parse_param(kw, Lexicon.RADIUS, EnumConvertType.INT, 3)
91 | params = list(zip_longest_fill(pA, op, radius))
92 | images = []
93 | pbar = ProgressBar(len(params))
94 | for idx, (pA, op, radius) in enumerate(params):
95 | pA = channel_solid() if pA is None else tensor_to_cv(pA)
96 | # height, width = pA.shape[:2]
97 | pA = image_blur(pA, op, radius)
98 | #pA = image_blend(pA, img_new, mask)
99 | images.append(cv_to_tensor_full(pA))
100 | pbar.update_absolute(idx)
101 | return image_stack(images)
102 |
103 | class AdjustColorNode(CozyImageNode):
104 | NAME = "ADJUST: COLOR (JOV)"
105 | CATEGORY = JOV_CATEGORY
106 | DESCRIPTION = """
107 | Enhance and modify images with per-channel color adjustments.
108 | """
109 |
110 | @classmethod
111 | def INPUT_TYPES(cls) -> InputType:
112 | d = super().INPUT_TYPES()
113 | d = deep_merge(d, {
114 | "optional": {
115 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}),
116 | Lexicon.FUNCTION: (EnumAdjustColor._member_names_, {
117 | "default": EnumAdjustColor.RGB.name,}),
118 | Lexicon.VEC: ("VEC3", {
119 | "default": (0,0,0), "mij": -1, "maj": 1, "step": 0.025})
120 | }
121 | })
122 | return Lexicon._parse(d)
123 |
124 | def run(self, **kw) -> RGBAMaskType:
125 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None)
126 | op = parse_param(kw, Lexicon.FUNCTION, EnumAdjustColor, EnumAdjustColor.RGB.name)
127 | vec = parse_param(kw, Lexicon.VEC, EnumConvertType.VEC3, (0,0,0))
128 | params = list(zip_longest_fill(pA, op, vec))
129 | images = []
130 | pbar = ProgressBar(len(params))
131 | for idx, (pA, op, vec) in enumerate(params):
132 | pA = channel_solid() if pA is None else tensor_to_cv(pA)
133 | pA = image_color(pA, op, vec[0], vec[1], vec[2])
134 | images.append(cv_to_tensor_full(pA))
135 | pbar.update_absolute(idx)
136 | return image_stack(images)
137 |
138 | class AdjustEdgeNode(CozyImageNode):
139 | NAME = "ADJUST: EDGE (JOV)"
140 | CATEGORY = JOV_CATEGORY
141 | DESCRIPTION = """
142 | Enhanced edge detection.
143 | """
144 |
145 | @classmethod
146 | def INPUT_TYPES(cls) -> InputType:
147 | d = super().INPUT_TYPES()
148 | d = deep_merge(d, {
149 | "optional": {
150 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}),
151 | Lexicon.FUNCTION: (EnumAdjustEdge._member_names_, {
152 | "default": EnumAdjustEdge.CANNY.name,}),
153 | Lexicon.RADIUS: ("INT", {
154 | "default": 1, "min": 1}),
155 | Lexicon.ITERATION: ("INT", {
156 | "default": 1, "min": 1, "max": 1000}),
157 | Lexicon.LOHI: ("VEC2", {
158 | "default": (0, 1), "mij": 0, "maj": 1, "step": 0.01})
159 | }
160 | })
161 | return Lexicon._parse(d)
162 |
163 | def run(self, **kw) -> RGBAMaskType:
164 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None)
165 | op = parse_param(kw, Lexicon.FUNCTION, EnumAdjustEdge, EnumAdjustEdge.CANNY.name)
166 | radius = parse_param(kw, Lexicon.RADIUS, EnumConvertType.INT, 1)
167 | count = parse_param(kw, Lexicon.ITERATION, EnumConvertType.INT, 1)
168 | lohi = parse_param(kw, Lexicon.LOHI, EnumConvertType.VEC2, (0,1))
169 | params = list(zip_longest_fill(pA, op, radius, count, lohi))
170 | images = []
171 | pbar = ProgressBar(len(params))
172 | for idx, (pA, op, radius, count, lohi) in enumerate(params):
173 | pA = channel_solid() if pA is None else tensor_to_cv(pA)
174 | alpha = image_mask(pA)
175 | pA = image_edge(pA, op, radius, count, lohi[0], lohi[1])
176 | pA = image_mask_add(pA, alpha)
177 | images.append(cv_to_tensor_full(pA))
178 | pbar.update_absolute(idx)
179 | return image_stack(images)
180 |
181 | class AdjustEmbossNode(CozyImageNode):
182 | NAME = "ADJUST: EMBOSS (JOV)"
183 | CATEGORY = JOV_CATEGORY
184 | DESCRIPTION = """
185 | Emboss boss mode.
186 | """
187 |
188 | @classmethod
189 | def INPUT_TYPES(cls) -> InputType:
190 | d = super().INPUT_TYPES()
191 | d = deep_merge(d, {
192 | "optional": {
193 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}),
194 | Lexicon.HEADING: ("FLOAT", {
195 | "default": -45, "min": -sys.float_info.max, "max": sys.float_info.max, "step": 0.1}),
196 | Lexicon.ELEVATION: ("FLOAT", {
197 | "default": 45, "min": -sys.float_info.max, "max": sys.float_info.max, "step": 0.1}),
198 | Lexicon.DEPTH: ("FLOAT", {
199 | "default": 10, "min": 0, "max": sys.float_info.max, "step": 0.1,
200 | "tooltip": "Depth perceived from the light angles above"}),
201 | }
202 | })
203 | return Lexicon._parse(d)
204 |
205 | def run(self, **kw) -> RGBAMaskType:
206 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None)
207 | heading = parse_param(kw, Lexicon.HEADING, EnumConvertType.FLOAT, -45)
208 | elevation = parse_param(kw, Lexicon.ELEVATION, EnumConvertType.FLOAT, 45)
209 | depth = parse_param(kw, Lexicon.DEPTH, EnumConvertType.FLOAT, 10)
210 | params = list(zip_longest_fill(pA, heading, elevation, depth))
211 | images = []
212 | pbar = ProgressBar(len(params))
213 | for idx, (pA, heading, elevation, depth) in enumerate(params):
214 | pA = channel_solid() if pA is None else tensor_to_cv(pA)
215 | alpha = image_mask(pA)
216 | pA = image_emboss(pA, heading, elevation, depth)
217 | pA = image_mask_add(pA, alpha)
218 | images.append(cv_to_tensor_full(pA))
219 | pbar.update_absolute(idx)
220 | return image_stack(images)
221 |
222 | class AdjustLevelNode(CozyImageNode):
223 | NAME = "ADJUST: LEVELS (JOV)"
224 | CATEGORY = JOV_CATEGORY
225 | DESCRIPTION = """
226 | Remap the low, mid, and high levels of an image within an input/output range.
227 | """
228 |
229 | @classmethod
230 | def INPUT_TYPES(cls) -> InputType:
231 | d = super().INPUT_TYPES()
232 | d = deep_merge(d, {
233 | "optional": {
234 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}),
235 | Lexicon.LMH: ("VEC3", {
236 | "default": (0,0.5,1), "mij": 0, "maj": 1, "step": 0.01,
237 | "label": ["LOW", "MID", "HIGH"]}),
238 | Lexicon.RANGE: ("VEC2", {
239 | "default": (0, 1), "mij": 0, "maj": 1, "step": 0.01,
240 | "label": ["IN", "OUT"]})
241 | }
242 | })
243 | return Lexicon._parse(d)
244 |
245 | def run(self, **kw) -> RGBAMaskType:
246 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None)
247 | LMH = parse_param(kw, Lexicon.LMH, EnumConvertType.VEC3, (0,0.5,1))
248 | inout = parse_param(kw, Lexicon.RANGE, EnumConvertType.VEC2, (0,1))
249 | params = list(zip_longest_fill(pA, LMH, inout))
250 | images = []
251 | pbar = ProgressBar(len(params))
252 | for idx, (pA, LMH, inout) in enumerate(params):
253 | pA = channel_solid() if pA is None else tensor_to_cv(pA)
254 | '''
255 | h, s, v = hsv
256 | img_new = image_hsv(img_new, h, s, v)
257 | '''
258 | low, mid, high = LMH
259 | start, end = inout
260 | pA = image_levels(pA, low, mid, high, start, end)
261 | images.append(cv_to_tensor_full(pA))
262 | pbar.update_absolute(idx)
263 | return image_stack(images)
264 |
265 | class AdjustLightNode(CozyImageNode):
266 | NAME = "ADJUST: LIGHT (JOV)"
267 | CATEGORY = JOV_CATEGORY
268 | DESCRIPTION = """
269 | Tonal adjustments. They can be applied individually or all at the same time in order: brightness, contrast, histogram equalization, exposure, and gamma correction.
270 | """
271 |
272 | @classmethod
273 | def INPUT_TYPES(cls) -> InputType:
274 | d = super().INPUT_TYPES()
275 | d = deep_merge(d, {
276 | "optional": {
277 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}),
278 | Lexicon.BRIGHTNESS: ("FLOAT", {
279 | "default": 0.5, "min": 0, "max": 1, "step": 0.01}),
280 | Lexicon.CONTRAST: ("FLOAT", {
281 | "default": 0, "min": -1, "max": 1, "step": 0.01}),
282 | Lexicon.EQUALIZE: ("BOOLEAN", {
283 | "default": False}),
284 | Lexicon.EXPOSURE: ("FLOAT", {
285 | "default": 1, "min": -8, "max": 8, "step": 0.01}),
286 | Lexicon.GAMMA: ("FLOAT", {
287 | "default": 1, "min": 0, "max": 8, "step": 0.01}),
288 | }
289 | })
290 | return Lexicon._parse(d)
291 |
292 | def run(self, **kw) -> RGBAMaskType:
293 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None)
294 | brightness = parse_param(kw, Lexicon.BRIGHTNESS, EnumConvertType.FLOAT, 0.5)
295 | contrast = parse_param(kw, Lexicon.CONTRAST, EnumConvertType.FLOAT, 0)
296 |         equalize = parse_param(kw, Lexicon.EQUALIZE, EnumConvertType.BOOLEAN, False)
297 |         exposure = parse_param(kw, Lexicon.EXPOSURE, EnumConvertType.FLOAT, 1)
298 |         gamma = parse_param(kw, Lexicon.GAMMA, EnumConvertType.FLOAT, 1)
299 | params = list(zip_longest_fill(pA, brightness, contrast, equalize, exposure, gamma))
300 | images = []
301 | pbar = ProgressBar(len(params))
302 | for idx, (pA, brightness, contrast, equalize, exposure, gamma) in enumerate(params):
303 | pA = channel_solid() if pA is None else tensor_to_cv(pA)
304 | alpha = image_mask(pA)
305 |
306 | brightness = 2. * (brightness - 0.5)
307 | if brightness != 0:
308 | pA = image_brightness(pA, brightness)
309 |
310 | if contrast != 0:
311 | pA = image_contrast(pA, contrast)
312 |
313 | if equalize:
314 | pA = image_equalize(pA)
315 |
316 | if exposure != 1:
317 | pA = image_exposure(pA, exposure)
318 |
319 | if gamma != 1:
320 | pA = image_gamma(pA, gamma)
321 |
322 | '''
323 | h, s, v = hsv
324 | img_new = image_hsv(img_new, h, s, v)
325 |
326 | l, m, h = level
327 | img_new = image_levels(img_new, l, h, m, gamma)
328 | '''
329 | pA = image_mask_add(pA, alpha)
330 | images.append(cv_to_tensor_full(pA))
331 | pbar.update_absolute(idx)
332 | return image_stack(images)
333 |
334 | class AdjustMorphNode(CozyImageNode):
335 | NAME = "ADJUST: MORPHOLOGY (JOV)"
336 | CATEGORY = JOV_CATEGORY
337 | DESCRIPTION = """
338 | Operations based on the image shape.
339 | """
340 |
341 | @classmethod
342 | def INPUT_TYPES(cls) -> InputType:
343 | d = super().INPUT_TYPES()
344 | d = deep_merge(d, {
345 | "optional": {
346 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}),
347 | Lexicon.FUNCTION: (EnumAdjustMorpho._member_names_, {
348 | "default": EnumAdjustMorpho.DILATE.name,}),
349 | Lexicon.RADIUS: ("INT", {
350 | "default": 1, "min": 1}),
351 | Lexicon.ITERATION: ("INT", {
352 | "default": 1, "min": 1, "max": 1000}),
353 | }
354 | })
355 | return Lexicon._parse(d)
356 |
357 | def run(self, **kw) -> RGBAMaskType:
358 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None)
359 | op = parse_param(kw, Lexicon.FUNCTION, EnumAdjustMorpho, EnumAdjustMorpho.DILATE.name)
360 | kernel = parse_param(kw, Lexicon.RADIUS, EnumConvertType.INT, 1)
361 | count = parse_param(kw, Lexicon.ITERATION, EnumConvertType.INT, 1)
362 | params = list(zip_longest_fill(pA, op, kernel, count))
363 | images: List[Any] = []
364 | pbar = ProgressBar(len(params))
365 | for idx, (pA, op, kernel, count) in enumerate(params):
366 | pA = channel_solid() if pA is None else tensor_to_cv(pA)
367 | alpha = image_mask(pA)
368 | pA = image_morphology(pA, op, kernel, count)
369 | pA = image_mask_add(pA, alpha)
370 | images.append(cv_to_tensor_full(pA))
371 | pbar.update_absolute(idx)
372 | return image_stack(images)
373 |
374 | class AdjustPixelNode(CozyImageNode):
375 | NAME = "ADJUST: PIXEL (JOV)"
376 | CATEGORY = JOV_CATEGORY
377 | DESCRIPTION = """
378 | Pixel-level transformations. The val parameter controls the intensity or resolution of the effect, depending on the operation.
379 | """
380 |
381 | @classmethod
382 | def INPUT_TYPES(cls) -> InputType:
383 | d = super().INPUT_TYPES()
384 | d = deep_merge(d, {
385 | "optional": {
386 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}),
387 | Lexicon.FUNCTION: (EnumAdjustPixel._member_names_, {
388 | "default": EnumAdjustPixel.PIXELATE.name,}),
389 | Lexicon.VALUE: ("FLOAT", {
390 | "default": 0, "min": 0, "max": 1, "step": 0.01})
391 | }
392 | })
393 | return Lexicon._parse(d)
394 |
395 | def run(self, **kw) -> RGBAMaskType:
396 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None)
397 | op = parse_param(kw, Lexicon.FUNCTION, EnumAdjustPixel, EnumAdjustPixel.PIXELATE.name)
398 | val = parse_param(kw, Lexicon.VALUE, EnumConvertType.FLOAT, 0)
399 | params = list(zip_longest_fill(pA, op, val))
400 | images = []
401 | pbar = ProgressBar(len(params))
402 | for idx, (pA, op, val) in enumerate(params):
403 | pA = channel_solid() if pA is None else tensor_to_cv(pA, chan=4)
404 | alpha = image_mask(pA)
405 |
406 | match op:
407 | case EnumAdjustPixel.PIXELATE:
408 | pA = image_pixelate(pA, val / 2.)
409 |
410 | case EnumAdjustPixel.PIXELSCALE:
411 | pA = image_pixelscale(pA, val)
412 |
413 | case EnumAdjustPixel.QUANTIZE:
414 | pA = image_quantize(pA, val)
415 |
416 | case EnumAdjustPixel.POSTERIZE:
417 | pA = image_posterize(pA, val)
418 |
419 | pA = image_mask_add(pA, alpha)
420 | images.append(cv_to_tensor_full(pA))
421 | pbar.update_absolute(idx)
422 | return image_stack(images)
423 |
424 | class AdjustSharpenNode(CozyImageNode):
425 | NAME = "ADJUST: SHARPEN (JOV)"
426 | CATEGORY = JOV_CATEGORY
427 | DESCRIPTION = """
428 | Sharpen the pixels of an image.
429 | """
430 |
431 | @classmethod
432 | def INPUT_TYPES(cls) -> InputType:
433 | d = super().INPUT_TYPES()
434 | d = deep_merge(d, {
435 | "optional": {
436 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}),
437 | Lexicon.AMOUNT: ("FLOAT", {
438 | "default": 0, "min": 0, "max": 1, "step": 0.01}),
439 | Lexicon.THRESHOLD: ("FLOAT", {
440 | "default": 0, "min": 0, "max": 1, "step": 0.01})
441 | }
442 | })
443 | return Lexicon._parse(d)
444 |
445 | def run(self, **kw) -> RGBAMaskType:
446 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None)
447 | amount = parse_param(kw, Lexicon.AMOUNT, EnumConvertType.FLOAT, 0)
448 | threshold = parse_param(kw, Lexicon.THRESHOLD, EnumConvertType.FLOAT, 0)
449 | params = list(zip_longest_fill(pA, amount, threshold))
450 | images = []
451 | pbar = ProgressBar(len(params))
452 | for idx, (pA, amount, threshold) in enumerate(params):
453 | pA = channel_solid() if pA is None else tensor_to_cv(pA)
454 | pA = image_sharpen(pA, amount / 2., threshold=threshold / 25.5)
455 | images.append(cv_to_tensor_full(pA))
456 | pbar.update_absolute(idx)
457 | return image_stack(images)
458 |
--------------------------------------------------------------------------------
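
Every node in this file follows the same batching contract: `parse_param` returns one list per input, `zip_longest_fill` zips those lists into per-frame parameter tuples, and (judging by how it is used here) shorter lists are padded with their last value so a single setting can serve a whole image batch. A toy sketch of that assumed padding behavior, not cozy_comfyui's actual code:

```python
from itertools import zip_longest

def zip_longest_fill_sketch(*seqs):
    """Zip sequences of unequal length, repeating each sequence's final
    element instead of yielding None for the exhausted ones."""
    fills = [s[-1] if len(s) else None for s in seqs]
    for row in zip_longest(*seqs):
        yield tuple(f if v is None else v for v, f in zip(row, fills))

# Three images, one radius: the radius is reused for every frame.
print(list(zip_longest_fill_sketch(["imgA", "imgB", "imgC"], [3])))
# [('imgA', 3), ('imgB', 3), ('imgC', 3)]
```
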
/core/anim.py:
--------------------------------------------------------------------------------
1 | """ Jovimetrix - Animation """
2 |
3 | import sys
4 |
5 | import numpy as np
6 |
7 | from comfy.utils import ProgressBar
8 |
9 | from cozy_comfyui import \
10 | InputType, EnumConvertType, \
11 | deep_merge, parse_param, zip_longest_fill
12 |
13 | from cozy_comfyui.lexicon import \
14 | Lexicon
15 |
16 | from cozy_comfyui.node import \
17 | CozyBaseNode
18 |
19 | from cozy_comfyui.maths.ease import \
20 | EnumEase, \
21 | ease_op
22 |
23 | from cozy_comfyui.maths.norm import \
24 | EnumNormalize, \
25 | norm_op
26 |
27 | from cozy_comfyui.maths.wave import \
28 | EnumWave, \
29 | wave_op
30 |
31 | from cozy_comfyui.maths.series import \
32 | seriesLinear
33 |
34 | # ==============================================================================
35 | # === GLOBAL ===
36 | # ==============================================================================
37 |
38 | JOV_CATEGORY = "ANIMATION"
39 |
40 | # ==============================================================================
41 | # === CLASS ===
42 | # ==============================================================================
43 |
44 | class ResultObject(object):
45 | def __init__(self, *arg, **kw) -> None:
46 | self.frame = []
47 | self.lin = []
48 | self.fixed = []
49 | self.trigger = []
50 | self.batch = []
51 |
52 | class TickNode(CozyBaseNode):
53 | NAME = "TICK (JOV) ⏱"
54 | CATEGORY = JOV_CATEGORY
55 | RETURN_TYPES = ("FLOAT", "FLOAT", "FLOAT", "FLOAT", "FLOAT")
56 | RETURN_NAMES = ("VALUE", "LINEAR", "EASED", "SCALAR_LIN", "SCALAR_EASE")
57 | OUTPUT_IS_LIST = (True, True, True, True, True,)
58 | OUTPUT_TOOLTIPS = (
59 | "List of values",
60 | "Normalized values",
61 | "Eased values",
62 | "Scalar normalized values",
63 | "Scalar eased values",
64 | )
65 | DESCRIPTION = """
66 | Value generator that emits raw, normalized, and eased value series based on a time interval.
67 | """
68 |
69 | @classmethod
70 | def INPUT_TYPES(cls) -> InputType:
71 | d = super().INPUT_TYPES()
72 | d = deep_merge(d, {
73 | "optional": {
74 | # forces a MOD on CYCLE
75 | Lexicon.START: ("INT", {
76 | "default": 0, "min": -sys.maxsize, "max": sys.maxsize
77 | }),
78 | # interval between frames
79 | Lexicon.STEP: ("FLOAT", {
80 | "default": 0, "min": -sys.float_info.max, "max": sys.float_info.max, "precision": 3,
81 | "tooltip": "Amount to add to each frame per tick"
82 | }),
83 | # how many frames to dump....
84 | Lexicon.COUNT: ("INT", {
85 | "default": 1, "min": 1, "max": 1500
86 | }),
87 | Lexicon.LOOP: ("INT", {
88 | "default": 0, "min": 0, "max": sys.maxsize,
89 |                     "tooltip": "Value at which the series wraps back to the start. 0 means linear playback (no loop point)"
90 | }),
91 | Lexicon.PINGPONG: ("BOOLEAN", {
92 | "default": False
93 | }),
94 | Lexicon.EASE: (EnumEase._member_names_, {
95 | "default": EnumEase.LINEAR.name}),
96 | Lexicon.NORMALIZE: (EnumNormalize._member_names_, {
97 | "default": EnumNormalize.MINMAX2.name}),
98 | Lexicon.SCALAR: ("FLOAT", {
99 | "default": 1, "min": 0, "max": sys.float_info.max
100 | })
101 |
102 | }
103 | })
104 | return Lexicon._parse(d)
105 |
106 | def run(self, **kw) -> tuple[float, ...]:
107 | """
108 | Generates a series of numbers with various options including:
109 | - Custom start value (supporting floating point and negative numbers)
110 | - Custom step value (supporting floating point and negative numbers)
111 | - Fixed number of frames
112 | - Custom loop point (series restarts after reaching this many steps)
113 | - Ping-pong option (reverses direction at end points)
114 | - Support for easing functions
115 | - Normalized output 0..1, -1..1, L2 or ZScore
116 | """
117 |
118 | start = parse_param(kw, Lexicon.START, EnumConvertType.INT, 0)[0]
119 | step = parse_param(kw, Lexicon.STEP, EnumConvertType.FLOAT, 0)[0]
120 | count = parse_param(kw, Lexicon.COUNT, EnumConvertType.INT, 1, 1, 1500)[0]
121 | loop = parse_param(kw, Lexicon.LOOP, EnumConvertType.INT, 0, 0)[0]
122 | pingpong = parse_param(kw, Lexicon.PINGPONG, EnumConvertType.BOOLEAN, False)[0]
123 | ease = parse_param(kw, Lexicon.EASE, EnumEase, EnumEase.LINEAR.name)[0]
124 |         normalize = parse_param(kw, Lexicon.NORMALIZE, EnumNormalize, EnumNormalize.MINMAX2.name)[0]
125 | scalar = parse_param(kw, Lexicon.SCALAR, EnumConvertType.FLOAT, 1, 0)[0]
126 |
127 | if step == 0:
128 | step = 1
129 |
130 | cycle = seriesLinear(start, step, count, loop, pingpong)
131 | linear = norm_op(normalize, np.array(cycle))
132 | eased = ease_op(ease, linear, len(linear))
133 | scalar_linear = linear * scalar
134 | scalar_eased = eased * scalar
135 |
136 | return (
137 | cycle,
138 | linear.tolist(),
139 | eased.tolist(),
140 | scalar_linear.tolist(),
141 | scalar_eased.tolist(),
142 | )
143 |
144 | class WaveGeneratorNode(CozyBaseNode):
145 | NAME = "WAVE GEN (JOV) 🌊"
146 | NAME_PRETTY = "WAVE GEN (JOV) 🌊"
147 | CATEGORY = JOV_CATEGORY
148 | RETURN_TYPES = ("FLOAT", "INT", )
149 | RETURN_NAMES = ("FLOAT", "INT", )
150 | DESCRIPTION = """
151 | Produce waveforms like sine, square, or sawtooth with adjustable frequency, amplitude, phase, and offset. It's handy for creating oscillating patterns or controlling animation dynamics. This node emits both continuous floating-point values and integer representations of the generated waves.
152 | """
153 |
154 | @classmethod
155 | def INPUT_TYPES(cls) -> InputType:
156 | d = super().INPUT_TYPES()
157 | d = deep_merge(d, {
158 | "optional": {
159 | Lexicon.WAVE: (EnumWave._member_names_, {
160 | "default": EnumWave.SIN.name}),
161 | Lexicon.FREQ: ("FLOAT", {
162 | "default": 1, "min": 0, "max": sys.float_info.max, "step": 0.01,}),
163 | Lexicon.AMP: ("FLOAT", {
164 | "default": 1, "min": 0, "max": sys.float_info.max, "step": 0.01,}),
165 | Lexicon.PHASE: ("FLOAT", {
166 | "default": 0, "min": 0, "max": 1, "step": 0.01}),
167 | Lexicon.OFFSET: ("FLOAT", {
168 | "default": 0, "min": 0, "max": 1, "step": 0.001}),
169 | Lexicon.TIME: ("FLOAT", {
170 | "default": 0, "min": 0, "max": sys.float_info.max, "step": 0.0001}),
171 | Lexicon.INVERT: ("BOOLEAN", {
172 | "default": False}),
173 | Lexicon.ABSOLUTE: ("BOOLEAN", {
174 | "default": False,}),
175 | }
176 | })
177 | return Lexicon._parse(d)
178 |
179 | def run(self, **kw) -> tuple[float, int]:
180 | op = parse_param(kw, Lexicon.WAVE, EnumWave, EnumWave.SIN.name)
181 | freq = parse_param(kw, Lexicon.FREQ, EnumConvertType.FLOAT, 1, 0)
182 | amp = parse_param(kw, Lexicon.AMP, EnumConvertType.FLOAT, 1, 0)
183 | phase = parse_param(kw, Lexicon.PHASE, EnumConvertType.FLOAT, 0, 0)
184 | shift = parse_param(kw, Lexicon.OFFSET, EnumConvertType.FLOAT, 0, 0)
185 | delta_time = parse_param(kw, Lexicon.TIME, EnumConvertType.FLOAT, 0, 0)
186 | invert = parse_param(kw, Lexicon.INVERT, EnumConvertType.BOOLEAN, False)
187 | absolute = parse_param(kw, Lexicon.ABSOLUTE, EnumConvertType.BOOLEAN, False)
188 | results = []
189 | params = list(zip_longest_fill(op, freq, amp, phase, shift, delta_time, invert, absolute))
190 | pbar = ProgressBar(len(params))
191 | for idx, (op, freq, amp, phase, shift, delta_time, invert, absolute) in enumerate(params):
192 | # freq = 1. / freq
193 | if invert:
194 |                 amp = -amp  # negate amplitude to flip the waveform
195 | val = wave_op(op, phase, freq, amp, shift, delta_time)
196 | if absolute:
197 | val = np.abs(val)
198 | val = max(-sys.float_info.max, min(val, sys.float_info.max))
199 | results.append([val, int(val)])
200 | pbar.update_absolute(idx)
201 | return *list(zip(*results)),
202 |
203 | '''
204 | class TickOldNode(CozyBaseNode):
205 | NAME = "TICK OLD (JOV) ⏱"
206 | CATEGORY = JOV_CATEGORY
207 | RETURN_TYPES = ("INT", "FLOAT", "FLOAT", COZY_TYPE_ANY, COZY_TYPE_ANY,)
208 | RETURN_NAMES = ("VAL", "LINEAR", "FPS", "TRIGGER", "BATCH",)
209 | OUTPUT_IS_LIST = (True, False, False, False, False,)
210 | OUTPUT_TOOLTIPS = (
211 | "Current value for the configured tick as ComfyUI List",
212 | "Normalized tick value (0..1) based on BPM and Loop",
213 | "Current 'frame' in the tick based on FPS setting",
214 | "Based on the BPM settings, on beat hit, output the input at '⚡'",
215 | "Current batch of values for the configured tick as standard list which works in other Jovimetrix nodes",
216 | )
217 | DESCRIPTION = """
218 | A timer and frame counter, emitting pulses or signals based on time intervals. It allows precise synchronization and control over animation sequences, with options to adjust FPS, BPM, and loop points. This node is useful for generating time-based events or driving animations with rhythmic precision.
219 | """
220 |
221 | @classmethod
222 | def INPUT_TYPES(cls) -> InputType:
223 | d = super().INPUT_TYPES()
224 | d = deep_merge(d, {
225 | "optional": {
226 | # data to pass on a pulse of the loop
227 | Lexicon.TRIGGER: (COZY_TYPE_ANY, {
228 | "default": None,
229 | "tooltip": "Output to send when beat (BPM setting) is hit"
230 | }),
231 | # forces a MOD on CYCLE
232 | Lexicon.START: ("INT", {
233 | "default": 0, "min": 0, "max": sys.maxsize,
234 | }),
235 | Lexicon.LOOP: ("INT", {
236 | "default": 0, "min": 0, "max": sys.maxsize,
237 | "tooltip": "Number of frames before looping starts. 0 means continuous playback (no loop point)"
238 | }),
239 | Lexicon.FPS: ("INT", {
240 | "default": 24, "min": 1
241 | }),
242 | Lexicon.BPM: ("INT", {
243 | "default": 120, "min": 1, "max": 60000,
244 | "tooltip": "BPM trigger rate to send the input. If input is empty, TRUE is sent on trigger"
245 | }),
246 | Lexicon.NOTE: ("INT", {
247 | "default": 4, "min": 1, "max": 256,
248 | "tooltip": "Number of beats per measure. Quarter note is 4, Eighth is 8, 16 is 16, etc."}),
249 | # how many frames to dump....
250 | Lexicon.BATCH: ("INT", {
251 | "default": 1, "min": 1, "max": 32767,
252 | "tooltip": "Number of frames wanted"
253 | }),
254 | Lexicon.STEP: ("INT", {
255 | "default": 0, "min": 0, "max": sys.maxsize
256 | }),
257 | }
258 | })
259 | return Lexicon._parse(d)
260 |
261 | def run(self, ident, **kw) -> tuple[int, float, float, Any]:
262 | passthru = parse_param(kw, Lexicon.TRIGGER, EnumConvertType.ANY, None)[0]
263 | stride = parse_param(kw, Lexicon.STEP, EnumConvertType.INT, 0)[0]
264 | loop = parse_param(kw, Lexicon.LOOP, EnumConvertType.INT, 0)[0]
265 | start = parse_param(kw, Lexicon.START, EnumConvertType.INT, self.__frame)[0]
266 | if loop != 0:
267 | self.__frame %= loop
268 | fps = parse_param(kw, Lexicon.FPS, EnumConvertType.INT, 24, 1)[0]
269 | bpm = parse_param(kw, Lexicon.BPM, EnumConvertType.INT, 120, 1)[0]
270 | divisor = parse_param(kw, Lexicon.NOTE, EnumConvertType.INT, 4, 1)[0]
271 | beat = 60. / max(1., bpm) / divisor
272 | batch = parse_param(kw, Lexicon.BATCH, EnumConvertType.INT, 1, 1)[0]
273 | step_fps = 1. / max(1., float(fps))
274 |
275 | trigger = None
276 | results = ResultObject()
277 | pbar = ProgressBar(batch)
278 | step = stride if stride != 0 else max(1, loop / batch)
279 | for idx in range(batch):
280 | trigger = False
281 | lin = start if loop == 0 else start / loop
282 | fixed_step = math.fmod(start * step_fps, fps)
283 | if (math.fmod(fixed_step, beat) == 0):
284 | trigger = [passthru]
285 | if loop != 0:
286 | start %= loop
287 | results.frame.append(start)
288 | results.lin.append(float(lin))
289 | results.fixed.append(float(fixed_step))
290 | results.trigger.append(trigger)
291 | results.batch.append(start)
292 | start += step
293 | pbar.update_absolute(idx)
294 |
295 | return (results.frame, results.lin, results.fixed, results.trigger, results.batch,)
296 |
297 | '''
--------------------------------------------------------------------------------
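
TickNode's pipeline is: generate a raw series (`seriesLinear`), normalize it (`norm_op`), ease the normalized curve (`ease_op`), then scale both results by `SCALAR`. A miniature of the first two stages with made-up stand-ins (the real functions live in cozy_comfyui.maths; pingpong, which reverses direction at the loop point, is omitted here):

```python
import numpy as np

def series_linear_sketch(start: float, step: float, count: int, loop: int = 0) -> list[float]:
    """Emit `count` values beginning at `start`, advancing by `step`,
    wrapping at `loop` when a loop point is set."""
    out, val = [], float(start)
    for _ in range(count):
        out.append(val)
        val += step
        if loop:
            val %= loop  # a loop point of 0 means linear playback
    return out

cycle = series_linear_sketch(0, 1, 8, loop=5)      # [0, 1, 2, 3, 4, 0, 1, 2]
arr = np.array(cycle)
linear = (arr - arr.min()) / (np.ptp(arr) or 1.0)  # MINMAX-style 0..1 normalize
print((linear * 10).tolist())                      # SCALAR_LIN with SCALAR=10
```
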
/core/color.py:
--------------------------------------------------------------------------------
1 | """ Jovimetrix - Color """
2 |
3 | from enum import Enum
4 | from typing import List
5 |
6 | import cv2
7 | import torch
8 |
9 | from comfy.utils import ProgressBar
10 |
11 | from cozy_comfyui import \
12 | IMAGE_SIZE_MIN, \
13 | InputType, RGBAMaskType, EnumConvertType, TensorType, \
14 | deep_merge, parse_param, zip_longest_fill
15 |
16 | from cozy_comfyui.lexicon import \
17 | Lexicon
18 |
19 | from cozy_comfyui.node import \
20 | COZY_TYPE_IMAGE, \
21 | CozyBaseNode, CozyImageNode
22 |
23 | from cozy_comfyui.image.adjust import \
24 | image_invert
25 |
26 | from cozy_comfyui.image.color import \
27 | EnumCBDeficiency, EnumCBSimulator, EnumColorMap, EnumColorTheory, \
28 | color_lut_full, color_lut_match, color_lut_palette, \
29 | color_lut_tonal, color_lut_visualize, color_match_reinhard, \
30 | color_theory, color_blind, color_top_used, image_gradient_expand, \
31 | image_gradient_map
32 |
33 | from cozy_comfyui.image.channel import \
34 | channel_solid
35 |
36 | from cozy_comfyui.image.compose import \
37 | EnumScaleMode, EnumInterpolation, \
38 | image_scalefit
39 |
40 | from cozy_comfyui.image.convert import \
41 | tensor_to_cv, cv_to_tensor, cv_to_tensor_full, image_mask, image_mask_add
42 |
43 | from cozy_comfyui.image.misc import \
44 | image_stack
45 |
46 | # ==============================================================================
47 | # === GLOBAL ===
48 | # ==============================================================================
49 |
50 | JOV_CATEGORY = "COLOR"
51 |
52 | # ==============================================================================
53 | # === ENUMERATION ===
54 | # ==============================================================================
55 |
56 | class EnumColorMatchMode(Enum):
57 | REINHARD = 30
58 | LUT = 10
59 | # HISTOGRAM = 20
60 |
61 | class EnumColorMatchMap(Enum):
62 | USER_MAP = 0
63 | PRESET_MAP = 10
64 |
65 | # ==============================================================================
66 | # === CLASS ===
67 | # ==============================================================================
68 |
69 | class ColorBlindNode(CozyImageNode):
70 | NAME = "COLOR BLIND (JOV) 👁🗨"
71 | CATEGORY = JOV_CATEGORY
72 | DESCRIPTION = """
73 | Simulate color blindness effects on images. You can select various types of color deficiencies, adjust the severity of the effect, and apply the simulation using different simulators. This node is ideal for accessibility testing and design adjustments, ensuring inclusivity in your visual content.
74 | """
75 |
76 | @classmethod
77 | def INPUT_TYPES(cls) -> InputType:
78 | d = super().INPUT_TYPES()
79 | d = deep_merge(d, {
80 | "optional": {
81 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}),
82 | Lexicon.DEFICIENCY: (EnumCBDeficiency._member_names_, {
83 | "default": EnumCBDeficiency.PROTAN.name,}),
84 | Lexicon.SOLVER: (EnumCBSimulator._member_names_, {
85 | "default": EnumCBSimulator.AUTOSELECT.name,})
86 | }
87 | })
88 | return Lexicon._parse(d)
89 |
90 | def run(self, **kw) -> RGBAMaskType:
91 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None)
92 | deficiency = parse_param(kw, Lexicon.DEFICIENCY, EnumCBDeficiency, EnumCBDeficiency.PROTAN.name)
93 | simulator = parse_param(kw, Lexicon.SOLVER, EnumCBSimulator, EnumCBSimulator.AUTOSELECT.name)
94 | severity = parse_param(kw, Lexicon.VALUE, EnumConvertType.FLOAT, 1)
95 | params = list(zip_longest_fill(pA, deficiency, simulator, severity))
96 | images = []
97 | pbar = ProgressBar(len(params))
98 | for idx, (pA, deficiency, simulator, severity) in enumerate(params):
99 | pA = channel_solid() if pA is None else tensor_to_cv(pA)
100 | pA = color_blind(pA, deficiency, simulator, severity)
101 | images.append(cv_to_tensor_full(pA))
102 | pbar.update_absolute(idx)
103 | return image_stack(images)
104 |
105 | class ColorMatchNode(CozyImageNode):
106 | NAME = "COLOR MATCH (JOV) 💞"
107 | CATEGORY = JOV_CATEGORY
108 | DESCRIPTION = """
109 | Adjust the color scheme of one image to match another with the Color Match Node. Choose from various color matching LUTs or Reinhard matching. You can specify a custom user color map, the number of colors, and whether to flip or invert the images.
110 | """
111 |
112 | @classmethod
113 | def INPUT_TYPES(cls) -> InputType:
114 | d = super().INPUT_TYPES()
115 | d = deep_merge(d, {
116 | "optional": {
117 | Lexicon.IMAGE_SOURCE: (COZY_TYPE_IMAGE, {}),
118 | Lexicon.IMAGE_TARGET: (COZY_TYPE_IMAGE, {}),
119 | Lexicon.MODE: (EnumColorMatchMode._member_names_, {
120 | "default": EnumColorMatchMode.REINHARD.name,
121 | "tooltip": "Match colors from an image or built-in (LUT), Histogram lookups or Reinhard method"}),
122 | Lexicon.MAP: (EnumColorMatchMap._member_names_, {
123 | "default": EnumColorMatchMap.USER_MAP.name, }),
124 | Lexicon.COLORMAP: (EnumColorMap._member_names_, {
125 | "default": EnumColorMap.HSV.name,}),
126 | Lexicon.VALUE: ("INT", {
127 | "default": 255, "min": 0, "max": 255,
128 | "tooltip":"The number of colors to use from the LUT during the remap. Will quantize the LUT range."}),
129 | Lexicon.SWAP: ("BOOLEAN", {
130 | "default": False,}),
131 | Lexicon.INVERT: ("BOOLEAN", {
132 | "default": False,}),
133 | Lexicon.MATTE: ("VEC4", {
134 | "default": (0, 0, 0, 255), "rgb": True,}),
135 | }
136 | })
137 | return Lexicon._parse(d)
138 |
139 | def run(self, **kw) -> RGBAMaskType:
140 | pA = parse_param(kw, Lexicon.IMAGE_SOURCE, EnumConvertType.IMAGE, None)
141 | pB = parse_param(kw, Lexicon.IMAGE_TARGET, EnumConvertType.IMAGE, None)
142 | mode = parse_param(kw, Lexicon.MODE, EnumColorMatchMode, EnumColorMatchMode.REINHARD.name)
143 | cmap = parse_param(kw, Lexicon.MAP, EnumColorMatchMap, EnumColorMatchMap.USER_MAP.name)
144 | colormap = parse_param(kw, Lexicon.COLORMAP, EnumColorMap, EnumColorMap.HSV.name)
145 | num_colors = parse_param(kw, Lexicon.VALUE, EnumConvertType.INT, 255)
146 | swap = parse_param(kw, Lexicon.SWAP, EnumConvertType.BOOLEAN, False)
147 | invert = parse_param(kw, Lexicon.INVERT, EnumConvertType.BOOLEAN, False)
148 | matte = parse_param(kw, Lexicon.MATTE, EnumConvertType.VEC4, (0, 0, 0, 255), 0, 255)
149 | params = list(zip_longest_fill(pA, pB, mode, cmap, colormap, num_colors, swap, invert, matte))
150 | images = []
151 | pbar = ProgressBar(len(params))
152 | for idx, (pA, pB, mode, cmap, colormap, num_colors, swap, invert, matte) in enumerate(params):
153 | if swap == True:
154 | pA, pB = pB, pA
155 |
156 | mask = None
157 | if pA is None:
158 | pA = channel_solid()
159 | else:
160 | pA = tensor_to_cv(pA)
161 | if pA.ndim == 3 and pA.shape[2] == 4:
162 | mask = image_mask(pA)
163 |
164 | # h, w = pA.shape[:2]
165 | if pB is None:
166 | pB = channel_solid()
167 | else:
168 | pB = tensor_to_cv(pB)
169 |
170 | match mode:
171 | case EnumColorMatchMode.LUT:
172 | if cmap == EnumColorMatchMap.PRESET_MAP:
173 | pB = None
174 | pA = color_lut_match(pA, colormap.value, pB, num_colors)
175 |
176 | case EnumColorMatchMode.REINHARD:
177 | pA = color_match_reinhard(pA, pB)
178 |
179 | if invert == True:
180 | pA = image_invert(pA, 1)
181 |
182 | if mask is not None:
183 | pA = image_mask_add(pA, mask)
184 |
185 | images.append(cv_to_tensor_full(pA, matte))
186 | pbar.update_absolute(idx)
187 | return image_stack(images)
188 |
189 | class ColorKMeansNode(CozyBaseNode):
190 | NAME = "COLOR MEANS (JOV) 〰️"
191 | CATEGORY = JOV_CATEGORY
192 | RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "JLUT", "IMAGE",)
193 | RETURN_NAMES = ("IMAGE", "PALETTE", "GRADIENT", "LUT", "RGB", )
194 | OUTPUT_TOOLTIPS = (
195 | "Sequence of top-K colors. Count depends on value in `VAL`.",
196 | "Simple Tone palette based on result top-K colors. Width is taken from input.",
197 | "Gradient of top-K colors.",
198 | "Full 3D LUT of the image mapped to the resultant top-K colors chosen.",
199 | "Visualization of full 3D .cube LUT in JLUT output"
200 | )
201 | DESCRIPTION = """
202 | The top-k colors ordered from most->least used as a strip, tonal palette and 3D LUT.
203 | """
204 |
205 | @classmethod
206 | def INPUT_TYPES(cls) -> InputType:
207 | d = super().INPUT_TYPES()
208 | d = deep_merge(d, {
209 | "optional": {
210 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}),
211 | Lexicon.VALUE: ("INT", {
212 | "default": 12, "min": 1, "max": 255,
213 | "tooltip": "The top K colors to select"}),
214 | Lexicon.SIZE: ("INT", {
215 | "default": 32, "min": 1, "max": 256,
216 | "tooltip": "Height of the tones in the strip. Width is based on input"}),
217 | Lexicon.COUNT: ("INT", {
218 | "default": 33, "min": 1, "max": 255,
219 | "tooltip": "Number of nodes to use in interpolation of full LUT (256 is every pixel)"}),
220 | Lexicon.WH: ("VEC2", {
221 | "default": (256, 256), "mij":IMAGE_SIZE_MIN, "int": True,
222 | "label": ["W", "H"]
223 | }),
224 | }
225 | })
226 | return Lexicon._parse(d)
227 |
228 | def run(self, **kw) -> RGBAMaskType:
229 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None)
230 | kcolors = parse_param(kw, Lexicon.VALUE, EnumConvertType.INT, 12, 1, 255)
231 | lut_height = parse_param(kw, Lexicon.SIZE, EnumConvertType.INT, 32, 1, 256)
232 | nodes = parse_param(kw, Lexicon.COUNT, EnumConvertType.INT, 33, 1, 255)
233 | wihi = parse_param(kw, Lexicon.WH, EnumConvertType.VEC2INT, (256, 256), IMAGE_SIZE_MIN)
234 |
235 | params = list(zip_longest_fill(pA, kcolors, nodes, lut_height, wihi))
236 | top_colors = []
237 | lut_tonal = []
238 | lut_full = []
239 | lut_visualized = []
240 | gradients = []
241 | pbar = ProgressBar(len(params) * sum(kcolors))
242 | for idx, (pA, kcolors, nodes, lut_height, wihi) in enumerate(params):
243 | if pA is None:
244 | pA = channel_solid()
245 |
246 | pA = tensor_to_cv(pA)
247 | colors = color_top_used(pA, kcolors)
248 |
249 | # size down to 1px strip then expand to 256 for full gradient
250 | top_colors.extend([cv_to_tensor(channel_solid(*wihi, color=c)) for c in colors])
251 | lut = color_lut_tonal(colors, width=pA.shape[1], height=lut_height)
252 | lut_tonal.append(cv_to_tensor(lut))
253 | full = color_lut_full(colors, nodes)
254 | lut_full.append(torch.from_numpy(full))
255 | lut = color_lut_visualize(full, wihi[1])
256 | lut_visualized.append(cv_to_tensor(lut))
257 | palette = color_lut_palette(colors, 1)
258 | gradient = image_gradient_expand(palette)
259 | gradient = cv2.resize(gradient, wihi)
260 | gradients.append(cv_to_tensor(gradient))
261 | pbar.update_absolute(idx)
262 |
263 | return torch.stack(top_colors), torch.stack(lut_tonal), torch.stack(gradients), lut_full, torch.stack(lut_visualized),
264 |
265 | class ColorTheoryNode(CozyBaseNode):
266 | NAME = "COLOR THEORY (JOV) 🛞"
267 | CATEGORY = JOV_CATEGORY
268 | RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "IMAGE", "IMAGE")
269 | RETURN_NAMES = ("C1", "C2", "C3", "C4", "C5")
270 | DESCRIPTION = """
271 | Generate a color harmony based on the selected scheme.
272 |
273 | Supported schemes include complementary, analogous, triadic, tetradic, and more.
274 |
275 | Users can customize the angle of separation for color calculations, offering flexibility in color manipulation and exploration of different color palettes.
276 | """
277 |
278 | @classmethod
279 | def INPUT_TYPES(cls) -> InputType:
280 | d = super().INPUT_TYPES()
281 | d = deep_merge(d, {
282 | "optional": {
283 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}),
284 | Lexicon.SCHEME: (EnumColorTheory._member_names_, {
285 | "default": EnumColorTheory.COMPLIMENTARY.name}),
286 | Lexicon.VALUE: ("INT", {
287 | "default": 45, "min": -90, "max": 90,
288 | "tooltip": "Custom angle of separation to use when calculating colors"}),
289 | Lexicon.INVERT: ("BOOLEAN", {
290 | "default": False})
291 | }
292 | })
293 | return Lexicon._parse(d)
294 |
295 | def run(self, **kw) -> tuple[List[TensorType], List[TensorType]]:
296 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None)
297 | scheme = parse_param(kw, Lexicon.SCHEME, EnumColorTheory, EnumColorTheory.COMPLIMENTARY.name)
298 | value = parse_param(kw, Lexicon.VALUE, EnumConvertType.INT, 45, -90, 90)
299 | invert = parse_param(kw, Lexicon.INVERT, EnumConvertType.BOOLEAN, False)
300 | params = list(zip_longest_fill(pA, scheme, value, invert))
301 | images = []
302 | pbar = ProgressBar(len(params))
303 | for idx, (img, scheme, value, invert) in enumerate(params):
304 | img = channel_solid() if img is None else tensor_to_cv(img)
305 | img = color_theory(img, value, scheme)
306 | if invert:
307 | img = (image_invert(s, 1) for s in img)
308 | images.append([cv_to_tensor(a) for a in img])
309 | pbar.update_absolute(idx)
310 | return image_stack(images)
311 |
312 | class GradientMapNode(CozyImageNode):
313 | NAME = "GRADIENT MAP (JOV) 🇲🇺"
314 | CATEGORY = JOV_CATEGORY
315 | DESCRIPTION = """
316 | Remaps an input image using a gradient lookup table (LUT).
317 |
318 | The gradient image will be translated into a single row lookup table.
319 | """
320 |
321 | @classmethod
322 | def INPUT_TYPES(cls) -> InputType:
323 | d = super().INPUT_TYPES()
324 | d = deep_merge(d, {
325 | "optional": {
326 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {
327 | "tooltip": "Image to remap with gradient input"}),
328 | Lexicon.GRADIENT: (COZY_TYPE_IMAGE, {
329 |                     "tooltip": "Look up table (LUT) to remap the input image in `IMAGE`"}),
330 | Lexicon.REVERSE: ("BOOLEAN", {
331 | "default": False,
332 | "tooltip": "Reverse the gradient from left-to-right"}),
333 | Lexicon.MODE: (EnumScaleMode._member_names_, {
334 | "default": EnumScaleMode.MATTE.name,}),
335 | Lexicon.WH: ("VEC2", {
336 | "default": (512, 512), "mij":IMAGE_SIZE_MIN, "int": True,
337 | "label": ["W", "H"] }),
338 | Lexicon.SAMPLE: (EnumInterpolation._member_names_, {
339 | "default": EnumInterpolation.LANCZOS4.name,}),
340 | Lexicon.MATTE: ("VEC4", {
341 | "default": (0, 0, 0, 255), "rgb": True,})
342 | }
343 | })
344 | return Lexicon._parse(d)
345 |
346 | def run(self, **kw) -> RGBAMaskType:
347 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None)
348 | gradient = parse_param(kw, Lexicon.GRADIENT, EnumConvertType.IMAGE, None)
349 | reverse = parse_param(kw, Lexicon.REVERSE, EnumConvertType.BOOLEAN, False)
350 | mode = parse_param(kw, Lexicon.MODE, EnumScaleMode, EnumScaleMode.MATTE.name)
351 | wihi = parse_param(kw, Lexicon.WH, EnumConvertType.VEC2INT, (512, 512), IMAGE_SIZE_MIN)
352 | sample = parse_param(kw, Lexicon.SAMPLE, EnumInterpolation, EnumInterpolation.LANCZOS4.name)
353 | matte = parse_param(kw, Lexicon.MATTE, EnumConvertType.VEC4INT, (0, 0, 0, 255), 0, 255)
354 | images = []
355 | params = list(zip_longest_fill(pA, gradient, reverse, mode, sample, wihi, matte))
356 | pbar = ProgressBar(len(params))
357 | for idx, (pA, gradient, reverse, mode, sample, wihi, matte) in enumerate(params):
358 | pA = channel_solid() if pA is None else tensor_to_cv(pA)
359 | mask = None
360 | if pA.ndim == 3 and pA.shape[2] == 4:
361 | mask = image_mask(pA)
362 |
363 | gradient = channel_solid() if gradient is None else tensor_to_cv(gradient)
364 | pA = image_gradient_map(pA, gradient)
365 | if mode != EnumScaleMode.MATTE:
366 | w, h = wihi
367 | pA = image_scalefit(pA, w, h, mode, sample)
368 |
369 | if mask is not None:
370 | pA = image_mask_add(pA, mask)
371 |
372 | images.append(cv_to_tensor_full(pA, matte))
373 | pbar.update_absolute(idx)
374 | return image_stack(images)
375 |
--------------------------------------------------------------------------------
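
ColorMatchNode's REINHARD mode is named for Reinhard et al.'s statistic-matching color transfer: shift the source image's per-channel mean and standard deviation to those of the target. The node itself delegates to cozy_comfyui's `color_match_reinhard`; the sketch below is the textbook recipe (Lab is used here in place of the paper's lαβ space, and alpha handling is omitted):

```python
import cv2
import numpy as np

def reinhard_match_sketch(source: np.ndarray, target: np.ndarray) -> np.ndarray:
    """Match source's per-channel Lab statistics to target's.
    Both inputs are 8-bit BGR images."""
    src = cv2.cvtColor(source, cv2.COLOR_BGR2LAB).astype(np.float32)
    tgt = cv2.cvtColor(target, cv2.COLOR_BGR2LAB).astype(np.float32)
    s_mean, s_std = src.mean(axis=(0, 1)), src.std(axis=(0, 1))
    t_mean, t_std = tgt.mean(axis=(0, 1)), tgt.std(axis=(0, 1))
    # re-center, re-scale, re-offset each channel; guard against flat channels
    out = (src - s_mean) * (t_std / np.maximum(s_std, 1e-6)) + t_mean
    out = np.clip(out, 0, 255).astype(np.uint8)
    return cv2.cvtColor(out, cv2.COLOR_LAB2BGR)
```
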
/core/create.py:
--------------------------------------------------------------------------------
1 | """ Jovimetrix - Creation """
2 |
3 | import numpy as np
4 | from PIL import ImageFont
5 | from skimage.filters import gaussian
6 |
7 | from comfy.utils import ProgressBar
8 |
9 | from cozy_comfyui import \
10 | IMAGE_SIZE_MIN, \
11 | InputType, EnumConvertType, RGBAMaskType, \
12 | deep_merge, parse_param, zip_longest_fill
13 |
14 | from cozy_comfyui.lexicon import \
15 | Lexicon
16 |
17 | from cozy_comfyui.node import \
18 | COZY_TYPE_IMAGE, \
19 | CozyImageNode
20 |
21 | from cozy_comfyui.image import \
22 | EnumImageType
23 |
24 | from cozy_comfyui.image.adjust import \
25 | image_invert
26 |
27 | from cozy_comfyui.image.channel import \
28 | channel_solid
29 |
30 | from cozy_comfyui.image.compose import \
31 | EnumEdge, EnumScaleMode, EnumInterpolation, \
32 | image_rotate, image_scalefit, image_transform, image_translate, image_blend
33 |
34 | from cozy_comfyui.image.convert import \
35 | image_convert, pil_to_cv, cv_to_tensor, cv_to_tensor_full, tensor_to_cv, \
36 | image_mask, image_mask_add, image_mask_binary
37 |
38 | from cozy_comfyui.image.misc import \
39 | image_stack
40 |
41 | from cozy_comfyui.image.shape import \
42 | EnumShapes, \
43 | shape_ellipse, shape_polygon, shape_quad
44 |
45 | from cozy_comfyui.image.text import \
46 | EnumAlignment, EnumJustify, \
47 | font_names, text_autosize, text_draw
48 |
49 | # ==============================================================================
50 | # === GLOBAL ===
51 | # ==============================================================================
52 |
53 | JOV_CATEGORY = "CREATE"
54 |
55 | # ==============================================================================
56 | # === CLASS ===
57 | # ==============================================================================
58 |
59 | class ConstantNode(CozyImageNode):
60 | NAME = "CONSTANT (JOV) 🟪"
61 | CATEGORY = JOV_CATEGORY
62 | DESCRIPTION = """
63 | Generate a constant image or mask of a specified size and color. It can be used to create solid color backgrounds or matte images for compositing with other visual elements. The node allows you to define the desired width and height of the output and specify the RGBA color value for the constant output. Additionally, you can input an optional image to use as a matte with the selected color.
64 | """
65 |
66 | @classmethod
67 | def INPUT_TYPES(cls) -> InputType:
68 | d = super().INPUT_TYPES()
69 | d = deep_merge(d, {
70 | "optional": {
71 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {
72 | "tooltip":"Optional Image to Matte with Selected Color"}),
73 | Lexicon.MASK: (COZY_TYPE_IMAGE, {
74 | "tooltip":"Override Image mask"}),
75 | Lexicon.COLOR: ("VEC4", {
76 | "default": (0, 0, 0, 255), "rgb": True,
77 | "tooltip": "Constant Color to Output"}),
78 | Lexicon.MODE: (EnumScaleMode._member_names_, {
79 | "default": EnumScaleMode.MATTE.name,}),
80 | Lexicon.WH: ("VEC2", {
81 | "default": (512, 512), "mij": 1, "int": True,
82 | "label": ["W", "H"],}),
83 | Lexicon.SAMPLE: (EnumInterpolation._member_names_, {
84 | "default": EnumInterpolation.LANCZOS4.name,})
85 | }
86 | })
87 | return Lexicon._parse(d)
88 |
89 | def run(self, **kw) -> RGBAMaskType:
90 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None)
91 | mask = parse_param(kw, Lexicon.MASK, EnumConvertType.MASK, None)
92 | matte = parse_param(kw, Lexicon.COLOR, EnumConvertType.VEC4INT, (0, 0, 0, 255), 0, 255)
93 | mode = parse_param(kw, Lexicon.MODE, EnumScaleMode, EnumScaleMode.MATTE.name)
94 | wihi = parse_param(kw, Lexicon.WH, EnumConvertType.VEC2INT, (512, 512), 1)
95 | sample = parse_param(kw, Lexicon.SAMPLE, EnumInterpolation, EnumInterpolation.LANCZOS4.name)
96 | images = []
97 | params = list(zip_longest_fill(pA, mask, matte, mode, wihi, sample))
98 | pbar = ProgressBar(len(params))
99 | for idx, (pA, mask, matte, mode, wihi, sample) in enumerate(params):
100 | width, height = wihi
101 | w, h = width, height
102 |
103 | if pA is None:
104 | pA = channel_solid(width, height, (0,0,0,255))
105 | else:
106 | pA = tensor_to_cv(pA)
107 | pA = image_convert(pA, 4)
108 | h, w = pA.shape[:2]
109 |
110 | if mask is None:
111 | mask = image_mask(pA)
112 | else:
113 | mask = tensor_to_cv(mask, chan=1)
114 | mask = image_scalefit(mask, w, h)
115 |
116 | pB = channel_solid(w, h, matte)
117 | pA = image_blend(pB, pA, mask)
118 | pA = image_mask_add(pA, mask)
119 |
120 | if mode != EnumScaleMode.MATTE:
121 | pA = image_scalefit(pA, width, height, mode, sample, matte)
122 | images.append(cv_to_tensor_full(pA, matte))
123 | pbar.update_absolute(idx)
124 | return image_stack(images)
125 |
126 | class ShapeNode(CozyImageNode):
127 | NAME = "SHAPE GEN (JOV) ✨"
128 | CATEGORY = JOV_CATEGORY
129 | DESCRIPTION = """
130 | Generate simple shapes: circles, squares, and n-sided polygons. Shapes can be customized by size, color, position, rotation angle, and edge blur. Options include the shape type, the number of sides for polygons, the RGBA color of the shape and of the background matte, the width and height of the output, the position offset, and the amount of edge blur applied.
131 | """
132 |
133 | @classmethod
134 | def INPUT_TYPES(cls) -> InputType:
135 | d = super().INPUT_TYPES()
136 | d = deep_merge(d, {
137 | "optional": {
138 | Lexicon.SHAPE: (EnumShapes._member_names_, {
139 | "default": EnumShapes.CIRCLE.name}),
140 | Lexicon.SIDES: ("INT", {
141 | "default": 3, "min": 3, "max": 100}),
142 | Lexicon.COLOR: ("VEC4", {
143 | "default": (255, 255, 255, 255), "rgb": True,
144 | "tooltip": "Main Shape Color"}),
145 | Lexicon.MATTE: ("VEC4", {
146 | "default": (0, 0, 0, 255), "rgb": True,}),
147 | Lexicon.WH: ("VEC2", {
148 | "default": (256, 256), "mij":IMAGE_SIZE_MIN, "int": True,
149 | "label": ["W", "H"],}),
150 | Lexicon.XY: ("VEC2", {
151 | "default": (0, 0,), "mij": -1, "maj": 1,
152 | "label": ["X", "Y"]}),
153 | Lexicon.ANGLE: ("FLOAT", {
154 | "default": 0, "min": -180, "max": 180, "step": 0.01,}),
155 | Lexicon.SIZE: ("VEC2", {
156 | "default": (1, 1), "mij": 0, "maj": 1,
157 | "label": ["X", "Y"]}),
158 | Lexicon.EDGE: (EnumEdge._member_names_, {
159 | "default": EnumEdge.CLIP.name}),
160 | Lexicon.BLUR: ("FLOAT", {
161 | "default": 0, "min": 0, "step": 0.01,}),
162 | }
163 | })
164 | return Lexicon._parse(d)
165 |
166 | def run(self, **kw) -> RGBAMaskType:
167 | shape = parse_param(kw, Lexicon.SHAPE, EnumShapes, EnumShapes.CIRCLE.name)
168 | sides = parse_param(kw, Lexicon.SIDES, EnumConvertType.INT, 3, 3)
169 | color = parse_param(kw, Lexicon.COLOR, EnumConvertType.VEC4INT, (255, 255, 255, 255), 0, 255)
170 | matte = parse_param(kw, Lexicon.MATTE, EnumConvertType.VEC4INT, (0, 0, 0, 255), 0, 255)
171 | wihi = parse_param(kw, Lexicon.WH, EnumConvertType.VEC2INT, (256, 256), IMAGE_SIZE_MIN)
172 | offset = parse_param(kw, Lexicon.XY, EnumConvertType.VEC2, (0, 0), -1, 1)
173 | angle = parse_param(kw, Lexicon.ANGLE, EnumConvertType.FLOAT, 0, -180, 180)
174 | size = parse_param(kw, Lexicon.SIZE, EnumConvertType.VEC2, (1, 1), 0, 1, zero=0.001)
175 | edge = parse_param(kw, Lexicon.EDGE, EnumEdge, EnumEdge.CLIP.name)
176 | blur = parse_param(kw, Lexicon.BLUR, EnumConvertType.FLOAT, 0, 0)
177 | params = list(zip_longest_fill(shape, sides, color, matte, wihi, offset, angle, size, edge, blur))
178 | images = []
179 | pbar = ProgressBar(len(params))
180 | for idx, (shape, sides, color, matte, wihi, offset, angle, size, edge, blur) in enumerate(params):
181 | width, height = wihi
182 | sizeX, sizeY = size
183 | fill = color[:3][::-1]
184 |
185 | match shape:
186 | case EnumShapes.SQUARE:
187 | rgb = shape_quad(width, height, sizeX, sizeY, fill)
188 |
189 | case EnumShapes.CIRCLE:
190 | rgb = shape_ellipse(width, height, sizeX, sizeY, fill)
191 |
192 | case EnumShapes.POLYGON:
193 | rgb = shape_polygon(width, height, sizeX, sides, fill)
194 |
195 | rgb = pil_to_cv(rgb)
196 | rgb = image_transform(rgb, offset, angle, edge=edge)
197 | mask = image_mask_binary(rgb)
198 |
199 | if blur > 0:
200 | # @TODO: Do blur on larger canvas to remove wrap bleed.
201 | rgb = (gaussian(rgb, sigma=blur, channel_axis=2) * 255).astype(np.uint8)
202 | mask = (gaussian(mask, sigma=blur, channel_axis=2) * 255).astype(np.uint8)
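# One possible fix for the wrap-bleed @TODO above (untested sketch): pad with
# edge pixels so the Gaussian kernel never samples across the border, then crop:
#   pad = int(blur * 3) + 1   # ~3 sigma covers most of the kernel support
#   big = np.pad(rgb, ((pad, pad), (pad, pad), (0, 0)), mode='edge')
#   big = (gaussian(big, sigma=blur, channel_axis=2) * 255).astype(np.uint8)
#   rgb = big[pad:-pad, pad:-pad]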
203 |
204 | back = list(matte[:3]) + [255]
205 | canvas = np.full((height, width, 4), back, dtype=rgb.dtype)
206 | rgba = image_blend(canvas, rgb, mask)
207 | rgba = image_mask_add(rgba, mask)
208 | rgb = image_convert(rgba, 3)
209 |
210 | images.append([cv_to_tensor(rgba), cv_to_tensor(rgb), cv_to_tensor(mask, True)])
211 | pbar.update_absolute(idx)
212 | return image_stack(images)
213 |
214 | class TextNode(CozyImageNode):
215 | NAME = "TEXT GEN (JOV) 📝"
216 | CATEGORY = JOV_CATEGORY
217 | FONTS = font_names()
218 | FONT_NAMES = sorted(FONTS.keys())
219 | DESCRIPTION = """
220 | Generates images containing text based on parameters such as font, size, alignment, color, and position. Users can input custom text messages, select from the available fonts, adjust font size, and specify the alignment and justification of the text. Additional options auto-size the text to fit the specified dimensions, render it letter by letter, control edge behavior, and invert the output.
221 | """
222 |
223 | @classmethod
224 | def INPUT_TYPES(cls) -> InputType:
225 | d = super().INPUT_TYPES()
226 | d = deep_merge(d, {
227 | "optional": {
228 | Lexicon.STRING: ("STRING", {
229 | "default": "jovimetrix", "multiline": True,
230 | "dynamicPrompts": False,
231 | "tooltip": "Your Message"}),
232 | Lexicon.FONT: (cls.FONT_NAMES, {
233 | "default": cls.FONT_NAMES[0]}),
234 | Lexicon.LETTER: ("BOOLEAN", {
235 | "default": False,}),
236 | Lexicon.AUTOSIZE: ("BOOLEAN", {
237 | "default": False,
238 | "tooltip": "Scale based on Width & Height"}),
239 | Lexicon.COLOR: ("VEC4", {
240 | "default": (255, 255, 255, 255), "rgb": True,
241 | "tooltip": "Color of the letters"}),
242 | Lexicon.MATTE: ("VEC4", {
243 | "default": (0, 0, 0, 255), "rgb": True,}),
244 | Lexicon.COLUMNS: ("INT", {
245 | "default": 0, "min": 0}),
246 | # if auto on, hide these...
247 | Lexicon.SIZE: ("INT", {
248 | "default": 16, "min": 8}),
249 | Lexicon.ALIGN: (EnumAlignment._member_names_, {
250 | "default": EnumAlignment.CENTER.name,}),
251 | Lexicon.JUSTIFY: (EnumJustify._member_names_, {
252 | "default": EnumJustify.CENTER.name,}),
253 | Lexicon.MARGIN: ("INT", {
254 | "default": 0, "min": -1024, "max": 1024,}),
255 | Lexicon.SPACING: ("INT", {
256 | "default": 0, "min": -1024, "max": 1024}),
257 | Lexicon.WH: ("VEC2", {
258 | "default": (256, 256), "mij":IMAGE_SIZE_MIN, "int": True,
259 | "label": ["W", "H"],}),
260 | Lexicon.XY: ("VEC2", {
261 | "default": (0, 0,), "mij": -1, "maj": 1,
262 | "label": ["X", "Y"],
263 | "tooltip":"Offset the position"}),
264 | Lexicon.ANGLE: ("FLOAT", {
265 | "default": 0, "step": 0.01,}),
266 | Lexicon.EDGE: (EnumEdge._member_names_, {
267 | "default": EnumEdge.CLIP.name}),
268 | Lexicon.INVERT: ("BOOLEAN", {
269 | "default": False,
270 | "tooltip": "Invert the mask input"})
271 | }
272 | })
273 | return Lexicon._parse(d)
274 |
275 | def run(self, **kw) -> RGBAMaskType:
276 | full_text = parse_param(kw, Lexicon.STRING, EnumConvertType.STRING, "jovimetrix")
277 | font_idx = parse_param(kw, Lexicon.FONT, EnumConvertType.STRING, self.FONT_NAMES[0])
278 | autosize = parse_param(kw, Lexicon.AUTOSIZE, EnumConvertType.BOOLEAN, False)
279 | letter = parse_param(kw, Lexicon.LETTER, EnumConvertType.BOOLEAN, False)
280 | color = parse_param(kw, Lexicon.COLOR, EnumConvertType.VEC4INT, (255,255,255,255))
281 | matte = parse_param(kw, Lexicon.MATTE, EnumConvertType.VEC4INT, (0,0,0,255))
282 | columns = parse_param(kw, Lexicon.COLUMNS, EnumConvertType.INT, 0)
283 | font_size = parse_param(kw, Lexicon.SIZE, EnumConvertType.INT, 16, 8)
284 | align = parse_param(kw, Lexicon.ALIGN, EnumAlignment, EnumAlignment.CENTER.name)
285 | justify = parse_param(kw, Lexicon.JUSTIFY, EnumJustify, EnumJustify.CENTER.name)
286 | margin = parse_param(kw, Lexicon.MARGIN, EnumConvertType.INT, 0)
287 | line_spacing = parse_param(kw, Lexicon.SPACING, EnumConvertType.INT, 0)
288 | wihi = parse_param(kw, Lexicon.WH, EnumConvertType.VEC2INT, (256, 256), IMAGE_SIZE_MIN)
289 | pos = parse_param(kw, Lexicon.XY, EnumConvertType.VEC2, (0, 0))
290 | angle = parse_param(kw, Lexicon.ANGLE, EnumConvertType.FLOAT, 0)
291 | edge = parse_param(kw, Lexicon.EDGE, EnumEdge, EnumEdge.CLIP.name)
292 | invert = parse_param(kw, Lexicon.INVERT, EnumConvertType.BOOLEAN, False)
293 | images = []
294 | params = list(zip_longest_fill(full_text, font_idx, autosize, letter, color,
295 | matte, columns, font_size, align, justify, margin,
296 | line_spacing, wihi, pos, angle, edge, invert))
297 |
298 | pbar = ProgressBar(len(params))
299 | for idx, (full_text, font_idx, autosize, letter, color, matte, columns,
300 | font_size, align, justify, margin, line_spacing, wihi, pos,
301 | angle, edge, invert) in enumerate(params):
302 |
303 | width, height = wihi
304 | font_name = self.FONTS[font_idx]
305 | full_text = str(full_text)
306 |
307 | if letter:
308 | full_text = full_text.replace('\n', '')
309 | if autosize:
310 | _, font_size = text_autosize(full_text[0].upper(), font_name, width, height)[:2]
311 | margin = 0
312 | line_spacing = 0
313 | else:
314 | if autosize:
315 | wm = width - margin * 2
316 | hm = height - margin * 2 - line_spacing
317 | columns = 0 if columns == 0 else columns * 2 + 2
318 | full_text, font_size = text_autosize(full_text, font_name, wm, hm, columns)[:2]
319 | full_text = [full_text]
320 | font_size *= 2.5
321 |
322 | font = ImageFont.truetype(font_name, int(font_size))
323 | for ch in full_text:
324 | img = text_draw(ch, font, width, height, align, justify, margin, line_spacing, color)
325 | img = image_rotate(img, angle, edge=edge)
326 | img = image_translate(img, pos, edge=edge)
327 | if invert:
328 | img = image_invert(img, 1)
329 | images.append(cv_to_tensor_full(img, matte))
330 | pbar.update_absolute(idx)
331 | return image_stack(images)
332 |
--------------------------------------------------------------------------------
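Every node in this file batches its inputs the same way: each parameter is parsed into a list, the lists are combined with `zip_longest_fill` from `cozy_comfyui`, and one image is produced per combined row. The library's implementation is not shown here; a minimal sketch of its assumed behavior, padding each shorter sequence with its own last value rather than `None`, would look like this:

```python
def zip_longest_fill(*iterables):
    # Assumed semantics (sketch): like itertools.zip_longest, except each
    # shorter sequence repeats its final element instead of yielding None.
    lists = [list(it) for it in iterables]
    longest = max((len(l) for l in lists), default=0)
    for l in lists:
        l.extend([l[-1] if l else None] * (longest - len(l)))
    return zip(*lists)

# Three colors but one size -> three rows, with the size reused for each:
colors = [(255, 0, 0, 255), (0, 255, 0, 255), (0, 0, 255, 255)]
sizes = [(512, 512)]
for color, wihi in zip_longest_fill(colors, sizes):
    print(color, wihi)
# (255, 0, 0, 255) (512, 512)
# (0, 255, 0, 255) (512, 512)
# (0, 0, 255, 255) (512, 512)
```

This is why a node given three colors and one size emits three images rather than one.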
/core/trans.py:
--------------------------------------------------------------------------------
1 | """ Jovimetrix - Transform """
2 |
3 |
4 | import sys
5 | from enum import Enum
6 |
7 | from comfy.utils import ProgressBar
8 |
9 | from cozy_comfyui import \
10 | logger, \
11 | IMAGE_SIZE_MIN, \
12 | InputType, RGBAMaskType, EnumConvertType, \
13 | deep_merge, parse_param, parse_dynamic, zip_longest_fill
14 |
15 | from cozy_comfyui.lexicon import \
16 | Lexicon
17 |
18 | from cozy_comfyui.node import \
19 | COZY_TYPE_IMAGE, \
20 | CozyImageNode, CozyBaseNode
21 |
22 | from cozy_comfyui.image.channel import \
23 | channel_solid
24 |
25 | from cozy_comfyui.image.convert import \
26 | tensor_to_cv, cv_to_tensor_full, cv_to_tensor, image_mask, image_mask_add
27 |
28 | from cozy_comfyui.image.compose import \
29 | EnumOrientation, EnumEdge, EnumMirrorMode, EnumScaleMode, EnumInterpolation, \
30 | image_edge_wrap, image_mirror, image_scalefit, image_transform, \
31 | image_crop, image_crop_center, image_crop_polygonal, image_stacker, \
32 | image_flatten
33 |
34 | from cozy_comfyui.image.misc import \
35 | image_stack
36 |
37 | from cozy_comfyui.image.mapping import \
38 | EnumProjection, \
39 | remap_fisheye, remap_perspective, remap_polar, remap_sphere
40 |
41 | # ==============================================================================
42 | # === GLOBAL ===
43 | # ==============================================================================
44 |
45 | JOV_CATEGORY = "TRANSFORM"
46 |
47 | # ==============================================================================
48 | # === ENUMERATION ===
49 | # ==============================================================================
50 |
51 | class EnumCropMode(Enum):
52 | CENTER = 20
53 | XY = 0
54 | FREE = 10
55 |
56 | # ==============================================================================
57 | # === CLASS ===
58 | # ==============================================================================
59 |
60 | class CropNode(CozyImageNode):
61 | NAME = "CROP (JOV) ✂️"
62 | CATEGORY = JOV_CATEGORY
63 | DESCRIPTION = """
64 | Extract a portion of an input image or resize it. It supports various cropping modes, including center cropping, custom XY cropping, and free-form polygonal cropping. This node is useful for preparing image data for specific tasks or extracting regions of interest.
65 | """
66 |
67 | @classmethod
68 | def INPUT_TYPES(cls) -> InputType:
69 | d = super().INPUT_TYPES()
70 | d = deep_merge(d, {
71 | "optional": {
72 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}),
73 | Lexicon.FUNCTION: (EnumCropMode._member_names_, {
74 | "default": EnumCropMode.CENTER.name}),
75 | Lexicon.XY: ("VEC2", {
76 | "default": (0, 0), "mij": 0, "maj": 1,
77 | "label": ["X", "Y"]}),
78 | Lexicon.WH: ("VEC2", {
79 | "default": (512, 512), "mij": IMAGE_SIZE_MIN, "int": True,
80 | "label": ["W", "H"]}),
81 | Lexicon.TLTR: ("VEC4", {
82 | "default": (0, 0, 0, 1), "mij": 0, "maj": 1,
83 | "label": ["TOP", "LEFT", "TOP", "RIGHT"],}),
84 | Lexicon.BLBR: ("VEC4", {
85 | "default": (1, 0, 1, 1), "mij": 0, "maj": 1,
86 | "label": ["BOTTOM", "LEFT", "BOTTOM", "RIGHT"],}),
87 | Lexicon.MATTE: ("VEC4", {
88 | "default": (0, 0, 0, 255), "rgb": True,})
89 | }
90 | })
91 | return Lexicon._parse(d)
92 |
93 | def run(self, **kw) -> RGBAMaskType:
94 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None)
95 | func = parse_param(kw, Lexicon.FUNCTION, EnumCropMode, EnumCropMode.CENTER.name)
96 | # if less than 1 then use as scalar, over 1 = int(size)
97 | xy = parse_param(kw, Lexicon.XY, EnumConvertType.VEC2, (0, 0,))
98 | wihi = parse_param(kw, Lexicon.WH, EnumConvertType.VEC2INT, (512, 512), IMAGE_SIZE_MIN)
99 | tltr = parse_param(kw, Lexicon.TLTR, EnumConvertType.VEC4, (0, 0, 0, 1,))
100 | blbr = parse_param(kw, Lexicon.BLBR, EnumConvertType.VEC4, (1, 0, 1, 1,))
101 | matte = parse_param(kw, Lexicon.MATTE, EnumConvertType.VEC4INT, (0, 0, 0, 255), 0, 255)
102 | params = list(zip_longest_fill(pA, func, xy, wihi, tltr, blbr, matte))
103 | images = []
104 | pbar = ProgressBar(len(params))
105 | for idx, (pA, func, xy, wihi, tltr, blbr, matte) in enumerate(params):
106 | width, height = wihi
107 | pA = tensor_to_cv(pA) if pA is not None else channel_solid(width, height)
108 | alpha = None
109 | if pA.ndim == 3 and pA.shape[2] == 4:
110 | alpha = image_mask(pA)
111 |
112 | if func == EnumCropMode.FREE:
113 | x1, y1, x2, y2 = tltr
114 | x4, y4, x3, y3 = blbr
115 | points = (x1 * width, y1 * height), (x2 * width, y2 * height), \
116 | (x3 * width, y3 * height), (x4 * width, y4 * height)
117 | pA = image_crop_polygonal(pA, points)
118 | if alpha is not None:
119 | alpha = image_crop_polygonal(alpha, points)
120 | pA[..., 3] = alpha[..., 0]
121 | elif func == EnumCropMode.XY:
122 | pA = image_crop(pA, width, height, xy)
123 | else:
124 | pA = image_crop_center(pA, width, height)
125 | images.append(cv_to_tensor_full(pA, matte))
126 | pbar.update_absolute(idx)
127 | return image_stack(images)
128 |
129 | class FlattenNode(CozyImageNode):
130 | NAME = "FLATTEN (JOV) ⬇️"
131 | CATEGORY = JOV_CATEGORY
132 | DESCRIPTION = """
133 | Combine multiple input images into a single image by summing their pixel values. This operation is useful for merging multiple layers or images into one composite image, such as combining different elements of a design or merging masks. Users can specify the blending mode and interpolation method to control how the images are combined. Additionally, a matte can be applied to adjust the transparency of the final composite image.
134 | """
135 |
136 | @classmethod
137 | def INPUT_TYPES(cls) -> InputType:
138 | d = super().INPUT_TYPES()
139 | d = deep_merge(d, {
140 | "optional": {
141 | Lexicon.MODE: (EnumScaleMode._member_names_, {
142 | "default": EnumScaleMode.MATTE.name,}),
143 | Lexicon.WH: ("VEC2", {
144 | "default": (512, 512), "mij":1, "int": True,
145 | "label": ["W", "H"]}),
146 | Lexicon.SAMPLE: (EnumInterpolation._member_names_, {
147 | "default": EnumInterpolation.LANCZOS4.name,}),
148 | Lexicon.MATTE: ("VEC4", {
149 | "default": (0, 0, 0, 255), "rgb": True,}),
150 | Lexicon.OFFSET: ("VEC2", {
151 | "default": (0, 0), "mij":0, "int": True,
152 | "label": ["X", "Y"]}),
153 | }
154 | })
155 | return Lexicon._parse(d)
156 |
157 | def run(self, **kw) -> RGBAMaskType:
158 | imgs = parse_dynamic(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None)
159 | if imgs is None or len(imgs) == 0:
160 | logger.warning("no images to flatten")
161 | return ()
162 |
163 | # be less dumb when merging
164 | pA = [tensor_to_cv(i) for i in imgs]
165 | mode = parse_param(kw, Lexicon.MODE, EnumScaleMode, EnumScaleMode.MATTE.name)[0]
166 | wihi = parse_param(kw, Lexicon.WH, EnumConvertType.VEC2INT, (512, 512), 1)[0]
167 | sample = parse_param(kw, Lexicon.SAMPLE, EnumInterpolation, EnumInterpolation.LANCZOS4.name)[0]
168 | matte = parse_param(kw, Lexicon.MATTE, EnumConvertType.VEC4INT, (0, 0, 0, 255), 0, 255)[0]
169 | offset = parse_param(kw, Lexicon.OFFSET, EnumConvertType.VEC2INT, (0, 0), 0)[0]
170 | w, h = wihi
171 | x, y = offset
172 | current = image_flatten(pA, x, y, w, h, mode=mode, sample=sample)
173 | images = []
174 | images.append(cv_to_tensor_full(current, matte))
175 | return image_stack(images)
176 |
177 | class SplitNode(CozyBaseNode):
178 | NAME = "SPLIT (JOV) 🎭"
179 | CATEGORY = JOV_CATEGORY
180 | RETURN_TYPES = ("IMAGE", "IMAGE",)
181 | RETURN_NAMES = ("IMAGEA", "IMAGEB",)
182 | OUTPUT_TOOLTIPS = (
183 | "Left/Top image",
184 | "Right/Bottom image"
185 | )
186 | DESCRIPTION = """
187 | Split an image into two images along a horizontal or vertical cut, positioned as a percentage of the image's height or width.
188 | """
189 |
190 | @classmethod
191 | def INPUT_TYPES(cls) -> InputType:
192 | d = super().INPUT_TYPES()
193 | d = deep_merge(d, {
194 | "optional": {
195 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}),
196 | Lexicon.VALUE: ("FLOAT", {
197 | "default": 0.5, "min": 0, "max": 1, "step": 0.001
198 | }),
199 | Lexicon.FLIP: ("BOOLEAN", {
200 | "default": False,
201 | "tooltip": "Horizontal split (False) or Vertical split (True)"
202 | }),
203 | Lexicon.MODE: (EnumScaleMode._member_names_, {
204 | "default": EnumScaleMode.MATTE.name,}),
205 | Lexicon.WH: ("VEC2", {
206 | "default": (512, 512), "mij":IMAGE_SIZE_MIN, "int": True,
207 | "label": ["W", "H"]}),
208 | Lexicon.SAMPLE: (EnumInterpolation._member_names_, {
209 | "default": EnumInterpolation.LANCZOS4.name,}),
210 | Lexicon.MATTE: ("VEC4", {
211 | "default": (0, 0, 0, 255), "rgb": True,})
212 | }
213 | })
214 | return Lexicon._parse(d)
215 |
216 | def run(self, **kw) -> RGBAMaskType:
217 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None)
218 | percent = parse_param(kw, Lexicon.VALUE, EnumConvertType.FLOAT, 0.5, 0, 1)
219 | flip = parse_param(kw, Lexicon.FLIP, EnumConvertType.BOOLEAN, False)
220 | mode = parse_param(kw, Lexicon.MODE, EnumScaleMode, EnumScaleMode.MATTE.name)
221 | wihi = parse_param(kw, Lexicon.WH, EnumConvertType.VEC2INT, (512, 512), IMAGE_SIZE_MIN)
222 | sample = parse_param(kw, Lexicon.SAMPLE, EnumInterpolation, EnumInterpolation.LANCZOS4.name)
223 | matte = parse_param(kw, Lexicon.MATTE, EnumConvertType.VEC4INT, (0, 0, 0, 255), 0, 255)
224 | params = list(zip_longest_fill(pA, percent, flip, mode, wihi, sample, matte))
225 | images = []
226 | pbar = ProgressBar(len(params))
227 | for idx, (pA, percent, flip, mode, wihi, sample, matte) in enumerate(params):
228 | w, h = wihi
229 | pA = channel_solid(w, h, matte) if pA is None else tensor_to_cv(pA)
230 |
231 | if flip:
232 | size = pA.shape[1]
233 | percent = max(1, min(size-1, int(size * percent)))
234 | image_a = pA[:, :percent]
235 | image_b = pA[:, percent:]
236 | else:
237 | size = pA.shape[0]
238 | percent = max(1, min(size-1, int(size * percent)))
239 | image_a = pA[:percent, :]
240 | image_b = pA[percent:, :]
241 |
242 | if mode != EnumScaleMode.MATTE:
243 | image_a = image_scalefit(image_a, w, h, mode, sample)
244 | image_b = image_scalefit(image_b, w, h, mode, sample)
245 |
246 | images.append([cv_to_tensor(img) for img in [image_a, image_b]])
247 | pbar.update_absolute(idx)
248 | return image_stack(images)
249 |
250 | class StackNode(CozyImageNode):
251 | NAME = "STACK (JOV) ➕"
252 | CATEGORY = JOV_CATEGORY
253 | DESCRIPTION = """
254 | Merge multiple input images into a single composite image by stacking them along a specified axis.
255 |
256 | Options include axis, stride, scaling mode, width and height, interpolation method, and matte color.
257 |
258 | The axis parameter allows for horizontal, vertical, or grid stacking of images, while stride controls the spacing between them.
259 | """
260 |
261 | @classmethod
262 | def INPUT_TYPES(cls) -> InputType:
263 | d = super().INPUT_TYPES()
264 | d = deep_merge(d, {
265 | "optional": {
266 | Lexicon.AXIS: (EnumOrientation._member_names_, {
267 | "default": EnumOrientation.GRID.name,}),
268 | Lexicon.STEP: ("INT", {
269 | "default": 1, "min": 0,
270 | "tooltip":"How many images are placed before a new row starts (stride)"}),
271 | Lexicon.MODE: (EnumScaleMode._member_names_, {
272 | "default": EnumScaleMode.MATTE.name,}),
273 | Lexicon.WH: ("VEC2", {
274 | "default": (512, 512), "mij": IMAGE_SIZE_MIN, "int": True,
275 | "label": ["W", "H"]}),
276 | Lexicon.SAMPLE: (EnumInterpolation._member_names_, {
277 | "default": EnumInterpolation.LANCZOS4.name,}),
278 | Lexicon.MATTE: ("VEC4", {
279 | "default": (0, 0, 0, 255), "rgb": True,})
280 | }
281 | })
282 | return Lexicon._parse(d)
283 |
284 | def run(self, **kw) -> RGBAMaskType:
285 | images = parse_dynamic(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None)
286 | if images is None or len(images) == 0:
287 | logger.warning("no images to stack")
288 | return ()
289 |
290 | images = [tensor_to_cv(i) for i in images]
291 | axis = parse_param(kw, Lexicon.AXIS, EnumOrientation, EnumOrientation.GRID.name)[0]
292 | stride = parse_param(kw, Lexicon.STEP, EnumConvertType.INT, 1, 0)[0]
293 | mode = parse_param(kw, Lexicon.MODE, EnumScaleMode, EnumScaleMode.MATTE.name)[0]
294 | wihi = parse_param(kw, Lexicon.WH, EnumConvertType.VEC2INT, (512, 512), IMAGE_SIZE_MIN)[0]
295 | sample = parse_param(kw, Lexicon.SAMPLE, EnumInterpolation, EnumInterpolation.LANCZOS4.name)[0]
296 | matte = parse_param(kw, Lexicon.MATTE, EnumConvertType.VEC4INT, (0, 0, 0, 255), 0, 255)[0]
297 | img = image_stacker(images, axis, stride) #, matte)
298 | if mode != EnumScaleMode.MATTE:
299 | w, h = wihi
300 | img = image_scalefit(img, w, h, mode, sample)
301 | rgba, rgb, mask = cv_to_tensor_full(img, matte)
302 | return rgba.unsqueeze(0), rgb.unsqueeze(0), mask.unsqueeze(0)
303 |
304 | class TransformNode(CozyImageNode):
305 | NAME = "TRANSFORM (JOV) 🏝️"
306 | CATEGORY = JOV_CATEGORY
307 | DESCRIPTION = """
308 | Apply various geometric transformations to images, including translation, rotation, scaling, mirroring, tiling, and perspective projection. It offers extensive control over image manipulation to achieve the desired visual effects.
309 | """
310 |
311 | @classmethod
312 | def INPUT_TYPES(cls) -> InputType:
313 | d = super().INPUT_TYPES(prompt=True, dynprompt=True)
314 | d = deep_merge(d, {
315 | "optional": {
316 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}),
317 | Lexicon.MASK: (COZY_TYPE_IMAGE, {
318 | "tooltip": "Override Image mask"}),
319 | Lexicon.XY: ("VEC2", {
320 | "default": (0, 0,), "mij": -1, "maj": 1,
321 | "label": ["X", "Y"]}),
322 | Lexicon.ANGLE: ("FLOAT", {
323 | "default": 0, "min": -sys.float_info.max, "max": sys.float_info.max, "step": 0.1,}),
324 | Lexicon.SIZE: ("VEC2", {
325 | "default": (1, 1), "mij": 0.001,
326 | "label": ["X", "Y"]}),
327 | Lexicon.TILE: ("VEC2", {
328 | "default": (1, 1), "mij": 1,
329 | "label": ["X", "Y"]}),
330 | Lexicon.EDGE: (EnumEdge._member_names_, {
331 | "default": EnumEdge.CLIP.name}),
332 | Lexicon.MIRROR: (EnumMirrorMode._member_names_, {
333 | "default": EnumMirrorMode.NONE.name}),
334 | Lexicon.PIVOT: ("VEC2", {
335 | "default": (0.5, 0.5), "mij": 0, "maj": 1, "step": 0.01,
336 | "label": ["X", "Y"]}),
337 | Lexicon.PROJECTION: (EnumProjection._member_names_, {
338 | "default": EnumProjection.NORMAL.name}),
339 | Lexicon.TLTR: ("VEC4", {
340 | "default": (0, 0, 1, 0), "mij": 0, "maj": 1, "step": 0.005,
341 | "label": ["TOP", "LEFT", "TOP", "RIGHT"],}),
342 | Lexicon.BLBR: ("VEC4", {
343 | "default": (0, 1, 1, 1), "mij": 0, "maj": 1, "step": 0.005,
344 | "label": ["BOTTOM", "LEFT", "BOTTOM", "RIGHT"],}),
345 | Lexicon.STRENGTH: ("FLOAT", {
346 | "default": 1, "min": 0, "max": 1, "step": 0.005}),
347 | Lexicon.MODE: (EnumScaleMode._member_names_, {
348 | "default": EnumScaleMode.MATTE.name,}),
349 | Lexicon.WH: ("VEC2", {
350 | "default": (512, 512), "mij": IMAGE_SIZE_MIN, "int": True,
351 | "label": ["W", "H"]}),
352 | Lexicon.SAMPLE: (EnumInterpolation._member_names_, {
353 | "default": EnumInterpolation.LANCZOS4.name,}),
354 | Lexicon.MATTE: ("VEC4", {
355 | "default": (0, 0, 0, 255), "rgb": True,})
356 | }
357 | })
358 | return Lexicon._parse(d)
359 |
360 | def run(self, **kw) -> RGBAMaskType:
361 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None)
362 | mask = parse_param(kw, Lexicon.MASK, EnumConvertType.IMAGE, None)
363 | offset = parse_param(kw, Lexicon.XY, EnumConvertType.VEC2, (0, 0), -1, 1)
364 | angle = parse_param(kw, Lexicon.ANGLE, EnumConvertType.FLOAT, 0)
365 | size = parse_param(kw, Lexicon.SIZE, EnumConvertType.VEC2, (1, 1), 0.001)
366 | edge = parse_param(kw, Lexicon.EDGE, EnumEdge, EnumEdge.CLIP.name)
367 | mirror = parse_param(kw, Lexicon.MIRROR, EnumMirrorMode, EnumMirrorMode.NONE.name)
368 | mirror_pivot = parse_param(kw, Lexicon.PIVOT, EnumConvertType.VEC2, (0.5, 0.5), 0, 1)
369 | tile_xy = parse_param(kw, Lexicon.TILE, EnumConvertType.VEC2, (1, 1), 1)
370 | proj = parse_param(kw, Lexicon.PROJECTION, EnumProjection, EnumProjection.NORMAL.name)
371 | tltr = parse_param(kw, Lexicon.TLTR, EnumConvertType.VEC4, (0, 0, 1, 0), 0, 1)
372 | blbr = parse_param(kw, Lexicon.BLBR, EnumConvertType.VEC4, (0, 1, 1, 1), 0, 1)
373 | strength = parse_param(kw, Lexicon.STRENGTH, EnumConvertType.FLOAT, 1, 0, 1)
374 | mode = parse_param(kw, Lexicon.MODE, EnumScaleMode, EnumScaleMode.MATTE.name)
375 | wihi = parse_param(kw, Lexicon.WH, EnumConvertType.VEC2INT, (512, 512), IMAGE_SIZE_MIN)
376 | sample = parse_param(kw, Lexicon.SAMPLE, EnumInterpolation, EnumInterpolation.LANCZOS4.name)
377 | matte = parse_param(kw, Lexicon.MATTE, EnumConvertType.VEC4INT, (0, 0, 0, 255), 0, 255)
378 | params = list(zip_longest_fill(pA, mask, offset, angle, size, edge, tile_xy, mirror, mirror_pivot, proj, strength, tltr, blbr, mode, wihi, sample, matte))
379 | images = []
380 | pbar = ProgressBar(len(params))
381 | for idx, (pA, mask, offset, angle, size, edge, tile_xy, mirror, mirror_pivot, proj, strength, tltr, blbr, mode, wihi, sample, matte) in enumerate(params):
382 | pA = tensor_to_cv(pA) if pA is not None else channel_solid()
383 | if mask is not None:
384 | mask = tensor_to_cv(mask)
385 | pA = image_mask_add(pA, mask)
386 |
387 | h, w = pA.shape[:2]
388 | pA = image_transform(pA, offset, angle, size, sample, edge)
389 | pA = image_crop_center(pA, w, h)
390 |
391 | if mirror != EnumMirrorMode.NONE:
392 | mpx, mpy = mirror_pivot
393 | pA = image_mirror(pA, mirror, mpx, mpy)
394 | pA = image_scalefit(pA, w, h, EnumScaleMode.FIT, sample)
395 |
396 | tx, ty = tile_xy
397 | if tx != 1. or ty != 1.:
398 | pA = image_edge_wrap(pA, tx / 2 - 0.5, ty / 2 - 0.5)
399 | pA = image_scalefit(pA, w, h, EnumScaleMode.FIT, sample)
400 |
401 | match proj:
402 | case EnumProjection.PERSPECTIVE:
403 | x1, y1, x2, y2 = tltr
404 | x4, y4, x3, y3 = blbr
405 | sh, sw = pA.shape[:2]
406 | x1, x2, x3, x4 = map(lambda x: x * sw, [x1, x2, x3, x4])
407 | y1, y2, y3, y4 = map(lambda y: y * sh, [y1, y2, y3, y4])
408 | pA = remap_perspective(pA, [[x1, y1], [x2, y2], [x3, y3], [x4, y4]])
409 | case EnumProjection.SPHERICAL:
410 | pA = remap_sphere(pA, strength)
411 | case EnumProjection.FISHEYE:
412 | pA = remap_fisheye(pA, strength)
413 | case EnumProjection.POLAR:
414 | pA = remap_polar(pA)
415 |
416 | if proj != EnumProjection.NORMAL:
417 | pA = image_scalefit(pA, w, h, EnumScaleMode.FIT, sample)
418 |
419 | if mode != EnumScaleMode.MATTE:
420 | w, h = wihi
421 | pA = image_scalefit(pA, w, h, mode, sample)
422 |
423 | images.append(cv_to_tensor_full(pA, matte))
424 | pbar.update_absolute(idx)
425 | return image_stack(images)
426 |
--------------------------------------------------------------------------------
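A convention worth noting in this file: CROP's FREE mode and TRANSFORM's perspective projection unpack their corner vectors identically. `TLTR` carries the top-left and top-right corners as `(x1, y1, x2, y2)` and `BLBR` the bottom-left and bottom-right as `(x4, y4, x3, y3)`, all in normalized [0, 1] coordinates, yielding a clockwise quad 1→2→3→4. A small self-contained illustration (the helper name `quad_points` is made up for this example):

```python
def quad_points(tltr: tuple, blbr: tuple, width: int, height: int) -> list:
    # Same unpacking CropNode (FREE) and TransformNode (PERSPECTIVE) use:
    # TLTR holds the top-left and top-right corners as (x1, y1, x2, y2),
    # BLBR the bottom-left and bottom-right as (x4, y4, x3, y3).
    x1, y1, x2, y2 = tltr
    x4, y4, x3, y3 = blbr
    # Clockwise order: top-left, top-right, bottom-right, bottom-left.
    return [(x1 * width, y1 * height), (x2 * width, y2 * height),
            (x3 * width, y3 * height), (x4 * width, y4 * height)]

# TransformNode's defaults describe the full frame of a 512x512 image:
print(quad_points((0, 0, 1, 0), (0, 1, 1, 1), 512, 512))
# [(0, 0), (512, 0), (512, 512), (0, 512)]
```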
/core/utility/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/core/utility/__init__.py
--------------------------------------------------------------------------------
/core/utility/info.py:
--------------------------------------------------------------------------------
1 | """ Jovimetrix - Utility """
2 |
3 | import io
4 | import json
5 | from typing import Any
6 |
7 | import torch
8 | import numpy as np
9 | from PIL import Image
10 | import matplotlib.pyplot as plt
11 |
12 | from cozy_comfyui import \
13 | IMAGE_SIZE_MIN, \
14 | InputType, EnumConvertType, TensorType, \
15 | deep_merge, parse_dynamic, parse_param
16 |
17 | from cozy_comfyui.lexicon import \
18 | Lexicon
19 |
20 | from cozy_comfyui.node import \
21 | COZY_TYPE_IMAGE, \
22 | CozyBaseNode
23 |
24 | from cozy_comfyui.image.convert import \
25 | pil_to_tensor
26 |
27 | from cozy_comfyui.api import \
28 | parse_reset
29 |
30 | JOV_CATEGORY = "UTILITY/INFO"
31 |
32 | # ==============================================================================
33 | # === SUPPORT ===
34 | # ==============================================================================
35 |
36 | def decode_tensor(tensor: TensorType) -> str:
37 | if tensor.ndim > 3:
38 | b, h, w, cc = tensor.shape
39 | elif tensor.ndim > 2:
40 | cc = 1
41 | b, h, w = tensor.shape
42 | else:
43 | b = 1
44 | cc = 1
45 | h, w = tensor.shape
46 | return f"{b}x{w}x{h}x{cc}"
47 |
48 | # ==============================================================================
49 | # === CLASS ===
50 | # ==============================================================================
51 |
52 | class AkashicData:
53 | def __init__(self, **kw) -> None:
54 | for k, v in kw.items():
55 | setattr(self, k, v)
56 |
57 | class AkashicNode(CozyBaseNode):
58 | NAME = "AKASHIC (JOV) 📓"
59 | CATEGORY = JOV_CATEGORY
60 | RETURN_NAMES = ()
61 | OUTPUT_NODE = True
62 | DESCRIPTION = """
63 | Visualize data. It accepts many input types, including images, latents, vectors, conditioning, and plain values. If no input is provided, it returns an empty result. The output is a dictionary of UI-related information, such as base64-encoded images and text representations of the input data.
64 | """
65 |
66 | def run(self, **kw) -> dict[str, Any]:
67 | kw.pop('ident', None)
68 | o = kw.values()
69 | output = {"ui": {"b64_images": [], "text": []}}
70 | if o is None or len(o) == 0:
71 | output["ui"]["result"] = (None, None, )
72 | return output
73 |
74 | def __parse(val) -> str:
75 | ret = ''
76 | typ = ''.join(repr(type(val)).split("'")[1:2])
77 | if isinstance(val, dict):
78 | # mixlab layer?
79 | if (image := val.get('image', None)) is not None:
80 | ret = image
81 | if (mask := val.get('mask', None)) is not None:
82 | while len(mask.shape) < len(image.shape):
83 | mask = mask.unsqueeze(-1)
84 | ret = torch.cat((image, mask), dim=-1)
85 | if ret.ndim < 4:
86 | ret = ret.unsqueeze(-1)
87 | ret = decode_tensor(ret)
88 | typ = "Mixlab Layer"
89 |
90 | # vector patch....
91 | elif 'xyzw' in val:
92 | val = val["xyzw"]
93 | typ = "VECTOR"
94 | # latents....
95 | elif 'samples' in val:
96 | ret = decode_tensor(val['samples'][0])
97 | typ = "LATENT"
98 | # empty bugger
99 | elif len(val) == 0:
100 | ret = ""
101 | else:
102 | try:
103 | ret = json.dumps(val, indent=3, separators=(',', ': '))
104 | except Exception as e:
105 | ret = str(e)
106 | elif isinstance(val, (tuple, set, list, np.ndarray,)):
107 | if (size := len(val)) > 0:
108 | if isinstance(val, (np.ndarray,)):
109 | ret = str(val)
110 | typ = "NUMPY ARRAY"
111 | elif isinstance(val[0], (TensorType,)):
112 | ret = decode_tensor(val[0])
113 | typ = type(val[0])
114 | elif size == 1 and isinstance(val[0], (list,)) and isinstance(val[0][0], (TensorType,)):
115 | ret = decode_tensor(val[0][0])
116 | typ = "CONDITIONING"
117 | elif all(isinstance(i, (tuple, set, list)) for i in val):
118 | ret = "[\n" + ",\n".join(f" {row}" for row in val) + "\n]"
119 | # ret = json.dumps(val, indent=4)
120 | elif all(isinstance(i, (bool, int, float)) for i in val):
121 | ret = ','.join([str(x) for x in val])
122 | else:
123 | ret = str(val)
124 | elif isinstance(val, bool):
125 | ret = "True" if val else "False"
126 | elif isinstance(val, TensorType):
127 | ret = decode_tensor(val)
128 | else:
129 | ret = str(val)
130 | return json.dumps({typ: ret}, separators=(',', ': '))
131 |
132 | for x in o:
133 | data = ""
134 | if len(x) > 1:
135 | data += "::\n"
136 | for p in x:
137 | data += __parse(p) + "\n"
138 | output["ui"]["text"].append(data)
139 | return output
140 |
141 | class GraphNode(CozyBaseNode):
142 | NAME = "GRAPH (JOV) 📈"
143 | CATEGORY = JOV_CATEGORY
144 | OUTPUT_NODE = True
145 | RETURN_TYPES = ("IMAGE", )
146 | RETURN_NAMES = ("IMAGE",)
147 | OUTPUT_TOOLTIPS = (
148 | "The graphed image"
149 | )
150 | DESCRIPTION = """
151 | Visualize a series of data points over time. It accepts a dynamic number of values to graph and display, with options to reset the graph or specify the number of values. The output is an image displaying the graph, allowing users to analyze trends and patterns.
152 | """
153 |
154 | @classmethod
155 | def INPUT_TYPES(cls) -> InputType:
156 | d = super().INPUT_TYPES()
157 | d = deep_merge(d, {
158 | "optional": {
159 | Lexicon.RESET: ("BOOLEAN", {
160 | "default": False,
161 | "tooltip":"Clear the graph history"}),
162 | Lexicon.VALUE: ("INT", {
163 | "default": 60, "min": 0,
164 | "tooltip":"Number of values to graph and display"}),
165 | Lexicon.WH: ("VEC2", {
166 | "default": (512, 512), "mij":IMAGE_SIZE_MIN, "int": True,
167 | "label": ["W", "H"]}),
168 | }
169 | })
170 | return Lexicon._parse(d)
171 |
172 | @classmethod
173 | def IS_CHANGED(cls, **kw) -> float:
174 | return float('nan')
175 |
176 | def __init__(self, *arg, **kw) -> None:
177 | super().__init__(*arg, **kw)
178 | self.__history = []
179 | self.__fig, self.__ax = plt.subplots(figsize=(5.12, 5.12))
180 |
181 | def run(self, ident, **kw) -> tuple[TensorType]:
182 | slice = parse_param(kw, Lexicon.VALUE, EnumConvertType.INT, 60)[0]
183 | wihi = parse_param(kw, Lexicon.WH, EnumConvertType.VEC2INT, (512, 512), IMAGE_SIZE_MIN)[0]
184 | if parse_reset(ident) > 0 or parse_param(kw, Lexicon.RESET, EnumConvertType.BOOLEAN, False)[0]:
185 | self.__history = []
186 | longest_edge = 0
187 | dynamic = parse_dynamic(kw, Lexicon.DYNAMIC, EnumConvertType.FLOAT, 0)
188 | dynamic = [i[0] for i in dynamic]
189 | self.__ax.clear()
190 | for idx, val in enumerate(dynamic):
191 | if isinstance(val, (set, tuple,)):
192 | val = list(val)
193 | if not isinstance(val, (list, )):
194 | val = [val]
195 | while len(self.__history) <= idx:
196 | self.__history.append([])
197 | self.__history[idx].extend(val)
198 | if slice > 0:
199 | stride = max(0, -slice + len(self.__history[idx]) + 1)
200 | longest_edge = max(longest_edge, stride)
201 | self.__history[idx] = self.__history[idx][stride:]
202 | self.__ax.plot(self.__history[idx], color="rgbcymk"[idx % 7])
203 |
204 | self.__history = self.__history[:slice+1]
205 | width, height = wihi
206 | width, height = (width / 100., height / 100.)
207 | self.__fig.set_figwidth(width)
208 | self.__fig.set_figheight(height)
209 | self.__fig.canvas.draw_idle()
210 | buffer = io.BytesIO()
211 | self.__fig.savefig(buffer, format="png")
212 | buffer.seek(0)
213 | image = Image.open(buffer)
214 | return (pil_to_tensor(image),)
215 |
216 | class ImageInfoNode(CozyBaseNode):
217 | NAME = "IMAGE INFO (JOV) 📚"
218 | CATEGORY = JOV_CATEGORY
219 | RETURN_TYPES = ("INT", "INT", "INT", "INT", "VEC2", "VEC3")
220 | RETURN_NAMES = ("COUNT", "W", "H", "C", "WH", "WHC")
221 | OUTPUT_TOOLTIPS = (
222 | "Batch count",
223 | "Width",
224 | "Height",
225 | "Channels",
226 | "Width & Height as a VEC2",
227 | "Width, Height and Channels as a VEC3"
228 | )
229 | DESCRIPTION = """
230 | Exports and displays shape information about images: batch count, width, height, and channel count.
231 | """
232 |
233 | @classmethod
234 | def INPUT_TYPES(cls) -> InputType:
235 | d = super().INPUT_TYPES()
236 | d = deep_merge(d, {
237 | "optional": {
238 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {})
239 | }
240 | })
241 | return Lexicon._parse(d)
242 |
243 | def run(self, **kw) -> tuple[int, int, int, int, tuple, tuple]:
244 | image = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None)
245 | height, width, cc = image[0].shape
246 | return (len(image), width, height, cc, (width, height), (width, height, cc))
247 |
--------------------------------------------------------------------------------
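For reference, `decode_tensor` above formats shapes as `BxWxHxC`, printing width before height even though torch stores images as `B, H, W, C`. A quick check of the 4-dimensional branch:

```python
import torch

t = torch.zeros(4, 512, 768, 3)   # a ComfyUI IMAGE batch: B, H, W, C
b, h, w, cc = t.shape             # the same unpack decode_tensor performs
print(f"{b}x{w}x{h}x{cc}")        # -> 4x768x512x3
```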
/core/utility/io.py:
--------------------------------------------------------------------------------
1 | """ Jovimetrix - Utility """
2 |
3 | import os
4 | import json
5 | from uuid import uuid4
6 | from pathlib import Path
7 | from typing import Any
8 |
9 | import torch
10 | import numpy as np
11 | from PIL import Image
12 | from PIL.PngImagePlugin import PngInfo
13 |
14 | from comfy.utils import ProgressBar
15 | from folder_paths import get_output_directory
16 | from nodes import interrupt_processing
17 |
18 | from cozy_comfyui import \
19 | logger, \
20 | InputType, EnumConvertType, \
21 | deep_merge, parse_param, parse_param_list, zip_longest_fill
22 |
23 | from cozy_comfyui.lexicon import \
24 | Lexicon
25 |
26 | from cozy_comfyui.node import \
27 | COZY_TYPE_IMAGE, COZY_TYPE_ANY, \
28 | CozyBaseNode
29 |
30 | from cozy_comfyui.image.convert import \
31 | tensor_to_pil, tensor_to_cv
32 |
33 | from cozy_comfyui.api import \
34 | TimedOutException, ComfyAPIMessage, \
35 | comfy_api_post
36 |
37 | # ==============================================================================
38 | # === GLOBAL ===
39 | # ==============================================================================
40 |
41 | JOV_CATEGORY = "UTILITY/IO"
42 |
43 | # min amount of time before showing the cancel dialog
44 | JOV_DELAY_MIN = 5
45 | try: JOV_DELAY_MIN = int(os.getenv("JOV_DELAY_MIN", JOV_DELAY_MIN))
46 | except ValueError: pass
47 | JOV_DELAY_MIN = max(1, JOV_DELAY_MIN)
48 |
49 | # max 10 minutes to start
50 | JOV_DELAY_MAX = 600
51 | try: JOV_DELAY_MAX = int(os.getenv("JOV_DELAY_MAX", JOV_DELAY_MAX))
52 | except ValueError: pass
53 |
54 | FORMATS = ["gif", "png", "jpg"]
55 | if (JOV_GIFSKI := os.getenv("JOV_GIFSKI", None)) is not None:
56 | if not os.path.isfile(JOV_GIFSKI):
57 | logger.error(f"gifski missing [{JOV_GIFSKI}]")
58 | JOV_GIFSKI = None
59 | else:
60 | FORMATS = ["gifski"] + FORMATS
61 | logger.info("gifski support")
62 | else:
63 | logger.warning("no gifski support")
64 |
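# Example (hypothetical path): enable gifski export by pointing JOV_GIFSKI at
# the binary before launching ComfyUI, e.g.:
#   JOV_GIFSKI=/usr/local/bin/gifski python main.py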
65 | # ==============================================================================
66 | # === SUPPORT ===
67 | # ==============================================================================
68 |
69 | def path_next(pattern: str) -> str:
70 | """
71 | Finds the next free path in a sequentially named list of files
72 | """
73 | i = 1
74 | while os.path.exists(pattern % i):
75 | i = i * 2
76 |
77 | a, b = (i // 2, i)
78 | while a + 1 < b:
79 | c = (a + b) // 2
80 | a, b = (c, b) if os.path.exists(pattern % c) else (a, c)
81 | return pattern % b
82 |
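# Worked example: with pattern "frame_%04d.png" and frames 0001-0005 on disk,
# the doubling loop probes 1, 2, 4 (taken) then 8 (free), bounding the answer
# in (4, 8]; the bisection then probes 6 (free) and 5 (taken) and returns
# "frame_0006.png" -- O(log n) existence checks instead of O(n).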
83 | # ==============================================================================
84 | # === CLASS ===
85 | # ==============================================================================
86 |
87 | class DelayNode(CozyBaseNode):
88 | NAME = "DELAY (JOV) ✋🏽"
89 | CATEGORY = JOV_CATEGORY
90 | RETURN_TYPES = (COZY_TYPE_ANY,)
91 | RETURN_NAMES = ("OUT",)
92 | OUTPUT_TOOLTIPS = (
93 | "Pass through data when the delay ends"
94 | )
95 | DESCRIPTION = """
96 | Introduce a pause in the workflow. Accepts an optional input to pass through and a timer parameter that sets the duration of the delay; if no timer is provided, it defaults to the maximum delay. During the delay, the node periodically checks for messages so the wait can be cancelled. Once the delay completes, it returns the input passed to it. The screensaver can be disabled with the `ENABLE` option.
97 | """
98 |
99 | @classmethod
100 | def INPUT_TYPES(cls) -> InputType:
101 | d = super().INPUT_TYPES()
102 | d = deep_merge(d, {
103 | "optional": {
104 | Lexicon.PASS_IN: (COZY_TYPE_ANY, {
105 | "default": None,
106 | "tooltip":"The data that should be held until the timer completes."}),
107 | Lexicon.TIMER: ("INT", {
108 | "default" : 0, "min": -1,
109 | "tooltip":"How long to delay if enabled. 0 means no delay."}),
110 | Lexicon.ENABLE: ("BOOLEAN", {
111 | "default": True,
112 | "tooltip":"Enable or disable the screensaver."})
113 | }
114 | })
115 | return Lexicon._parse(d)
116 |
117 | @classmethod
118 | def IS_CHANGED(cls, **kw) -> float:
119 | return float('nan')
120 |
121 | def run(self, ident, **kw) -> tuple[Any]:
122 | delay = parse_param(kw, Lexicon.TIMER, EnumConvertType.INT, 0, -1, JOV_DELAY_MAX)[0]
123 | if delay < 0:
124 | delay = JOV_DELAY_MAX
125 | if delay > JOV_DELAY_MIN:
126 | comfy_api_post("jovi-delay-user", ident, {"id": ident, "timeout": delay})
127 | # enable = parse_param(kw, Lexicon.ENABLE, EnumConvertType.BOOLEAN, True)[0]
128 |
129 | step = 1
130 | pbar = ProgressBar(delay)
131 | while step <= delay:
132 | try:
133 | data = ComfyAPIMessage.poll(ident, timeout=1)
134 | if data.get('id', None) == ident:
135 | if data.get('cmd', False) is False:
136 | interrupt_processing(True)
137 | logger.warning(f"delay [cancelled] ({step}): {ident}")
138 | break
139 | except TimedOutException as _:
140 | if step % 10 == 0:
141 | logger.info(f"delay [continue] ({step}): {ident}")
142 | pbar.update_absolute(step)
143 | step += 1
144 | return (kw.get(Lexicon.PASS_IN, None),)
145 |
146 | class ExportNode(CozyBaseNode):
147 | NAME = "EXPORT (JOV) 📽"
148 | CATEGORY = JOV_CATEGORY
149 | NOT_IDEMPOTENT = True
150 | OUTPUT_NODE = True
151 | RETURN_TYPES = ()
152 | DESCRIPTION = """
153 | Save images or animations to disk. Supported output formats are GIF, PNG, and JPG, plus GIFSKI when the gifski binary is available. Users can specify the output directory, filename prefix, image quality, frame rate, and other parameters, and can either overwrite existing files or generate unique filenames to avoid conflicts. This is an output node: files are written to disk and nothing is returned to the graph.
154 | """
155 |
156 | @classmethod
157 | def IS_CHANGED(cls, **kw) -> float:
158 | return float('nan')
159 |
160 | @classmethod
161 | def INPUT_TYPES(cls) -> InputType:
162 | d = super().INPUT_TYPES()
163 | d = deep_merge(d, {
164 | "optional": {
165 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}),
166 | Lexicon.PATH: ("STRING", {
167 | "default": get_output_directory(),
168 | "default_top": "