├── .gitattributes ├── .github └── workflows │ └── publish_action.yml ├── .gitignore ├── LICENSE ├── NOTICE ├── README.md ├── __init__.py ├── core ├── __init__.py ├── adjust.py ├── anim.py ├── calc.py ├── color.py ├── compose.py ├── create.py ├── trans.py ├── utility │ ├── __init__.py │ ├── batch.py │ ├── info.py │ └── io.py └── vars.py ├── node_list.json ├── pyproject.toml ├── requirements.txt ├── res ├── aud │ ├── bread.mp3 │ └── bread.wav ├── img │ ├── anim │ │ ├── anim (1).png │ │ ├── anim (2).png │ │ ├── anim (3).png │ │ ├── anim (4).png │ │ ├── anim (5).png │ │ ├── anim (6).png │ │ ├── anim (7).png │ │ └── anim (8).png │ ├── color-a.png │ ├── color-b.png │ ├── color-c.png │ ├── color-d.png │ ├── color-e.png │ ├── color-f.png │ ├── color-g.png │ ├── depth-a.png │ ├── depth-b.png │ ├── depth-c.png │ ├── mask-a.png │ ├── mask-b.png │ ├── mask-c.png │ ├── mask-e.png │ ├── shape-a.png │ ├── shape-b.png │ ├── shape-c.png │ ├── shape-d.png │ ├── test-a.png │ ├── test-b.png │ ├── test-c.png │ ├── test-d.jpg │ ├── tile-a.png │ ├── tile-b.png │ ├── tile-c.png │ └── tile-d.png └── wiki │ ├── YouTube.svg │ └── help_002.png └── web ├── core.js ├── fun.js ├── nodes ├── akashic.js ├── array.js ├── delay.js ├── flatten.js ├── graph.js ├── lerp.js ├── op_binary.js ├── op_unary.js ├── queue.js ├── route.js ├── stack.js ├── stringer.js └── value.js ├── util.js └── widget_vector.js /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /.github/workflows/publish_action.yml: -------------------------------------------------------------------------------- 1 | name: Publish to Comfy registry 2 | on: 3 | workflow_dispatch: 4 | push: 5 | branches: 6 | - main 7 | paths: 8 | - "pyproject.toml" 9 | 10 | permissions: 11 | issues: write 12 | 13 | jobs: 14 | publish-node: 15 | name: Publish Custom Node to registry 16 | runs-on: ubuntu-latest 17 | if: ${{ github.repository_owner == 'Amorano' }} 18 | steps: 19 | - name: Check out code 20 | uses: actions/checkout@v4 21 | - name: Publish Custom Node 22 | uses: Comfy-Org/publish-node-action@v1 23 | with: 24 | personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }} 25 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | *.py[cod] 3 | *$py.class 4 | _*/ 5 | glsl/* 6 | *.code-workspace 7 | .vscode 8 | config.json 9 | ignore.txt 10 | .env 11 | .venv 12 | .DS_Store 13 | *.egg-info 14 | *.bak 15 | checkpoints 16 | results 17 | backup 18 | node_modules 19 | *-lock.json 20 | *.config.mjs 21 | package.json 22 | _TODO*.* -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Alexander G. 
Morano 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | 23 | GO NUTS; JUST TRY NOT TO DO IT IN YOUR HEAD. 24 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | This project includes code concepts from the MTB Nodes project (MIT) 2 | https://github.com/melMass/comfy_mtb 3 | 4 | This project includes code concepts from the ComfyUI-Custom-Scripts project (MIT) 5 | https://github.com/pythongosssss/ComfyUI-Custom-Scripts 6 | 7 | This project includes code concepts from the KJNodes for ComfyUI project (GPL 3.0) 8 | https://github.com/kijai/ComfyUI-KJNodes 9 | 10 | This project includes code concepts from the UE Nodes project (Apache 2.0) 11 | https://github.com/chrisgoringe/cg-use-everywhere 12 | 13 | This project includes code concepts from the WAS Node Suite project (MIT) 14 | https://github.com/WASasquatch/was-node-suite-comfyui 15 | 16 | This project includes code concepts from the rgthree-comfy project (MIT) 17 | https://github.com/rgthree/rgthree-comfy 18 | 19 | This project includes code concepts from the FizzNodes project (MIT) 20 | https://github.com/FizzleDorf/ComfyUI_FizzNodes -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | ComfyUI Nodes for procedural masking, live composition and video manipulation 5 | 6 | 7 |


10 | 11 |

12 | JOVIMETRIX IS ONLY GUARANTEED TO SUPPORT COMFYUI 0.1.3+ and FRONTEND 1.2.40+
13 | IF YOU NEED AN OLDER VERSION, PLEASE DO NOT UPDATE. 14 |

15 | 16 |

17 | 18 | ![KNIVES!](https://badgen.net/github/open-issues/amorano/jovimetrix) 19 | ![FORKS!](https://badgen.net/github/forks/amorano/jovimetrix) 20 | 21 |

22 | 23 | 24 | 25 | # SPONSORSHIP 26 | 27 | Please consider sponsoring me if you enjoy the results of my work, whether code, documentation, or otherwise. Sponsorship is a good way to keep code development open and free. 28 | 29 |
30 | 31 |  | | |  32 | -|-|-|- 33 | [![BE A GITHUB SPONSOR ❤️](https://img.shields.io/badge/sponsor-30363D?style=for-the-badge&logo=GitHub-Sponsors&logoColor=#EA4AAA)](https://github.com/sponsors/Amorano) | [![DIRECTLY SUPPORT ME VIA PAYPAL](https://img.shields.io/badge/PayPal-00457C?style=for-the-badge&logo=paypal&logoColor=white)](https://www.paypal.com/paypalme/onarom) | [![PATREON SUPPORTER](https://img.shields.io/badge/Patreon-F96854?style=for-the-badge&logo=patreon&logoColor=white)](https://www.patreon.com/joviex) | [![SUPPORT ME ON KO-FI!](https://ko-fi.com/img/githubbutton_sm.svg)](https://ko-fi.com/alexandermorano) 34 |
35 | 36 | ## HIGHLIGHTS 37 | 38 | * 30-function `BLEND` node -- subtract, multiply and overlay like the best 39 | * Vector support for 2, 3, 4 size tuples of integer or float type 40 | * Specific RGB/RGBA color vector support that provides a color picker 41 | * All Image inputs support RGBA, RGB or pure MASK input 42 | * Full Text generation support using installed system fonts 43 | * Basic parametric shape (Circle, Square, Polygon) generator 44 | * `COLOR BLIND` check support 45 | * `COLOR MATCH` against existing images or create a custom LUT 46 | * Generate `COLOR THEORY` spreads from an existing image 47 | * `COLOR MEANS` to generate palettes for existing images to keep other images in the same tonal ranges 48 | * `PIXEL SPLIT` separate the channels of an image to manipulate and `PIXEL MERGE` them back together 49 | * `STACK` a series of images into a new single image vertically, horizontally or in a grid 50 | * Or `FLATTEN` a batch of images into a single image with each image subsequently added on top (slap comp) 51 | * `VALUE` Node has conversion support for all ComfyUI types and some 3rd party types (2DCoords, Mixlab Layers) 52 | * `LERP` node to linearly interpolate all ComfyUI and Jovimetrix value types (see the sketch below) 53 | * Automatic conversion of Mixlab Layer types into Image types 54 | * Generic `ARRAY` that can Merge, Split, Select, Slice or Randomize a list of ANY type 55 | * `STRINGER` node to perform specific string manipulation operations: Split, Join, Replace, Slice 56 | * A `QUEUE` Node that supports recursing through directories, filtering multiple file types and batch loading 57 | * Use the `OP UNARY` and `OP BINARY` nodes to perform single and double type functions across all ComfyUI and Jovimetrix value types 58 | * Manipulate vectors with the `SWIZZLE` node to swap their XYZW positions 59 | * `DELAY` execution at certain parts in a workflow, with or without a timeout 60 | * Generate curve data with the `TICK` and `WAVE GEN` nodes 61 | 62 |
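For a sense of what the `LERP` node computes, here is a minimal sketch of linear interpolation over scalar and vector tuple value types. The helper name `lerp_value` is hypothetical and purely illustrative; the shipped node covers the full set of ComfyUI and Jovimetrix types.

```python
# Hypothetical sketch of the LERP idea: scalars interpolate directly,
# VEC2/VEC3/VEC4 style tuples interpolate componentwise.
def lerp_value(a, b, t: float):
    """Linearly interpolate from a to b by t in [0, 1]."""
    if isinstance(a, (int, float)) and isinstance(b, (int, float)):
        return a + (b - a) * t
    return tuple(ca + (cb - ca) * t for ca, cb in zip(a, b))

print(lerp_value(0.0, 10.0, 0.25))            # 2.5
print(lerp_value((0, 0, 0), (1, 2, 4), 0.5))  # (0.5, 1.0, 2.0)
```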
63 | 64 |

AS OF VERSION 2.0.0, THESE NODES HAVE MIGRATED TO OTHER, SMALLER PACKAGES

65 | 66 | Migrated to [Jovi_GLSL](https://github.com/Amorano/Jovi_GLSL) 67 | 68 | ~~* GLSL shader support~~ 69 | ~~* * `GLSL Node` provides raw access to Vertex and Fragment shaders~~ 70 | ~~* * `Dynamic GLSL` dynamically converts existing GLSL script files into ComfyUI nodes at runtime~~ 71 | ~~* * Over 20 hand-written GLSL nodes to speed up specific tasks better done on the GPU (10x speedup in most cases)~~ 72 | 73 | Migrated to [Jovi_Capture](https://github.com/Amorano/Jovi_Capture) 74 | 75 | ~~* `STREAM READER` node to capture monitor, webcam or URL media~~ 76 | ~~* `STREAM WRITER` node to export media to an HTTP/HTTPS server for OBS or other 3rd party streaming software~~ 77 | 78 | Migrated to [Jovi_Spout](https://github.com/Amorano/Jovi_Spout) 79 | 80 | ~~* `SPOUT` streaming support *WINDOWS ONLY*~~ 81 | 82 | Migrated to [Jovi_MIDI](https://github.com/Amorano/Jovi_MIDI) 83 | 84 | ~~* `MIDI READER` captures MIDI messages from an external MIDI device or controller~~ 85 | ~~* `MIDI MESSAGE` processes MIDI messages received from an external MIDI controller or device~~ 86 | ~~* `MIDI FILTER` (advanced filter) to select messages from MIDI streams and devices~~ 87 | ~~* `MIDI FILTER EZ` simpler interface to filter single messages from MIDI streams and devices~~ 88 | 89 | Migrated to [Jovi_Help](https://github.com/Amorano/Jovi_Help) 90 | 91 | ~~* Help system for *ALL NODES* that will auto-parse unknown nodes for their type data and descriptions~~ 92 | 93 | Migrated to [Jovi_Colorizer](https://github.com/Amorano/Jovi_Colorizer) 94 | 95 | ~~* Colorization for *ALL NODES* using their own node settings, their node group or via regex pattern matching~~ 96 | 97 | ## UPDATES 98 | 99 |

DO NOT UPDATE JOVIMETRIX PAST VERSION 1.7.48 IF YOU DON'T WANT TO LOSE A BUNCH OF NODES

100 | 101 | Nodes that have been removed are in various other packages now. You can install those specific packages to get the functionality back, but I have no way to migrate the actual connections -- you will need to do that manually. 102 | 103 | Nodes that have been migrated: 104 | 105 | * ALL MIDI NODES: 106 | * * MIDIMessageNode 107 | * * MIDIReaderNode 108 | * * MIDIFilterNode 109 | * * MIDIFilterEZNode 110 | 111 | [Migrated to Jovi_MIDI](https://github.com/Amorano/Jovi_MIDI) 112 | 113 | * ALL STREAMING NODES: 114 | * * StreamReaderNode 115 | * * StreamWriterNode 116 | 117 | [Migrated to Jovi_Capture](https://github.com/Amorano/Jovi_Capture) 118 | 119 | * * SpoutWriterNode 120 | 121 | [Migrated to Jovi_Spout](https://github.com/Amorano/Jovi_Spout) 122 | 123 | * ALL GLSL NODES: 124 | * * GLSL 125 | * * GLSL BLEND LINEAR 126 | * * GLSL COLOR CONVERSION 127 | * * GLSL COLOR PALETTE 128 | * * GLSL CONICAL GRADIENT 129 | * * GLSL DIRECTIONAL WARP 130 | * * GLSL FILTER RANGE 131 | * * GLSL GRAYSCALE 132 | * * GLSL HSV ADJUST 133 | * * GLSL INVERT 134 | * * GLSL NORMAL 135 | * * GLSL NORMAL BLEND 136 | * * GLSL POSTERIZE 137 | * * GLSL TRANSFORM 138 | 139 | [Migrated to Jovi_GLSL](https://github.com/Amorano/Jovi_GLSL) 140 | 141 | **2025/06/06** @2.1.10: 142 | * updated to comfy_cozy 0.0.34 143 | * default width and height to 1 144 | * removed old debug string 145 | * akashic tries to parse unicode emoji strings 146 | 147 | **2025/06/02** @2.1.9: 148 | * fixed dynamic nodes that already start with inputs (dynamic input wouldn't show up) 149 | * patched Queue node to work with new `COMBO` style of inputs 150 | 151 | **2025/05/29** @2.1.8: 152 | * updated to comfy_cozy 0.0.32 153 | 154 | **2025/05/27** @2.1.7: 155 | * re-ranged all FLOAT to their maximum representations 156 | * clerical cleanup for JS callbacks 157 | * added `SPLIT` node to break images into vertical or horizontal slices 158 | 159 | **2025/05/25** @2.1.6: 160 | * loosened restriction for python 3.11+ to allow for 3.10+ 161 | * * I make zero guarantee that this will actually let 3.10 work and I will not support 3.10 162 | 163 | **2025/05/16** @2.1.5: 164 | * Full compatibility with [ComfyMath Vector](https://github.com/evanspearman/ComfyMath) nodes 165 | * Masks can be inverted at inputs 166 | * `EnumScaleInputMode` for `BLEND` node to adjust inputs prior to operation 167 | * Allow images or mask inputs in `CONSTANT` node to fall through 168 | * `VALUE` nodes return all items as list, not just >1 169 | * Added explicit MASK option for `PIXEL SPLIT` node 170 | * Split `ADJUST` node into `BLUR`, `EDGE`, `LIGHT` and `PIXEL` 171 | * Migrated most of image lib to cozy_comfyui 172 | * widget_vector tweaked to disallow non-numerics 173 | * widgetHookControl streamlined 174 | 175 | **2025/05/08** @2.1.4: 176 | * Support for NUMERICAL (bool, int, float, vecN) inputs on value inputs 177 | 178 | **2025/05/08** @2.1.3: 179 | * fixed for VEC* types using MIN/MAX 180 | 181 | **2025/05/07** @2.1.2: 182 | * `TICK` with normalization and new series generator 183 | 184 | **2025/05/06** @2.1.1: 185 | * fixed IS_CHANGED in graphnode 186 | * updated `TICK SIMPLE` in place of `TICK` to be inclusive of the end range 187 | * migrated ease, normalization and wave functions to cozy_comfyui 188 | * first pass preserving values in multi-type fields 189 | 190 | **2025/05/05** @2.1.0: 191 | * Cleaned up all node defaults 192 | * Vector nodes aligned for list outputs 193 | * Cleaned all emoji from input/output 194 | * Cleared all EnumConvertTypes and aligned with new comfy_cozy 195 | * Lexicon defines come from Comfy_Cozy module 196 | * `OP UNARY` fixed factorial 197 | * Added fill array mode for `OP UNARY` 198 | * removed `STEREOGRAM` and `STEREOSCOPIC` -- they were designed poorly 199 | 200 | **2025/05/01** @2.0.11: 201 | * unified widget_vector.js 202 | * new comfy_cozy support 203 | * auto-convert all VEC*INT -> VEC* float types 204 | * readability for node definitions 205 | 206 | **2025/04/24** @2.0.10: 207 | * `SHAPE NODE` fixed for transparency blends when using blurred masks 208 | 209 | **2025/04/24** @2.0.9: 210 | * removed inversion in pixel splitter 211 | 212 | **2025/04/23** @2.0.8: 213 | * categories aligned to new comfy-cozy support 214 | 215 | **2025/04/19** @2.0.7: 216 | * all JS messages fixed 217 | 218 | **2025/04/19** @2.0.6: 219 | * fixed reset message from JS 220 | 221 | **2025/04/19** @2.0.5: 222 | * patched new frontend input mechanism for dynamic inputs 223 | * reduced requirements 224 | * removed old vector conversions waiting for new frontend mechanism 225 | 226 | **2025/04/17** @2.0.4: 227 | * fixed bug in resize_matte `MODE` that would fail when the matte was smaller than the input image 228 | * migrated image_crop functions to cozy_comfyui 229 | 230 | **2025/04/12** @2.0.0: 231 | * REMOVED ALL STREAMING, MIDI and GLSL nodes for new packages, HELP System and Node Colorization system: 232 | 233 | [Jovi_Capture - Web camera, Monitor Capture, Window Capture](https://github.com/Amorano/Jovi_Capture) 234 | 235 | [Jovi_MIDI - MIDI capture and MIDI message parsing](https://github.com/Amorano/Jovi_MIDI) 236 | 237 | [Jovi_GLSL - GLSL Shaders](https://github.com/Amorano/Jovi_GLSL) 238 | 239 | [Jovi_Spout - SPOUT Streaming support](https://github.com/Amorano/Jovi_Spout) 240 | 241 | [Jovi_Colorizer - Node Colorization](https://github.com/Amorano/Jovi_Colorizer) 242 | 243 | [Jovi_Help - Node Help](https://github.com/Amorano/Jovi_Help) 244 | 245 | * all nodes will accept `LIST` or `BATCH` and process as if all elements are in a list (see the sketch below) 246 | * patched constant node to work with `MATTE_RESIZE` 247 | * patched import loader to work with old/new ComfyUI 248 | * missing array web node partial 249 | * removed array and no one even noticed 250 | * all inputs should be treated as a list, even single elements [] 251 | 252 |
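Every node in `core/` follows the same list-broadcasting pattern: each input is parsed into a list, the lists are zipped out to the longest one, and shorter lists repeat their final value. Below is a minimal sketch of that rule, assuming last-value padding; the real helper is `zip_longest_fill` from the cozy_comfyui package and may differ in edge cases.

```python
from itertools import zip_longest

def zip_longest_fill(*lists):
    """Zip to the longest input; shorter inputs repeat their last element."""
    sentinel = object()
    last = [None] * len(lists)
    for row in zip_longest(*lists, fillvalue=sentinel):
        out = []
        for i, v in enumerate(row):
            if v is sentinel:
                v = last[i]  # pad with the most recent value seen
            last[i] = v
            out.append(v)
        yield tuple(out)

# three images broadcast against one op and two radii:
for params in zip_longest_fill(["img0", "img1", "img2"], ["BLUR"], [3, 5]):
    print(params)
# ('img0', 'BLUR', 3)
# ('img1', 'BLUR', 5)
# ('img2', 'BLUR', 5)
```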
253 | *(image: explicit vector node supports)* 254 | *(image: TICK Node Batch Support Output)* 255 |
256 | 257 | # INSTALLATION 258 | 259 | [Please see the wiki for advanced use of the environment variables used during startup](https://github.com/Amorano/Jovimetrix/wiki/B.-ASICS) 260 | 261 | ## COMFYUI MANAGER 262 | 263 | If you have [ComfyUI Manager](https://github.com/ltdrdata/ComfyUI-Manager) installed, simply search for Jovimetrix and install from the manager's database. 264 | 265 | ## MANUAL INSTALL 266 | Clone the repository into your ComfyUI custom_nodes directory. You can clone the repository with the command: 267 | ``` 268 | git clone https://github.com/Amorano/Jovimetrix.git 269 | ``` 270 | You can then install the requirements by using the command: 271 | ``` 272 | .\python_embed\python.exe -s -m pip install -r requirements.txt 273 | ``` 274 | If you are using a virtual environment (venv), make sure it is activated before installation. Then install the requirements with the command: 275 | ``` 276 | pip install -r requirements.txt 277 | ``` 278 | # WHERE TO FIND ME 279 | 280 | You can find me on [![DISCORD](https://dcbadge.vercel.app/api/server/62TJaZ3Z5r?style=flat-square)](https://discord.gg/62TJaZ3Z5r). 281 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | ██  ██████  ██  ██ ██ ███  ███ ███████ ████████ ██████  ██ ██  ██  3 | ██ ██    ██ ██  ██ ██ ████  ████ ██         ██    ██   ██ ██  ██ ██   4 | ██ ██  ██ ██  ██ ██ ██ ████ ██ █████  ██  ██████  ██   ███   5 | ██ ██ ██  ██  ██  ██  ██ ██  ██  ██ ██     ██  ██   ██ ██  ██ ██  6 |  █████   ██████    ████   ██ ██      ██ ███████  ██  ██  ██ ██ ██   ██  7 | 8 | Animation, Image Compositing & Procedural Creation 9 | 10 | @title: Jovimetrix 11 | @author: Alexander G. Morano 12 | @category: Compositing 13 | @reference: https://github.com/Amorano/Jovimetrix 14 | @tags: adjust, animate, compose, compositing, composition, device, flow, video, 15 | mask, shape, animation, logic 16 | @description: Animation via tick. Parameter manipulation with wave generator. 17 | Unary and Binary math support. Value convert int/float/bool, VectorN and Image, 18 | Mask types. Shape mask generator. Stack images, do channel ops, split, merge 19 | and randomize arrays and batches. Load images & video from anywhere. Dynamic 20 | bus routing. Save output anywhere! Flatten, crop, transform; check 21 | colorblindness or linear interpolate values. 22 | @node list: 23 | TickNode, TickSimpleNode, WaveGeneratorNode 24 | BitSplitNode, ComparisonNode, LerpNode, OPUnaryNode, OPBinaryNode, StringerNode, SwizzleNode, 25 | ColorBlindNode, ColorMatchNode, ColorKMeansNode, ColorTheoryNode, GradientMapNode, 26 | AdjustNode, BlendNode, FilterMaskNode, PixelMergeNode, PixelSplitNode, PixelSwapNode, ThresholdNode, 27 | ConstantNode, ShapeNode, TextNode, 28 | CropNode, FlattenNode, StackNode, TransformNode, 29 | 30 | ArrayNode, QueueNode, QueueTooNode, 31 | AkashicNode, GraphNode, ImageInfoNode, 32 | DelayNode, ExportNode, RouteNode, SaveOutputNode 33 | 34 | ValueNode, Vector2Node, Vector3Node, Vector4Node, 35 | """ 36 | 37 | __author__ = "Alexander G. 
Morano" 38 | __email__ = "amorano@gmail.com" 39 | 40 | from pathlib import Path 41 | 42 | from cozy_comfyui import \ 43 | logger 44 | 45 | from cozy_comfyui.node import \ 46 | loader 47 | 48 | JOV_DOCKERENV = False 49 | try: 50 | with open('/proc/1/cgroup', 'rt') as f: 51 | content = f.read() 52 | JOV_DOCKERENV = any(x in content for x in ['docker', 'kubepods', 'containerd']) 53 | except FileNotFoundError: 54 | pass 55 | 56 | if JOV_DOCKERENV: 57 | logger.info("RUNNING IN A DOCKER") 58 | 59 | # ============================================================================== 60 | # === GLOBAL === 61 | # ============================================================================== 62 | 63 | PACKAGE = "JOVIMETRIX" 64 | WEB_DIRECTORY = "./web" 65 | ROOT = Path(__file__).resolve().parent 66 | NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS = loader(ROOT, 67 | PACKAGE, 68 | "core", 69 | f"{PACKAGE} 🔺🟩🔵", 70 | False) 71 | -------------------------------------------------------------------------------- /core/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from enum import Enum 3 | 4 | class EnumFillOperation(Enum): 5 | DEFAULT = 0 6 | FILL_ZERO = 20 7 | FILL_ALL = 10 8 | -------------------------------------------------------------------------------- /core/adjust.py: -------------------------------------------------------------------------------- 1 | """ Jovimetrix - Adjust """ 2 | 3 | import sys 4 | from enum import Enum 5 | from typing import Any, List 6 | 7 | from comfy.utils import ProgressBar 8 | 9 | from cozy_comfyui import \ 10 | InputType, RGBAMaskType, EnumConvertType, \ 11 | deep_merge, parse_param, zip_longest_fill 12 | 13 | from cozy_comfyui.lexicon import \ 14 | Lexicon 15 | 16 | from cozy_comfyui.node import \ 17 | COZY_TYPE_IMAGE, \ 18 | CozyImageNode 19 | 20 | from cozy_comfyui.image.adjust import \ 21 | EnumAdjustBlur, EnumAdjustColor, EnumAdjustEdge, EnumAdjustMorpho, \ 22 | image_contrast, image_brightness, image_equalize, image_gamma, \ 23 | image_exposure, image_hsv, image_invert, image_pixelate, image_pixelscale, \ 24 | image_posterize, image_quantize, image_sharpen, image_morphology, \ 25 | image_emboss, image_blur, image_edge, image_color 26 | 27 | from cozy_comfyui.image.channel import \ 28 | channel_solid 29 | 30 | from cozy_comfyui.image.compose import \ 31 | image_levels 32 | 33 | from cozy_comfyui.image.convert import \ 34 | tensor_to_cv, cv_to_tensor_full, image_mask, image_mask_add 35 | 36 | from cozy_comfyui.image.misc import \ 37 | image_stack 38 | 39 | # ============================================================================== 40 | # === GLOBAL === 41 | # ============================================================================== 42 | 43 | JOV_CATEGORY = "ADJUST" 44 | 45 | # ============================================================================== 46 | # === ENUMERATION === 47 | # ============================================================================== 48 | 49 | class EnumAdjustLight(Enum): 50 | EXPOSURE = 10 51 | GAMMA = 20 52 | BRIGHTNESS = 30 53 | CONTRAST = 40 54 | EQUALIZE = 50 55 | 56 | class EnumAdjustPixel(Enum): 57 | PIXELATE = 10 58 | PIXELSCALE = 20 59 | QUANTIZE = 30 60 | POSTERIZE = 40 61 | 62 | # ============================================================================== 63 | # === CLASS === 64 | # ============================================================================== 65 | 66 | class AdjustBlurNode(CozyImageNode): 67 | NAME = "ADJUST: BLUR (JOV)" 68 | CATEGORY = 
JOV_CATEGORY 69 | DESCRIPTION = """ 70 | Enhance and modify images with various blur effects. 71 | """ 72 | 73 | @classmethod 74 | def INPUT_TYPES(cls) -> InputType: 75 | d = super().INPUT_TYPES() 76 | d = deep_merge(d, { 77 | "optional": { 78 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}), 79 | Lexicon.FUNCTION: (EnumAdjustBlur._member_names_, { 80 | "default": EnumAdjustBlur.BLUR.name,}), 81 | Lexicon.RADIUS: ("INT", { 82 | "default": 3, "min": 3}), 83 | } 84 | }) 85 | return Lexicon._parse(d) 86 | 87 | def run(self, **kw) -> RGBAMaskType: 88 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None) 89 | op = parse_param(kw, Lexicon.FUNCTION, EnumAdjustBlur, EnumAdjustBlur.BLUR.name) 90 | radius = parse_param(kw, Lexicon.RADIUS, EnumConvertType.INT, 3) 91 | params = list(zip_longest_fill(pA, op, radius)) 92 | images = [] 93 | pbar = ProgressBar(len(params)) 94 | for idx, (pA, op, radius) in enumerate(params): 95 | pA = channel_solid() if pA is None else tensor_to_cv(pA) 96 | # height, width = pA.shape[:2] 97 | pA = image_blur(pA, op, radius) 98 | #pA = image_blend(pA, img_new, mask) 99 | images.append(cv_to_tensor_full(pA)) 100 | pbar.update_absolute(idx) 101 | return image_stack(images) 102 | 103 | class AdjustColorNode(CozyImageNode): 104 | NAME = "ADJUST: COLOR (JOV)" 105 | CATEGORY = JOV_CATEGORY 106 | DESCRIPTION = """ 107 | Enhance and modify images with various color adjustments. 108 | """ 109 | 110 | @classmethod 111 | def INPUT_TYPES(cls) -> InputType: 112 | d = super().INPUT_TYPES() 113 | d = deep_merge(d, { 114 | "optional": { 115 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}), 116 | Lexicon.FUNCTION: (EnumAdjustColor._member_names_, { 117 | "default": EnumAdjustColor.RGB.name,}), 118 | Lexicon.VEC: ("VEC3", { 119 | "default": (0,0,0), "mij": -1, "maj": 1, "step": 0.025}) 120 | } 121 | }) 122 | return Lexicon._parse(d) 123 | 124 | def run(self, **kw) -> RGBAMaskType: 125 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None) 126 | op = parse_param(kw, Lexicon.FUNCTION, EnumAdjustColor, EnumAdjustColor.RGB.name) 127 | vec = parse_param(kw, Lexicon.VEC, EnumConvertType.VEC3, (0,0,0)) 128 | params = list(zip_longest_fill(pA, op, vec)) 129 | images = [] 130 | pbar = ProgressBar(len(params)) 131 | for idx, (pA, op, vec) in enumerate(params): 132 | pA = channel_solid() if pA is None else tensor_to_cv(pA) 133 | pA = image_color(pA, op, vec[0], vec[1], vec[2]) 134 | images.append(cv_to_tensor_full(pA)) 135 | pbar.update_absolute(idx) 136 | return image_stack(images) 137 | 138 | class AdjustEdgeNode(CozyImageNode): 139 | NAME = "ADJUST: EDGE (JOV)" 140 | CATEGORY = JOV_CATEGORY 141 | DESCRIPTION = """ 142 | Enhanced edge detection. 
143 | """ 144 | 145 | @classmethod 146 | def INPUT_TYPES(cls) -> InputType: 147 | d = super().INPUT_TYPES() 148 | d = deep_merge(d, { 149 | "optional": { 150 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}), 151 | Lexicon.FUNCTION: (EnumAdjustEdge._member_names_, { 152 | "default": EnumAdjustEdge.CANNY.name,}), 153 | Lexicon.RADIUS: ("INT", { 154 | "default": 1, "min": 1}), 155 | Lexicon.ITERATION: ("INT", { 156 | "default": 1, "min": 1, "max": 1000}), 157 | Lexicon.LOHI: ("VEC2", { 158 | "default": (0, 1), "mij": 0, "maj": 1, "step": 0.01}) 159 | } 160 | }) 161 | return Lexicon._parse(d) 162 | 163 | def run(self, **kw) -> RGBAMaskType: 164 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None) 165 | op = parse_param(kw, Lexicon.FUNCTION, EnumAdjustEdge, EnumAdjustEdge.CANNY.name) 166 | radius = parse_param(kw, Lexicon.RADIUS, EnumConvertType.INT, 1) 167 | count = parse_param(kw, Lexicon.ITERATION, EnumConvertType.INT, 1) 168 | lohi = parse_param(kw, Lexicon.LOHI, EnumConvertType.VEC2, (0,1)) 169 | params = list(zip_longest_fill(pA, op, radius, count, lohi)) 170 | images = [] 171 | pbar = ProgressBar(len(params)) 172 | for idx, (pA, op, radius, count, lohi) in enumerate(params): 173 | pA = channel_solid() if pA is None else tensor_to_cv(pA) 174 | alpha = image_mask(pA) 175 | pA = image_edge(pA, op, radius, count, lohi[0], lohi[1]) 176 | pA = image_mask_add(pA, alpha) 177 | images.append(cv_to_tensor_full(pA)) 178 | pbar.update_absolute(idx) 179 | return image_stack(images) 180 | 181 | class AdjustEmbossNode(CozyImageNode): 182 | NAME = "ADJUST: EMBOSS (JOV)" 183 | CATEGORY = JOV_CATEGORY 184 | DESCRIPTION = """ 185 | Emboss boss mode. 186 | """ 187 | 188 | @classmethod 189 | def INPUT_TYPES(cls) -> InputType: 190 | d = super().INPUT_TYPES() 191 | d = deep_merge(d, { 192 | "optional": { 193 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}), 194 | Lexicon.HEADING: ("FLOAT", { 195 | "default": -45, "min": -sys.float_info.max, "max": sys.float_info.max, "step": 0.1}), 196 | Lexicon.ELEVATION: ("FLOAT", { 197 | "default": 45, "min": -sys.float_info.max, "max": sys.float_info.max, "step": 0.1}), 198 | Lexicon.DEPTH: ("FLOAT", { 199 | "default": 10, "min": 0, "max": sys.float_info.max, "step": 0.1, 200 | "tooltip": "Depth perceived from the light angles above"}), 201 | } 202 | }) 203 | return Lexicon._parse(d) 204 | 205 | def run(self, **kw) -> RGBAMaskType: 206 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None) 207 | heading = parse_param(kw, Lexicon.HEADING, EnumConvertType.FLOAT, -45) 208 | elevation = parse_param(kw, Lexicon.ELEVATION, EnumConvertType.FLOAT, 45) 209 | depth = parse_param(kw, Lexicon.DEPTH, EnumConvertType.FLOAT, 10) 210 | params = list(zip_longest_fill(pA, heading, elevation, depth)) 211 | images = [] 212 | pbar = ProgressBar(len(params)) 213 | for idx, (pA, heading, elevation, depth) in enumerate(params): 214 | pA = channel_solid() if pA is None else tensor_to_cv(pA) 215 | alpha = image_mask(pA) 216 | pA = image_emboss(pA, heading, elevation, depth) 217 | pA = image_mask_add(pA, alpha) 218 | images.append(cv_to_tensor_full(pA)) 219 | pbar.update_absolute(idx) 220 | return image_stack(images) 221 | 222 | class AdjustLevelNode(CozyImageNode): 223 | NAME = "ADJUST: LEVELS (JOV)" 224 | CATEGORY = JOV_CATEGORY 225 | DESCRIPTION = """ 226 | 227 | """ 228 | 229 | @classmethod 230 | def INPUT_TYPES(cls) -> InputType: 231 | d = super().INPUT_TYPES() 232 | d = deep_merge(d, { 233 | "optional": { 234 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}), 235 | Lexicon.LMH: ("VEC3", { 236 | 
"default": (0,0.5,1), "mij": 0, "maj": 1, "step": 0.01, 237 | "label": ["LOW", "MID", "HIGH"]}), 238 | Lexicon.RANGE: ("VEC2", { 239 | "default": (0, 1), "mij": 0, "maj": 1, "step": 0.01, 240 | "label": ["IN", "OUT"]}) 241 | } 242 | }) 243 | return Lexicon._parse(d) 244 | 245 | def run(self, **kw) -> RGBAMaskType: 246 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None) 247 | LMH = parse_param(kw, Lexicon.LMH, EnumConvertType.VEC3, (0,0.5,1)) 248 | inout = parse_param(kw, Lexicon.RANGE, EnumConvertType.VEC2, (0,1)) 249 | params = list(zip_longest_fill(pA, LMH, inout)) 250 | images = [] 251 | pbar = ProgressBar(len(params)) 252 | for idx, (pA, LMH, inout) in enumerate(params): 253 | pA = channel_solid() if pA is None else tensor_to_cv(pA) 254 | ''' 255 | h, s, v = hsv 256 | img_new = image_hsv(img_new, h, s, v) 257 | ''' 258 | low, mid, high = LMH 259 | start, end = inout 260 | pA = image_levels(pA, low, mid, high, start, end) 261 | images.append(cv_to_tensor_full(pA)) 262 | pbar.update_absolute(idx) 263 | return image_stack(images) 264 | 265 | class AdjustLightNode(CozyImageNode): 266 | NAME = "ADJUST: LIGHT (JOV)" 267 | CATEGORY = JOV_CATEGORY 268 | DESCRIPTION = """ 269 | Tonal adjustments. They can be applied individually or all at the same time in order: brightness, contrast, histogram equalization, exposure, and gamma correction. 270 | """ 271 | 272 | @classmethod 273 | def INPUT_TYPES(cls) -> InputType: 274 | d = super().INPUT_TYPES() 275 | d = deep_merge(d, { 276 | "optional": { 277 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}), 278 | Lexicon.BRIGHTNESS: ("FLOAT", { 279 | "default": 0.5, "min": 0, "max": 1, "step": 0.01}), 280 | Lexicon.CONTRAST: ("FLOAT", { 281 | "default": 0, "min": -1, "max": 1, "step": 0.01}), 282 | Lexicon.EQUALIZE: ("BOOLEAN", { 283 | "default": False}), 284 | Lexicon.EXPOSURE: ("FLOAT", { 285 | "default": 1, "min": -8, "max": 8, "step": 0.01}), 286 | Lexicon.GAMMA: ("FLOAT", { 287 | "default": 1, "min": 0, "max": 8, "step": 0.01}), 288 | } 289 | }) 290 | return Lexicon._parse(d) 291 | 292 | def run(self, **kw) -> RGBAMaskType: 293 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None) 294 | brightness = parse_param(kw, Lexicon.BRIGHTNESS, EnumConvertType.FLOAT, 0.5) 295 | contrast = parse_param(kw, Lexicon.CONTRAST, EnumConvertType.FLOAT, 0) 296 | equalize = parse_param(kw, Lexicon.EQUALIZE, EnumConvertType.FLOAT, 0) 297 | exposure = parse_param(kw, Lexicon.EXPOSURE, EnumConvertType.FLOAT, 0) 298 | gamma = parse_param(kw, Lexicon.GAMMA, EnumConvertType.FLOAT, 0) 299 | params = list(zip_longest_fill(pA, brightness, contrast, equalize, exposure, gamma)) 300 | images = [] 301 | pbar = ProgressBar(len(params)) 302 | for idx, (pA, brightness, contrast, equalize, exposure, gamma) in enumerate(params): 303 | pA = channel_solid() if pA is None else tensor_to_cv(pA) 304 | alpha = image_mask(pA) 305 | 306 | brightness = 2. 
* (brightness - 0.5) 307 | if brightness != 0: 308 | pA = image_brightness(pA, brightness) 309 | 310 | if contrast != 0: 311 | pA = image_contrast(pA, contrast) 312 | 313 | if equalize: 314 | pA = image_equalize(pA) 315 | 316 | if exposure != 1: 317 | pA = image_exposure(pA, exposure) 318 | 319 | if gamma != 1: 320 | pA = image_gamma(pA, gamma) 321 | 322 | ''' 323 | h, s, v = hsv 324 | img_new = image_hsv(img_new, h, s, v) 325 | 326 | l, m, h = level 327 | img_new = image_levels(img_new, l, h, m, gamma) 328 | ''' 329 | pA = image_mask_add(pA, alpha) 330 | images.append(cv_to_tensor_full(pA)) 331 | pbar.update_absolute(idx) 332 | return image_stack(images) 333 | 334 | class AdjustMorphNode(CozyImageNode): 335 | NAME = "ADJUST: MORPHOLOGY (JOV)" 336 | CATEGORY = JOV_CATEGORY 337 | DESCRIPTION = """ 338 | Operations based on the image shape. 339 | """ 340 | 341 | @classmethod 342 | def INPUT_TYPES(cls) -> InputType: 343 | d = super().INPUT_TYPES() 344 | d = deep_merge(d, { 345 | "optional": { 346 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}), 347 | Lexicon.FUNCTION: (EnumAdjustMorpho._member_names_, { 348 | "default": EnumAdjustMorpho.DILATE.name,}), 349 | Lexicon.RADIUS: ("INT", { 350 | "default": 1, "min": 1}), 351 | Lexicon.ITERATION: ("INT", { 352 | "default": 1, "min": 1, "max": 1000}), 353 | } 354 | }) 355 | return Lexicon._parse(d) 356 | 357 | def run(self, **kw) -> RGBAMaskType: 358 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None) 359 | op = parse_param(kw, Lexicon.FUNCTION, EnumAdjustMorpho, EnumAdjustMorpho.DILATE.name) 360 | kernel = parse_param(kw, Lexicon.RADIUS, EnumConvertType.INT, 1) 361 | count = parse_param(kw, Lexicon.ITERATION, EnumConvertType.INT, 1) 362 | params = list(zip_longest_fill(pA, op, kernel, count)) 363 | images: List[Any] = [] 364 | pbar = ProgressBar(len(params)) 365 | for idx, (pA, op, kernel, count) in enumerate(params): 366 | pA = channel_solid() if pA is None else tensor_to_cv(pA) 367 | alpha = image_mask(pA) 368 | pA = image_morphology(pA, op, kernel, count) 369 | pA = image_mask_add(pA, alpha) 370 | images.append(cv_to_tensor_full(pA)) 371 | pbar.update_absolute(idx) 372 | return image_stack(images) 373 | 374 | class AdjustPixelNode(CozyImageNode): 375 | NAME = "ADJUST: PIXEL (JOV)" 376 | CATEGORY = JOV_CATEGORY 377 | DESCRIPTION = """ 378 | Pixel-level transformations. The val parameter controls the intensity or resolution of the effect, depending on the operation. 379 | """ 380 | 381 | @classmethod 382 | def INPUT_TYPES(cls) -> InputType: 383 | d = super().INPUT_TYPES() 384 | d = deep_merge(d, { 385 | "optional": { 386 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}), 387 | Lexicon.FUNCTION: (EnumAdjustPixel._member_names_, { 388 | "default": EnumAdjustPixel.PIXELATE.name,}), 389 | Lexicon.VALUE: ("FLOAT", { 390 | "default": 0, "min": 0, "max": 1, "step": 0.01}) 391 | } 392 | }) 393 | return Lexicon._parse(d) 394 | 395 | def run(self, **kw) -> RGBAMaskType: 396 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None) 397 | op = parse_param(kw, Lexicon.FUNCTION, EnumAdjustPixel, EnumAdjustPixel.PIXELATE.name) 398 | val = parse_param(kw, Lexicon.VALUE, EnumConvertType.FLOAT, 0) 399 | params = list(zip_longest_fill(pA, op, val)) 400 | images = [] 401 | pbar = ProgressBar(len(params)) 402 | for idx, (pA, op, val) in enumerate(params): 403 | pA = channel_solid() if pA is None else tensor_to_cv(pA, chan=4) 404 | alpha = image_mask(pA) 405 | 406 | match op: 407 | case EnumAdjustPixel.PIXELATE: 408 | pA = image_pixelate(pA, val / 2.) 
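# NOTE (annotation, not part of the original source): the normalized VALUE
# slider (0..1) is rescaled per operation -- PIXELATE above halves it before
# calling image_pixelate, while the PIXELSCALE, QUANTIZE and POSTERIZE cases
# below pass it through to their image_* helpers unchanged.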
409 | 410 | case EnumAdjustPixel.PIXELSCALE: 411 | pA = image_pixelscale(pA, val) 412 | 413 | case EnumAdjustPixel.QUANTIZE: 414 | pA = image_quantize(pA, val) 415 | 416 | case EnumAdjustPixel.POSTERIZE: 417 | pA = image_posterize(pA, val) 418 | 419 | pA = image_mask_add(pA, alpha) 420 | images.append(cv_to_tensor_full(pA)) 421 | pbar.update_absolute(idx) 422 | return image_stack(images) 423 | 424 | class AdjustSharpenNode(CozyImageNode): 425 | NAME = "ADJUST: SHARPEN (JOV)" 426 | CATEGORY = JOV_CATEGORY 427 | DESCRIPTION = """ 428 | Sharpen the pixels of an image. 429 | """ 430 | 431 | @classmethod 432 | def INPUT_TYPES(cls) -> InputType: 433 | d = super().INPUT_TYPES() 434 | d = deep_merge(d, { 435 | "optional": { 436 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}), 437 | Lexicon.AMOUNT: ("FLOAT", { 438 | "default": 0, "min": 0, "max": 1, "step": 0.01}), 439 | Lexicon.THRESHOLD: ("FLOAT", { 440 | "default": 0, "min": 0, "max": 1, "step": 0.01}) 441 | } 442 | }) 443 | return Lexicon._parse(d) 444 | 445 | def run(self, **kw) -> RGBAMaskType: 446 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None) 447 | amount = parse_param(kw, Lexicon.AMOUNT, EnumConvertType.FLOAT, 0) 448 | threshold = parse_param(kw, Lexicon.THRESHOLD, EnumConvertType.FLOAT, 0) 449 | params = list(zip_longest_fill(pA, amount, threshold)) 450 | images = [] 451 | pbar = ProgressBar(len(params)) 452 | for idx, (pA, amount, threshold) in enumerate(params): 453 | pA = channel_solid() if pA is None else tensor_to_cv(pA) 454 | pA = image_sharpen(pA, amount / 2., threshold=threshold / 25.5) 455 | images.append(cv_to_tensor_full(pA)) 456 | pbar.update_absolute(idx) 457 | return image_stack(images) 458 | -------------------------------------------------------------------------------- /core/anim.py: -------------------------------------------------------------------------------- 1 | """ Jovimetrix - Animation """ 2 | 3 | import sys 4 | 5 | import numpy as np 6 | 7 | from comfy.utils import ProgressBar 8 | 9 | from cozy_comfyui import \ 10 | InputType, EnumConvertType, \ 11 | deep_merge, parse_param, zip_longest_fill 12 | 13 | from cozy_comfyui.lexicon import \ 14 | Lexicon 15 | 16 | from cozy_comfyui.node import \ 17 | CozyBaseNode 18 | 19 | from cozy_comfyui.maths.ease import \ 20 | EnumEase, \ 21 | ease_op 22 | 23 | from cozy_comfyui.maths.norm import \ 24 | EnumNormalize, \ 25 | norm_op 26 | 27 | from cozy_comfyui.maths.wave import \ 28 | EnumWave, \ 29 | wave_op 30 | 31 | from cozy_comfyui.maths.series import \ 32 | seriesLinear 33 | 34 | # ============================================================================== 35 | # === GLOBAL === 36 | # ============================================================================== 37 | 38 | JOV_CATEGORY = "ANIMATION" 39 | 40 | # ============================================================================== 41 | # === CLASS === 42 | # ============================================================================== 43 | 44 | class ResultObject(object): 45 | def __init__(self, *arg, **kw) -> None: 46 | self.frame = [] 47 | self.lin = [] 48 | self.fixed = [] 49 | self.trigger = [] 50 | self.batch = [] 51 | 52 | class TickNode(CozyBaseNode): 53 | NAME = "TICK (JOV) ⏱" 54 | CATEGORY = JOV_CATEGORY 55 | RETURN_TYPES = ("FLOAT", "FLOAT", "FLOAT", "FLOAT", "FLOAT") 56 | RETURN_NAMES = ("VALUE", "LINEAR", "EASED", "SCALAR_LIN", "SCALAR_EASE") 57 | OUTPUT_IS_LIST = (True, True, True, True, True,) 58 | OUTPUT_TOOLTIPS = ( 59 | "List of values", 60 | "Normalized values", 61 | "Eased values", 62 
| "Scalar normalized values", 63 | "Scalar eased values", 64 | ) 65 | DESCRIPTION = """ 66 | Value generator with normalized values based on based on time interval. 67 | """ 68 | 69 | @classmethod 70 | def INPUT_TYPES(cls) -> InputType: 71 | d = super().INPUT_TYPES() 72 | d = deep_merge(d, { 73 | "optional": { 74 | # forces a MOD on CYCLE 75 | Lexicon.START: ("INT", { 76 | "default": 0, "min": -sys.maxsize, "max": sys.maxsize 77 | }), 78 | # interval between frames 79 | Lexicon.STEP: ("FLOAT", { 80 | "default": 0, "min": -sys.float_info.max, "max": sys.float_info.max, "precision": 3, 81 | "tooltip": "Amount to add to each frame per tick" 82 | }), 83 | # how many frames to dump.... 84 | Lexicon.COUNT: ("INT", { 85 | "default": 1, "min": 1, "max": 1500 86 | }), 87 | Lexicon.LOOP: ("INT", { 88 | "default": 0, "min": 0, "max": sys.maxsize, 89 | "tooltip": "What value before looping starts. 0 means linear playback (no loop point)" 90 | }), 91 | Lexicon.PINGPONG: ("BOOLEAN", { 92 | "default": False 93 | }), 94 | Lexicon.EASE: (EnumEase._member_names_, { 95 | "default": EnumEase.LINEAR.name}), 96 | Lexicon.NORMALIZE: (EnumNormalize._member_names_, { 97 | "default": EnumNormalize.MINMAX2.name}), 98 | Lexicon.SCALAR: ("FLOAT", { 99 | "default": 1, "min": 0, "max": sys.float_info.max 100 | }) 101 | 102 | } 103 | }) 104 | return Lexicon._parse(d) 105 | 106 | def run(self, **kw) -> tuple[float, ...]: 107 | """ 108 | Generates a series of numbers with various options including: 109 | - Custom start value (supporting floating point and negative numbers) 110 | - Custom step value (supporting floating point and negative numbers) 111 | - Fixed number of frames 112 | - Custom loop point (series restarts after reaching this many steps) 113 | - Ping-pong option (reverses direction at end points) 114 | - Support for easing functions 115 | - Normalized output 0..1, -1..1, L2 or ZScore 116 | """ 117 | 118 | start = parse_param(kw, Lexicon.START, EnumConvertType.INT, 0)[0] 119 | step = parse_param(kw, Lexicon.STEP, EnumConvertType.FLOAT, 0)[0] 120 | count = parse_param(kw, Lexicon.COUNT, EnumConvertType.INT, 1, 1, 1500)[0] 121 | loop = parse_param(kw, Lexicon.LOOP, EnumConvertType.INT, 0, 0)[0] 122 | pingpong = parse_param(kw, Lexicon.PINGPONG, EnumConvertType.BOOLEAN, False)[0] 123 | ease = parse_param(kw, Lexicon.EASE, EnumEase, EnumEase.LINEAR.name)[0] 124 | normalize = parse_param(kw, Lexicon.NORMALIZE, EnumNormalize, EnumNormalize.MINMAX1.name)[0] 125 | scalar = parse_param(kw, Lexicon.SCALAR, EnumConvertType.FLOAT, 1, 0)[0] 126 | 127 | if step == 0: 128 | step = 1 129 | 130 | cycle = seriesLinear(start, step, count, loop, pingpong) 131 | linear = norm_op(normalize, np.array(cycle)) 132 | eased = ease_op(ease, linear, len(linear)) 133 | scalar_linear = linear * scalar 134 | scalar_eased = eased * scalar 135 | 136 | return ( 137 | cycle, 138 | linear.tolist(), 139 | eased.tolist(), 140 | scalar_linear.tolist(), 141 | scalar_eased.tolist(), 142 | ) 143 | 144 | class WaveGeneratorNode(CozyBaseNode): 145 | NAME = "WAVE GEN (JOV) 🌊" 146 | NAME_PRETTY = "WAVE GEN (JOV) 🌊" 147 | CATEGORY = JOV_CATEGORY 148 | RETURN_TYPES = ("FLOAT", "INT", ) 149 | RETURN_NAMES = ("FLOAT", "INT", ) 150 | DESCRIPTION = """ 151 | Produce waveforms like sine, square, or sawtooth with adjustable frequency, amplitude, phase, and offset. It's handy for creating oscillating patterns or controlling animation dynamics. This node emits both continuous floating-point values and integer representations of the generated waves. 
152 | """ 153 | 154 | @classmethod 155 | def INPUT_TYPES(cls) -> InputType: 156 | d = super().INPUT_TYPES() 157 | d = deep_merge(d, { 158 | "optional": { 159 | Lexicon.WAVE: (EnumWave._member_names_, { 160 | "default": EnumWave.SIN.name}), 161 | Lexicon.FREQ: ("FLOAT", { 162 | "default": 1, "min": 0, "max": sys.float_info.max, "step": 0.01,}), 163 | Lexicon.AMP: ("FLOAT", { 164 | "default": 1, "min": 0, "max": sys.float_info.max, "step": 0.01,}), 165 | Lexicon.PHASE: ("FLOAT", { 166 | "default": 0, "min": 0, "max": 1, "step": 0.01}), 167 | Lexicon.OFFSET: ("FLOAT", { 168 | "default": 0, "min": 0, "max": 1, "step": 0.001}), 169 | Lexicon.TIME: ("FLOAT", { 170 | "default": 0, "min": 0, "max": sys.float_info.max, "step": 0.0001}), 171 | Lexicon.INVERT: ("BOOLEAN", { 172 | "default": False}), 173 | Lexicon.ABSOLUTE: ("BOOLEAN", { 174 | "default": False,}), 175 | } 176 | }) 177 | return Lexicon._parse(d) 178 | 179 | def run(self, **kw) -> tuple[float, int]: 180 | op = parse_param(kw, Lexicon.WAVE, EnumWave, EnumWave.SIN.name) 181 | freq = parse_param(kw, Lexicon.FREQ, EnumConvertType.FLOAT, 1, 0) 182 | amp = parse_param(kw, Lexicon.AMP, EnumConvertType.FLOAT, 1, 0) 183 | phase = parse_param(kw, Lexicon.PHASE, EnumConvertType.FLOAT, 0, 0) 184 | shift = parse_param(kw, Lexicon.OFFSET, EnumConvertType.FLOAT, 0, 0) 185 | delta_time = parse_param(kw, Lexicon.TIME, EnumConvertType.FLOAT, 0, 0) 186 | invert = parse_param(kw, Lexicon.INVERT, EnumConvertType.BOOLEAN, False) 187 | absolute = parse_param(kw, Lexicon.ABSOLUTE, EnumConvertType.BOOLEAN, False) 188 | results = [] 189 | params = list(zip_longest_fill(op, freq, amp, phase, shift, delta_time, invert, absolute)) 190 | pbar = ProgressBar(len(params)) 191 | for idx, (op, freq, amp, phase, shift, delta_time, invert, absolute) in enumerate(params): 192 | # freq = 1. / freq 193 | if invert: 194 | amp = 1. / val 195 | val = wave_op(op, phase, freq, amp, shift, delta_time) 196 | if absolute: 197 | val = np.abs(val) 198 | val = max(-sys.float_info.max, min(val, sys.float_info.max)) 199 | results.append([val, int(val)]) 200 | pbar.update_absolute(idx) 201 | return *list(zip(*results)), 202 | 203 | ''' 204 | class TickOldNode(CozyBaseNode): 205 | NAME = "TICK OLD (JOV) ⏱" 206 | CATEGORY = JOV_CATEGORY 207 | RETURN_TYPES = ("INT", "FLOAT", "FLOAT", COZY_TYPE_ANY, COZY_TYPE_ANY,) 208 | RETURN_NAMES = ("VAL", "LINEAR", "FPS", "TRIGGER", "BATCH",) 209 | OUTPUT_IS_LIST = (True, False, False, False, False,) 210 | OUTPUT_TOOLTIPS = ( 211 | "Current value for the configured tick as ComfyUI List", 212 | "Normalized tick value (0..1) based on BPM and Loop", 213 | "Current 'frame' in the tick based on FPS setting", 214 | "Based on the BPM settings, on beat hit, output the input at '⚡'", 215 | "Current batch of values for the configured tick as standard list which works in other Jovimetrix nodes", 216 | ) 217 | DESCRIPTION = """ 218 | A timer and frame counter, emitting pulses or signals based on time intervals. It allows precise synchronization and control over animation sequences, with options to adjust FPS, BPM, and loop points. This node is useful for generating time-based events or driving animations with rhythmic precision. 
219 | """ 220 | 221 | @classmethod 222 | def INPUT_TYPES(cls) -> InputType: 223 | d = super().INPUT_TYPES() 224 | d = deep_merge(d, { 225 | "optional": { 226 | # data to pass on a pulse of the loop 227 | Lexicon.TRIGGER: (COZY_TYPE_ANY, { 228 | "default": None, 229 | "tooltip": "Output to send when beat (BPM setting) is hit" 230 | }), 231 | # forces a MOD on CYCLE 232 | Lexicon.START: ("INT", { 233 | "default": 0, "min": 0, "max": sys.maxsize, 234 | }), 235 | Lexicon.LOOP: ("INT", { 236 | "default": 0, "min": 0, "max": sys.maxsize, 237 | "tooltip": "Number of frames before looping starts. 0 means continuous playback (no loop point)" 238 | }), 239 | Lexicon.FPS: ("INT", { 240 | "default": 24, "min": 1 241 | }), 242 | Lexicon.BPM: ("INT", { 243 | "default": 120, "min": 1, "max": 60000, 244 | "tooltip": "BPM trigger rate to send the input. If input is empty, TRUE is sent on trigger" 245 | }), 246 | Lexicon.NOTE: ("INT", { 247 | "default": 4, "min": 1, "max": 256, 248 | "tooltip": "Number of beats per measure. Quarter note is 4, Eighth is 8, 16 is 16, etc."}), 249 | # how many frames to dump.... 250 | Lexicon.BATCH: ("INT", { 251 | "default": 1, "min": 1, "max": 32767, 252 | "tooltip": "Number of frames wanted" 253 | }), 254 | Lexicon.STEP: ("INT", { 255 | "default": 0, "min": 0, "max": sys.maxsize 256 | }), 257 | } 258 | }) 259 | return Lexicon._parse(d) 260 | 261 | def run(self, ident, **kw) -> tuple[int, float, float, Any]: 262 | passthru = parse_param(kw, Lexicon.TRIGGER, EnumConvertType.ANY, None)[0] 263 | stride = parse_param(kw, Lexicon.STEP, EnumConvertType.INT, 0)[0] 264 | loop = parse_param(kw, Lexicon.LOOP, EnumConvertType.INT, 0)[0] 265 | start = parse_param(kw, Lexicon.START, EnumConvertType.INT, self.__frame)[0] 266 | if loop != 0: 267 | self.__frame %= loop 268 | fps = parse_param(kw, Lexicon.FPS, EnumConvertType.INT, 24, 1)[0] 269 | bpm = parse_param(kw, Lexicon.BPM, EnumConvertType.INT, 120, 1)[0] 270 | divisor = parse_param(kw, Lexicon.NOTE, EnumConvertType.INT, 4, 1)[0] 271 | beat = 60. / max(1., bpm) / divisor 272 | batch = parse_param(kw, Lexicon.BATCH, EnumConvertType.INT, 1, 1)[0] 273 | step_fps = 1. 
/ max(1., float(fps)) 274 | 275 | trigger = None 276 | results = ResultObject() 277 | pbar = ProgressBar(batch) 278 | step = stride if stride != 0 else max(1, loop / batch) 279 | for idx in range(batch): 280 | trigger = False 281 | lin = start if loop == 0 else start / loop 282 | fixed_step = math.fmod(start * step_fps, fps) 283 | if (math.fmod(fixed_step, beat) == 0): 284 | trigger = [passthru] 285 | if loop != 0: 286 | start %= loop 287 | results.frame.append(start) 288 | results.lin.append(float(lin)) 289 | results.fixed.append(float(fixed_step)) 290 | results.trigger.append(trigger) 291 | results.batch.append(start) 292 | start += step 293 | pbar.update_absolute(idx) 294 | 295 | return (results.frame, results.lin, results.fixed, results.trigger, results.batch,) 296 | 297 | ''' -------------------------------------------------------------------------------- /core/color.py: -------------------------------------------------------------------------------- 1 | """ Jovimetrix - Color """ 2 | 3 | from enum import Enum 4 | from typing import List 5 | 6 | import cv2 7 | import torch 8 | 9 | from comfy.utils import ProgressBar 10 | 11 | from cozy_comfyui import \ 12 | IMAGE_SIZE_MIN, \ 13 | InputType, RGBAMaskType, EnumConvertType, TensorType, \ 14 | deep_merge, parse_param, zip_longest_fill 15 | 16 | from cozy_comfyui.lexicon import \ 17 | Lexicon 18 | 19 | from cozy_comfyui.node import \ 20 | COZY_TYPE_IMAGE, \ 21 | CozyBaseNode, CozyImageNode 22 | 23 | from cozy_comfyui.image.adjust import \ 24 | image_invert 25 | 26 | from cozy_comfyui.image.color import \ 27 | EnumCBDeficiency, EnumCBSimulator, EnumColorMap, EnumColorTheory, \ 28 | color_lut_full, color_lut_match, color_lut_palette, \ 29 | color_lut_tonal, color_lut_visualize, color_match_reinhard, \ 30 | color_theory, color_blind, color_top_used, image_gradient_expand, \ 31 | image_gradient_map 32 | 33 | from cozy_comfyui.image.channel import \ 34 | channel_solid 35 | 36 | from cozy_comfyui.image.compose import \ 37 | EnumScaleMode, EnumInterpolation, \ 38 | image_scalefit 39 | 40 | from cozy_comfyui.image.convert import \ 41 | tensor_to_cv, cv_to_tensor, cv_to_tensor_full, image_mask, image_mask_add 42 | 43 | from cozy_comfyui.image.misc import \ 44 | image_stack 45 | 46 | # ============================================================================== 47 | # === GLOBAL === 48 | # ============================================================================== 49 | 50 | JOV_CATEGORY = "COLOR" 51 | 52 | # ============================================================================== 53 | # === ENUMERATION === 54 | # ============================================================================== 55 | 56 | class EnumColorMatchMode(Enum): 57 | REINHARD = 30 58 | LUT = 10 59 | # HISTOGRAM = 20 60 | 61 | class EnumColorMatchMap(Enum): 62 | USER_MAP = 0 63 | PRESET_MAP = 10 64 | 65 | # ============================================================================== 66 | # === CLASS === 67 | # ============================================================================== 68 | 69 | class ColorBlindNode(CozyImageNode): 70 | NAME = "COLOR BLIND (JOV) 👁‍🗨" 71 | CATEGORY = JOV_CATEGORY 72 | DESCRIPTION = """ 73 | Simulate color blindness effects on images. You can select various types of color deficiencies, adjust the severity of the effect, and apply the simulation using different simulators. This node is ideal for accessibility testing and design adjustments, ensuring inclusivity in your visual content. 
74 | """ 75 | 76 | @classmethod 77 | def INPUT_TYPES(cls) -> InputType: 78 | d = super().INPUT_TYPES() 79 | d = deep_merge(d, { 80 | "optional": { 81 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}), 82 | Lexicon.DEFICIENCY: (EnumCBDeficiency._member_names_, { 83 | "default": EnumCBDeficiency.PROTAN.name,}), 84 | Lexicon.SOLVER: (EnumCBSimulator._member_names_, { 85 | "default": EnumCBSimulator.AUTOSELECT.name,}) 86 | } 87 | }) 88 | return Lexicon._parse(d) 89 | 90 | def run(self, **kw) -> RGBAMaskType: 91 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None) 92 | deficiency = parse_param(kw, Lexicon.DEFICIENCY, EnumCBDeficiency, EnumCBDeficiency.PROTAN.name) 93 | simulator = parse_param(kw, Lexicon.SOLVER, EnumCBSimulator, EnumCBSimulator.AUTOSELECT.name) 94 | severity = parse_param(kw, Lexicon.VALUE, EnumConvertType.FLOAT, 1) 95 | params = list(zip_longest_fill(pA, deficiency, simulator, severity)) 96 | images = [] 97 | pbar = ProgressBar(len(params)) 98 | for idx, (pA, deficiency, simulator, severity) in enumerate(params): 99 | pA = channel_solid() if pA is None else tensor_to_cv(pA) 100 | pA = color_blind(pA, deficiency, simulator, severity) 101 | images.append(cv_to_tensor_full(pA)) 102 | pbar.update_absolute(idx) 103 | return image_stack(images) 104 | 105 | class ColorMatchNode(CozyImageNode): 106 | NAME = "COLOR MATCH (JOV) 💞" 107 | CATEGORY = JOV_CATEGORY 108 | DESCRIPTION = """ 109 | Adjust the color scheme of one image to match another with the Color Match Node. Choose from various color matching LUTs or Reinhard matching. You can specify a custom user color maps, the number of colors, and whether to flip or invert the images. 110 | """ 111 | 112 | @classmethod 113 | def INPUT_TYPES(cls) -> InputType: 114 | d = super().INPUT_TYPES() 115 | d = deep_merge(d, { 116 | "optional": { 117 | Lexicon.IMAGE_SOURCE: (COZY_TYPE_IMAGE, {}), 118 | Lexicon.IMAGE_TARGET: (COZY_TYPE_IMAGE, {}), 119 | Lexicon.MODE: (EnumColorMatchMode._member_names_, { 120 | "default": EnumColorMatchMode.REINHARD.name, 121 | "tooltip": "Match colors from an image or built-in (LUT), Histogram lookups or Reinhard method"}), 122 | Lexicon.MAP: (EnumColorMatchMap._member_names_, { 123 | "default": EnumColorMatchMap.USER_MAP.name, }), 124 | Lexicon.COLORMAP: (EnumColorMap._member_names_, { 125 | "default": EnumColorMap.HSV.name,}), 126 | Lexicon.VALUE: ("INT", { 127 | "default": 255, "min": 0, "max": 255, 128 | "tooltip":"The number of colors to use from the LUT during the remap. 
Will quantize the LUT range."}), 129 | Lexicon.SWAP: ("BOOLEAN", { 130 | "default": False,}), 131 | Lexicon.INVERT: ("BOOLEAN", { 132 | "default": False,}), 133 | Lexicon.MATTE: ("VEC4", { 134 | "default": (0, 0, 0, 255), "rgb": True,}), 135 | } 136 | }) 137 | return Lexicon._parse(d) 138 | 139 | def run(self, **kw) -> RGBAMaskType: 140 | pA = parse_param(kw, Lexicon.IMAGE_SOURCE, EnumConvertType.IMAGE, None) 141 | pB = parse_param(kw, Lexicon.IMAGE_TARGET, EnumConvertType.IMAGE, None) 142 | mode = parse_param(kw, Lexicon.MODE, EnumColorMatchMode, EnumColorMatchMode.REINHARD.name) 143 | cmap = parse_param(kw, Lexicon.MAP, EnumColorMatchMap, EnumColorMatchMap.USER_MAP.name) 144 | colormap = parse_param(kw, Lexicon.COLORMAP, EnumColorMap, EnumColorMap.HSV.name) 145 | num_colors = parse_param(kw, Lexicon.VALUE, EnumConvertType.INT, 255) 146 | swap = parse_param(kw, Lexicon.SWAP, EnumConvertType.BOOLEAN, False) 147 | invert = parse_param(kw, Lexicon.INVERT, EnumConvertType.BOOLEAN, False) 148 | matte = parse_param(kw, Lexicon.MATTE, EnumConvertType.VEC4, (0, 0, 0, 255), 0, 255) 149 | params = list(zip_longest_fill(pA, pB, mode, cmap, colormap, num_colors, swap, invert, matte)) 150 | images = [] 151 | pbar = ProgressBar(len(params)) 152 | for idx, (pA, pB, mode, cmap, colormap, num_colors, swap, invert, matte) in enumerate(params): 153 | if swap == True: 154 | pA, pB = pB, pA 155 | 156 | mask = None 157 | if pA is None: 158 | pA = channel_solid() 159 | else: 160 | pA = tensor_to_cv(pA) 161 | if pA.ndim == 3 and pA.shape[2] == 4: 162 | mask = image_mask(pA) 163 | 164 | # h, w = pA.shape[:2] 165 | if pB is None: 166 | pB = channel_solid() 167 | else: 168 | pB = tensor_to_cv(pB) 169 | 170 | match mode: 171 | case EnumColorMatchMode.LUT: 172 | if cmap == EnumColorMatchMap.PRESET_MAP: 173 | pB = None 174 | pA = color_lut_match(pA, colormap.value, pB, num_colors) 175 | 176 | case EnumColorMatchMode.REINHARD: 177 | pA = color_match_reinhard(pA, pB) 178 | 179 | if invert == True: 180 | pA = image_invert(pA, 1) 181 | 182 | if mask is not None: 183 | pA = image_mask_add(pA, mask) 184 | 185 | images.append(cv_to_tensor_full(pA, matte)) 186 | pbar.update_absolute(idx) 187 | return image_stack(images) 188 | 189 | class ColorKMeansNode(CozyBaseNode): 190 | NAME = "COLOR MEANS (JOV) 〰️" 191 | CATEGORY = JOV_CATEGORY 192 | RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "JLUT", "IMAGE",) 193 | RETURN_NAMES = ("IMAGE", "PALETTE", "GRADIENT", "LUT", "RGB", ) 194 | OUTPUT_TOOLTIPS = ( 195 | "Sequence of top-K colors. Count depends on value in `VAL`.", 196 | "Simple Tone palette based on result top-K colors. Width is taken from input.", 197 | "Gradient of top-K colors.", 198 | "Full 3D LUT of the image mapped to the resultant top-K colors chosen.", 199 | "Visualization of full 3D .cube LUT in JLUT output" 200 | ) 201 | DESCRIPTION = """ 202 | The top-k colors ordered from most->least used as a strip, tonal palette and 3D LUT. 203 | """ 204 | 205 | @classmethod 206 | def INPUT_TYPES(cls) -> InputType: 207 | d = super().INPUT_TYPES() 208 | d = deep_merge(d, { 209 | "optional": { 210 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}), 211 | Lexicon.VALUE: ("INT", { 212 | "default": 12, "min": 1, "max": 255, 213 | "tooltip": "The top K colors to select"}), 214 | Lexicon.SIZE: ("INT", { 215 | "default": 32, "min": 1, "max": 256, 216 | "tooltip": "Height of the tones in the strip. 
Width is based on input"}), 217 | Lexicon.COUNT: ("INT", { 218 | "default": 33, "min": 1, "max": 255, 219 | "tooltip": "Number of nodes to use in interpolation of full LUT (256 is every pixel)"}), 220 | Lexicon.WH: ("VEC2", { 221 | "default": (256, 256), "mij":IMAGE_SIZE_MIN, "int": True, 222 | "label": ["W", "H"] 223 | }), 224 | } 225 | }) 226 | return Lexicon._parse(d) 227 | 228 | def run(self, **kw) -> RGBAMaskType: 229 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None) 230 | kcolors = parse_param(kw, Lexicon.VALUE, EnumConvertType.INT, 12, 1, 255) 231 | lut_height = parse_param(kw, Lexicon.SIZE, EnumConvertType.INT, 32, 1, 256) 232 | nodes = parse_param(kw, Lexicon.COUNT, EnumConvertType.INT, 33, 1, 255) 233 | wihi = parse_param(kw, Lexicon.WH, EnumConvertType.VEC2INT, (256, 256), IMAGE_SIZE_MIN) 234 | 235 | params = list(zip_longest_fill(pA, kcolors, nodes, lut_height, wihi)) 236 | top_colors = [] 237 | lut_tonal = [] 238 | lut_full = [] 239 | lut_visualized = [] 240 | gradients = [] 241 | pbar = ProgressBar(len(params)) # one update per batched parameter set 242 | for idx, (pA, kcolors, nodes, lut_height, wihi) in enumerate(params): 243 | if pA is None: 244 | pA = channel_solid() 245 | 246 | pA = tensor_to_cv(pA) 247 | colors = color_top_used(pA, kcolors) 248 | 249 | # size down to 1px strip then expand to 256 for full gradient 250 | top_colors.extend([cv_to_tensor(channel_solid(*wihi, color=c)) for c in colors]) 251 | lut = color_lut_tonal(colors, width=pA.shape[1], height=lut_height) 252 | lut_tonal.append(cv_to_tensor(lut)) 253 | full = color_lut_full(colors, nodes) 254 | lut_full.append(torch.from_numpy(full)) 255 | lut = color_lut_visualize(full, wihi[1]) 256 | lut_visualized.append(cv_to_tensor(lut)) 257 | palette = color_lut_palette(colors, 1) 258 | gradient = image_gradient_expand(palette) 259 | gradient = cv2.resize(gradient, wihi) 260 | gradients.append(cv_to_tensor(gradient)) 261 | pbar.update_absolute(idx) 262 | 263 | return torch.stack(top_colors), torch.stack(lut_tonal), torch.stack(gradients), lut_full, torch.stack(lut_visualized), 264 | 265 | class ColorTheoryNode(CozyBaseNode): 266 | NAME = "COLOR THEORY (JOV) 🛞" 267 | CATEGORY = JOV_CATEGORY 268 | RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "IMAGE", "IMAGE") 269 | RETURN_NAMES = ("C1", "C2", "C3", "C4", "C5") 270 | DESCRIPTION = """ 271 | Generate a color harmony based on the selected scheme. 272 | 273 | Supported schemes include complementary, analogous, triadic, tetradic, and more. 274 | 275 | Users can customize the angle of separation for color calculations, offering flexibility in color manipulation and exploration of different color palettes. 
276 | """ 277 | 278 | @classmethod 279 | def INPUT_TYPES(cls) -> InputType: 280 | d = super().INPUT_TYPES() 281 | d = deep_merge(d, { 282 | "optional": { 283 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}), 284 | Lexicon.SCHEME: (EnumColorTheory._member_names_, { 285 | "default": EnumColorTheory.COMPLIMENTARY.name}), 286 | Lexicon.VALUE: ("INT", { 287 | "default": 45, "min": -90, "max": 90, 288 | "tooltip": "Custom angle of separation to use when calculating colors"}), 289 | Lexicon.INVERT: ("BOOLEAN", { 290 | "default": False}) 291 | } 292 | }) 293 | return Lexicon._parse(d) 294 | 295 | def run(self, **kw) -> tuple[List[TensorType], List[TensorType]]: 296 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None) 297 | scheme = parse_param(kw, Lexicon.SCHEME, EnumColorTheory, EnumColorTheory.COMPLIMENTARY.name) 298 | value = parse_param(kw, Lexicon.VALUE, EnumConvertType.INT, 45, -90, 90) 299 | invert = parse_param(kw, Lexicon.INVERT, EnumConvertType.BOOLEAN, False) 300 | params = list(zip_longest_fill(pA, scheme, value, invert)) 301 | images = [] 302 | pbar = ProgressBar(len(params)) 303 | for idx, (img, scheme, value, invert) in enumerate(params): 304 | img = channel_solid() if img is None else tensor_to_cv(img) 305 | img = color_theory(img, value, scheme) 306 | if invert: 307 | img = (image_invert(s, 1) for s in img) 308 | images.append([cv_to_tensor(a) for a in img]) 309 | pbar.update_absolute(idx) 310 | return image_stack(images) 311 | 312 | class GradientMapNode(CozyImageNode): 313 | NAME = "GRADIENT MAP (JOV) 🇲🇺" 314 | CATEGORY = JOV_CATEGORY 315 | DESCRIPTION = """ 316 | Remaps an input image using a gradient lookup table (LUT). 317 | 318 | The gradient image will be translated into a single row lookup table. 319 | """ 320 | 321 | @classmethod 322 | def INPUT_TYPES(cls) -> InputType: 323 | d = super().INPUT_TYPES() 324 | d = deep_merge(d, { 325 | "optional": { 326 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, { 327 | "tooltip": "Image to remap with gradient input"}), 328 | Lexicon.GRADIENT: (COZY_TYPE_IMAGE, { 329 | "tooltip": f"Look up table (LUT) to remap the input image in `{"IMAGE"}`"}), 330 | Lexicon.REVERSE: ("BOOLEAN", { 331 | "default": False, 332 | "tooltip": "Reverse the gradient from left-to-right"}), 333 | Lexicon.MODE: (EnumScaleMode._member_names_, { 334 | "default": EnumScaleMode.MATTE.name,}), 335 | Lexicon.WH: ("VEC2", { 336 | "default": (512, 512), "mij":IMAGE_SIZE_MIN, "int": True, 337 | "label": ["W", "H"] }), 338 | Lexicon.SAMPLE: (EnumInterpolation._member_names_, { 339 | "default": EnumInterpolation.LANCZOS4.name,}), 340 | Lexicon.MATTE: ("VEC4", { 341 | "default": (0, 0, 0, 255), "rgb": True,}) 342 | } 343 | }) 344 | return Lexicon._parse(d) 345 | 346 | def run(self, **kw) -> RGBAMaskType: 347 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None) 348 | gradient = parse_param(kw, Lexicon.GRADIENT, EnumConvertType.IMAGE, None) 349 | reverse = parse_param(kw, Lexicon.REVERSE, EnumConvertType.BOOLEAN, False) 350 | mode = parse_param(kw, Lexicon.MODE, EnumScaleMode, EnumScaleMode.MATTE.name) 351 | wihi = parse_param(kw, Lexicon.WH, EnumConvertType.VEC2INT, (512, 512), IMAGE_SIZE_MIN) 352 | sample = parse_param(kw, Lexicon.SAMPLE, EnumInterpolation, EnumInterpolation.LANCZOS4.name) 353 | matte = parse_param(kw, Lexicon.MATTE, EnumConvertType.VEC4INT, (0, 0, 0, 255), 0, 255) 354 | images = [] 355 | params = list(zip_longest_fill(pA, gradient, reverse, mode, sample, wihi, matte)) 356 | pbar = ProgressBar(len(params)) 357 | for idx, (pA, gradient, reverse, mode, 
sample, wihi, matte) in enumerate(params): 358 | pA = channel_solid() if pA is None else tensor_to_cv(pA) 359 | mask = None 360 | if pA.ndim == 3 and pA.shape[2] == 4: 361 | mask = image_mask(pA) 362 | 363 | gradient = channel_solid() if gradient is None else tensor_to_cv(gradient) 364 | pA = image_gradient_map(pA, gradient) 365 | if mode != EnumScaleMode.MATTE: 366 | w, h = wihi 367 | pA = image_scalefit(pA, w, h, mode, sample) 368 | 369 | if mask is not None: 370 | pA = image_mask_add(pA, mask) 371 | 372 | images.append(cv_to_tensor_full(pA, matte)) 373 | pbar.update_absolute(idx) 374 | return image_stack(images) 375 | -------------------------------------------------------------------------------- /core/create.py: -------------------------------------------------------------------------------- 1 | """ Jovimetrix - Creation """ 2 | 3 | import numpy as np 4 | from PIL import ImageFont 5 | from skimage.filters import gaussian 6 | 7 | from comfy.utils import ProgressBar 8 | 9 | from cozy_comfyui import \ 10 | IMAGE_SIZE_MIN, \ 11 | InputType, EnumConvertType, RGBAMaskType, \ 12 | deep_merge, parse_param, zip_longest_fill 13 | 14 | from cozy_comfyui.lexicon import \ 15 | Lexicon 16 | 17 | from cozy_comfyui.node import \ 18 | COZY_TYPE_IMAGE, \ 19 | CozyImageNode 20 | 21 | from cozy_comfyui.image import \ 22 | EnumImageType 23 | 24 | from cozy_comfyui.image.adjust import \ 25 | image_invert 26 | 27 | from cozy_comfyui.image.channel import \ 28 | channel_solid 29 | 30 | from cozy_comfyui.image.compose import \ 31 | EnumEdge, EnumScaleMode, EnumInterpolation, \ 32 | image_rotate, image_scalefit, image_transform, image_translate, image_blend 33 | 34 | from cozy_comfyui.image.convert import \ 35 | image_convert, pil_to_cv, cv_to_tensor, cv_to_tensor_full, tensor_to_cv, \ 36 | image_mask, image_mask_add, image_mask_binary 37 | 38 | from cozy_comfyui.image.misc import \ 39 | image_stack 40 | 41 | from cozy_comfyui.image.shape import \ 42 | EnumShapes, \ 43 | shape_ellipse, shape_polygon, shape_quad 44 | 45 | from cozy_comfyui.image.text import \ 46 | EnumAlignment, EnumJustify, \ 47 | font_names, text_autosize, text_draw 48 | 49 | # ============================================================================== 50 | # === GLOBAL === 51 | # ============================================================================== 52 | 53 | JOV_CATEGORY = "CREATE" 54 | 55 | # ============================================================================== 56 | # === CLASS === 57 | # ============================================================================== 58 | 59 | class ConstantNode(CozyImageNode): 60 | NAME = "CONSTANT (JOV) 🟪" 61 | CATEGORY = JOV_CATEGORY 62 | DESCRIPTION = """ 63 | Generate a constant image or mask of a specified size and color. It can be used to create solid color backgrounds or matte images for compositing with other visual elements. The node allows you to define the desired width and height of the output and specify the RGBA color value for the constant output. Additionally, you can input an optional image to use as a matte with the selected color. 
64 | """ 65 | 66 | @classmethod 67 | def INPUT_TYPES(cls) -> InputType: 68 | d = super().INPUT_TYPES() 69 | d = deep_merge(d, { 70 | "optional": { 71 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, { 72 | "tooltip":"Optional Image to Matte with Selected Color"}), 73 | Lexicon.MASK: (COZY_TYPE_IMAGE, { 74 | "tooltip":"Override Image mask"}), 75 | Lexicon.COLOR: ("VEC4", { 76 | "default": (0, 0, 0, 255), "rgb": True, 77 | "tooltip": "Constant Color to Output"}), 78 | Lexicon.MODE: (EnumScaleMode._member_names_, { 79 | "default": EnumScaleMode.MATTE.name,}), 80 | Lexicon.WH: ("VEC2", { 81 | "default": (512, 512), "mij": 1, "int": True, 82 | "label": ["W", "H"],}), 83 | Lexicon.SAMPLE: (EnumInterpolation._member_names_, { 84 | "default": EnumInterpolation.LANCZOS4.name,}) 85 | } 86 | }) 87 | return Lexicon._parse(d) 88 | 89 | def run(self, **kw) -> RGBAMaskType: 90 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None) 91 | mask = parse_param(kw, Lexicon.MASK, EnumConvertType.MASK, None) 92 | matte = parse_param(kw, Lexicon.COLOR, EnumConvertType.VEC4INT, (0, 0, 0, 255), 0, 255) 93 | mode = parse_param(kw, Lexicon.MODE, EnumScaleMode, EnumScaleMode.MATTE.name) 94 | wihi = parse_param(kw, Lexicon.WH, EnumConvertType.VEC2INT, (512, 512), 1) 95 | sample = parse_param(kw, Lexicon.SAMPLE, EnumInterpolation, EnumInterpolation.LANCZOS4.name) 96 | images = [] 97 | params = list(zip_longest_fill(pA, mask, matte, mode, wihi, sample)) 98 | pbar = ProgressBar(len(params)) 99 | for idx, (pA, mask, matte, mode, wihi, sample) in enumerate(params): 100 | width, height = wihi 101 | w, h = width, height 102 | 103 | if pA is None: 104 | pA = channel_solid(width, height, (0,0,0,255)) 105 | else: 106 | pA = tensor_to_cv(pA) 107 | pA = image_convert(pA, 4) 108 | h, w = pA.shape[:2] 109 | 110 | if mask is None: 111 | mask = image_mask(pA) 112 | else: 113 | mask = tensor_to_cv(mask, chan=1) 114 | mask = image_scalefit(mask, w, h) 115 | 116 | pB = channel_solid(w, h, matte) 117 | pA = image_blend(pB, pA, mask) 118 | pA = image_mask_add(pA, mask) 119 | 120 | if mode != EnumScaleMode.MATTE: 121 | pA = image_scalefit(pA, width, height, mode, sample, matte) 122 | images.append(cv_to_tensor_full(pA, matte)) 123 | pbar.update_absolute(idx) 124 | return image_stack(images) 125 | 126 | class ShapeNode(CozyImageNode): 127 | NAME = "SHAPE GEN (JOV) ✨" 128 | CATEGORY = JOV_CATEGORY 129 | DESCRIPTION = """ 130 | Create n-sided polygons. These shapes can be customized by adjusting parameters such as size, color, position, rotation angle, and edge blur. The node provides options to specify the shape type, the number of sides for polygons, the RGBA color value for the main shape, and the RGBA color value for the background. Additionally, you can control the width and height of the output images, the position offset, and the amount of edge blur applied to the shapes. 
131 | """ 132 | 133 | @classmethod 134 | def INPUT_TYPES(cls) -> InputType: 135 | d = super().INPUT_TYPES() 136 | d = deep_merge(d, { 137 | "optional": { 138 | Lexicon.SHAPE: (EnumShapes._member_names_, { 139 | "default": EnumShapes.CIRCLE.name}), 140 | Lexicon.SIDES: ("INT", { 141 | "default": 3, "min": 3, "max": 100}), 142 | Lexicon.COLOR: ("VEC4", { 143 | "default": (255, 255, 255, 255), "rgb": True, 144 | "tooltip": "Main Shape Color"}), 145 | Lexicon.MATTE: ("VEC4", { 146 | "default": (0, 0, 0, 255), "rgb": True,}), 147 | Lexicon.WH: ("VEC2", { 148 | "default": (256, 256), "mij":IMAGE_SIZE_MIN, "int": True, 149 | "label": ["W", "H"],}), 150 | Lexicon.XY: ("VEC2", { 151 | "default": (0, 0,), "mij": -1, "maj": 1, 152 | "label": ["X", "Y"]}), 153 | Lexicon.ANGLE: ("FLOAT", { 154 | "default": 0, "min": -180, "max": 180, "step": 0.01,}), 155 | Lexicon.SIZE: ("VEC2", { 156 | "default": (1, 1), "mij": 0, "maj": 1, 157 | "label": ["X", "Y"]}), 158 | Lexicon.EDGE: (EnumEdge._member_names_, { 159 | "default": EnumEdge.CLIP.name}), 160 | Lexicon.BLUR: ("FLOAT", { 161 | "default": 0, "min": 0, "step": 0.01,}), 162 | } 163 | }) 164 | return Lexicon._parse(d) 165 | 166 | def run(self, **kw) -> RGBAMaskType: 167 | shape = parse_param(kw, Lexicon.SHAPE, EnumShapes, EnumShapes.CIRCLE.name) 168 | sides = parse_param(kw, Lexicon.SIDES, EnumConvertType.INT, 3, 3) 169 | color = parse_param(kw, Lexicon.COLOR, EnumConvertType.VEC4INT, (255, 255, 255, 255), 0, 255) 170 | matte = parse_param(kw, Lexicon.MATTE, EnumConvertType.VEC4INT, (0, 0, 0, 255), 0, 255) 171 | wihi = parse_param(kw, Lexicon.WH, EnumConvertType.VEC2INT, (256, 256), IMAGE_SIZE_MIN) 172 | offset = parse_param(kw, Lexicon.XY, EnumConvertType.VEC2, (0, 0), -1, 1) 173 | angle = parse_param(kw, Lexicon.ANGLE, EnumConvertType.FLOAT, 0, -180, 180) 174 | size = parse_param(kw, Lexicon.SIZE, EnumConvertType.VEC2, (1, 1), 0, 1, zero=0.001) 175 | edge = parse_param(kw, Lexicon.EDGE, EnumEdge, EnumEdge.CLIP.name) 176 | blur = parse_param(kw, Lexicon.BLUR, EnumConvertType.FLOAT, 0, 0) 177 | params = list(zip_longest_fill(shape, sides, color, matte, wihi, offset, angle, size, edge, blur)) 178 | images = [] 179 | pbar = ProgressBar(len(params)) 180 | for idx, (shape, sides, color, matte, wihi, offset, angle, size, edge, blur) in enumerate(params): 181 | width, height = wihi 182 | sizeX, sizeY = size 183 | fill = color[:3][::-1] 184 | 185 | match shape: 186 | case EnumShapes.SQUARE: 187 | rgb = shape_quad(width, height, sizeX, sizeY, fill) 188 | 189 | case EnumShapes.CIRCLE: 190 | rgb = shape_ellipse(width, height, sizeX, sizeY, fill) 191 | 192 | case EnumShapes.POLYGON: 193 | rgb = shape_polygon(width, height, sizeX, sides, fill) 194 | 195 | rgb = pil_to_cv(rgb) 196 | rgb = image_transform(rgb, offset, angle, edge=edge) 197 | mask = image_mask_binary(rgb) 198 | 199 | if blur > 0: 200 | # @TODO: Do blur on larger canvas to remove wrap bleed. 
201 | rgb = (gaussian(rgb, sigma=blur, channel_axis=2) * 255).astype(np.uint8) 202 | mask = (gaussian(mask, sigma=blur, channel_axis=2) * 255).astype(np.uint8) 203 | 204 | back = list(matte[:3]) + [255] 205 | canvas = np.full((height, width, 4), back, dtype=rgb.dtype) 206 | rgba = image_blend(canvas, rgb, mask) 207 | rgba = image_mask_add(rgba, mask) 208 | rgb = image_convert(rgba, 3) 209 | 210 | images.append([cv_to_tensor(rgba), cv_to_tensor(rgb), cv_to_tensor(mask, True)]) 211 | pbar.update_absolute(idx) 212 | return image_stack(images) 213 | 214 | class TextNode(CozyImageNode): 215 | NAME = "TEXT GEN (JOV) 📝" 216 | CATEGORY = JOV_CATEGORY 217 | FONTS = font_names() 218 | FONT_NAMES = sorted(FONTS.keys()) 219 | DESCRIPTION = """ 220 | Generates images containing text based on parameters such as font, size, alignment, color, and position. Users can input custom text messages, select fonts from a list of available options, adjust font size, and specify the alignment and justification of the text. Additionally, the node provides options for auto-sizing text to fit within specified dimensions, controlling letter-by-letter rendering, and applying edge effects such as clipping and inversion. 221 | """ 222 | 223 | @classmethod 224 | def INPUT_TYPES(cls) -> InputType: 225 | d = super().INPUT_TYPES() 226 | d = deep_merge(d, { 227 | "optional": { 228 | Lexicon.STRING: ("STRING", { 229 | "default": "jovimetrix", "multiline": True, 230 | "dynamicPrompts": False, 231 | "tooltip": "Your Message"}), 232 | Lexicon.FONT: (cls.FONT_NAMES, { 233 | "default": cls.FONT_NAMES[0]}), 234 | Lexicon.LETTER: ("BOOLEAN", { 235 | "default": False,}), 236 | Lexicon.AUTOSIZE: ("BOOLEAN", { 237 | "default": False, 238 | "tooltip": "Scale based on Width & Height"}), 239 | Lexicon.COLOR: ("VEC4", { 240 | "default": (255, 255, 255, 255), "rgb": True, 241 | "tooltip": "Color of the letters"}), 242 | Lexicon.MATTE: ("VEC4", { 243 | "default": (0, 0, 0, 255), "rgb": True,}), 244 | Lexicon.COLUMNS: ("INT", { 245 | "default": 0, "min": 0}), 246 | # if auto on, hide these... 
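                # NOTE: when AUTOSIZE is enabled, run() recomputes the font size via
                # text_autosize(), so the SIZE widget below is effectively ignored;
                # actually hiding these widgets would presumably happen client-side.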
247 | Lexicon.SIZE: ("INT", { 248 | "default": 16, "min": 8}), 249 | Lexicon.ALIGN: (EnumAlignment._member_names_, { 250 | "default": EnumAlignment.CENTER.name,}), 251 | Lexicon.JUSTIFY: (EnumJustify._member_names_, { 252 | "default": EnumJustify.CENTER.name,}), 253 | Lexicon.MARGIN: ("INT", { 254 | "default": 0, "min": -1024, "max": 1024,}), 255 | Lexicon.SPACING: ("INT", { 256 | "default": 0, "min": -1024, "max": 1024}), 257 | Lexicon.WH: ("VEC2", { 258 | "default": (256, 256), "mij":IMAGE_SIZE_MIN, "int": True, 259 | "label": ["W", "H"],}), 260 | Lexicon.XY: ("VEC2", { 261 | "default": (0, 0,), "mij": -1, "maj": 1, 262 | "label": ["X", "Y"], 263 | "tooltip":"Offset the position"}), 264 | Lexicon.ANGLE: ("FLOAT", { 265 | "default": 0, "step": 0.01,}), 266 | Lexicon.EDGE: (EnumEdge._member_names_, { 267 | "default": EnumEdge.CLIP.name}), 268 | Lexicon.INVERT: ("BOOLEAN", { 269 | "default": False, 270 | "tooltip": "Invert the mask input"}) 271 | } 272 | }) 273 | return Lexicon._parse(d) 274 | 275 | def run(self, **kw) -> RGBAMaskType: 276 | full_text = parse_param(kw, Lexicon.STRING, EnumConvertType.STRING, "jovimetrix") 277 | font_idx = parse_param(kw, Lexicon.FONT, EnumConvertType.STRING, self.FONT_NAMES[0]) 278 | autosize = parse_param(kw, Lexicon.AUTOSIZE, EnumConvertType.BOOLEAN, False) 279 | letter = parse_param(kw, Lexicon.LETTER, EnumConvertType.BOOLEAN, False) 280 | color = parse_param(kw, Lexicon.COLOR, EnumConvertType.VEC4INT, (255,255,255,255)) 281 | matte = parse_param(kw, Lexicon.MATTE, EnumConvertType.VEC4INT, (0,0,0,255)) 282 | columns = parse_param(kw, Lexicon.COLUMNS, EnumConvertType.INT, 0) 283 | font_size = parse_param(kw, Lexicon.SIZE, EnumConvertType.INT, 1) 284 | align = parse_param(kw, Lexicon.ALIGN, EnumAlignment, EnumAlignment.CENTER.name) 285 | justify = parse_param(kw, Lexicon.JUSTIFY, EnumJustify, EnumJustify.CENTER.name) 286 | margin = parse_param(kw, Lexicon.MARGIN, EnumConvertType.INT, 0) 287 | line_spacing = parse_param(kw, Lexicon.SPACING, EnumConvertType.INT, 0) 288 | wihi = parse_param(kw, Lexicon.WH, EnumConvertType.VEC2INT, (512, 512), IMAGE_SIZE_MIN) 289 | pos = parse_param(kw, Lexicon.XY, EnumConvertType.VEC2, (0, 0)) 290 | angle = parse_param(kw, Lexicon.ANGLE, EnumConvertType.INT, 0) 291 | edge = parse_param(kw, Lexicon.EDGE, EnumEdge, EnumEdge.CLIP.name) 292 | invert = parse_param(kw, Lexicon.INVERT, EnumConvertType.BOOLEAN, False) 293 | images = [] 294 | params = list(zip_longest_fill(full_text, font_idx, autosize, letter, color, 295 | matte, columns, font_size, align, justify, margin, 296 | line_spacing, wihi, pos, angle, edge, invert)) 297 | 298 | pbar = ProgressBar(len(params)) 299 | for idx, (full_text, font_idx, autosize, letter, color, matte, columns, 300 | font_size, align, justify, margin, line_spacing, wihi, pos, 301 | angle, edge, invert) in enumerate(params): 302 | 303 | width, height = wihi 304 | font_name = self.FONTS[font_idx] 305 | full_text = str(full_text) 306 | 307 | if letter: 308 | full_text = full_text.replace('\n', '') 309 | if autosize: 310 | _, font_size = text_autosize(full_text[0].upper(), font_name, width, height)[:2] 311 | margin = 0 312 | line_spacing = 0 313 | else: 314 | if autosize: 315 | wm = width - margin * 2 316 | hm = height - margin * 2 - line_spacing 317 | columns = 0 if columns == 0 else columns * 2 + 2 318 | full_text, font_size = text_autosize(full_text, font_name, wm, hm, columns)[:2] 319 | full_text = [full_text] 320 | font_size *= 2.5 321 | 322 | font = ImageFont.truetype(font_name, font_size) 323 | 
for ch in full_text: 324 | img = text_draw(ch, font, width, height, align, justify, margin, line_spacing, color) 325 | img = image_rotate(img, angle, edge=edge) 326 | img = image_translate(img, pos, edge=edge) 327 | if invert: 328 | img = image_invert(img, 1) 329 | images.append(cv_to_tensor_full(img, matte)) 330 | pbar.update_absolute(idx) 331 | return image_stack(images) 332 | -------------------------------------------------------------------------------- /core/trans.py: -------------------------------------------------------------------------------- 1 | """ Jovimetrix - Transform """ 2 | 3 | 4 | import sys 5 | from enum import Enum 6 | 7 | from comfy.utils import ProgressBar 8 | 9 | from cozy_comfyui import \ 10 | logger, \ 11 | IMAGE_SIZE_MIN, \ 12 | InputType, RGBAMaskType, EnumConvertType, \ 13 | deep_merge, parse_param, parse_dynamic, zip_longest_fill 14 | 15 | from cozy_comfyui.lexicon import \ 16 | Lexicon 17 | 18 | from cozy_comfyui.node import \ 19 | COZY_TYPE_IMAGE, \ 20 | CozyImageNode, CozyBaseNode 21 | 22 | from cozy_comfyui.image.channel import \ 23 | channel_solid 24 | 25 | from cozy_comfyui.image.convert import \ 26 | tensor_to_cv, cv_to_tensor_full, cv_to_tensor, image_mask, image_mask_add 27 | 28 | from cozy_comfyui.image.compose import \ 29 | EnumOrientation, EnumEdge, EnumMirrorMode, EnumScaleMode, EnumInterpolation, \ 30 | image_edge_wrap, image_mirror, image_scalefit, image_transform, \ 31 | image_crop, image_crop_center, image_crop_polygonal, image_stacker, \ 32 | image_flatten 33 | 34 | from cozy_comfyui.image.misc import \ 35 | image_stack 36 | 37 | from cozy_comfyui.image.mapping import \ 38 | EnumProjection, \ 39 | remap_fisheye, remap_perspective, remap_polar, remap_sphere 40 | 41 | # ============================================================================== 42 | # === GLOBAL === 43 | # ============================================================================== 44 | 45 | JOV_CATEGORY = "TRANSFORM" 46 | 47 | # ============================================================================== 48 | # === ENUMERATION === 49 | # ============================================================================== 50 | 51 | class EnumCropMode(Enum): 52 | CENTER = 20 53 | XY = 0 54 | FREE = 10 55 | 56 | # ============================================================================== 57 | # === CLASS === 58 | # ============================================================================== 59 | 60 | class CropNode(CozyImageNode): 61 | NAME = "CROP (JOV) ✂️" 62 | CATEGORY = JOV_CATEGORY 63 | DESCRIPTION = """ 64 | Extract a portion of an input image or resize it. It supports various cropping modes, including center cropping, custom XY cropping, and free-form polygonal cropping. This node is useful for preparing image data for specific tasks or extracting regions of interest. 
65 | """ 66 | 67 | @classmethod 68 | def INPUT_TYPES(cls) -> InputType: 69 | d = super().INPUT_TYPES() 70 | d = deep_merge(d, { 71 | "optional": { 72 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}), 73 | Lexicon.FUNCTION: (EnumCropMode._member_names_, { 74 | "default": EnumCropMode.CENTER.name}), 75 | Lexicon.XY: ("VEC2", { 76 | "default": (0, 0), "mij": 0, "maj": 1, 77 | "label": ["X", "Y"]}), 78 | Lexicon.WH: ("VEC2", { 79 | "default": (512, 512), "mij": IMAGE_SIZE_MIN, "int": True, 80 | "label": ["W", "H"]}), 81 | Lexicon.TLTR: ("VEC4", { 82 | "default": (0, 0, 0, 1), "mij": 0, "maj": 1, 83 | "label": ["TOP", "LEFT", "TOP", "RIGHT"],}), 84 | Lexicon.BLBR: ("VEC4", { 85 | "default": (1, 0, 1, 1), "mij": 0, "maj": 1, 86 | "label": ["BOTTOM", "LEFT", "BOTTOM", "RIGHT"],}), 87 | Lexicon.MATTE: ("VEC4", { 88 | "default": (0, 0, 0, 255), "rgb": True,}) 89 | } 90 | }) 91 | return Lexicon._parse(d) 92 | 93 | def run(self, **kw) -> RGBAMaskType: 94 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None) 95 | func = parse_param(kw, Lexicon.FUNCTION, EnumCropMode, EnumCropMode.CENTER.name) 96 | # if less than 1 then use as scalar, over 1 = int(size) 97 | xy = parse_param(kw, Lexicon.XY, EnumConvertType.VEC2, (0, 0,)) 98 | wihi = parse_param(kw, Lexicon.WH, EnumConvertType.VEC2INT, (512, 512), IMAGE_SIZE_MIN) 99 | tltr = parse_param(kw, Lexicon.TLTR, EnumConvertType.VEC4, (0, 0, 0, 1,)) 100 | blbr = parse_param(kw, Lexicon.BLBR, EnumConvertType.VEC4, (1, 0, 1, 1,)) 101 | matte = parse_param(kw, Lexicon.MATTE, EnumConvertType.VEC4INT, (0, 0, 0, 255), 0, 255) 102 | params = list(zip_longest_fill(pA, func, xy, wihi, tltr, blbr, matte)) 103 | images = [] 104 | pbar = ProgressBar(len(params)) 105 | for idx, (pA, func, xy, wihi, tltr, blbr, matte) in enumerate(params): 106 | width, height = wihi 107 | pA = tensor_to_cv(pA) if pA is not None else channel_solid(width, height) 108 | alpha = None 109 | if pA.ndim == 3 and pA.shape[2] == 4: 110 | alpha = image_mask(pA) 111 | 112 | if func == EnumCropMode.FREE: 113 | x1, y1, x2, y2 = tltr 114 | x4, y4, x3, y3 = blbr 115 | points = (x1 * width, y1 * height), (x2 * width, y2 * height), \ 116 | (x3 * width, y3 * height), (x4 * width, y4 * height) 117 | pA = image_crop_polygonal(pA, points) 118 | if alpha is not None: 119 | alpha = image_crop_polygonal(alpha, points) 120 | pA[..., 3] = alpha[..., 0][:,:] 121 | elif func == EnumCropMode.XY: 122 | pA = image_crop(pA, width, height, xy) 123 | else: 124 | pA = image_crop_center(pA, width, height) 125 | images.append(cv_to_tensor_full(pA, matte)) 126 | pbar.update_absolute(idx) 127 | return image_stack(images) 128 | 129 | class FlattenNode(CozyImageNode): 130 | NAME = "FLATTEN (JOV) ⬇️" 131 | CATEGORY = JOV_CATEGORY 132 | DESCRIPTION = """ 133 | Combine multiple input images into a single image by summing their pixel values. This operation is useful for merging multiple layers or images into one composite image, such as combining different elements of a design or merging masks. Users can specify the blending mode and interpolation method to control how the images are combined. Additionally, a matte can be applied to adjust the transparency of the final composite image. 
134 | """ 135 | 136 | @classmethod 137 | def INPUT_TYPES(cls) -> InputType: 138 | d = super().INPUT_TYPES() 139 | d = deep_merge(d, { 140 | "optional": { 141 | Lexicon.MODE: (EnumScaleMode._member_names_, { 142 | "default": EnumScaleMode.MATTE.name,}), 143 | Lexicon.WH: ("VEC2", { 144 | "default": (512, 512), "mij":1, "int": True, 145 | "label": ["W", "H"]}), 146 | Lexicon.SAMPLE: (EnumInterpolation._member_names_, { 147 | "default": EnumInterpolation.LANCZOS4.name,}), 148 | Lexicon.MATTE: ("VEC4", { 149 | "default": (0, 0, 0, 255), "rgb": True,}), 150 | Lexicon.OFFSET: ("VEC2", { 151 | "default": (0, 0), "mij":0, "int": True, 152 | "label": ["X", "Y"]}), 153 | } 154 | }) 155 | return Lexicon._parse(d) 156 | 157 | def run(self, **kw) -> RGBAMaskType: 158 | imgs = parse_dynamic(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None) 159 | if imgs is None: 160 | logger.warning("no images to flatten") 161 | return () 162 | 163 | # be less dumb when merging 164 | pA = [tensor_to_cv(i) for i in imgs] 165 | mode = parse_param(kw, Lexicon.MODE, EnumScaleMode, EnumScaleMode.MATTE.name)[0] 166 | wihi = parse_param(kw, Lexicon.WH, EnumConvertType.VEC2INT, (512, 512), 1)[0] 167 | sample = parse_param(kw, Lexicon.SAMPLE, EnumInterpolation, EnumInterpolation.LANCZOS4.name)[0] 168 | matte = parse_param(kw, Lexicon.MATTE, EnumConvertType.VEC4INT, (0, 0, 0, 255), 0, 255)[0] 169 | offset = parse_param(kw, Lexicon.WH, EnumConvertType.VEC2INT, (0, 0), 0)[0] 170 | w, h = wihi 171 | x, y = offset 172 | current = image_flatten(pA, x, y, w, h, mode=mode, sample=sample) 173 | images = [] 174 | images.append(cv_to_tensor_full(current, matte)) 175 | return image_stack(images) 176 | 177 | class SplitNode(CozyBaseNode): 178 | NAME = "SPLIT (JOV) 🎭" 179 | CATEGORY = JOV_CATEGORY 180 | RETURN_TYPES = ("IMAGE", "IMAGE",) 181 | RETURN_NAMES = ("IMAGEA", "IMAGEB",) 182 | OUTPUT_TOOLTIPS = ( 183 | "Left/Top image", 184 | "Right/Bottom image" 185 | ) 186 | DESCRIPTION = """ 187 | Split an image into two or four images based on the percentages for width and height. 
188 | """ 189 | 190 | @classmethod 191 | def INPUT_TYPES(cls) -> InputType: 192 | d = super().INPUT_TYPES() 193 | d = deep_merge(d, { 194 | "optional": { 195 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}), 196 | Lexicon.VALUE: ("FLOAT", { 197 | "default": 0.5, "min": 0, "max": 1, "step": 0.001 198 | }), 199 | Lexicon.FLIP: ("BOOLEAN", { 200 | "default": False, 201 | "tooltip": "Horizontal split (False) or Vertical split (True)" 202 | }), 203 | Lexicon.MODE: (EnumScaleMode._member_names_, { 204 | "default": EnumScaleMode.MATTE.name,}), 205 | Lexicon.WH: ("VEC2", { 206 | "default": (512, 512), "mij":IMAGE_SIZE_MIN, "int": True, 207 | "label": ["W", "H"]}), 208 | Lexicon.SAMPLE: (EnumInterpolation._member_names_, { 209 | "default": EnumInterpolation.LANCZOS4.name,}), 210 | Lexicon.MATTE: ("VEC4", { 211 | "default": (0, 0, 0, 255), "rgb": True,}) 212 | } 213 | }) 214 | return Lexicon._parse(d) 215 | 216 | def run(self, **kw) -> RGBAMaskType: 217 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None) 218 | percent = parse_param(kw, Lexicon.VALUE, EnumConvertType.FLOAT, 0.5, 0, 1) 219 | flip = parse_param(kw, Lexicon.FLIP, EnumConvertType.BOOLEAN, False) 220 | mode = parse_param(kw, Lexicon.MODE, EnumScaleMode, EnumScaleMode.MATTE.name) 221 | wihi = parse_param(kw, Lexicon.WH, EnumConvertType.VEC2INT, (512, 512), IMAGE_SIZE_MIN) 222 | sample = parse_param(kw, Lexicon.SAMPLE, EnumInterpolation, EnumInterpolation.LANCZOS4.name) 223 | matte = parse_param(kw, Lexicon.MATTE, EnumConvertType.VEC4INT, (0, 0, 0, 255), 0, 255) 224 | params = list(zip_longest_fill(pA, percent, flip, mode, wihi, sample, matte)) 225 | images = [] 226 | pbar = ProgressBar(len(params)) 227 | for idx, (pA, percent, flip, mode, wihi, sample, matte) in enumerate(params): 228 | w, h = wihi 229 | pA = channel_solid(w, h, matte) if pA is None else tensor_to_cv(pA) 230 | 231 | if flip: 232 | size = pA.shape[1] 233 | percent = max(1, min(size-1, int(size * percent))) 234 | image_a = pA[:, :percent] 235 | image_b = pA[:, percent:] 236 | else: 237 | size = pA.shape[0] 238 | percent = max(1, min(size-1, int(size * percent))) 239 | image_a = pA[:percent, :] 240 | image_b = pA[percent:, :] 241 | 242 | if mode != EnumScaleMode.MATTE: 243 | image_a = image_scalefit(image_a, w, h, mode, sample) 244 | image_b = image_scalefit(image_b, w, h, mode, sample) 245 | 246 | images.append([cv_to_tensor(img) for img in [image_a, image_b]]) 247 | pbar.update_absolute(idx) 248 | return image_stack(images) 249 | 250 | class StackNode(CozyImageNode): 251 | NAME = "STACK (JOV) ➕" 252 | CATEGORY = JOV_CATEGORY 253 | DESCRIPTION = """ 254 | Merge multiple input images into a single composite image by stacking them along a specified axis. 255 | 256 | Options include axis, stride, scaling mode, width and height, interpolation method, and matte color. 257 | 258 | The axis parameter allows for horizontal, vertical, or grid stacking of images, while stride controls the spacing between them. 
259 | """ 260 | 261 | @classmethod 262 | def INPUT_TYPES(cls) -> InputType: 263 | d = super().INPUT_TYPES() 264 | d = deep_merge(d, { 265 | "optional": { 266 | Lexicon.AXIS: (EnumOrientation._member_names_, { 267 | "default": EnumOrientation.GRID.name,}), 268 | Lexicon.STEP: ("INT", { 269 | "default": 1, "min": 0, 270 | "tooltip":"How many images are placed before a new row starts (stride)"}), 271 | Lexicon.MODE: (EnumScaleMode._member_names_, { 272 | "default": EnumScaleMode.MATTE.name,}), 273 | Lexicon.WH: ("VEC2", { 274 | "default": (512, 512), "mij": IMAGE_SIZE_MIN, "int": True, 275 | "label": ["W", "H"]}), 276 | Lexicon.SAMPLE: (EnumInterpolation._member_names_, { 277 | "default": EnumInterpolation.LANCZOS4.name,}), 278 | Lexicon.MATTE: ("VEC4", { 279 | "default": (0, 0, 0, 255), "rgb": True,}) 280 | } 281 | }) 282 | return Lexicon._parse(d) 283 | 284 | def run(self, **kw) -> RGBAMaskType: 285 | images = parse_dynamic(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None) 286 | if len(images) == 0: 287 | logger.warning("no images to stack") 288 | return 289 | 290 | images = [tensor_to_cv(i) for i in images] 291 | axis = parse_param(kw, Lexicon.AXIS, EnumOrientation, EnumOrientation.GRID.name)[0] 292 | stride = parse_param(kw, Lexicon.STEP, EnumConvertType.INT, 1, 0)[0] 293 | mode = parse_param(kw, Lexicon.MODE, EnumScaleMode, EnumScaleMode.MATTE.name)[0] 294 | wihi = parse_param(kw, Lexicon.WH, EnumConvertType.VEC2INT, (512, 512), IMAGE_SIZE_MIN)[0] 295 | sample = parse_param(kw, Lexicon.SAMPLE, EnumInterpolation, EnumInterpolation.LANCZOS4.name)[0] 296 | matte = parse_param(kw, Lexicon.MATTE, EnumConvertType.VEC4INT, (0, 0, 0, 255), 0, 255)[0] 297 | img = image_stacker(images, axis, stride) #, matte) 298 | if mode != EnumScaleMode.MATTE: 299 | w, h = wihi 300 | img = image_scalefit(img, w, h, mode, sample) 301 | rgba, rgb, mask = cv_to_tensor_full(img, matte) 302 | return rgba.unsqueeze(0), rgb.unsqueeze(0), mask.unsqueeze(0) 303 | 304 | class TransformNode(CozyImageNode): 305 | NAME = "TRANSFORM (JOV) 🏝️" 306 | CATEGORY = JOV_CATEGORY 307 | DESCRIPTION = """ 308 | Apply various geometric transformations to images, including translation, rotation, scaling, mirroring, tiling and perspective projection. It offers extensive control over image manipulation to achieve desired visual effects. 
309 | """ 310 | 311 | @classmethod 312 | def INPUT_TYPES(cls) -> InputType: 313 | d = super().INPUT_TYPES(prompt=True, dynprompt=True) 314 | d = deep_merge(d, { 315 | "optional": { 316 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}), 317 | Lexicon.MASK: (COZY_TYPE_IMAGE, { 318 | "tooltip": "Override Image mask"}), 319 | Lexicon.XY: ("VEC2", { 320 | "default": (0, 0,), "mij": -1, "maj": 1, 321 | "label": ["X", "Y"]}), 322 | Lexicon.ANGLE: ("FLOAT", { 323 | "default": 0, "min": -sys.float_info.max, "max": sys.float_info.max, "step": 0.1,}), 324 | Lexicon.SIZE: ("VEC2", { 325 | "default": (1, 1), "mij": 0.001, 326 | "label": ["X", "Y"]}), 327 | Lexicon.TILE: ("VEC2", { 328 | "default": (1, 1), "mij": 1, 329 | "label": ["X", "Y"]}), 330 | Lexicon.EDGE: (EnumEdge._member_names_, { 331 | "default": EnumEdge.CLIP.name}), 332 | Lexicon.MIRROR: (EnumMirrorMode._member_names_, { 333 | "default": EnumMirrorMode.NONE.name}), 334 | Lexicon.PIVOT: ("VEC2", { 335 | "default": (0.5, 0.5), "mij": 0, "maj": 1, "step": 0.01, 336 | "label": ["X", "Y"]}), 337 | Lexicon.PROJECTION: (EnumProjection._member_names_, { 338 | "default": EnumProjection.NORMAL.name}), 339 | Lexicon.TLTR: ("VEC4", { 340 | "default": (0, 0, 1, 0), "mij": 0, "maj": 1, "step": 0.005, 341 | "label": ["TOP", "LEFT", "TOP", "RIGHT"],}), 342 | Lexicon.BLBR: ("VEC4", { 343 | "default": (0, 1, 1, 1), "mij": 0, "maj": 1, "step": 0.005, 344 | "label": ["BOTTOM", "LEFT", "BOTTOM", "RIGHT"],}), 345 | Lexicon.STRENGTH: ("FLOAT", { 346 | "default": 1, "min": 0, "max": 1, "step": 0.005}), 347 | Lexicon.MODE: (EnumScaleMode._member_names_, { 348 | "default": EnumScaleMode.MATTE.name,}), 349 | Lexicon.WH: ("VEC2", { 350 | "default": (512, 512), "mij": IMAGE_SIZE_MIN, "int": True, 351 | "label": ["W", "H"]}), 352 | Lexicon.SAMPLE: (EnumInterpolation._member_names_, { 353 | "default": EnumInterpolation.LANCZOS4.name,}), 354 | Lexicon.MATTE: ("VEC4", { 355 | "default": (0, 0, 0, 255), "rgb": True,}) 356 | } 357 | }) 358 | return Lexicon._parse(d) 359 | 360 | def run(self, **kw) -> RGBAMaskType: 361 | pA = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None) 362 | mask = parse_param(kw, Lexicon.MASK, EnumConvertType.IMAGE, None) 363 | offset = parse_param(kw, Lexicon.XY, EnumConvertType.VEC2, (0, 0), -1, 1) 364 | angle = parse_param(kw, Lexicon.ANGLE, EnumConvertType.FLOAT, 0) 365 | size = parse_param(kw, Lexicon.SIZE, EnumConvertType.VEC2, (1, 1), 0.001) 366 | edge = parse_param(kw, Lexicon.EDGE, EnumEdge, EnumEdge.CLIP.name) 367 | mirror = parse_param(kw, Lexicon.MIRROR, EnumMirrorMode, EnumMirrorMode.NONE.name) 368 | mirror_pivot = parse_param(kw, Lexicon.PIVOT, EnumConvertType.VEC2, (0.5, 0.5), 0, 1) 369 | tile_xy = parse_param(kw, Lexicon.TILE, EnumConvertType.VEC2, (1, 1), 1) 370 | proj = parse_param(kw, Lexicon.PROJECTION, EnumProjection, EnumProjection.NORMAL.name) 371 | tltr = parse_param(kw, Lexicon.TLTR, EnumConvertType.VEC4, (0, 0, 1, 0), 0, 1) 372 | blbr = parse_param(kw, Lexicon.BLBR, EnumConvertType.VEC4, (0, 1, 1, 1), 0, 1) 373 | strength = parse_param(kw, Lexicon.STRENGTH, EnumConvertType.FLOAT, 1, 0, 1) 374 | mode = parse_param(kw, Lexicon.MODE, EnumScaleMode, EnumScaleMode.MATTE.name) 375 | wihi = parse_param(kw, Lexicon.WH, EnumConvertType.VEC2INT, (512, 512), IMAGE_SIZE_MIN) 376 | sample = parse_param(kw, Lexicon.SAMPLE, EnumInterpolation, EnumInterpolation.LANCZOS4.name) 377 | matte = parse_param(kw, Lexicon.MATTE, EnumConvertType.VEC4INT, (0, 0, 0, 255), 0, 255) 378 | params = list(zip_longest_fill(pA, mask, offset, angle, size, edge, 
tile_xy, mirror, mirror_pivot, proj, strength, tltr, blbr, mode, wihi, sample, matte)) 379 | images = [] 380 | pbar = ProgressBar(len(params)) 381 | for idx, (pA, mask, offset, angle, size, edge, tile_xy, mirror, mirror_pivot, proj, strength, tltr, blbr, mode, wihi, sample, matte) in enumerate(params): 382 | pA = tensor_to_cv(pA) if pA is not None else channel_solid() 383 | if mask is not None: 384 | mask = tensor_to_cv(mask) 385 | pA = image_mask_add(pA, mask) 386 | 387 | h, w = pA.shape[:2] 388 | pA = image_transform(pA, offset, angle, size, sample, edge) 389 | pA = image_crop_center(pA, w, h) 390 | 391 | if mirror != EnumMirrorMode.NONE: 392 | mpx, mpy = mirror_pivot 393 | pA = image_mirror(pA, mirror, mpx, mpy) 394 | pA = image_scalefit(pA, w, h, EnumScaleMode.FIT, sample) 395 | 396 | tx, ty = tile_xy 397 | if tx != 1. or ty != 1.: 398 | pA = image_edge_wrap(pA, tx / 2 - 0.5, ty / 2 - 0.5) 399 | pA = image_scalefit(pA, w, h, EnumScaleMode.FIT, sample) 400 | 401 | match proj: 402 | case EnumProjection.PERSPECTIVE: 403 | x1, y1, x2, y2 = tltr 404 | x4, y4, x3, y3 = blbr 405 | sh, sw = pA.shape[:2] 406 | x1, x2, x3, x4 = map(lambda x: x * sw, [x1, x2, x3, x4]) 407 | y1, y2, y3, y4 = map(lambda y: y * sh, [y1, y2, y3, y4]) 408 | pA = remap_perspective(pA, [[x1, y1], [x2, y2], [x3, y3], [x4, y4]]) 409 | case EnumProjection.SPHERICAL: 410 | pA = remap_sphere(pA, strength) 411 | case EnumProjection.FISHEYE: 412 | pA = remap_fisheye(pA, strength) 413 | case EnumProjection.POLAR: 414 | pA = remap_polar(pA) 415 | 416 | if proj != EnumProjection.NORMAL: 417 | pA = image_scalefit(pA, w, h, EnumScaleMode.FIT, sample) 418 | 419 | if mode != EnumScaleMode.MATTE: 420 | w, h = wihi 421 | pA = image_scalefit(pA, w, h, mode, sample) 422 | 423 | images.append(cv_to_tensor_full(pA, matte)) 424 | pbar.update_absolute(idx) 425 | return image_stack(images) 426 | -------------------------------------------------------------------------------- /core/utility/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/core/utility/__init__.py -------------------------------------------------------------------------------- /core/utility/info.py: -------------------------------------------------------------------------------- 1 | """ Jovimetrix - Utility """ 2 | 3 | import io 4 | import json 5 | from typing import Any 6 | 7 | import torch 8 | import numpy as np 9 | from PIL import Image 10 | import matplotlib.pyplot as plt 11 | 12 | from cozy_comfyui import \ 13 | IMAGE_SIZE_MIN, \ 14 | InputType, EnumConvertType, TensorType, \ 15 | deep_merge, parse_dynamic, parse_param 16 | 17 | from cozy_comfyui.lexicon import \ 18 | Lexicon 19 | 20 | from cozy_comfyui.node import \ 21 | COZY_TYPE_IMAGE, \ 22 | CozyBaseNode 23 | 24 | from cozy_comfyui.image.convert import \ 25 | pil_to_tensor 26 | 27 | from cozy_comfyui.api import \ 28 | parse_reset 29 | 30 | JOV_CATEGORY = "UTILITY/INFO" 31 | 32 | # ============================================================================== 33 | # === SUPPORT === 34 | # ============================================================================== 35 | 36 | def decode_tensor(tensor: TensorType) -> str: 37 | if tensor.ndim > 3: 38 | b, h, w, cc = tensor.shape 39 | elif tensor.ndim > 2: 40 | cc = 1 41 | b, h, w = tensor.shape 42 | else: 43 | b = 1 44 | cc = 1 45 | h, w = tensor.shape 46 | return f"{b}x{w}x{h}x{cc}" 47 | 48 | # 
============================================================================== 49 | # === CLASS === 50 | # ============================================================================== 51 | 52 | class AkashicData: 53 | def __init__(self, **kw) -> None: 54 | for k, v in kw.items(): 55 | setattr(self, k, v) 56 | 57 | class AkashicNode(CozyBaseNode): 58 | NAME = "AKASHIC (JOV) 📓" 59 | CATEGORY = JOV_CATEGORY 60 | RETURN_NAMES = () 61 | OUTPUT_NODE = True 62 | DESCRIPTION = """ 63 | Visualize data. It accepts various types of data, including images, text, and other types. If no input is provided, it returns an empty result. The output consists of a dictionary containing UI-related information, such as base64-encoded images and text representations of the input data. 64 | """ 65 | 66 | def run(self, **kw) -> tuple[Any, Any]: 67 | kw.pop('ident', None) 68 | o = kw.values() 69 | output = {"ui": {"b64_images": [], "text": []}} 70 | if len(o) == 0: 71 | output["ui"]["result"] = (None, None, ) 72 | return output 73 | 74 | def __parse(val) -> str: 75 | ret = '' 76 | typ = ''.join(repr(type(val)).split("'")[1:2]) 77 | if isinstance(val, dict): 78 | # mixlab layer? 79 | if (image := val.get('image', None)) is not None: 80 | ret = image 81 | if (mask := val.get('mask', None)) is not None: 82 | while len(mask.shape) < len(image.shape): 83 | mask = mask.unsqueeze(-1) 84 | ret = torch.cat((image, mask), dim=-1) 85 | if ret.ndim < 4: 86 | ret = ret.unsqueeze(-1) 87 | ret = decode_tensor(ret) 88 | typ = "Mixlab Layer" 89 | 90 | # vector patch.... 91 | elif 'xyzw' in val: 92 | val = val["xyzw"] 93 | typ = "VECTOR" 94 | # latents.... 95 | elif 'samples' in val: 96 | ret = decode_tensor(val['samples'][0]) 97 | typ = "LATENT" 98 | # empty bugger 99 | elif len(val) == 0: 100 | ret = "" 101 | else: 102 | try: 103 | ret = json.dumps(val, indent=3, separators=(',', ': ')) 104 | except Exception as e: 105 | ret = str(e) 106 | elif isinstance(val, np.ndarray): 107 | ret = str(val) 108 | typ = "NUMPY ARRAY" 109 | elif isinstance(val, (tuple, set, list,)): 110 | if (size := len(val)) > 0: 111 | val = list(val) # sets do not support indexing 112 | if isinstance(val[0], (TensorType,)): 113 | ret = decode_tensor(val[0]) 114 | typ = type(val[0]) 115 | elif size == 1 and isinstance(val[0], (list,)) and isinstance(val[0][0], (TensorType,)): 116 | ret = decode_tensor(val[0][0]) 117 | typ = "CONDITIONING" 118 | elif all(isinstance(i, (tuple, set, list)) for i in val): 119 | ret = "[\n" + ",\n".join(f" {row}" for row in val) + "\n]" 120 | elif all(isinstance(i, (bool, int, float)) for i in val): 121 | ret = ','.join([str(x) for x in val]) 122 | else: 123 | ret = str(val) 124 | elif isinstance(val, bool): 125 | ret = "True" if val else "False" 126 | elif isinstance(val, TensorType): 127 | ret = decode_tensor(val) 128 | else: 129 | ret = str(val) 130 | return json.dumps({typ: ret}, separators=(',', ': ')) 131 | 132 | for x in o: 133 | data = "" 134 | if len(x) > 1: 135 | data += "::\n" 136 | for p in x: 137 | data += __parse(p) + "\n" 138 | output["ui"]["text"].append(data) 139 | return output 140 | 141 | class GraphNode(CozyBaseNode): 142 | NAME = "GRAPH (JOV) 📈" 143 | CATEGORY = JOV_CATEGORY 144 | OUTPUT_NODE = True 145 | RETURN_TYPES = ("IMAGE", ) 146 | RETURN_NAMES = ("IMAGE",) 147 | OUTPUT_TOOLTIPS = ( 148 | "The graphed image", 149 | ) 150 | DESCRIPTION = """ 151 | Visualize a series of data points over time. 
It accepts a dynamic number of values to graph and display, with options to reset the graph or specify the number of values. The output is an image displaying the graph, allowing users to analyze trends and patterns. 152 | """ 153 | 154 | @classmethod 155 | def INPUT_TYPES(cls) -> InputType: 156 | d = super().INPUT_TYPES() 157 | d = deep_merge(d, { 158 | "optional": { 159 | Lexicon.RESET: ("BOOLEAN", { 160 | "default": False, 161 | "tooltip":"Clear the graph history"}), 162 | Lexicon.VALUE: ("INT", { 163 | "default": 60, "min": 0, 164 | "tooltip":"Number of values to graph and display"}), 165 | Lexicon.WH: ("VEC2", { 166 | "default": (512, 512), "mij":IMAGE_SIZE_MIN, "int": True, 167 | "label": ["W", "H"]}), 168 | } 169 | }) 170 | return Lexicon._parse(d) 171 | 172 | @classmethod 173 | def IS_CHANGED(cls, **kw) -> float: 174 | return float('nan') 175 | 176 | def __init__(self, *arg, **kw) -> None: 177 | super().__init__(*arg, **kw) 178 | self.__history = [] 179 | self.__fig, self.__ax = plt.subplots(figsize=(5.12, 5.12)) 180 | 181 | def run(self, ident, **kw) -> tuple[TensorType]: 182 | window = parse_param(kw, Lexicon.VALUE, EnumConvertType.INT, 60)[0] # avoid shadowing the builtin slice 183 | wihi = parse_param(kw, Lexicon.WH, EnumConvertType.VEC2INT, (512, 512), IMAGE_SIZE_MIN)[0] 184 | if parse_reset(ident) > 0 or parse_param(kw, Lexicon.RESET, EnumConvertType.BOOLEAN, False)[0]: 185 | self.__history = [] 186 | longest_edge = 0 187 | dynamic = parse_dynamic(kw, Lexicon.DYNAMIC, EnumConvertType.FLOAT, 0) 188 | dynamic = [i[0] for i in dynamic] 189 | self.__ax.clear() 190 | for idx, val in enumerate(dynamic): 191 | if isinstance(val, (set, tuple,)): 192 | val = list(val) 193 | if not isinstance(val, (list, )): 194 | val = [val] 195 | while len(self.__history) <= idx: 196 | self.__history.append([]) 197 | self.__history[idx].extend(val) 198 | if window > 0: 199 | stride = max(0, -window + len(self.__history[idx]) + 1) 200 | longest_edge = max(longest_edge, stride) 201 | self.__history[idx] = self.__history[idx][stride:] 202 | self.__ax.plot(self.__history[idx], color="rgbcymk"[idx % 7]) # cycle the 7-color palette so extra series cannot raise IndexError 203 | 204 | self.__history = self.__history[:window+1] 205 | width, height = wihi 206 | width, height = (width / 100., height / 100.) 207 | self.__fig.set_figwidth(width) 208 | self.__fig.set_figheight(height) 209 | self.__fig.canvas.draw_idle() 210 | buffer = io.BytesIO() 211 | self.__fig.savefig(buffer, format="png") 212 | buffer.seek(0) 213 | image = Image.open(buffer) 214 | return (pil_to_tensor(image),) 215 | 216 | class ImageInfoNode(CozyBaseNode): 217 | NAME = "IMAGE INFO (JOV) 📚" 218 | CATEGORY = JOV_CATEGORY 219 | RETURN_TYPES = ("INT", "INT", "INT", "INT", "VEC2", "VEC3") 220 | RETURN_NAMES = ("COUNT", "W", "H", "C", "WH", "WHC") 221 | OUTPUT_TOOLTIPS = ( 222 | "Batch count", 223 | "Width", 224 | "Height", 225 | "Channels", 226 | "Width & Height as a VEC2", 227 | "Width, Height and Channels as a VEC3" 228 | ) 229 | DESCRIPTION = """ 230 | Exports and displays immediate information about images. 
231 | """ 232 | 233 | @classmethod 234 | def INPUT_TYPES(cls) -> InputType: 235 | d = super().INPUT_TYPES() 236 | d = deep_merge(d, { 237 | "optional": { 238 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}) 239 | } 240 | }) 241 | return Lexicon._parse(d) 242 | 243 | def run(self, **kw) -> tuple[int, list]: 244 | image = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None) 245 | height, width, cc = image[0].shape 246 | return (len(image), width, height, cc, (width, height), (width, height, cc)) 247 | -------------------------------------------------------------------------------- /core/utility/io.py: -------------------------------------------------------------------------------- 1 | """ Jovimetrix - Utility """ 2 | 3 | import os 4 | import json 5 | from uuid import uuid4 6 | from pathlib import Path 7 | from typing import Any 8 | 9 | import torch 10 | import numpy as np 11 | from PIL import Image 12 | from PIL.PngImagePlugin import PngInfo 13 | 14 | from comfy.utils import ProgressBar 15 | from folder_paths import get_output_directory 16 | from nodes import interrupt_processing 17 | 18 | from cozy_comfyui import \ 19 | logger, \ 20 | InputType, EnumConvertType, \ 21 | deep_merge, parse_param, parse_param_list, zip_longest_fill 22 | 23 | from cozy_comfyui.lexicon import \ 24 | Lexicon 25 | 26 | from cozy_comfyui.node import \ 27 | COZY_TYPE_IMAGE, COZY_TYPE_ANY, \ 28 | CozyBaseNode 29 | 30 | from cozy_comfyui.image.convert import \ 31 | tensor_to_pil, tensor_to_cv 32 | 33 | from cozy_comfyui.api import \ 34 | TimedOutException, ComfyAPIMessage, \ 35 | comfy_api_post 36 | 37 | # ============================================================================== 38 | # === GLOBAL === 39 | # ============================================================================== 40 | 41 | JOV_CATEGORY = "UTILITY/IO" 42 | 43 | # min amount of time before showing the cancel dialog 44 | JOV_DELAY_MIN = 5 45 | try: JOV_DELAY_MIN = int(os.getenv("JOV_DELAY_MIN", JOV_DELAY_MIN)) 46 | except: pass 47 | JOV_DELAY_MIN = max(1, JOV_DELAY_MIN) 48 | 49 | # max 10 minutes to start 50 | JOV_DELAY_MAX = 600 51 | try: JOV_DELAY_MAX = int(os.getenv("JOV_DELAY_MAX", JOV_DELAY_MAX)) 52 | except: pass 53 | 54 | FORMATS = ["gif", "png", "jpg"] 55 | if (JOV_GIFSKI := os.getenv("JOV_GIFSKI", None)) is not None: 56 | if not os.path.isfile(JOV_GIFSKI): 57 | logger.error(f"gifski missing [{JOV_GIFSKI}]") 58 | JOV_GIFSKI = None 59 | else: 60 | FORMATS = ["gifski"] + FORMATS 61 | logger.info("gifski support") 62 | else: 63 | logger.warning("no gifski support") 64 | 65 | # ============================================================================== 66 | # === SUPPORT === 67 | # ============================================================================== 68 | 69 | def path_next(pattern: str) -> str: 70 | """ 71 | Finds the next free path in an sequentially named list of files 72 | """ 73 | i = 1 74 | while os.path.exists(pattern % i): 75 | i = i * 2 76 | 77 | a, b = (i // 2, i) 78 | while a + 1 < b: 79 | c = (a + b) // 2 80 | a, b = (c, b) if os.path.exists(pattern % c) else (a, c) 81 | return pattern % b 82 | 83 | # ============================================================================== 84 | # === CLASS === 85 | # ============================================================================== 86 | 87 | class DelayNode(CozyBaseNode): 88 | NAME = "DELAY (JOV) ✋🏽" 89 | CATEGORY = JOV_CATEGORY 90 | RETURN_TYPES = (COZY_TYPE_ANY,) 91 | RETURN_NAMES = ("OUT",) 92 | OUTPUT_TOOLTIPS = ( 93 | "Pass through data when the delay ends" 94 | ) 
95 | DESCRIPTION = """ 96 | Introduce pauses in the workflow that accept an optional input to pass through and a timer parameter to specify the duration of the delay. If no timer is provided, it defaults to a maximum delay. During the delay, it periodically checks for messages to interrupt the delay. Once the delay is completed, it returns the input passed to it. You can disable the screensaver with the `ENABLE` option 97 | """ 98 | 99 | @classmethod 100 | def INPUT_TYPES(cls) -> InputType: 101 | d = super().INPUT_TYPES() 102 | d = deep_merge(d, { 103 | "optional": { 104 | Lexicon.PASS_IN: (COZY_TYPE_ANY, { 105 | "default": None, 106 | "tooltip":"The data that should be held until the timer completes."}), 107 | Lexicon.TIMER: ("INT", { 108 | "default" : 0, "min": -1, 109 | "tooltip":"How long to delay if enabled. 0 means no delay."}), 110 | Lexicon.ENABLE: ("BOOLEAN", { 111 | "default": True, 112 | "tooltip":"Enable or disable the screensaver."}) 113 | } 114 | }) 115 | return Lexicon._parse(d) 116 | 117 | @classmethod 118 | def IS_CHANGED(cls, **kw) -> float: 119 | return float('nan') 120 | 121 | def run(self, ident, **kw) -> tuple[Any]: 122 | delay = parse_param(kw, Lexicon.TIMER, EnumConvertType.INT, -1, 0, JOV_DELAY_MAX)[0] 123 | if delay < 0: 124 | delay = JOV_DELAY_MAX 125 | if delay > JOV_DELAY_MIN: 126 | comfy_api_post("jovi-delay-user", ident, {"id": ident, "timeout": delay}) 127 | # enable = parse_param(kw, Lexicon.ENABLE, EnumConvertType.BOOLEAN, True)[0] 128 | 129 | step = 1 130 | pbar = ProgressBar(delay) 131 | while step <= delay: 132 | try: 133 | data = ComfyAPIMessage.poll(ident, timeout=1) 134 | if data.get('id', None) == ident: 135 | if data.get('cmd', False) == False: 136 | interrupt_processing(True) 137 | logger.warning(f"delay [cancelled] ({step}): {ident}") 138 | break 139 | except TimedOutException as _: 140 | if step % 10 == 0: 141 | logger.info(f"delay [continue] ({step}): {ident}") 142 | pbar.update_absolute(step) 143 | step += 1 144 | return kw[Lexicon.PASS_IN], 145 | 146 | class ExportNode(CozyBaseNode): 147 | NAME = "EXPORT (JOV) 📽" 148 | CATEGORY = JOV_CATEGORY 149 | NOT_IDEMPOTENT = True 150 | OUTPUT_NODE = True 151 | RETURN_TYPES = () 152 | DESCRIPTION = """ 153 | Responsible for saving images or animations to disk. It supports various output formats such as GIF and GIFSKI. Users can specify the output directory, filename prefix, image quality, frame rate, and other parameters. Additionally, it allows overwriting existing files or generating unique filenames to avoid conflicts. The node outputs the saved images or animation as a tensor. 
154 | """ 155 | 156 | @classmethod 157 | def IS_CHANGED(cls, **kw) -> float: 158 | return float('nan') 159 | 160 | @classmethod 161 | def INPUT_TYPES(cls) -> InputType: 162 | d = super().INPUT_TYPES() 163 | d = deep_merge(d, { 164 | "optional": { 165 | Lexicon.IMAGE: (COZY_TYPE_IMAGE, {}), 166 | Lexicon.PATH: ("STRING", { 167 | "default": get_output_directory(), 168 | "default_top": "",}), 169 | Lexicon.FORMAT: (FORMATS, { 170 | "default": FORMATS[0],}), 171 | Lexicon.PREFIX: ("STRING", { 172 | "default": "jovi",}), 173 | Lexicon.OVERWRITE: ("BOOLEAN", { 174 | "default": False,}), 175 | # GIF ONLY 176 | Lexicon.OPTIMIZE: ("BOOLEAN", { 177 | "default": False,}), 178 | # GIFSKI ONLY 179 | Lexicon.QUALITY: ("INT", { 180 | "default": 90, "min": 1, "max": 100,}), 181 | Lexicon.QUALITY_M: ("INT", { 182 | "default": 100, "min": 1, "max": 100,}), 183 | # GIF OR GIFSKI 184 | Lexicon.FPS: ("INT", { 185 | "default": 24, "min": 1, "max": 60,}), 186 | # GIF OR GIFSKI 187 | Lexicon.LOOP: ("INT", { 188 | "default": 0, "min": 0,}), 189 | } 190 | }) 191 | return Lexicon._parse(d) 192 | 193 | def run(self, **kw) -> None: 194 | images = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None) 195 | suffix = parse_param(kw, Lexicon.PREFIX, EnumConvertType.STRING, uuid4().hex[:16])[0] 196 | output_dir = parse_param(kw, Lexicon.PATH, EnumConvertType.STRING, "")[0] 197 | format = parse_param(kw, Lexicon.FORMAT, EnumConvertType.STRING, "gif")[0] 198 | overwrite = parse_param(kw, Lexicon.OVERWRITE, EnumConvertType.BOOLEAN, False)[0] 199 | optimize = parse_param(kw, Lexicon.OPTIMIZE, EnumConvertType.BOOLEAN, False)[0] 200 | quality = parse_param(kw, Lexicon.QUALITY, EnumConvertType.INT, 90, 0, 100)[0] 201 | motion = parse_param(kw, Lexicon.QUALITY_M, EnumConvertType.INT, 100, 0, 100)[0] 202 | fps = parse_param(kw, Lexicon.FPS, EnumConvertType.INT, 24, 1, 60)[0] 203 | loop = parse_param(kw, Lexicon.LOOP, EnumConvertType.INT, 0, 0)[0] 204 | output_dir = Path(output_dir) 205 | output_dir.mkdir(parents=True, exist_ok=True) 206 | 207 | def output(extension) -> Path: 208 | path = output_dir / f"{suffix}.{extension}" 209 | if not overwrite and os.path.isfile(path): 210 | path = str(output_dir / f"{suffix}_%s.{extension}") 211 | path = path_next(path) 212 | return path 213 | 214 | images = [tensor_to_pil(i) for i in images] 215 | if format == "gifski": 216 | root = output_dir / f"{suffix}_{uuid4().hex[:16]}" 217 | # logger.debug(root) 218 | try: 219 | root.mkdir(parents=True, exist_ok=True) 220 | for idx, i in enumerate(images): 221 | fname = str(root / f"{suffix}_{idx}.png") 222 | i.save(fname) 223 | except Exception as e: 224 | logger.warning(output_dir) 225 | logger.error(str(e)) 226 | return 227 | else: 228 | out = output('gif') 229 | fps = f"--fps {fps}" if fps > 0 else "" 230 | q = f"--quality {quality}" 231 | mq = f"--motion-quality {motion}" 232 | cmd = f"{JOV_GIFSKI} -o {out} {q} {mq} {fps} {str(root)}/{suffix}_*.png" 233 | logger.info(cmd) 234 | try: 235 | os.system(cmd) 236 | except Exception as e: 237 | logger.warning(cmd) 238 | logger.error(str(e)) 239 | 240 | # shutil.rmtree(root) 241 | 242 | elif format == "gif": 243 | images[0].save( 244 | output('gif'), 245 | append_images=images[1:], 246 | disposal=2, 247 | duration=1 / fps * 1000 if fps else 0, 248 | loop=loop, 249 | optimize=optimize, 250 | save_all=True, 251 | ) 252 | else: 253 | for img in images: 254 | img.save(output(format), optimize=optimize) 255 | return () 256 | 257 | class RouteNode(CozyBaseNode): 258 | NAME = "ROUTE (JOV) 🚌" 259 | CATEGORY 
= JOV_CATEGORY
260 |     RETURN_TYPES = ("BUS",) + (COZY_TYPE_ANY,) * 10
261 |     RETURN_NAMES = ("ROUTE",)
262 |     OUTPUT_TOOLTIPS = (
263 |         "Pass through for Route node",
264 |     )
265 |     DESCRIPTION = """
266 | Routes data from the optional input ports to the output ports, preserving the order of inputs. The optional bus input is passed directly through to the first output, while the remaining optional inputs are collected and returned in the order they were connected.
267 | """
268 | 
269 |     @classmethod
270 |     def INPUT_TYPES(cls) -> InputType:
271 |         d = super().INPUT_TYPES()
272 |         d = deep_merge(d, {
273 |             "optional": {
274 |                 Lexicon.ROUTE: ("BUS", {
275 |                     "default": None,}),
276 |             }
277 |         })
278 |         return Lexicon._parse(d)
279 | 
280 |     def run(self, **kw) -> tuple[Any, ...]:
281 |         inout = parse_param(kw, Lexicon.ROUTE, EnumConvertType.ANY, None)
282 |         vars = kw.copy()
283 |         vars.pop(Lexicon.ROUTE, None)
284 |         vars.pop('ident', None)
285 | 
286 |         parsed = []
287 |         values = list(vars.values())
288 |         for x in values:
289 |             p = parse_param_list(x, EnumConvertType.ANY, None)
290 |             parsed.append(p)
291 |         return inout, *parsed,
292 | 
293 | class SaveOutputNode(CozyBaseNode):
294 |     NAME = "SAVE OUTPUT (JOV) 💾"
295 |     CATEGORY = JOV_CATEGORY
296 |     NOT_IDEMPOTENT = True
297 |     OUTPUT_NODE = True
298 |     RETURN_TYPES = ()
299 |     DESCRIPTION = """
300 | Save images with metadata to any specified path. Can save user metadata and prompt information.
301 | """
302 | 
303 |     @classmethod
304 |     def IS_CHANGED(cls, **kw) -> float:
305 |         return float('nan')
306 | 
307 |     @classmethod
308 |     def INPUT_TYPES(cls) -> InputType:
309 |         d = super().INPUT_TYPES(True, True)
310 |         d = deep_merge(d, {
311 |             "optional": {
312 |                 Lexicon.IMAGE: ("IMAGE", {}),
313 |                 Lexicon.PATH: ("STRING", {
314 |                     "default": "", "dynamicPrompts":False}),
315 |                 Lexicon.NAME: ("STRING", {
316 |                     "default": "output", "dynamicPrompts":False,}),
317 |                 Lexicon.META: ("JSON", {
318 |                     "default": None,}),
319 |                 Lexicon.USER: ("STRING", {
320 |                     "default": "", "multiline": True, "dynamicPrompts":False,}),
321 |             }
322 |         })
323 |         return Lexicon._parse(d)
324 | 
325 |     def run(self, **kw) -> tuple:
326 |         image = parse_param(kw, Lexicon.IMAGE, EnumConvertType.IMAGE, None)
327 |         path = parse_param(kw, Lexicon.PATH, EnumConvertType.STRING, "")
328 |         fname = parse_param(kw, Lexicon.NAME, EnumConvertType.STRING, "output")
329 |         metadata = parse_param(kw, Lexicon.META, EnumConvertType.DICT, {})
330 |         usermeta = parse_param(kw, Lexicon.USER, EnumConvertType.DICT, {})
331 |         prompt = parse_param(kw, 'prompt', EnumConvertType.STRING, "")
332 |         pnginfo = parse_param(kw, 'extra_pnginfo', EnumConvertType.DICT, {})
333 |         params = list(zip_longest_fill(image, path, fname, metadata, usermeta, prompt, pnginfo))
334 |         pbar = ProgressBar(len(params))
335 |         for idx, (image, path, fname, metadata, usermeta, prompt, pnginfo) in enumerate(params):
336 |             if image is None:
337 |                 logger.warning("no image")
338 |                 image = torch.zeros((32, 32, 4), dtype=torch.uint8, device="cpu")
339 |             try:
340 |                 if not isinstance(usermeta, (dict,)):
341 |                     usermeta = json.loads(usermeta)
342 |                 metadata.update(usermeta)
343 |             except json.decoder.JSONDecodeError:
344 |                 pass
345 |             except Exception as e:
346 |                 logger.error(e)
347 |                 logger.error(usermeta)
348 | 
349 |             metadata["prompt"] = prompt
350 |             metadata["workflow"] = json.dumps(pnginfo)
351 |             image = tensor_to_cv(image)
352 |             image = Image.fromarray(np.clip(image, 0, 255).astype(np.uint8))
353 |             meta_png = PngInfo()
354 |             for x in metadata:
355 |
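                # NOTE: PNG text chunks only store strings, so each metadata value is
                # run through json.dumps before PngInfo.add_text; e.g. {"seed": 123}
                # is written as the text chunk ("seed", "123"). Values that fail to
                # serialize are logged and skipped.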
try: 356 | data = json.dumps(metadata[x]) 357 | meta_png.add_text(x, data) 358 | except Exception as e: 359 | logger.error(e) 360 | logger.error(x) 361 | 362 | if path == "" or path is None: 363 | path = get_output_directory() 364 | 365 | root = Path(path) 366 | if not root.exists(): 367 | root = Path(get_output_directory()) 368 | 369 | root.mkdir(parents=True, exist_ok=True) 370 | 371 | outname = fname 372 | if len(params) > 1: 373 | outname += f"_{idx}" 374 | outname = (root / outname).with_suffix(".png") 375 | logger.info(f"wrote file: {outname}") 376 | image.save(outname, pnginfo=meta_png) 377 | pbar.update_absolute(idx) 378 | return () 379 | -------------------------------------------------------------------------------- /core/vars.py: -------------------------------------------------------------------------------- 1 | """ Jovimetrix - Variables """ 2 | 3 | import sys 4 | import random 5 | from typing import Any 6 | 7 | from comfy.utils import ProgressBar 8 | 9 | from cozy_comfyui import \ 10 | InputType, EnumConvertType, \ 11 | deep_merge, parse_param, parse_value, zip_longest_fill 12 | 13 | from cozy_comfyui.lexicon import \ 14 | Lexicon 15 | 16 | from cozy_comfyui.node import \ 17 | COZY_TYPE_ANY, COZY_TYPE_NUMERICAL, \ 18 | CozyBaseNode 19 | 20 | from . import \ 21 | EnumFillOperation 22 | 23 | # ============================================================================== 24 | # === GLOBAL === 25 | # ============================================================================== 26 | 27 | JOV_CATEGORY = "VARIABLE" 28 | 29 | # ============================================================================== 30 | # === CLASS === 31 | # ============================================================================== 32 | 33 | class ValueNode(CozyBaseNode): 34 | NAME = "VALUE (JOV) 🧬" 35 | CATEGORY = JOV_CATEGORY 36 | RETURN_TYPES = (COZY_TYPE_ANY, COZY_TYPE_ANY, COZY_TYPE_ANY, COZY_TYPE_ANY, COZY_TYPE_ANY,) 37 | RETURN_NAMES = ("❔", Lexicon.X, Lexicon.Y, Lexicon.Z, Lexicon.W,) 38 | OUTPUT_IS_LIST = (True, True, True, True, True,) 39 | DESCRIPTION = """ 40 | Supplies raw or default values for various data types, supporting vector input with components for X, Y, Z, and W. It also provides a string input option. 
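When the seed input is non-zero, each component is randomized between the matching components of default A and default B.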
41 | """ 42 | UPDATE = False 43 | 44 | @classmethod 45 | def INPUT_TYPES(cls) -> InputType: 46 | d = super().INPUT_TYPES() 47 | typ = EnumConvertType._member_names_[:6] 48 | d = deep_merge(d, { 49 | "optional": { 50 | Lexicon.IN_A: (COZY_TYPE_ANY, { 51 | "default": None,}), 52 | Lexicon.X: (COZY_TYPE_NUMERICAL, { 53 | "default": 0, "mij": -sys.float_info.max, "maj": sys.float_info.max, 54 | "forceInput": True}), 55 | Lexicon.Y: (COZY_TYPE_NUMERICAL, { 56 | "default": 0, "mij": -sys.float_info.max, "maj": sys.float_info.max, 57 | "forceInput": True}), 58 | Lexicon.Z: (COZY_TYPE_NUMERICAL, { 59 | "default": 0, "mij": -sys.float_info.max, "maj": sys.float_info.max, 60 | "forceInput": True}), 61 | Lexicon.W: (COZY_TYPE_NUMERICAL, { 62 | "default": 0, "mij": -sys.float_info.max, "maj": sys.float_info.max, 63 | "forceInput": True}), 64 | Lexicon.TYPE: (typ, { 65 | "default": EnumConvertType.BOOLEAN.name}), 66 | Lexicon.DEFAULT_A: ("VEC4", { 67 | "default": (0, 0, 0, 0), "mij": -sys.float_info.max, "maj": sys.float_info.max, 68 | "label": [Lexicon.X, Lexicon.Y, Lexicon.Z, Lexicon.W]}), 69 | Lexicon.DEFAULT_B: ("VEC4", { 70 | "default": (1,1,1,1), "mij": -sys.float_info.max, "maj": sys.float_info.max, 71 | "label": [Lexicon.X, Lexicon.Y, Lexicon.Z, Lexicon.W]}), 72 | Lexicon.SEED: ("INT", { 73 | "default": 0, "min": 0, "max": sys.maxsize}), 74 | } 75 | }) 76 | return Lexicon._parse(d) 77 | 78 | def run(self, **kw) -> tuple[tuple[Any, ...]]: 79 | raw = parse_param(kw, Lexicon.IN_A, EnumConvertType.ANY, 0) 80 | r_x = parse_param(kw, Lexicon.X, EnumConvertType.FLOAT, None) 81 | r_y = parse_param(kw, Lexicon.Y, EnumConvertType.FLOAT, None) 82 | r_z = parse_param(kw, Lexicon.Z, EnumConvertType.FLOAT, None) 83 | r_w = parse_param(kw, Lexicon.W, EnumConvertType.FLOAT, None) 84 | typ = parse_param(kw, Lexicon.TYPE, EnumConvertType, EnumConvertType.BOOLEAN.name) 85 | xyzw = parse_param(kw, Lexicon.DEFAULT_A, EnumConvertType.VEC4, (0, 0, 0, 0)) 86 | yyzw = parse_param(kw, Lexicon.DEFAULT_B, EnumConvertType.VEC4, (1, 1, 1, 1)) 87 | seed = parse_param(kw, Lexicon.SEED, EnumConvertType.INT, 0) 88 | params = list(zip_longest_fill(raw, r_x, r_y, r_z, r_w, typ, xyzw, yyzw, seed)) 89 | results = [] 90 | pbar = ProgressBar(len(params)) 91 | old_seed = -1 92 | for idx, (raw, r_x, r_y, r_z, r_w, typ, xyzw, yyzw, seed) in enumerate(params): 93 | # default = [x_str] 94 | default2 = None 95 | a, b, c, d = xyzw 96 | a2, b2, c2, d2 = yyzw 97 | default = (a if r_x is None else r_x, 98 | b if r_y is None else r_y, 99 | c if r_z is None else r_z, 100 | d if r_w is None else r_w) 101 | default2 = (a2, b2, c2, d2) 102 | 103 | val = parse_value(raw, typ, default) 104 | val2 = parse_value(default2, typ, default2) 105 | 106 | # check if set to randomize.... 
107 | self.UPDATE = False 108 | if seed != 0: 109 | self.UPDATE = True 110 | val = list(val) if isinstance(val, (tuple, list,)) else [val] 111 | val2 = list(val2) if isinstance(val2, (tuple, list,)) else [val2] 112 | 113 | for i in range(len(val)): 114 | mx = max(val[i], val2[i]) 115 | mn = min(val[i], val2[i]) 116 | if mn == mx: 117 | val[i] = mn 118 | else: 119 | if old_seed != seed: 120 | random.seed(seed) 121 | old_seed = seed 122 | if typ in [EnumConvertType.INT, EnumConvertType.BOOLEAN]: 123 | val[i] = random.randint(mn, mx) 124 | else: 125 | val[i] = mn + random.random() * (mx - mn) 126 | 127 | out = parse_value(val, typ, val) 128 | items = [out,0,0,0] if not isinstance(out, (tuple, list,)) else out 129 | results.append([out, *items]) 130 | pbar.update_absolute(idx) 131 | 132 | return *list(zip(*results)), 133 | 134 | class Vector2Node(CozyBaseNode): 135 | NAME = "VECTOR2 (JOV)" 136 | CATEGORY = JOV_CATEGORY 137 | RETURN_TYPES = ("VEC2",) 138 | RETURN_NAMES = ("VEC2",) 139 | OUTPUT_IS_LIST = (True,) 140 | OUTPUT_TOOLTIPS = ( 141 | "Vector2 with float values", 142 | ) 143 | DESCRIPTION = """ 144 | Outputs a VECTOR2. 145 | """ 146 | 147 | @classmethod 148 | def INPUT_TYPES(cls) -> InputType: 149 | d = super().INPUT_TYPES() 150 | d = deep_merge(d, { 151 | "optional": { 152 | Lexicon.X: (COZY_TYPE_NUMERICAL, { 153 | "min": -sys.float_info.max, "max": sys.float_info.max, 154 | "tooltip": "X channel value"}), 155 | Lexicon.Y: (COZY_TYPE_NUMERICAL, { 156 | "min": -sys.float_info.max, "max": sys.float_info.max, 157 | "tooltip": "Y channel value"}), 158 | Lexicon.DEFAULT: ("VEC2", { 159 | "default": (0,0), "mij": -sys.float_info.max, "maj": sys.float_info.max, 160 | "tooltip": "Default vector value"}), 161 | } 162 | }) 163 | return Lexicon._parse(d) 164 | 165 | def run(self, **kw) -> tuple[tuple[float, ...]]: 166 | x = parse_param(kw, Lexicon.X, EnumConvertType.FLOAT, None) 167 | y = parse_param(kw, Lexicon.Y, EnumConvertType.FLOAT, None) 168 | default = parse_param(kw, Lexicon.DEFAULT, EnumConvertType.VEC2, (0,0)) 169 | result = [] 170 | params = list(zip_longest_fill(x, y, default)) 171 | pbar = ProgressBar(len(params)) 172 | for idx, (x, y, default) in enumerate(params): 173 | x = round(default[0], 9) if x is None else round(x, 9) 174 | y = round(default[1], 9) if y is None else round(y, 9) 175 | result.append((x, y,)) 176 | pbar.update_absolute(idx) 177 | return result, 178 | 179 | class Vector3Node(CozyBaseNode): 180 | NAME = "VECTOR3 (JOV)" 181 | CATEGORY = JOV_CATEGORY 182 | RETURN_TYPES = ("VEC3",) 183 | RETURN_NAMES = ("VEC3",) 184 | OUTPUT_IS_LIST = (True,) 185 | OUTPUT_TOOLTIPS = ( 186 | "Vector3 with float values", 187 | ) 188 | DESCRIPTION = """ 189 | Outputs a VECTOR3. 
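Connected X, Y and Z inputs override the matching components of the DEFAULT vector; all values are rounded to 9 decimal places.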
190 | """ 191 | 192 | @classmethod 193 | def INPUT_TYPES(cls) -> InputType: 194 | d = super().INPUT_TYPES() 195 | d = deep_merge(d, { 196 | "optional": { 197 | Lexicon.X: (COZY_TYPE_NUMERICAL, { 198 | "min": -sys.float_info.max, "max": sys.float_info.max, 199 | "tooltip": "X channel value"}), 200 | Lexicon.Y: (COZY_TYPE_NUMERICAL, { 201 | "min": -sys.float_info.max, "max": sys.float_info.max, 202 | "tooltip": "Y channel value"}), 203 | Lexicon.Z: (COZY_TYPE_NUMERICAL, { 204 | "min": -sys.float_info.max, "max": sys.float_info.max, 205 | "tooltip": "Z channel value"}), 206 | Lexicon.DEFAULT: ("VEC3", { 207 | "default": (0,0,0), "mij": -sys.float_info.max, "maj": sys.float_info.max, 208 | "tooltip": "Default vector value"}), 209 | } 210 | }) 211 | return Lexicon._parse(d) 212 | 213 | def run(self, **kw) -> tuple[tuple[float, ...]]: 214 | x = parse_param(kw, Lexicon.X, EnumConvertType.FLOAT, None) 215 | y = parse_param(kw, Lexicon.Y, EnumConvertType.FLOAT, None) 216 | z = parse_param(kw, Lexicon.Z, EnumConvertType.FLOAT, None) 217 | default = parse_param(kw, Lexicon.DEFAULT, EnumConvertType.VEC3, (0,0,0)) 218 | result = [] 219 | params = list(zip_longest_fill(x, y, z, default)) 220 | pbar = ProgressBar(len(params)) 221 | for idx, (x, y, z, default) in enumerate(params): 222 | x = round(default[0], 9) if x is None else round(x, 9) 223 | y = round(default[1], 9) if y is None else round(y, 9) 224 | z = round(default[2], 9) if z is None else round(z, 9) 225 | result.append((x, y, z,)) 226 | pbar.update_absolute(idx) 227 | return result, 228 | 229 | class Vector4Node(CozyBaseNode): 230 | NAME = "VECTOR4 (JOV)" 231 | CATEGORY = JOV_CATEGORY 232 | RETURN_TYPES = ("VEC4",) 233 | RETURN_NAMES = ("VEC4",) 234 | OUTPUT_IS_LIST = (True,) 235 | OUTPUT_TOOLTIPS = ( 236 | "Vector4 with float values", 237 | ) 238 | DESCRIPTION = """ 239 | Outputs a VECTOR4. 
240 | """ 241 | 242 | @classmethod 243 | def INPUT_TYPES(cls) -> InputType: 244 | d = super().INPUT_TYPES() 245 | d = deep_merge(d, { 246 | "optional": { 247 | Lexicon.X: (COZY_TYPE_NUMERICAL, { 248 | "min": -sys.float_info.max, "max": sys.float_info.max, 249 | "tooltip": "X channel value"}), 250 | Lexicon.Y: (COZY_TYPE_NUMERICAL, { 251 | "min": -sys.float_info.max, "max": sys.float_info.max, 252 | "tooltip": "Y channel value"}), 253 | Lexicon.Z: (COZY_TYPE_NUMERICAL, { 254 | "min": -sys.float_info.max, "max": sys.float_info.max, 255 | "tooltip": "Z channel value"}), 256 | Lexicon.W: (COZY_TYPE_NUMERICAL, { 257 | "min": -sys.float_info.max, "max": sys.float_info.max, 258 | "tooltip": "W channel value"}), 259 | Lexicon.DEFAULT: ("VEC4", { 260 | "default": (0,0,0,0), "mij": -sys.float_info.max, "maj": sys.float_info.max, 261 | "tooltip": "Default vector value"}), 262 | } 263 | }) 264 | return Lexicon._parse(d) 265 | 266 | def run(self, **kw) -> tuple[tuple[float, ...]]: 267 | x = parse_param(kw, Lexicon.X, EnumConvertType.FLOAT, None) 268 | y = parse_param(kw, Lexicon.Y, EnumConvertType.FLOAT, None) 269 | z = parse_param(kw, Lexicon.Z, EnumConvertType.FLOAT, None) 270 | w = parse_param(kw, Lexicon.W, EnumConvertType.FLOAT, None) 271 | default = parse_param(kw, Lexicon.DEFAULT, EnumConvertType.VEC4, (0,0,0,0)) 272 | result = [] 273 | params = list(zip_longest_fill(x, y, z, w, default)) 274 | pbar = ProgressBar(len(params)) 275 | for idx, (x, y, z, w, default) in enumerate(params): 276 | x = round(default[0], 9) if x is None else round(x, 9) 277 | y = round(default[1], 9) if y is None else round(y, 9) 278 | z = round(default[2], 9) if z is None else round(z, 9) 279 | w = round(default[3], 9) if w is None else round(w, 9) 280 | result.append((x, y, z, w,)) 281 | pbar.update_absolute(idx) 282 | return result, 283 | -------------------------------------------------------------------------------- /node_list.json: -------------------------------------------------------------------------------- 1 | { 2 | "ADJUST: BLUR (JOV)": "Enhance and modify images with various blur effects", 3 | "ADJUST: COLOR (JOV)": "Enhance and modify images with various blur effects", 4 | "ADJUST: EDGE (JOV)": "Enhanced edge detection", 5 | "ADJUST: EMBOSS (JOV)": "Emboss boss mode", 6 | "ADJUST: LEVELS (JOV)": "", 7 | "ADJUST: LIGHT (JOV)": "Tonal adjustments", 8 | "ADJUST: MORPHOLOGY (JOV)": "Operations based on the image shape", 9 | "ADJUST: PIXEL (JOV)": "Pixel-level transformations", 10 | "ADJUST: SHARPEN (JOV)": "Sharpen the pixels of an image", 11 | "AKASHIC (JOV) \ud83d\udcd3": "Visualize data", 12 | "ARRAY (JOV) \ud83d\udcda": "Processes a batch of data based on the selected mode", 13 | "BIT SPLIT (JOV) \u2b44": "Split an input into separate bits", 14 | "BLEND (JOV) \u2697\ufe0f": "Combine two input images using various blending modes, such as normal, screen, multiply, overlay, etc", 15 | "COLOR BLIND (JOV) \ud83d\udc41\u200d\ud83d\udde8": "Simulate color blindness effects on images", 16 | "COLOR MATCH (JOV) \ud83d\udc9e": "Adjust the color scheme of one image to match another with the Color Match Node", 17 | "COLOR MEANS (JOV) \u3030\ufe0f": "The top-k colors ordered from most->least used as a strip, tonal palette and 3D LUT", 18 | "COLOR THEORY (JOV) \ud83d\udede": "Generate a color harmony based on the selected scheme", 19 | "COMPARISON (JOV) \ud83d\udd75\ud83c\udffd": "Evaluates two inputs (A and B) with a specified comparison operators and optional values for successful and failed comparisons", 20 | "CONSTANT 
(JOV) \ud83d\udfea": "Generate a constant image or mask of a specified size and color", 21 | "CROP (JOV) \u2702\ufe0f": "Extract a portion of an input image or resize it", 22 | "DELAY (JOV) \u270b\ud83c\udffd": "Introduce pauses in the workflow that accept an optional input to pass through and a timer parameter to specify the duration of the delay", 23 | "EXPORT (JOV) \ud83d\udcfd": "Responsible for saving images or animations to disk", 24 | "FILTER MASK (JOV) \ud83e\udd3f": "Create masks based on specific color ranges within an image", 25 | "FLATTEN (JOV) \u2b07\ufe0f": "Combine multiple input images into a single image by summing their pixel values", 26 | "GRADIENT MAP (JOV) \ud83c\uddf2\ud83c\uddfa": "Remaps an input image using a gradient lookup table (LUT)", 27 | "GRAPH (JOV) \ud83d\udcc8": "Visualize a series of data points over time", 28 | "IMAGE INFO (JOV) \ud83d\udcda": "Exports and Displays immediate information about images", 29 | "LERP (JOV) \ud83d\udd30": "Calculate linear interpolation between two values or vectors based on a blending factor (alpha)", 30 | "OP BINARY (JOV) \ud83c\udf1f": "Execute binary operations like addition, subtraction, multiplication, division, and bitwise operations on input values, supporting various data types and vector sizes", 31 | "OP UNARY (JOV) \ud83c\udfb2": "Perform single function operations like absolute value, mean, median, mode, magnitude, normalization, maximum, or minimum on input values", 32 | "PIXEL MERGE (JOV) \ud83e\udec2": "Combines individual color channels (red, green, blue) along with an optional mask channel to create a composite image", 33 | "PIXEL SPLIT (JOV) \ud83d\udc94": "Split an input into individual color channels (red, green, blue, alpha)", 34 | "PIXEL SWAP (JOV) \ud83d\udd03": "Swap pixel values between two input images based on specified channel swizzle operations", 35 | "QUEUE (JOV) \ud83d\uddc3": "Manage a queue of items, such as file paths or data", 36 | "QUEUE TOO (JOV) \ud83d\uddc3": "Manage a queue of specific items: media files", 37 | "ROUTE (JOV) \ud83d\ude8c": "Routes the input data from the optional input ports to the output port, preserving the order of inputs", 38 | "SAVE OUTPUT (JOV) \ud83d\udcbe": "Save images with metadata to any specified path", 39 | "SHAPE GEN (JOV) \u2728": "Create n-sided polygons", 40 | "SPLIT (JOV) \ud83c\udfad": "Split an image into two or four images based on the percentages for width and height", 41 | "STACK (JOV) \u2795": "Merge multiple input images into a single composite image by stacking them along a specified axis", 42 | "STRINGER (JOV) \ud83e\ude80": "Manipulate strings through filtering", 43 | "SWIZZLE (JOV) \ud83d\ude35": "Swap components between two vectors based on specified swizzle patterns and values", 44 | "TEXT GEN (JOV) \ud83d\udcdd": "Generates images containing text based on parameters such as font, size, alignment, color, and position", 45 | "THRESHOLD (JOV) \ud83d\udcc9": "Define a range and apply it to an image for segmentation and feature extraction", 46 | "TICK (JOV) \u23f1": "Value generator with normalized values based on based on time interval", 47 | "TRANSFORM (JOV) \ud83c\udfdd\ufe0f": "Apply various geometric transformations to images, including translation, rotation, scaling, mirroring, tiling and perspective projection", 48 | "VALUE (JOV) \ud83e\uddec": "Supplies raw or default values for various data types, supporting vector input with components for X, Y, Z, and W", 49 | "VECTOR2 (JOV)": "Outputs a VECTOR2", 50 | "VECTOR3 (JOV)": "Outputs a 
VECTOR3", 51 | "VECTOR4 (JOV)": "Outputs a VECTOR4", 52 | "WAVE GEN (JOV) \ud83c\udf0a": "Produce waveforms like sine, square, or sawtooth with adjustable frequency, amplitude, phase, and offset" 53 | } -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "jovimetrix" 3 | description = "Animation via tick. Parameter manipulation with wave generator. Unary and Binary math support. Value convert int/float/bool, VectorN and Image, Mask types. Shape mask generator. Stack images, do channel ops, split, merge and randomize arrays and batches. Load images & video from anywhere. Dynamic bus routing. Save output anywhere! Flatten, crop, transform; check colorblindness or linear interpolate values." 4 | version = "2.1.10" 5 | license = { file = "LICENSE" } 6 | readme = "README.md" 7 | authors = [{ name = "Alexander G. Morano", email = "amorano@gmail.com" }] 8 | classifiers = [ 9 | "License :: OSI Approved :: MIT License", 10 | "Operating System :: OS Independent", 11 | "Programming Language :: Python", 12 | "Programming Language :: Python :: 3", 13 | "Programming Language :: Python :: 3.10", 14 | "Programming Language :: Python :: 3.11", 15 | "Programming Language :: Python :: 3.12", 16 | "Intended Audience :: Developers", 17 | ] 18 | requires-python = ">=3.10" 19 | dependencies = [ 20 | "aenum", 21 | "git+https://github.com/cozy-comfyui/cozy_comfyui@main#egg=cozy_comfyui", 22 | "matplotlib", 23 | "numpy<2", 24 | "opencv-contrib-python", 25 | "Pillow" 26 | ] 27 | 28 | [project.urls] 29 | Homepage = "https://github.com/Amorano/Jovimetrix" 30 | Documentation = "https://github.com/Amorano/Jovimetrix/wiki" 31 | Repository = "https://github.com/Amorano/Jovimetrix" 32 | Issues = "https://github.com/Amorano/Jovimetrix/issues" 33 | 34 | [tool.comfy] 35 | PublisherId = "amorano" 36 | DisplayName = "Jovimetrix" 37 | Icon = "https://raw.githubusercontent.com/Amorano/Jovimetrix-examples/refs/heads/master/res/logo-jvmx.png" 38 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | aenum 2 | git+https://github.com/cozy-comfyui/cozy_comfyui@main#egg=cozy_comfyui 3 | matplotlib 4 | numpy<2 5 | opencv-contrib-python 6 | Pillow -------------------------------------------------------------------------------- /res/aud/bread.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/aud/bread.mp3 -------------------------------------------------------------------------------- /res/aud/bread.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/aud/bread.wav -------------------------------------------------------------------------------- /res/img/anim/anim (1).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/anim/anim (1).png -------------------------------------------------------------------------------- /res/img/anim/anim (2).png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/anim/anim (2).png -------------------------------------------------------------------------------- /res/img/anim/anim (3).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/anim/anim (3).png -------------------------------------------------------------------------------- /res/img/anim/anim (4).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/anim/anim (4).png -------------------------------------------------------------------------------- /res/img/anim/anim (5).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/anim/anim (5).png -------------------------------------------------------------------------------- /res/img/anim/anim (6).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/anim/anim (6).png -------------------------------------------------------------------------------- /res/img/anim/anim (7).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/anim/anim (7).png -------------------------------------------------------------------------------- /res/img/anim/anim (8).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/anim/anim (8).png -------------------------------------------------------------------------------- /res/img/color-a.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/color-a.png -------------------------------------------------------------------------------- /res/img/color-b.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/color-b.png -------------------------------------------------------------------------------- /res/img/color-c.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/color-c.png -------------------------------------------------------------------------------- /res/img/color-d.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/color-d.png -------------------------------------------------------------------------------- /res/img/color-e.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/color-e.png -------------------------------------------------------------------------------- 
/res/img/color-f.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/color-f.png -------------------------------------------------------------------------------- /res/img/color-g.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/color-g.png -------------------------------------------------------------------------------- /res/img/depth-a.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/depth-a.png -------------------------------------------------------------------------------- /res/img/depth-b.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/depth-b.png -------------------------------------------------------------------------------- /res/img/depth-c.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/depth-c.png -------------------------------------------------------------------------------- /res/img/mask-a.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/mask-a.png -------------------------------------------------------------------------------- /res/img/mask-b.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/mask-b.png -------------------------------------------------------------------------------- /res/img/mask-c.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/mask-c.png -------------------------------------------------------------------------------- /res/img/mask-e.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/mask-e.png -------------------------------------------------------------------------------- /res/img/shape-a.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/shape-a.png -------------------------------------------------------------------------------- /res/img/shape-b.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/shape-b.png -------------------------------------------------------------------------------- /res/img/shape-c.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/shape-c.png 
-------------------------------------------------------------------------------- /res/img/shape-d.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/shape-d.png -------------------------------------------------------------------------------- /res/img/test-a.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/test-a.png -------------------------------------------------------------------------------- /res/img/test-b.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/test-b.png -------------------------------------------------------------------------------- /res/img/test-c.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/test-c.png -------------------------------------------------------------------------------- /res/img/test-d.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/test-d.jpg -------------------------------------------------------------------------------- /res/img/tile-a.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/tile-a.png -------------------------------------------------------------------------------- /res/img/tile-b.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/tile-b.png -------------------------------------------------------------------------------- /res/img/tile-c.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/tile-c.png -------------------------------------------------------------------------------- /res/img/tile-d.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/img/tile-d.png -------------------------------------------------------------------------------- /res/wiki/YouTube.svg: -------------------------------------------------------------------------------- 1 | YOUTUBEYOUTUBE -------------------------------------------------------------------------------- /res/wiki/help_002.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Amorano/Jovimetrix/3ac86e664b7b4fe6a19f12176055da52014ad0bb/res/wiki/help_002.png -------------------------------------------------------------------------------- /web/core.js: -------------------------------------------------------------------------------- 1 | /** 2 | ASYNC 3 | init 4 | setup 5 | registerCustomNodes 6 | nodeCreated 7 | beforeRegisterNodeDef 8 | getCustomWidgets 9 | afterConfigureGraph 10 | refreshComboInNodes 11 | 12 | NON-ASYNC 13 | 
onNodeOutputsUpdated 14 | beforeRegisterVueAppNodeDefs 15 | loadedGraphNode 16 | */ 17 | 18 | import { app } from "../../scripts/app.js" 19 | 20 | app.registerExtension({ 21 | name: "jovimetrix", 22 | async init() { 23 | const styleTagId = 'jovimetrix-stylesheet'; 24 | let styleTag = document.getElementById(styleTagId); 25 | if (styleTag) { 26 | return; 27 | } 28 | 29 | document.head.appendChild(Object.assign(document.createElement('script'), { 30 | src: "https://cdn.jsdelivr.net/npm/@jaames/iro@5" 31 | })); 32 | } 33 | }); -------------------------------------------------------------------------------- /web/fun.js: -------------------------------------------------------------------------------- 1 | /**/ 2 | 3 | import { app } from "../../scripts/app.js"; 4 | 5 | export const bewm = function(ex, ey) { 6 | //- adapted from "Anchor Click Canvas Animation" by Nick Sheffield 7 | //- https://codepen.io/nicksheffield/pen/NNEoLg/ 8 | const colors = [ '#ffc000', '#ff3b3b', '#ff8400' ]; 9 | const bubbles = 25; 10 | 11 | const explode = () => { 12 | let particles = []; 13 | const ctx = app.canvas; 14 | const canvas = ctx.getContext('2d'); 15 | ctx.style.pointerEvents = 'none'; 16 | 17 | for(var i = 0; i < bubbles; i++) { 18 | particles.push({ 19 | x: canvas.width / 2, 20 | y: canvas.height / 2, 21 | radius: r(20, 30), 22 | color: colors[Math.floor(Math.random() * colors.length)], 23 | rotation: r(0, 360, true), 24 | speed: r(12, 16), 25 | friction: 0.9, 26 | opacity: r(0, 0.5, true), 27 | yVel: 0, 28 | gravity: 0.15 29 | }); 30 | } 31 | render(particles, ctx); 32 | } 33 | 34 | const render = (particles, ctx) => { 35 | requestAnimationFrame(() => render(particles, ctx)); 36 | 37 | particles.forEach((p) => { 38 | p.x += p.speed * Math.cos(p.rotation * Math.PI / 180); 39 | p.y += p.speed * Math.sin(p.rotation * Math.PI / 180); 40 | 41 | p.opacity -= 0.01; 42 | p.speed *= p.friction; 43 | p.radius *= p.friction; 44 | p.yVel += p.gravity; 45 | p.y += p.yVel; 46 | 47 | if(p.opacity < 0 || p.radius < 0) return; 48 | 49 | ctx.beginPath(); 50 | ctx.globalAlpha = p.opacity; 51 | ctx.fillStyle = p.color; 52 | ctx.arc(p.x, p.y, p.radius, 0, 2 * Math.PI, false); 53 | ctx.fill(); 54 | }); 55 | } 56 | 57 | const r = (a, b, c) => parseFloat((Math.random() * ((a ? a : 1) - (b ? b : 0)) + (b ? b : 0)).toFixed(c ? 
c : 0)); 58 | explode(ex, ey); 59 | } 60 | 61 | export const bubbles = function() { 62 | const canvas = document.getElementById("graph-canvas"); 63 | const context = canvas.getContext("2d"); 64 | window.bubbles_alive = true; 65 | let mouseX; 66 | let mouseY; 67 | 68 | const particleArray = []; 69 | class Particle { 70 | constructor() { 71 | this.x = Math.random() * canvas.width * 0.85; 72 | this.y = canvas.height * 0.85; 73 | this.radius = Math.random() * 30; 74 | this.dx = Math.random() - 0.5 75 | this.dx = Math.sign(this.dx) * Math.random() * 1.27; 76 | this.dy = 3 + Math.random() * 3; 77 | this.hue = 25 + Math.random() * 250; 78 | this.sat = 85 + Math.random() * 15; 79 | this.val = 35 + Math.random() * 20; 80 | } 81 | 82 | draw() { 83 | context.beginPath(); 84 | context.arc(this.x, this.y, this.radius, 0, 2 * Math.PI); 85 | context.strokeStyle = `hsl(${this.hue} ${this.sat}% ${this.val}%)`; 86 | context.stroke(); 87 | 88 | const gradient = context.createRadialGradient( 89 | this.x, 90 | this.y, 91 | 1, 92 | this.x + 0.5, 93 | this.y + 0.5, 94 | this.radius 95 | ); 96 | 97 | gradient.addColorStop(0.3, "rgba(255, 255, 255, 0.3)"); 98 | gradient.addColorStop(0.95, "#E7FEFF7F"); 99 | context.fillStyle = gradient; 100 | context.fill(); 101 | } 102 | 103 | move() { 104 | this.x = this.x + this.dx + (Math.random() - 0.5) * 0.5; 105 | this.y = this.y - this.dy + (Math.random() - 0.5) * 1.5; 106 | 107 | // Check if the particle is outside the canvas boundaries 108 | if ( 109 | this.x < -this.radius || 110 | this.x > canvas.width + this.radius || 111 | this.y < -this.radius || 112 | this.y > canvas.height + this.radius 113 | ) { 114 | // Remove the particle from the array 115 | particleArray.splice(particleArray.indexOf(this), 1); 116 | } 117 | } 118 | } 119 | 120 | const animate = () => { 121 | //context.clearRect(0, 0, canvas.width, canvas.height); 122 | app.canvas.setDirty(true); 123 | 124 | particleArray.forEach((particle) => { 125 | particle?.move(); 126 | particle?.draw(); 127 | }); 128 | 129 | if (window.bubbles_alive) { 130 | requestAnimationFrame(animate); 131 | if (Math.random() > 0.5) { 132 | const particle = new Particle(mouseX, mouseY); 133 | particleArray.push(particle); 134 | } 135 | } else { 136 | canvas.removeEventListener("mousemove", handleMouseMove); 137 | particleArray.length = 0; // Clear the particleArray 138 | return; 139 | } 140 | }; 141 | 142 | const handleMouseMove = (event) => { 143 | mouseX = event.clientX; 144 | mouseY = event.clientY; 145 | }; 146 | 147 | canvas.addEventListener("mousemove", handleMouseMove); 148 | animate(); 149 | } 150 | 151 | // flash status for each element 152 | const flashStatusMap = new Map(); 153 | 154 | export async function flashBackgroundColor(element, duration, flashCount, color="red") { 155 | if (flashStatusMap.get(element)) { 156 | return; 157 | } 158 | 159 | flashStatusMap.set(element, true); 160 | const originalColor = element.style.backgroundColor; 161 | 162 | for (let i = 0; i < flashCount; i++) { 163 | element.style.backgroundColor = color; 164 | await new Promise(resolve => setTimeout(resolve, duration / 2)); 165 | element.style.backgroundColor = originalColor; 166 | await new Promise(resolve => setTimeout(resolve, duration / 2)); 167 | } 168 | flashStatusMap.set(element, false); 169 | } -------------------------------------------------------------------------------- /web/nodes/akashic.js: -------------------------------------------------------------------------------- 1 | /**/ 2 | 3 | import { app } from 
"../../../scripts/app.js" 4 | import { ComfyWidgets } from '../../../scripts/widgets.js'; 5 | import { nodeAddDynamic } from "../util.js" 6 | 7 | const _prefix = '📥' 8 | const _id = "AKASHIC (JOV) 📓" 9 | 10 | app.registerExtension({ 11 | name: 'jovimetrix.node.' + _id, 12 | async beforeRegisterNodeDef(nodeType, nodeData, app) { 13 | if (nodeData.name !== _id) { 14 | return 15 | } 16 | 17 | await nodeAddDynamic(nodeType, _prefix); 18 | 19 | const onExecuted = nodeType.prototype.onExecuted; 20 | nodeType.prototype.onExecuted = async function (message) { 21 | const me = onExecuted?.apply(this, arguments) 22 | if (this.widgets) { 23 | for (let i = 0; i < this.widgets.length; i++) { 24 | this.widgets[i].onRemove?.(); 25 | this.widgets.splice(i, 0); 26 | } 27 | this.widgets.length = 0; 28 | } 29 | if (this.inputs.length>1) { 30 | for (let i = 0; i < this.inputs.length-1; i++) { 31 | let textWidget = ComfyWidgets["STRING"](this, this.inputs[i].name, ["STRING", { multiline: true }], app).widget; 32 | textWidget.inputEl.readOnly = true; 33 | textWidget.inputEl.style.margin = "1px"; 34 | textWidget.inputEl.style.padding = "1px"; 35 | textWidget.inputEl.style.border = "1px"; 36 | textWidget.inputEl.style.backgroundColor = "#222"; 37 | textWidget.value = this.inputs[i].name + " "; 38 | let raw = message["text"][i] 39 | .replace(/\\n/g, '\n') 40 | .replace(/"/g, ''); 41 | 42 | try { 43 | raw = JSON.parse('"' + raw.replace(/"/g, '\\"') + '"'); 44 | } catch (e) { 45 | } 46 | 47 | textWidget.value += raw; 48 | } 49 | } 50 | return me; 51 | } 52 | } 53 | }) 54 | -------------------------------------------------------------------------------- /web/nodes/array.js: -------------------------------------------------------------------------------- 1 | /** 2 | * File: array.js 3 | * Project: Jovimetrix 4 | * 5 | */ 6 | 7 | import { app } from "../../../scripts/app.js" 8 | import { nodeAddDynamic } from "../util.js" 9 | 10 | const _id = "ARRAY (JOV) 📚" 11 | const _prefix = '❔' 12 | 13 | app.registerExtension({ 14 | name: 'jovimetrix.node.' 
+ _id, 15 | async beforeRegisterNodeDef(nodeType, nodeData) { 16 | if (nodeData.name !== _id) { 17 | return; 18 | } 19 | 20 | await nodeAddDynamic(nodeType, _prefix); 21 | } 22 | }) 23 | -------------------------------------------------------------------------------- /web/nodes/delay.js: -------------------------------------------------------------------------------- 1 | /**/ 2 | 3 | import { api } from "../../../scripts/api.js"; 4 | import { app } from "../../../scripts/app.js"; 5 | import { apiJovimetrix } from "../util.js" 6 | import { bubbles } from '../fun.js' 7 | 8 | const _id = "DELAY (JOV) ✋🏽" 9 | const EVENT_JOVI_DELAY = "jovi-delay-user"; 10 | const EVENT_JOVI_UPDATE = "jovi-delay-update"; 11 | 12 | function domShowModal(innerHTML, eventCallback, timeout=null) { 13 | return new Promise((resolve, reject) => { 14 | const modal = document.createElement("div"); 15 | modal.className = "modal"; 16 | modal.innerHTML = innerHTML; 17 | document.body.appendChild(modal); 18 | 19 | // center 20 | const modalContent = modal.querySelector(".jov-modal-content"); 21 | modalContent.style.position = "absolute"; 22 | modalContent.style.left = "50%"; 23 | modalContent.style.top = "50%"; 24 | modalContent.style.transform = "translate(-50%, -50%)"; 25 | 26 | let timeoutId; 27 | 28 | const handleEvent = (event) => { 29 | const targetId = event.target.id; 30 | const result = eventCallback(targetId); 31 | 32 | if (result != null) { 33 | if (timeoutId) { 34 | clearTimeout(timeoutId); 35 | timeoutId = null; 36 | } 37 | modal.remove(); 38 | resolve(result); 39 | } 40 | }; 41 | modalContent.addEventListener("click", handleEvent); 42 | modalContent.addEventListener("dblclick", handleEvent); 43 | 44 | if (timeout) { 45 | timeout *= 1000; 46 | timeoutId = setTimeout(() => { 47 | modal.remove(); 48 | reject(new Error("TIMEOUT")); 49 | }, timeout); 50 | } 51 | 52 | //setTimeout(() => { 53 | // modal.dispatchEvent(new Event('tick')); 54 | //}, 1000); 55 | }); 56 | } 57 | 58 | app.registerExtension({ 59 | name: 'jovimetrix.node.' + _id, 60 | async beforeRegisterNodeDef(nodeType, nodeData) { 61 | if (nodeData.name !== _id) { 62 | return 63 | } 64 | 65 | const onNodeCreated = nodeType.prototype.onNodeCreated; 66 | nodeType.prototype.onNodeCreated = async function () { 67 | const me = await onNodeCreated?.apply(this, arguments); 68 | const widget_time = this.widgets.find(w => w.name == 'timer'); 69 | const widget_enable = this.widgets.find(w => w.name == 'enable'); 70 | this.total_timeout = 0; 71 | let showing = false; 72 | let delay_modal; 73 | const self = this; 74 | 75 | async function python_delay_user(event) { 76 | if (showing || event.detail.id != self.id) { 77 | return; 78 | } 79 | 80 | if (widget_time.value > 4 && widget_enable.value == true) { 81 | bubbles(); 82 | } 83 | 84 | showing = true; 85 | delay_modal = domShowModal(` 86 |
87 |                 <div class="jov-modal-content"> <!-- NOTE: reconstructed markup; only ".jov-modal-content" and "jov-submit-cancel" are referenced by the surrounding code, other tags/ids are assumed -->
88 |                     <h3>DELAY NODE #${event.detail?.title || event.detail.id}</h3>
89 |                     <h4>CANCEL OR CONTINUE RENDER?</h4>
90 |                     <button id="jov-submit-continue">CONTINUE</button>
91 |                     <button id="jov-submit-cancel">CANCEL</button>
92 |                 </div>
93 |
`, 94 | (button) => { 95 | return (button != "jov-submit-cancel"); 96 | }, 97 | widget_time.value); 98 | 99 | let value = false; 100 | try { 101 | value = await delay_modal; 102 | } catch (e) { 103 | if (e.message != "TIMEOUT") { 104 | console.error(e); 105 | } 106 | } 107 | apiJovimetrix(event.detail.id, value); 108 | 109 | showing = false; 110 | window.bubbles_alive = false; 111 | } 112 | 113 | async function python_delay_update() { 114 | } 115 | 116 | api.addEventListener(EVENT_JOVI_DELAY, python_delay_user); 117 | api.addEventListener(EVENT_JOVI_UPDATE, python_delay_update); 118 | 119 | this.onDestroy = () => { 120 | api.removeEventListener(EVENT_JOVI_DELAY, python_delay_user); 121 | api.removeEventListener(EVENT_JOVI_UPDATE, python_delay_update); 122 | }; 123 | return me; 124 | } 125 | 126 | const onExecutionStart = nodeType.prototype.onExecutionStart 127 | nodeType.prototype.onExecutionStart = function() { 128 | onExecutionStart?.apply(this, arguments); 129 | self.total_timeout = 0; 130 | } 131 | 132 | } 133 | }) 134 | -------------------------------------------------------------------------------- /web/nodes/flatten.js: -------------------------------------------------------------------------------- 1 | /**/ 2 | 3 | import { app } from "../../../scripts/app.js" 4 | import { nodeAddDynamic } from "../util.js" 5 | 6 | const _id = "FLATTEN (JOV) ⬇️" 7 | const _prefix = 'image' 8 | 9 | app.registerExtension({ 10 | name: 'jovimetrix.node.' + _id, 11 | async beforeRegisterNodeDef(nodeType, nodeData) { 12 | if (nodeData.name !== _id) { 13 | return; 14 | } 15 | 16 | await nodeAddDynamic(nodeType, _prefix, "IMAGE,MASK"); 17 | } 18 | }) 19 | -------------------------------------------------------------------------------- /web/nodes/graph.js: -------------------------------------------------------------------------------- 1 | /**/ 2 | 3 | import { app } from "../../../scripts/app.js" 4 | import { apiJovimetrix, nodeAddDynamic } from "../util.js" 5 | 6 | const _id = "GRAPH (JOV) 📈" 7 | const _prefix = '❔' 8 | 9 | app.registerExtension({ 10 | name: 'jovimetrix.node.' 
+ _id, 11 | async init() { 12 | LGraphCanvas.link_type_colors['JOV_VG_0'] = "#A00"; 13 | LGraphCanvas.link_type_colors['JOV_VG_1'] = "#0A0"; 14 | LGraphCanvas.link_type_colors['JOV_VG_2'] = "#00A"; 15 | LGraphCanvas.link_type_colors['JOV_VG_3'] = "#0AA"; 16 | LGraphCanvas.link_type_colors['JOV_VG_4'] = "#AA0"; 17 | LGraphCanvas.link_type_colors['JOV_VG_5'] = "#A0A"; 18 | LGraphCanvas.link_type_colors['JOV_VG_6'] = "#000"; 19 | }, 20 | async beforeRegisterNodeDef(nodeType, nodeData, app) { 21 | if (nodeData.name !== _id) { 22 | return; 23 | } 24 | 25 | await nodeAddDynamic(nodeType, _prefix); 26 | 27 | const onNodeCreated = nodeType.prototype.onNodeCreated; 28 | nodeType.prototype.onNodeCreated = async function () { 29 | const me = await onNodeCreated?.apply(this, arguments); 30 | const self = this; 31 | const widget_reset = this.widgets.find(w => w.name == 'reset'); 32 | widget_reset.callback = async() => { 33 | widget_reset.value = false; 34 | apiJovimetrix(self.id, "reset"); 35 | } 36 | return me; 37 | } 38 | 39 | const onConnectionsChange = nodeType.prototype.onConnectionsChange 40 | nodeType.prototype.onConnectionsChange = function (slotType, slot, event, link_info) { 41 | const me = onConnectionsChange?.apply(this, arguments); 42 | if (!link_info || slot == this.inputs.length) { 43 | return; 44 | } 45 | let count = 0; 46 | for (let i = 0; i < this.inputs.length; i++) { 47 | const link_id = this.inputs[i].link; 48 | const link = app.graph.links[link_id]; 49 | const nameParts = this.inputs[i].name.split('_'); 50 | const isInteger = nameParts.length > 1 && !isNaN(nameParts[0]) && Number.isInteger(parseFloat(nameParts[0])); 51 | if (link && isInteger && nameParts[1].substring(0, _prefix.length) == _prefix) { 52 | //if(link && this.inputs[i].name.substring(0, _prefix.length) == _prefix) { 53 | link.type = `JOV_VG_${count}`; 54 | this.inputs[i].color_on = LGraphCanvas.link_type_colors[link.type]; 55 | count += 1; 56 | } 57 | } 58 | app.graph.setDirtyCanvas(true, true); 59 | return me; 60 | } 61 | } 62 | }) 63 | -------------------------------------------------------------------------------- /web/nodes/lerp.js: -------------------------------------------------------------------------------- 1 | /**/ 2 | 3 | import { app } from "../../../scripts/app.js" 4 | import { widgetHookControl } from "../util.js" 5 | 6 | const _id = "LERP (JOV) 🔰" 7 | 8 | app.registerExtension({ 9 | name: 'jovimetrix.node.' + _id, 10 | async beforeRegisterNodeDef(nodeType, nodeData) { 11 | if (nodeData.name !== _id) { 12 | return; 13 | } 14 | 15 | const onNodeCreated = nodeType.prototype.onNodeCreated 16 | nodeType.prototype.onNodeCreated = async function () { 17 | const me = await onNodeCreated?.apply(this, arguments); 18 | await widgetHookControl(this, 'type', 'alpha', true); 19 | await widgetHookControl(this, 'type', 'aa'); 20 | await widgetHookControl(this, 'type', 'bb'); 21 | return me; 22 | } 23 | return nodeType; 24 | } 25 | }) 26 | -------------------------------------------------------------------------------- /web/nodes/op_binary.js: -------------------------------------------------------------------------------- 1 | /**/ 2 | 3 | import { app } from "../../../scripts/app.js" 4 | import { widgetHookControl } from "../util.js" 5 | 6 | const _id = "OP BINARY (JOV) 🌟" 7 | 8 | app.registerExtension({ 9 | name: 'jovimetrix.node.' 
+ _id, 10 | async beforeRegisterNodeDef(nodeType, nodeData) { 11 | if (nodeData.name !== _id) { 12 | return; 13 | } 14 | 15 | const onNodeCreated = nodeType.prototype.onNodeCreated 16 | nodeType.prototype.onNodeCreated = async function () { 17 | const me = await onNodeCreated?.apply(this, arguments); 18 | await widgetHookControl(this, 'type', 'aa'); 19 | await widgetHookControl(this, 'type', 'bb'); 20 | return me; 21 | } 22 | 23 | return nodeType; 24 | } 25 | }) 26 | -------------------------------------------------------------------------------- /web/nodes/op_unary.js: -------------------------------------------------------------------------------- 1 | /**/ 2 | 3 | import { app } from "../../../scripts/app.js" 4 | import { widgetHookControl } from "../util.js" 5 | 6 | const _id = "OP UNARY (JOV) 🎲" 7 | 8 | app.registerExtension({ 9 | name: 'jovimetrix.node.' + _id, 10 | async beforeRegisterNodeDef(nodeType, nodeData) { 11 | 12 | if (nodeData.name !== _id) { 13 | return; 14 | } 15 | 16 | const onNodeCreated = nodeType.prototype.onNodeCreated 17 | nodeType.prototype.onNodeCreated = async function () { 18 | const me = await onNodeCreated?.apply(this, arguments); 19 | await widgetHookControl(this, 'type', 'aa'); 20 | return me; 21 | } 22 | return nodeType; 23 | } 24 | }) -------------------------------------------------------------------------------- /web/nodes/queue.js: -------------------------------------------------------------------------------- 1 | /**/ 2 | 3 | import { api } from "../../../scripts/api.js"; 4 | import { app } from "../../../scripts/app.js"; 5 | import { ComfyWidgets } from '../../../scripts/widgets.js'; 6 | import { apiJovimetrix, TypeSlotEvent, TypeSlot } from "../util.js" 7 | import { flashBackgroundColor } from '../fun.js' 8 | 9 | const _id1 = "QUEUE (JOV) 🗃"; 10 | const _id2 = "QUEUE TOO (JOV) 🗃"; 11 | const _prefix = '❔'; 12 | const EVENT_JOVI_PING = "jovi-queue-ping"; 13 | const EVENT_JOVI_DONE = "jovi-queue-done"; 14 | 15 | app.registerExtension({ 16 | name: 'jovimetrix.node.' 
+ _id1, 17 | async beforeRegisterNodeDef(nodeType, nodeData, app) { 18 | if (nodeData.name != _id1 && nodeData.name != _id2) { 19 | return; 20 | } 21 | 22 | function update_report(self) { 23 | self.widget_report.value = `[${self.data_index+1} / ${self.data_all.length}]\n${self.data_current}`; 24 | app.canvas.setDirty(true); 25 | } 26 | 27 | function update_list(self, value) { 28 | self.data_count = value.length; 29 | self.data_index = 1; 30 | self.data_current = ""; 31 | update_report(self); 32 | apiJovimetrix(self.id, "reset"); 33 | } 34 | 35 | const onNodeCreated = nodeType.prototype.onNodeCreated; 36 | nodeType.prototype.onNodeCreated = async function () { 37 | const me = await onNodeCreated?.apply(this, arguments); 38 | const self = this; 39 | this.data_index = 1; 40 | this.data_current = ""; 41 | this.data_all = []; 42 | this.widget_report = ComfyWidgets.STRING(this, 'QUEUE IS EMPTY 🔜', [ 43 | 'STRING', { 44 | multiline: true, 45 | }, 46 | ], app).widget; 47 | this.widget_report.inputEl.readOnly = true; 48 | this.widget_report.serializeValue = async () => { }; 49 | 50 | const widget_queue = this.widgets.find(w => w.name == 'queue'); 51 | const widget_batch = this.widgets.find(w => w.name == 'batch'); 52 | const widget_hold = this.widgets.find(w => w.name == 'hold'); 53 | const widget_reset = this.widgets.find(w => w.name == 'reset'); 54 | 55 | widget_queue.inputEl.addEventListener('input', function () { 56 | const value = widget_queue.value.split('\n'); 57 | update_list(self, value); 58 | }); 59 | 60 | widget_reset.callback = () => { 61 | widget_reset.value = false; 62 | apiJovimetrix(self.id, "reset"); 63 | } 64 | 65 | async function python_queue_ping(event) { 66 | if (event.detail.id != self.id) { 67 | return; 68 | } 69 | self.data_index = event.detail.i; 70 | self.data_all = event.detail.l; 71 | self.data_current = event.detail.c; 72 | update_report(self); 73 | } 74 | 75 | // Add names to list control that collapses. 
And counter to see where we are in the overall 76 | async function python_queue_done(event) { 77 | if (event.detail.id != self.id) { 78 | return; 79 | } 80 | await flashBackgroundColor(self.widget_queue.inputEl, 650, 4, "#995242CC"); 81 | } 82 | 83 | api.addEventListener(EVENT_JOVI_PING, python_queue_ping); 84 | api.addEventListener(EVENT_JOVI_DONE, python_queue_done); 85 | 86 | this.onDestroy = () => { 87 | api.removeEventListener(EVENT_JOVI_PING, python_queue_ping); 88 | api.removeEventListener(EVENT_JOVI_DONE, python_queue_done); 89 | }; 90 | 91 | setTimeout(() => { widget_hold.callback(); }, 5); 92 | setTimeout(() => { widget_batch.callback(); }, 5); 93 | return me; 94 | } 95 | 96 | const onConnectOutput = nodeType.prototype.onConnectOutput; 97 | nodeType.prototype.onConnectOutput = function(outputIndex, inputType, inputSlot, inputNode) { 98 | if (outputIndex == 0 && inputType == "COMBO") { 99 | // can link the "same" list -- user breaks it past that, their problem atm 100 | const widget_queue = this.widgets.find(w => w.name == 'queue'); 101 | const widget = inputNode.widgets.find(w => w.name == inputSlot.name); 102 | widget_queue.value = widget.options.values.join('\n'); 103 | } 104 | return onConnectOutput?.apply(this, arguments); 105 | } 106 | 107 | const onConnectionsChange = nodeType.prototype.onConnectionsChange; 108 | nodeType.prototype.onConnectionsChange = function (slotType, slot, event, link_info) 109 | //side, slot, connected, link_info 110 | { 111 | if (slotType == TypeSlot.Output && slot == 0 && link_info && event == TypeSlotEvent.Connect) { 112 | const node = app.graph.getNodeById(link_info.target_id); 113 | if (node === undefined || node.inputs === undefined) { 114 | return; 115 | } 116 | const target = node.inputs[link_info.target_slot]; 117 | if (target === undefined) { 118 | return; 119 | } 120 | 121 | const widget = node.widgets?.find(w => w.name == target.name); 122 | if (widget === undefined) { 123 | return; 124 | } 125 | this.outputs[0].name = widget.name; 126 | if (widget?.origType == "combo" || widget.type == "COMBO") { 127 | const values = widget.options.values; 128 | const widget_queue = this.widgets.find(w => w.name == 'queue'); 129 | // remove all connections that don't match the list? 130 | widget_queue.value = values.join('\n'); 131 | update_list(this, values); 132 | } 133 | this.outputs[0].name = _prefix; 134 | } 135 | return onConnectionsChange?.apply(this, arguments); 136 | }; 137 | } 138 | }) 139 | -------------------------------------------------------------------------------- /web/nodes/route.js: -------------------------------------------------------------------------------- 1 | /**/ 2 | 3 | import { app } from "../../../scripts/app.js" 4 | import { 5 | TypeSlot, TypeSlotEvent, nodeFitHeight, 6 | nodeVirtualLinkRoot, nodeInputsClear, nodeOutputsClear 7 | } from "../util.js" 8 | 9 | const _id = "ROUTE (JOV) 🚌"; 10 | const _prefix = '🔮'; 11 | const _dynamic_type = "*"; 12 | 13 | app.registerExtension({ 14 | name: 'jovimetrix.node.' 
/web/nodes/route.js:
--------------------------------------------------------------------------------
1 | /**/
2 | 
3 | import { app } from "../../../scripts/app.js"
4 | import {
5 |     TypeSlot, TypeSlotEvent, nodeFitHeight,
6 |     nodeVirtualLinkRoot, nodeInputsClear, nodeOutputsClear
7 | } from "../util.js"
8 | 
9 | const _id = "ROUTE (JOV) 🚌";
10 | const _prefix = '🔮';
11 | const _dynamic_type = "*";
12 | 
13 | app.registerExtension({
14 |     name: 'jovimetrix.node.' + _id,
15 |     async beforeRegisterNodeDef(nodeType, nodeData) {
16 |         if (nodeData.name !== _id) {
17 |             return;
18 |         }
19 | 
20 |         const onNodeCreated = nodeType.prototype.onNodeCreated
21 |         nodeType.prototype.onNodeCreated = async function () {
22 |             const me = await onNodeCreated?.apply(this, arguments);
23 |             this.addInput(_prefix, _dynamic_type);
24 |             nodeOutputsClear(this, 1);
25 |             return me;
26 |         }
27 | 
28 |         const onConnectionsChange = nodeType.prototype.onConnectionsChange
29 |         nodeType.prototype.onConnectionsChange = function (slotType, slot_idx, event, link_info, node_slot) {
30 |             const me = onConnectionsChange?.apply(this, arguments);
31 |             let bus_connected = false;
32 |             if (event == TypeSlotEvent.Connect && link_info) {
33 |                 let fromNode = this.graph._nodes.find(
34 |                     (otherNode) => otherNode.id == link_info.origin_id
35 |                 );
36 |                 if (slotType == TypeSlot.Input) {
37 |                     if (slot_idx == 0) {
38 |                         fromNode = nodeVirtualLinkRoot(fromNode);
39 |                         if (fromNode?.outputs && fromNode.outputs[0].type == node_slot.type) {
40 |                             // bus connection
41 |                             bus_connected = true;
42 |                             nodeInputsClear(this, 1);
43 |                             nodeOutputsClear(this, 1);
44 |                         }
45 |                     } else {
46 |                         // normal connection
47 |                         const parent_link = fromNode?.outputs[link_info.origin_slot];
48 |                         if (parent_link) {
49 |                             node_slot.type = parent_link.type;
50 |                             node_slot.name = parent_link.name; // or: `${fromNode.id}_${parent_link.name}`
51 |                             // make sure there is a matching output...
52 |                             while (this.outputs.length < slot_idx + 1) {
53 |                                 this.addOutput(_prefix, _dynamic_type);
54 |                             }
55 |                             this.outputs[slot_idx].name = node_slot.name;
56 |                             this.outputs[slot_idx].type = node_slot.type;
57 |                         }
58 |                     }
59 |                 }
60 |             } else if (event == TypeSlotEvent.Disconnect) {
61 |                 bus_connected = false;
62 |                 if (slot_idx == 0) {
63 |                     nodeInputsClear(this, 1);
64 |                     nodeOutputsClear(this, 1);
65 |                 } else {
66 |                     this.removeInput(slot_idx);
67 |                     this.removeOutput(slot_idx);
68 |                 }
69 |             }
70 | 
71 |             // add an extra input if we are not in BUS connection mode
72 |             if (!bus_connected) {
73 |                 const last = this.inputs[this.inputs.length-1];
74 |                 if (last.name != _prefix || last.type != _dynamic_type) {
75 |                     this.addInput(_prefix, _dynamic_type);
76 |                 }
77 |             }
78 |             nodeFitHeight(this);
79 |             return me;
80 |         }
81 | 
82 |         return nodeType;
83 |     }
84 | })
85 | --------------------------------------------------------------------------------
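The connect branch above keeps inputs and outputs pairwise in sync. A condensed sketch of that invariant as a standalone helper; mirrorSlot and its arguments are illustrative, not part of the codebase:

// Copy type and name from an upstream output onto input slot `idx`, growing
// the output list until a mirror slot exists at the same index.
function mirrorSlot(node, idx, parentSlot, prefix = '🔮', dynamicType = '*') {
    node.inputs[idx].type = parentSlot.type;
    node.inputs[idx].name = parentSlot.name;
    while (node.outputs.length < idx + 1) {
        node.addOutput(prefix, dynamicType);
    }
    node.outputs[idx].name = parentSlot.name;
    node.outputs[idx].type = parentSlot.type;
}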
/web/nodes/stack.js:
--------------------------------------------------------------------------------
1 | /**/
2 | 
3 | import { app } from "../../../scripts/app.js"
4 | import { nodeAddDynamic } from "../util.js"
5 | 
6 | const _id = "STACK (JOV) ➕"
7 | const _prefix = 'image'
8 | 
9 | app.registerExtension({
10 |     name: 'jovimetrix.node.' + _id,
11 |     async beforeRegisterNodeDef(nodeType, nodeData) {
12 |         if (nodeData.name !== _id) {
13 |             return;
14 |         }
15 | 
16 |         await nodeAddDynamic(nodeType, _prefix);
17 |     }
18 | })
19 | --------------------------------------------------------------------------------
/web/nodes/stringer.js:
--------------------------------------------------------------------------------
1 | /**/
2 | 
3 | import { app } from "../../../scripts/app.js"
4 | import { nodeAddDynamic } from "../util.js"
5 | 
6 | const _id = "STRINGER (JOV) 🪀"
7 | const _prefix = 'string'
8 | 
9 | app.registerExtension({
10 |     name: 'jovimetrix.node.' + _id,
11 |     async beforeRegisterNodeDef(nodeType, nodeData) {
12 |         if (nodeData.name !== _id) {
13 |             return;
14 |         }
15 | 
16 |         await nodeAddDynamic(nodeType, _prefix);
17 |     }
18 | })
--------------------------------------------------------------------------------
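stack.js and stringer.js are the entire recipe for a dynamic-input node: register an extension, match the node by name, and hand slot management to nodeAddDynamic. A hypothetical third node would look the same; the id and prefix below are invented for illustration:

import { app } from "../../../scripts/app.js"
import { nodeAddDynamic } from "../util.js"

app.registerExtension({
    name: 'jovimetrix.node.EXAMPLE (JOV)',
    async beforeRegisterNodeDef(nodeType, nodeData) {
        if (nodeData.name !== 'EXAMPLE (JOV)') {
            return;
        }
        // keeps a fresh wildcard input as the last slot at all times
        await nodeAddDynamic(nodeType, 'value');
    }
})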
/web/nodes/value.js:
--------------------------------------------------------------------------------
1 | /**/
2 | 
3 | import { app } from "../../../scripts/app.js"
4 | import { widgetHookControl, nodeFitHeight } from "../util.js"
5 | 
6 | const _id = "VALUE (JOV) 🧬"
7 | 
8 | app.registerExtension({
9 |     name: 'jovimetrix.node.' + _id,
10 |     async beforeRegisterNodeDef(nodeType, nodeData) {
11 |         if (nodeData.name !== _id) {
12 |             return;
13 |         }
14 | 
15 |         const onNodeCreated = nodeType.prototype.onNodeCreated
16 |         nodeType.prototype.onNodeCreated = async function () {
17 |             const me = await onNodeCreated?.apply(this, arguments);
18 | 
19 |             this.outputs[1].type = "*";
20 |             this.outputs[2].type = "*";
21 |             this.outputs[3].type = "*";
22 |             this.outputs[4].type = "*";
23 | 
24 |             const ab_data = await widgetHookControl(this, 'type', 'aa');
25 |             await widgetHookControl(this, 'type', 'bb');
26 | 
27 |             const oldCallback = ab_data.callback;
28 |             ab_data.callback = () => {
29 |                 oldCallback?.apply(this, arguments);
30 | 
31 |                 this.outputs[0].name = ab_data.value;
32 |                 this.outputs[0].type = ab_data.value;
33 |                 // FLOAT covers the float and vector cases; INT/BOOLEAN override below
34 |                 let type = "FLOAT";
35 |                 if (ab_data.value == "INT") {
36 |                     type = "INT";
37 |                 } else if (ab_data.value == "BOOLEAN") {
38 |                     type = "BOOLEAN";
39 |                 }
40 |                 this.outputs[1].type = type;
41 |                 this.outputs[2].type = type;
42 |                 this.outputs[3].type = type;
43 |                 this.outputs[4].type = type;
44 |                 nodeFitHeight(this);
45 |             }
46 |             return me;
47 |         }
48 |     }
49 | })
50 | --------------------------------------------------------------------------------
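The callback above reduces to a small mapping from the selected type to the type of the four per-channel outputs. Stated as a plain function for clarity; this helper is illustrative and exists nowhere in the codebase:

// VEC2/VEC3/VEC4 and FLOAT expose FLOAT channels; INT and BOOLEAN pass through.
function channelType(selected) {
    if (selected == "INT" || selected == "BOOLEAN") {
        return selected;
    }
    return "FLOAT";
}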
/web/util.js:
--------------------------------------------------------------------------------
1 | /**/
2 | 
3 | import { app } from "../../scripts/app.js"
4 | import { api } from "../../scripts/api.js"
5 | 
6 | export const TypeSlot = {
7 |     Input: 1,
8 |     Output: 2,
9 | };
10 | 
11 | export const TypeSlotEvent = {
12 |     Connect: true,
13 |     Disconnect: false,
14 | };
15 | 
16 | export async function apiJovimetrix(id, cmd, data=null, route="message") {
17 |     try {
18 |         const response = await api.fetchApi(`/cozy_comfyui/${route}`, {
19 |             method: "POST",
20 |             headers: {
21 |                 "Content-Type": "application/json",
22 |             },
23 |             body: JSON.stringify({
24 |                 id: id,
25 |                 cmd: cmd,
26 |                 data: data
27 |             }),
28 |         });
29 | 
30 |         if (!response.ok) {
31 |             throw new Error(`Error: ${response.status} - ${response.statusText}`);
32 |         }
33 |         return response;
34 | 
35 |     } catch (error) {
36 |         console.error("API call to Jovimetrix failed:", error);
37 |         throw error;
38 |     }
39 | }
40 | 
41 | /*
42 |  * Hook a child vector widget to a type-selector control, forcing the child to float[n] based on the selected type's size
43 |  */
44 | export async function widgetHookControl(node, control_key, child_key) {
45 |     const control = node.widgets.find(w => w.name == control_key);
46 |     const target = node.widgets.find(w => w.name == child_key);
47 |     const target_input = node.inputs.find(w => w.name == child_key);
48 | 
49 |     if (!control || !target || !target_input) {
50 |         throw new Error("Required widgets not found");
51 |     }
52 | 
53 |     const track_xyzw = {
54 |         0: target.options?.default?.[0] || 0,
55 |         1: target.options?.default?.[1] || 0,
56 |         2: target.options?.default?.[2] || 0,
57 |         3: target.options?.default?.[3] || 0,
58 |     };
59 | 
60 |     const track_options = {}
61 |     Object.assign(track_options, target.options);
62 | 
63 |     const controlCallback = control.callback;
64 |     control.callback = async () => {
65 |         const me = await controlCallback?.apply(this, arguments);
66 |         Object.assign(target.options, track_options);
67 | 
68 |         if (["VEC2", "VEC3", "VEC4", "FLOAT", "INT", "BOOLEAN"].includes(control.value)) {
69 |             target_input.type = control.value;
70 | 
71 |             if (["INT", "FLOAT", "BOOLEAN"].includes(control.value)) {
72 |                 target.type = "VEC1";
73 |             } else {
74 |                 target.type = control.value;
75 |             }
76 |             target.options.type = target.type;
77 | 
78 |             let size = 1;
79 |             if (["VEC2", "VEC3", "VEC4"].includes(target.type)) {
80 |                 const match = /\d/.exec(target.type);
81 |                 size = parseInt(match[0], 10);
82 |             }
83 | 
84 |             target.value = {};
85 |             if (["VEC2", "VEC3", "VEC4", "FLOAT"].includes(control.value)) {
86 |                 for (let i = 0; i < size; i++) {
87 |                     target.value[i] = parseFloat(track_xyzw[i]).toFixed(target.options.precision);
88 |                 }
89 |             } else if (control.value == "INT") {
90 |                 target.options.step = 1;
91 |                 target.options.round = 0;
92 |                 target.options.precision = 0;
93 |                 target.options.int = true;
94 | 
95 |                 target.value[0] = Number(track_xyzw[0]);
96 |             } else if (control.value == "BOOLEAN") {
97 |                 target.options.step = 1;
98 |                 target.options.precision = 0;
99 |                 target.options.mij = 0;
100 |                 target.options.maj = 1;
101 |                 target.options.int = true;
102 |                 target.value[0] = track_xyzw[0] != 0 ? 1 : 0;
103 |             }
104 |         }
105 |         nodeFitHeight(node);
106 |         return me;
107 |     }
108 | 
109 |     const targetCallback = target.callback;
110 |     target.callback = async () => {
111 |         const me = await targetCallback?.apply(this, arguments);
112 |         if (target.type == "toggle") {
113 |             track_xyzw[0] = target.value != 0 ? 1 : 0;
114 |         } else if (["INT", "FLOAT"].includes(target.type)) {
115 |             track_xyzw[0] = target.value;
116 |         } else {
117 |             Object.keys(target.value).forEach((key) => {
118 |                 track_xyzw[key] = target.value[key];
119 |             });
120 |         }
121 |         return me;
122 |     };
123 | 
124 |     await control.callback();
125 |     return control;
126 | }
127 | 
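// Typical wiring, as seen in web/nodes/value.js: hook the child widget named
// 'aa' to the 'type' combo. The returned control is the combo widget itself,
// with its callback already wrapped and invoked once. `node` stands for any
// node instance (illustrative):
//   const control = await widgetHookControl(node, 'type', 'aa');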
128 | export function nodeFitHeight(node) {
129 |     const size_old = node.size;
130 |     node.computeSize();
131 |     node.setSize([Math.max(size_old[0], node.size[0]), Math.min(size_old[1], node.size[1])]);
132 |     node.setDirtyCanvas(!0, !1);
133 |     app.graph.setDirtyCanvas(!0, !1);
134 | }
135 | 
136 | /**
137 |  * Manage the slots on a node to allow a dynamic number of inputs
138 |  */
139 | export async function nodeAddDynamic(nodeType, prefix, dynamic_type='*') {
140 |     /*
141 |     Keeps the "prefix" slot as the last, always-empty entry, so take
142 |     care not to collide with key names already present in the
143 |     input list.
144 | 
145 |     Any non-dynamic ports must also be preserved.
146 |     */
147 | 
148 |     const onNodeCreated = nodeType.prototype.onNodeCreated
149 |     nodeType.prototype.onNodeCreated = async function () {
150 |         const me = await onNodeCreated?.apply(this, arguments);
151 | 
152 |         if (this.inputs.length == 0 || this.inputs[this.inputs.length-1].name != prefix) {
153 |             this.addInput(prefix, dynamic_type);
154 |         }
155 |         return me;
156 |     }
157 | 
158 |     function slot_name(slot) {
159 |         return slot.name.split('_');
160 |     }
161 | 
162 |     const onConnectionsChange = nodeType.prototype.onConnectionsChange
163 |     nodeType.prototype.onConnectionsChange = async function (slotType, slot_idx, event, link_info, node_slot) {
164 |         const me = onConnectionsChange?.apply(this, arguments);
165 |         const slot_parts = slot_name(node_slot);
166 |         if ((node_slot.type === dynamic_type || slot_parts.length > 1) && slotType === TypeSlot.Input && link_info !== null) {
167 |             const fromNode = this.graph._nodes.find(
168 |                 (otherNode) => otherNode.id == link_info.origin_id
169 |             )
170 |             const parent_slot = fromNode.outputs[link_info.origin_slot];
171 |             if (event === TypeSlotEvent.Connect) {
172 |                 node_slot.type = parent_slot.type;
173 |                 node_slot.name = `0_${parent_slot.name}`;
174 |             } else {
175 |                 this.removeInput(slot_idx);
176 |                 node_slot.type = dynamic_type;
177 |                 node_slot.name = prefix;
178 |                 node_slot.link = null;
179 |             }
180 | 
181 |             // renumber the surviving dynamic slots sequentially
182 |             let idx = 0;
183 |             let offset = 0;
184 |             while (idx < this.inputs.length) {
185 |                 const parts = slot_name(this.inputs[idx]);
186 |                 if (parts.length > 1) {
187 |                     const name = parts.slice(1).join('_');
188 |                     this.inputs[idx].name = `${offset}_${name}`;
189 |                     offset += 1;
190 |                 }
191 |                 idx += 1;
192 |             }
193 | 
194 |             // check that the last slot is a dynamic entry....
195 |             let last = this.inputs[this.inputs.length-1];
196 |             if (last.type != dynamic_type || last.name != prefix) {
197 |                 this.addInput(prefix, dynamic_type);
198 |             }
199 |             nodeFitHeight(this);
200 |         }
201 |         return me;
202 |     }
203 | }
204 | 
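// Worked example of the renaming scheme above, with hypothetical slot names:
// connecting IMAGE outputs to a trailing 'image' wildcard slot produces inputs
// named "0_IMAGE", "1_IMAGE", ... with a fresh 'image' slot kept last, and
// disconnecting one removes it and renumbers the survivors from 0.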
205 | /**
206 |  * Trace to the root node that is not a virtual node.
207 |  *
208 |  * @param {Object} node - The starting node to trace from.
209 |  * @returns {Object} - The first physical (non-virtual) node encountered, or the last node if no physical node is found.
210 |  */
211 | export function nodeVirtualLinkRoot(node) {
212 |     while (node) {
213 |         const { isVirtualNode, findSetter } = node;
214 | 
215 |         if (!isVirtualNode || !findSetter) break;
216 |         const nextNode = findSetter(node.graph);
217 | 
218 |         if (!nextNode) break;
219 |         node = nextNode;
220 |     }
221 |     return node;
222 | }
223 | 
224 | /**
225 |  * Trace through outputs until a physical (non-virtual) node is found.
226 |  *
227 |  * @param {Object} node - The starting node to trace from.
228 |  * @returns {Object} - The first physical node encountered, or the last node if no physical node is found.
229 |  */
230 | function nodeVirtualLinkChild(node) {
231 |     while (node) {
232 |         const { isVirtualNode, findGetter } = node;
233 | 
234 |         if (!isVirtualNode || !findGetter) break;
235 |         const nextNode = findGetter(node.graph);
236 | 
237 |         if (!nextNode) break;
238 |         node = nextNode;
239 |     }
240 |     return node;
241 | }
242 | 
243 | /**
244 |  * Remove inputs from a node until the stop condition is met.
245 |  *
246 |  * @param {Object} node - The node whose inputs are removed.
247 |  * @param {number} stop - The minimum number of inputs to retain. Default is 0.
248 |  */
249 | export function nodeInputsClear(node, stop = 0) {
250 |     while (node.inputs?.length > stop) {
251 |         node.removeInput(node.inputs.length - 1);
252 |     }
253 | }
254 | 
255 | /**
256 |  * Remove outputs from a node until the stop condition is met.
257 |  *
258 |  * @param {Object} node - The node whose outputs are removed.
259 |  * @param {number} stop - The minimum number of outputs to retain. Default is 0.
260 |  */
261 | export function nodeOutputsClear(node, stop = 0) {
262 |     while (node.outputs?.length > stop) {
263 |         node.removeOutput(node.outputs.length - 1);
264 |     }
265 | }
266 | --------------------------------------------------------------------------------
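Before the final file, a condensed restatement of how route.js combines these helpers for its bus check; busReset is illustrative and not exported anywhere:

import { nodeVirtualLinkRoot, nodeInputsClear, nodeOutputsClear } from "../util.js";

// Resolve reroute-style virtual nodes to their physical origin, then strip
// everything past the first input/output pair before mirroring the bus.
function busReset(node, fromNode) {
    const origin = nodeVirtualLinkRoot(fromNode);
    if (origin?.outputs?.[0]?.type == node.inputs[0].type) {
        nodeInputsClear(node, 1);
        nodeOutputsClear(node, 1);
        return true;
    }
    return false;
}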
/web/widget_vector.js:
--------------------------------------------------------------------------------
1 | /**/
2 | 
3 | import { app } from "../../scripts/app.js"
4 | import { $el } from "../../scripts/ui.js"
5 | /** @import { IWidget, LGraphCanvas } from '../../types/litegraph/litegraph.d.ts' */
6 | 
7 | function arrayToObject(values, length, parseFn) {
8 |     const result = {};
9 |     for (let i = 0; i < length; i++) {
10 |         result[i] = parseFn(values[i]);
11 |     }
12 |     return result;
13 | }
14 | 
15 | function domInnerValueChange(node, pos, widget, value, event=undefined) {
16 |     //const numtype = widget.type.includes("INT") ? Number : parseFloat
17 |     widget.value = arrayToObject(value, Object.keys(value).length, widget.convert);
18 |     if (
19 |         widget.options &&
20 |         widget.options.property &&
21 |         node.properties[widget.options.property] !== undefined
22 |     ) {
23 |         node.setProperty(widget.options.property, widget.value)
24 |     }
25 |     if (widget.callback) {
26 |         widget.callback(widget.value, app.canvas, node, pos, event)
27 |     }
28 | }
29 | 
30 | function colorHex2RGB(hex) {
31 |     hex = hex.replace(/^#/, '');
32 |     const bigint = parseInt(hex, 16);
33 |     const r = (bigint >> 16) & 255;
34 |     const g = (bigint >> 8) & 255;
35 |     const b = bigint & 255;
36 |     return [r, g, b];
37 | }
38 | 
39 | function colorRGB2Hex(input) {
40 |     const rgbArray = typeof input == 'string' ? input.match(/\d+/g) : input;
41 |     if (rgbArray.length < 3) {
42 |         throw new Error('input not 3 or 4 values');
43 |     }
44 |     const hexValues = rgbArray.map((value, index) => {
45 |         if (index == 3 && !value) return 'ff';
46 |         const hex = parseInt(value).toString(16);
47 |         return hex.length == 1 ? '0' + hex : hex;
48 |     });
49 |     return '#' + hexValues.slice(0, 3).join('') + (hexValues[3] || '');
50 | }
51 | 
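// Round-trip example for the two converters above:
//   colorRGB2Hex([255, 128, 0]) -> "#ff8000"
//   colorHex2RGB("#ff8000")     -> [255, 128, 0]
// colorRGB2Hex also accepts a string such as "rgb(255, 128, 0)" and an
// optional fourth alpha component, appended as two further hex digits.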
52 | const VectorWidget = (app, inputName, options, initial) => {
53 |     const values = options[1]?.default || initial;
54 |     /** @type {IWidget} */
55 |     const widget = {
56 |         name: inputName,
57 |         type: options[0],
58 |         y: 0,
59 |         value: values,
60 |         options: options[1]
61 |     }
62 | 
63 |     widget.convert = parseFloat;
64 |     widget.options.precision = widget.options?.precision || 2;
65 |     widget.options.step = widget.options?.step || 0.01;
66 |     widget.options.round = 1 / 10 ** widget.options.precision; // rounding increment matches the display precision
67 | 
68 |     if (widget.options?.rgb || widget.options?.int) {
69 |         widget.options.step = 1;
70 |         widget.options.round = 1;
71 |         widget.options.precision = 0;
72 |         widget.convert = Number;
73 |     }
74 | 
75 |     if (widget.options?.rgb) {
76 |         widget.options.maj = 255;
77 |         widget.options.mij = 0;
78 |         widget.options.label = ['🟥', '🟩', '🟦', 'ALPHA'];
79 |     }
80 | 
81 |     const offset_y = 4;
82 |     const widget_padding_left = 13;
83 |     const widget_padding = 30;
84 |     const label_full = 72;
85 |     const label_center = label_full / 2;
86 | 
87 |     /** @type {HTMLInputElement} */
88 |     let picker;
89 | 
90 |     widget.draw = function(ctx, node, width, Y, height) {
91 |         // if ((app.canvas.ds.scale < 0.50) || (!this.type2.startsWith("VEC") && this.type2 != "COORD2D")) return;
92 |         if ((app.canvas.ds.scale < 0.50) || (!this.type.startsWith("VEC"))) return;
93 |         ctx.save()
94 |         ctx.beginPath()
95 |         ctx.lineWidth = 1
96 |         ctx.fillStyle = LiteGraph.WIDGET_OUTLINE_COLOR
97 |         ctx.roundRect(widget_padding_left+2, Y, width - widget_padding, height, 15)
98 |         ctx.stroke()
99 |         ctx.lineWidth = 1
100 |         ctx.fillStyle = LiteGraph.WIDGET_BGCOLOR
101 |         ctx.roundRect(widget_padding_left+2, Y, width - widget_padding, height, 15)
102 |         ctx.fill()
103 | 
104 |         // label
105 |         ctx.fillStyle = LiteGraph.WIDGET_SECONDARY_TEXT_COLOR
106 |         ctx.fillText(inputName, label_center - (inputName.length * 1.5), Y + height / 2 + offset_y)
107 |         let x = label_full + 1
108 | 
109 |         const fields = Object.keys(this?.value || []);
110 |         let count = fields.length;
111 |         if (widget.options?.rgb) {
112 |             count += 0.23;
113 |         }
114 |         const element_width = (width - label_full - widget_padding) / count;
115 |         const element_width2 = element_width / 2;
116 | 
117 |         let converted = [];
118 |         for (const idx of fields) {
119 |             ctx.save()
120 |             ctx.beginPath()
121 |             ctx.fillStyle = LiteGraph.WIDGET_OUTLINE_COLOR
122 |             // separation bar
123 |             if (idx != fields.length || (idx == fields.length && !this.options?.rgb)) {
124 |                 ctx.moveTo(x, Y)
125 |                 ctx.lineTo(x, Y+height)
126 |                 ctx.stroke();
127 |             }
128 | 
129 |             // value
130 |             ctx.fillStyle = LiteGraph.WIDGET_TEXT_COLOR
131 |             const it = this.value[idx.toString()];
132 |             let value = (widget.options.precision == 0) ? Number(it) : parseFloat(it).toFixed(widget.options.precision);
133 |             converted.push(value);
134 |             const text = value.toString();
135 |             ctx.fillText(text, x + element_width2 - text.length * 3.3, Y + height/2 + offset_y);
136 |             ctx.restore();
137 |             x += element_width;
138 |         }
139 | 
140 |         if (this.options?.rgb && converted.length > 2) {
141 |             try {
142 |                 ctx.fillStyle = colorRGB2Hex(converted);
143 |             } catch (e) {
144 |                 console.error(converted, e);
145 |                 ctx.fillStyle = "#FFF";
146 |             }
147 |             ctx.roundRect(width-1.17 * widget_padding, Y+1, 19, height-2, 16);
148 |             ctx.fill()
149 |         }
150 |         ctx.restore()
151 |     }
152 | 
153 |     function clamp(widget, v, idx) {
154 |         v = Math.min(v, widget.options?.maj !== undefined ? widget.options.maj : v);
155 |         v = Math.max(v, widget.options?.mij !== undefined ? widget.options.mij : v);
156 |         widget.value[idx] = (widget.options.precision == 0) ? Number(v) : parseFloat(v).toFixed(widget.options.precision);
157 |     }
158 | 
159 |     /**
160 |      * @todo ▶️, 🖱️, 😀
161 |      * @this IWidget
162 |      */
163 |     widget.onPointerDown = function (pointer, node, canvas) {
164 |         const e = pointer.eDown
165 |         const x = e.canvasX - node.pos[0] - label_full;
166 |         const size = Object.keys(this.value).length;
167 |         const element_width = (node.size[0] - label_full - widget_padding * 1.25) / size;
168 |         const index = Math.floor(x / element_width);
169 | 
170 |         pointer.onClick = (eUp) => {
171 |             /* if click on header, reset to defaults */
172 |             if (index == -1 && eUp.shiftKey) {
173 |                 widget.value = Object.assign({}, widget.options.default);
174 |                 return;
175 |             }
176 |             else if (index >= 0 && index < size) {
177 |                 const pos = [eUp.canvasX - node.pos[0], eUp.canvasY - node.pos[1]]
178 |                 const old_value = { ...this.value };
179 |                 const label = this.options?.label ? this.name + '➖' + this.options.label?.[index] : this.name;
180 | 
181 |                 LGraphCanvas.active_canvas.prompt(label, this.value[index], function(v) {
182 |                     if (/^[0-9+\-*/()\s]+|\d+\.\d+$/.test(v)) {
183 |                         try {
184 |                             v = eval(v);
185 |                         } catch {
186 |                             v = old_value[index];
187 |                         }
188 |                     } else {
189 |                         v = old_value[index];
190 |                     }
191 | 
192 |                     if (this.value[index] != v) {
193 |                         setTimeout(
194 |                             function () {
195 |                                 clamp(this, v, index);
196 |                                 domInnerValueChange(node, pos, this, this.value, eUp);
197 |                             }.bind(this), 5)
198 |                     }
199 |                 }.bind(this), eUp);
200 |                 return;
201 |             }
202 |             if (!this.options?.rgb) return;
203 | 
204 |             const rgba = Object.values(this?.value || []);
205 |             const color = colorRGB2Hex(rgba.slice(0, 3));
206 | 
207 |             if (index != size && (x < 0 && rgba.length > 2)) {
208 |                 const target = Object.values(rgba.map((item) => 255 - item)).slice(0, 3);
209 |                 this.value = Object.values(this.value);
210 |                 this.value.splice(0, 3, ...target);
211 |                 return
212 |             }
213 | 
214 |             if (!picker) {
215 |                 // firefox?
216 |                 //position: "absolute", // Use absolute positioning for consistency
217 |                 //left: `${eUp.pageX}px`, // Use pageX for more consistent placement
218 |                 //top: `${eUp.pageY}px`,
219 |                 picker = $el("input", {
220 |                     type: "color",
221 |                     parent: document.body,
222 |                     style: {
223 |                         position: "fixed",
224 |                         left: `${eUp.clientX}px`,
225 |                         top: `${eUp.clientY}px`,
226 |                         height: "0px",
227 |                         width: "0px",
228 |                         padding: "0px",
229 |                         opacity: 0,
230 |                     },
231 |                 });
232 |                 picker.addEventListener('blur', () => picker.style.display = 'none')
233 |                 picker.addEventListener('input', () => {
234 |                     if (!picker.value) return;
235 | 
236 |                     widget.value = colorHex2RGB(picker.value);
237 |                     if (rgba.length > 3) {
238 |                         widget.value.push(rgba[3]);
239 |                     }
240 |                     canvas.setDirty(true)
241 |                 })
242 |             } else {
243 |                 picker.style.display = 'revert'
244 |                 picker.style.left = `${eUp.clientX}px`
245 |                 picker.style.top = `${eUp.clientY}px`
246 |             }
247 |             picker.value = color;
248 |             requestAnimationFrame(() => {
249 |                 picker.showPicker()
250 |                 picker.focus()
251 |             })
252 |         }
253 | 
254 |         pointer.onDrag = (eMove) => {
255 |             if (!eMove.deltaX || !(index > -1)) return;
256 |             if (index >= size) return;
257 |             let v = parseFloat(this.value[index]);
258 |             v += this.options.step * Math.sign(eMove.deltaX);
259 |             clamp(this, v, index);
260 |             if (widget.callback) {
261 |                 widget.callback(widget.value, app.canvas, node)
262 |             }
263 |         }
264 |     }
265 | 
266 |     widget.serializeValue = async (node, index) => {
267 |         const rawValues = Array.isArray(widget.value)
268 |             ? widget.value
269 |             : Object.values(widget.value);
270 |         const funct = widget.options?.int ? Number : parseFloat;
271 |         return rawValues.map(v => funct(v));
272 |     };
273 | 
274 |     return widget;
275 | }
276 | 
277 | app.registerExtension({
278 |     name: "jovi.widget.spinner",
279 |     async getCustomWidgets(app) {
280 |         return {
281 |             VEC2: (node, inputName, inputData, app) => ({
282 |                 widget: node.addCustomWidget(VectorWidget(app, inputName, inputData, [0, 0])),
283 |             }),
284 |             VEC3: (node, inputName, inputData, app) => ({
285 |                 widget: node.addCustomWidget(VectorWidget(app, inputName, inputData, [0, 0, 0])),
286 |             }),
287 |             VEC4: (node, inputName, inputData, app) => ({
288 |                 widget: node.addCustomWidget(VectorWidget(app, inputName, inputData, [0, 0, 0, 0])),
289 |             })
290 |         }
291 |     }
292 | })
293 | --------------------------------------------------------------------------------
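Taken together, getCustomWidgets means any node input declared as VEC2, VEC3 or VEC4 on the Python side is rendered with a VectorWidget. A sketch of the construction in isolation; the input name, defaults and rgb flag below are hypothetical:

// inputData mirrors what ComfyUI hands over: [type, options]. The rgb flag
// switches the widget into 0-255 integer mode with a swatch and color picker.
const inputData = ["VEC3", { default: [255, 0, 0], rgb: true }];
const widget = VectorWidget(app, "color", inputData, [0, 0, 0]);
// serializeValue coerces the tracked values back into a plain numeric array:
await widget.serializeValue();   // -> [255, 0, 0]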