├── requirements.txt ├── color_transfer_example.png ├── pyproject.toml ├── .github └── workflows │ └── publish.yml ├── __init__.py ├── README.md ├── LICENSE ├── utils.py ├── .gitignore ├── workflow_examples ├── example_workflow.json └── example_workflow_new_nodes.json └── color_transfer.py /requirements.txt: -------------------------------------------------------------------------------- 1 | scikit-learn 2 | opencv-python 3 | POT 4 | -------------------------------------------------------------------------------- /color_transfer_example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/45uee/ComfyUI-Color_Transfer/HEAD/color_transfer_example.png -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "comfyui-color_transfer" 3 | description = "Implementation of color transfer using KMeans algorithm" 4 | version = "1.0.0" 5 | license = {file = "LICENSE"} 6 | 7 | [project.urls] 8 | Repository = "https://github.com/45uee/ComfyUI-Color_Transfer" 9 | # Used by Comfy Registry https://comfyregistry.org 10 | 11 | [tool.comfy] 12 | PublisherId = "q45uue" 13 | DisplayName = "ComfyUI-Color_Transfer" 14 | Icon = "" 15 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish to Comfy registry 2 | on: 3 | workflow_dispatch: 4 | push: 5 | branches: 6 | - main 7 | paths: 8 | - "pyproject.toml" 9 | 10 | jobs: 11 | publish-node: 12 | name: Publish Custom Node to registry 13 | runs-on: ubuntu-latest 14 | # if this is a forked repository. Skipping the workflow. 15 | if: github.event.repository.fork == false 16 | steps: 17 | - name: Check out code 18 | uses: actions/checkout@v4 19 | - name: Publish Custom Node 20 | uses: Comfy-Org/publish-node-action@main 21 | with: 22 | ## Add your own personal access token to your Github Repository secrets and reference it here. 23 | personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }} 24 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | from .color_transfer import PaletteTransferNode, PalleteTransferClustering, PaletteTransferReinhard, PaletteSoftTransfer, PaletteRbfTransfer, PaletteOptimalTransportTransfer, ReferenceTransferReinhard, ColorPaletteNode, ExtractPaletteNode 2 | 3 | 4 | NODE_CLASS_MAPPINGS = { 5 | "PaletteTransfer": PaletteTransferNode, 6 | "PalleteTransferClustering": PalleteTransferClustering, 7 | "PaletteTransferReinhard": PaletteTransferReinhard, 8 | "PalletteSoftTransfer": PaletteSoftTransfer, 9 | "PaletteRbfTransfer": PaletteRbfTransfer, 10 | "PaletteOptimalTransportTransfer": PaletteOptimalTransportTransfer, 11 | "ColorTransferReinhard": ReferenceTransferReinhard, 12 | "ColorPalette": ColorPaletteNode, 13 | "ExtractPalette": ExtractPaletteNode 14 | } 15 | NODE_DISPLAY_NAME_MAPPINGS = { 16 | "PaletteTransfer": "Palette Transfer", 17 | } 18 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ComfyUI-Color_Transfer 2 | 3 | Postprocessing nodes that implement color palette transfer in images. 
They replace the dominant colors in an image with a target color palette. 4 | 5 | ## Preventing/troubleshooting import errors 6 | 7 | To avoid import issues, install the required Python package from the command line: ```pip install scikit-learn```. 8 | 9 | ## Installation 10 | 11 | Clone the repo into your custom_nodes folder: 12 | ```git clone https://github.com/45uee/ComfyUI-Color_Transfer.git``` 13 | 14 | ## Usage 15 | 16 | 1. Create a "Color Palette" node containing the RGB values of your desired colors. Colors must be defined in this format: [(Value, Value, Value), ...], for example [(30, 32, 30), (60, 61, 55), (105, 117, 101), (236, 223, 204)] 17 | 2. Create a "Palette Transfer" node and connect your image and palette as its inputs. 18 | 19 | - You can choose the color clustering method and the color distance metric; the default, MiniBatchKMeans, is faster but can be less accurate. 20 | 21 | ## Example 22 | 23 | ![alt text](https://github.com/45uee/ComfyUI-Color_Transfer/blob/main/color_transfer_example.png) -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 45uee 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE.
22 | -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import cv2 3 | 4 | 5 | def EuclideanDistance(detected_color, target_colors): 6 | return np.linalg.norm(detected_color - target_colors, axis=1) 7 | 8 | 9 | def ManhattanDistance(detected_color, target_colors): 10 | return np.sum(np.abs(detected_color - target_colors), axis=1) 11 | 12 | 13 | def CosineSimilarity(detected_color, target_colors): 14 | return -np.dot(target_colors, detected_color) / (np.linalg.norm(detected_color) * np.linalg.norm(target_colors, axis=1)) 15 | 16 | 17 | def RGBWeightedDistance(detected_color, target_colors): 18 | detected_color = np.array(detected_color) 19 | target_colors = np.array(target_colors) 20 | 21 | weights = np.array([0.299, 0.587, 0.114]) 22 | 23 | weighted_detected_color = np.dot(detected_color, weights) 24 | weighted_target_colors = np.dot(target_colors, weights) 25 | 26 | return np.abs(weighted_detected_color - weighted_target_colors) 27 | 28 | 29 | def RGBWeightedSimilarity(detected_color, target_colors): 30 | detected_color = np.array(detected_color) 31 | target_colors = np.array(target_colors) 32 | 33 | weights = np.array([0.299, 0.587, 0.114]) 34 | 35 | weighted_detected_color = np.dot(detected_color, weights) 36 | weighted_target_colors = np.dot(target_colors, weights) 37 | 38 | dot_products = np.dot(weighted_detected_color, weighted_target_colors) 39 | norm1 = np.linalg.norm(weighted_detected_color) 40 | norm2 = np.linalg.norm(weighted_target_colors) 41 | 42 | return -dot_products / (norm1 * norm2) 43 | 44 | 45 | def HSVColorSimilarity(detected_color, target_colors): 46 | detected_color = np.array(detected_color) 47 | target_colors = np.array(target_colors) 48 | 49 | h1, s1, _ = detected_color 50 | h2 = target_colors[:, 0] 51 | s2 = target_colors[:, 1] 52 | 53 | h1_rad = np.radians(h1) 54 | h2_rad = np.radians(h2) 55 | 56 | v1_x = s1 * np.cos(h1_rad) 57 | v1_y = s1 * np.sin(h1_rad) 58 | v1 = np.array([v1_x, v1_y]) 59 | 60 | 61 | v2_x = s2 * np.cos(h2_rad) 62 | v2_y = s2 * np.sin(h2_rad) 63 | v2 = np.vstack([v2_x, v2_y]) 64 | 65 | dot_products = np.dot(v1, v2) 66 | 67 | v1_norm = np.linalg.norm(v1) 68 | v2_norms = np.linalg.norm(v2, axis=0) 69 | 70 | similarities = dot_products / (v1_norm * v2_norms) 71 | 72 | return -similarities 73 | 74 | 75 | def Blur(image, kernel_size): 76 | return cv2.medianBlur(image.astype(np.uint8), kernel_size) -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 110 | .pdm.toml 111 | .pdm-python 112 | .pdm-build/ 113 | 114 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 115 | __pypackages__/ 116 | 117 | # Celery stuff 118 | celerybeat-schedule 119 | celerybeat.pid 120 | 121 | # SageMath parsed files 122 | *.sage.py 123 | 124 | # Environments 125 | .env 126 | .venv 127 | env/ 128 | venv/ 129 | ENV/ 130 | env.bak/ 131 | venv.bak/ 132 | 133 | # Spyder project settings 134 | .spyderproject 135 | .spyproject 136 | 137 | # Rope project settings 138 | .ropeproject 139 | 140 | # mkdocs documentation 141 | /site 142 | 143 | # mypy 144 | .mypy_cache/ 145 | .dmypy.json 146 | dmypy.json 147 | 148 | # Pyre type checker 149 | .pyre/ 150 | 151 | # pytype static type analyzer 152 | .pytype/ 153 | 154 | # Cython debug symbols 155 | cython_debug/ 156 | 157 | # PyCharm 158 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 159 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 160 | # and can be added to the global gitignore or merged into this file. For a more nuclear 161 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
162 | #.idea/ 163 | -------------------------------------------------------------------------------- /workflow_examples/example_workflow.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 71, 3 | "last_link_id": 113, 4 | "nodes": [ 5 | { 6 | "id": 58, 7 | "type": "PreviewImage", 8 | "pos": [ 9 | 1580, 10 | 520 11 | ], 12 | "size": { 13 | "0": 562.9608764648438, 14 | "1": 307.1505432128906 15 | }, 16 | "flags": {}, 17 | "order": 13, 18 | "mode": 0, 19 | "inputs": [ 20 | { 21 | "name": "images", 22 | "type": "IMAGE", 23 | "link": 107 24 | } 25 | ], 26 | "properties": { 27 | "Node name for S&R": "PreviewImage" 28 | } 29 | }, 30 | { 31 | "id": 44, 32 | "type": "CLIPTextEncode", 33 | "pos": [ 34 | 271, 35 | 432 36 | ], 37 | "size": { 38 | "0": 407.7621154785156, 39 | "1": 86.47399139404297 40 | }, 41 | "flags": {}, 42 | "order": 5, 43 | "mode": 0, 44 | "inputs": [ 45 | { 46 | "name": "clip", 47 | "type": "CLIP", 48 | "link": 67, 49 | "slot_index": 0 50 | } 51 | ], 52 | "outputs": [ 53 | { 54 | "name": "CONDITIONING", 55 | "type": "CONDITIONING", 56 | "links": [ 57 | 60 58 | ], 59 | "shape": 3, 60 | "slot_index": 0 61 | } 62 | ], 63 | "properties": { 64 | "Node name for S&R": "CLIPTextEncode" 65 | }, 66 | "widgets_values": [ 67 | "low quality, (bad artist, watermark, text), blurry, blurry edges," 68 | ] 69 | }, 70 | { 71 | "id": 68, 72 | "type": "PaletteTransfer", 73 | "pos": [ 74 | 1230, 75 | 680 76 | ], 77 | "size": { 78 | "0": 315, 79 | "1": 102 80 | }, 81 | "flags": {}, 82 | "order": 10, 83 | "mode": 0, 84 | "inputs": [ 85 | { 86 | "name": "image", 87 | "type": "IMAGE", 88 | "link": 101 89 | }, 90 | { 91 | "name": "target_colors", 92 | "type": "COLORS", 93 | "link": 104 94 | } 95 | ], 96 | "outputs": [ 97 | { 98 | "name": "IMAGE", 99 | "type": "IMAGE", 100 | "links": [ 101 | 108 102 | ], 103 | "shape": 3, 104 | "slot_index": 0 105 | } 106 | ], 107 | "properties": { 108 | "Node name for S&R": "PaletteTransfer" 109 | }, 110 | "widgets_values": [ 111 | "Kmeans", 112 | "Manhattan" 113 | ] 114 | }, 115 | { 116 | "id": 69, 117 | "type": "PaletteTransfer", 118 | "pos": [ 119 | 1230, 120 | 830 121 | ], 122 | "size": { 123 | "0": 315, 124 | "1": 102 125 | }, 126 | "flags": {}, 127 | "order": 11, 128 | "mode": 0, 129 | "inputs": [ 130 | { 131 | "name": "image", 132 | "type": "IMAGE", 133 | "link": 102 134 | }, 135 | { 136 | "name": "target_colors", 137 | "type": "COLORS", 138 | "link": 111 139 | } 140 | ], 141 | "outputs": [ 142 | { 143 | "name": "IMAGE", 144 | "type": "IMAGE", 145 | "links": [ 146 | 109 147 | ], 148 | "shape": 3, 149 | "slot_index": 0 150 | } 151 | ], 152 | "properties": { 153 | "Node name for S&R": "PaletteTransfer" 154 | }, 155 | "widgets_values": [ 156 | "Mini batch Kmeans", 157 | "Euclidean" 158 | ] 159 | }, 160 | { 161 | "id": 61, 162 | "type": "PreviewImage", 163 | "pos": [ 164 | 2160, 165 | 520 166 | ], 167 | "size": { 168 | "0": 562.9608764648438, 169 | "1": 307.1505432128906 170 | }, 171 | "flags": {}, 172 | "order": 14, 173 | "mode": 0, 174 | "inputs": [ 175 | { 176 | "name": "images", 177 | "type": "IMAGE", 178 | "link": 108 179 | } 180 | ], 181 | "properties": { 182 | "Node name for S&R": "PreviewImage" 183 | } 184 | }, 185 | { 186 | "id": 63, 187 | "type": "PreviewImage", 188 | "pos": [ 189 | 2158, 190 | 888 191 | ], 192 | "size": { 193 | "0": 562.9608764648438, 194 | "1": 307.1505432128906 195 | }, 196 | "flags": {}, 197 | "order": 16, 198 | "mode": 0, 199 | "inputs": [ 200 | { 201 | "name": "images", 202 | 
"type": "IMAGE", 203 | "link": 110 204 | } 205 | ], 206 | "properties": { 207 | "Node name for S&R": "PreviewImage" 208 | } 209 | }, 210 | { 211 | "id": 62, 212 | "type": "PreviewImage", 213 | "pos": [ 214 | 1576, 215 | 888 216 | ], 217 | "size": { 218 | "0": 562.9608764648438, 219 | "1": 307.1505432128906 220 | }, 221 | "flags": {}, 222 | "order": 15, 223 | "mode": 0, 224 | "inputs": [ 225 | { 226 | "name": "images", 227 | "type": "IMAGE", 228 | "link": 109 229 | } 230 | ], 231 | "properties": { 232 | "Node name for S&R": "PreviewImage" 233 | } 234 | }, 235 | { 236 | "id": 40, 237 | "type": "EmptyLatentImage", 238 | "pos": [ 239 | 318, 240 | 588 241 | ], 242 | "size": { 243 | "0": 315, 244 | "1": 106 245 | }, 246 | "flags": {}, 247 | "order": 0, 248 | "mode": 0, 249 | "outputs": [ 250 | { 251 | "name": "LATENT", 252 | "type": "LATENT", 253 | "links": [ 254 | 61 255 | ], 256 | "slot_index": 0 257 | } 258 | ], 259 | "properties": { 260 | "Node name for S&R": "EmptyLatentImage" 261 | }, 262 | "widgets_values": [ 263 | 1344, 264 | 768, 265 | 1 266 | ] 267 | }, 268 | { 269 | "id": 41, 270 | "type": "CLIPTextEncode", 271 | "pos": [ 272 | 268, 273 | 274 274 | ], 275 | "size": { 276 | "0": 411.97210693359375, 277 | "1": 96.00001525878906 278 | }, 279 | "flags": {}, 280 | "order": 4, 281 | "mode": 0, 282 | "inputs": [ 283 | { 284 | "name": "clip", 285 | "type": "CLIP", 286 | "link": 62 287 | } 288 | ], 289 | "outputs": [ 290 | { 291 | "name": "CONDITIONING", 292 | "type": "CONDITIONING", 293 | "links": [ 294 | 59 295 | ], 296 | "slot_index": 0 297 | } 298 | ], 299 | "properties": { 300 | "Node name for S&R": "CLIPTextEncode" 301 | }, 302 | "widgets_values": [ 303 | "realistic, landscape, sunset" 304 | ] 305 | }, 306 | { 307 | "id": 38, 308 | "type": "KSampler", 309 | "pos": [ 310 | 710, 311 | 270 312 | ], 313 | "size": { 314 | "0": 315, 315 | "1": 262 316 | }, 317 | "flags": {}, 318 | "order": 6, 319 | "mode": 0, 320 | "inputs": [ 321 | { 322 | "name": "model", 323 | "type": "MODEL", 324 | "link": 113 325 | }, 326 | { 327 | "name": "positive", 328 | "type": "CONDITIONING", 329 | "link": 59 330 | }, 331 | { 332 | "name": "negative", 333 | "type": "CONDITIONING", 334 | "link": 60 335 | }, 336 | { 337 | "name": "latent_image", 338 | "type": "LATENT", 339 | "link": 61 340 | } 341 | ], 342 | "outputs": [ 343 | { 344 | "name": "LATENT", 345 | "type": "LATENT", 346 | "links": [ 347 | 64 348 | ], 349 | "slot_index": 0 350 | } 351 | ], 352 | "properties": { 353 | "Node name for S&R": "KSampler" 354 | }, 355 | "widgets_values": [ 356 | 111312, 357 | "fixed", 358 | 15, 359 | 11, 360 | "heun", 361 | "karras", 362 | 1 363 | ] 364 | }, 365 | { 366 | "id": 70, 367 | "type": "PaletteTransfer", 368 | "pos": [ 369 | 1230, 370 | 1000 371 | ], 372 | "size": { 373 | "0": 315, 374 | "1": 102 375 | }, 376 | "flags": {}, 377 | "order": 12, 378 | "mode": 0, 379 | "inputs": [ 380 | { 381 | "name": "image", 382 | "type": "IMAGE", 383 | "link": 103 384 | }, 385 | { 386 | "name": "target_colors", 387 | "type": "COLORS", 388 | "link": 112 389 | } 390 | ], 391 | "outputs": [ 392 | { 393 | "name": "IMAGE", 394 | "type": "IMAGE", 395 | "links": [ 396 | 110 397 | ], 398 | "shape": 3, 399 | "slot_index": 0 400 | } 401 | ], 402 | "properties": { 403 | "Node name for S&R": "PaletteTransfer" 404 | }, 405 | "widgets_values": [ 406 | "Mini batch Kmeans", 407 | "Manhattan" 408 | ] 409 | }, 410 | { 411 | "id": 45, 412 | "type": "PreviewImage", 413 | "pos": [ 414 | 1579, 415 | 162 416 | ], 417 | "size": { 418 | "0": 562.9608764648438, 419 
| "1": 307.1505432128906 420 | }, 421 | "flags": {}, 422 | "order": 8, 423 | "mode": 0, 424 | "inputs": [ 425 | { 426 | "name": "images", 427 | "type": "IMAGE", 428 | "link": 68 429 | } 430 | ], 431 | "properties": { 432 | "Node name for S&R": "PreviewImage" 433 | } 434 | }, 435 | { 436 | "id": 67, 437 | "type": "PaletteTransfer", 438 | "pos": [ 439 | 1230, 440 | 520 441 | ], 442 | "size": { 443 | "0": 315, 444 | "1": 102 445 | }, 446 | "flags": {}, 447 | "order": 9, 448 | "mode": 0, 449 | "inputs": [ 450 | { 451 | "name": "image", 452 | "type": "IMAGE", 453 | "link": 100 454 | }, 455 | { 456 | "name": "target_colors", 457 | "type": "COLORS", 458 | "link": 99 459 | } 460 | ], 461 | "outputs": [ 462 | { 463 | "name": "IMAGE", 464 | "type": "IMAGE", 465 | "links": [ 466 | 107 467 | ], 468 | "shape": 3, 469 | "slot_index": 0 470 | } 471 | ], 472 | "properties": { 473 | "Node name for S&R": "PaletteTransfer" 474 | }, 475 | "widgets_values": [ 476 | "Kmeans", 477 | "Euclidean" 478 | ] 479 | }, 480 | { 481 | "id": 59, 482 | "type": "ColorPalette", 483 | "pos": [ 484 | 980, 485 | 600 486 | ], 487 | "size": { 488 | "0": 210, 489 | "1": 94.74728393554688 490 | }, 491 | "flags": {}, 492 | "order": 1, 493 | "mode": 0, 494 | "outputs": [ 495 | { 496 | "name": "Color palette", 497 | "type": "COLORS", 498 | "links": [ 499 | 99, 500 | 104 501 | ], 502 | "shape": 3, 503 | "slot_index": 0 504 | } 505 | ], 506 | "properties": { 507 | "Node name for S&R": "ColorPalette" 508 | }, 509 | "widgets_values": [ 510 | "[(68,71,96), (97,105,174), \t(241,122,143), (255,187,107)\n,(244,237,237)]" 511 | ] 512 | }, 513 | { 514 | "id": 71, 515 | "type": "ColorPalette", 516 | "pos": [ 517 | 977, 518 | 919 519 | ], 520 | "size": { 521 | "0": 210, 522 | "1": 94.74728393554688 523 | }, 524 | "flags": {}, 525 | "order": 2, 526 | "mode": 0, 527 | "outputs": [ 528 | { 529 | "name": "Color palette", 530 | "type": "COLORS", 531 | "links": [ 532 | 111, 533 | 112 534 | ], 535 | "shape": 3, 536 | "slot_index": 0 537 | } 538 | ], 539 | "properties": { 540 | "Node name for S&R": "ColorPalette" 541 | }, 542 | "widgets_values": [ 543 | "[(31,31,31), (66,54,88), \t(92,65,95), (123,86,106)\n,(146,123,134)]" 544 | ] 545 | }, 546 | { 547 | "id": 42, 548 | "type": "VAEDecode", 549 | "pos": [ 550 | 1060, 551 | 290 552 | ], 553 | "size": { 554 | "0": 140, 555 | "1": 46 556 | }, 557 | "flags": {}, 558 | "order": 7, 559 | "mode": 0, 560 | "inputs": [ 561 | { 562 | "name": "samples", 563 | "type": "LATENT", 564 | "link": 64 565 | }, 566 | { 567 | "name": "vae", 568 | "type": "VAE", 569 | "link": 65 570 | } 571 | ], 572 | "outputs": [ 573 | { 574 | "name": "IMAGE", 575 | "type": "IMAGE", 576 | "links": [ 577 | 68, 578 | 100, 579 | 101, 580 | 102, 581 | 103 582 | ], 583 | "slot_index": 0 584 | } 585 | ], 586 | "properties": { 587 | "Node name for S&R": "VAEDecode" 588 | } 589 | }, 590 | { 591 | "id": 39, 592 | "type": "CheckpointLoaderSimple", 593 | "pos": [ 594 | -253, 595 | 616 596 | ], 597 | "size": { 598 | "0": 350.9955749511719, 599 | "1": 98 600 | }, 601 | "flags": {}, 602 | "order": 3, 603 | "mode": 0, 604 | "outputs": [ 605 | { 606 | "name": "MODEL", 607 | "type": "MODEL", 608 | "links": [ 609 | 113 610 | ], 611 | "slot_index": 0 612 | }, 613 | { 614 | "name": "CLIP", 615 | "type": "CLIP", 616 | "links": [ 617 | 62, 618 | 67 619 | ], 620 | "slot_index": 1 621 | }, 622 | { 623 | "name": "VAE", 624 | "type": "VAE", 625 | "links": [ 626 | 65 627 | ], 628 | "slot_index": 2 629 | } 630 | ], 631 | "properties": { 632 | "Node name for S&R": 
"CheckpointLoaderSimple" 633 | }, 634 | "widgets_values": [ 635 | "SDXLFaetastic_v20.safetensors" 636 | ] 637 | } 638 | ], 639 | "links": [ 640 | [ 641 | 59, 642 | 41, 643 | 0, 644 | 38, 645 | 1, 646 | "CONDITIONING" 647 | ], 648 | [ 649 | 60, 650 | 44, 651 | 0, 652 | 38, 653 | 2, 654 | "CONDITIONING" 655 | ], 656 | [ 657 | 61, 658 | 40, 659 | 0, 660 | 38, 661 | 3, 662 | "LATENT" 663 | ], 664 | [ 665 | 62, 666 | 39, 667 | 1, 668 | 41, 669 | 0, 670 | "CLIP" 671 | ], 672 | [ 673 | 64, 674 | 38, 675 | 0, 676 | 42, 677 | 0, 678 | "LATENT" 679 | ], 680 | [ 681 | 65, 682 | 39, 683 | 2, 684 | 42, 685 | 1, 686 | "VAE" 687 | ], 688 | [ 689 | 67, 690 | 39, 691 | 1, 692 | 44, 693 | 0, 694 | "CLIP" 695 | ], 696 | [ 697 | 68, 698 | 42, 699 | 0, 700 | 45, 701 | 0, 702 | "IMAGE" 703 | ], 704 | [ 705 | 99, 706 | 59, 707 | 0, 708 | 67, 709 | 1, 710 | "COLORS" 711 | ], 712 | [ 713 | 100, 714 | 42, 715 | 0, 716 | 67, 717 | 0, 718 | "IMAGE" 719 | ], 720 | [ 721 | 101, 722 | 42, 723 | 0, 724 | 68, 725 | 0, 726 | "IMAGE" 727 | ], 728 | [ 729 | 102, 730 | 42, 731 | 0, 732 | 69, 733 | 0, 734 | "IMAGE" 735 | ], 736 | [ 737 | 103, 738 | 42, 739 | 0, 740 | 70, 741 | 0, 742 | "IMAGE" 743 | ], 744 | [ 745 | 104, 746 | 59, 747 | 0, 748 | 68, 749 | 1, 750 | "COLORS" 751 | ], 752 | [ 753 | 107, 754 | 67, 755 | 0, 756 | 58, 757 | 0, 758 | "IMAGE" 759 | ], 760 | [ 761 | 108, 762 | 68, 763 | 0, 764 | 61, 765 | 0, 766 | "IMAGE" 767 | ], 768 | [ 769 | 109, 770 | 69, 771 | 0, 772 | 62, 773 | 0, 774 | "IMAGE" 775 | ], 776 | [ 777 | 110, 778 | 70, 779 | 0, 780 | 63, 781 | 0, 782 | "IMAGE" 783 | ], 784 | [ 785 | 111, 786 | 71, 787 | 0, 788 | 69, 789 | 1, 790 | "COLORS" 791 | ], 792 | [ 793 | 112, 794 | 71, 795 | 0, 796 | 70, 797 | 1, 798 | "COLORS" 799 | ], 800 | [ 801 | 113, 802 | 39, 803 | 0, 804 | 38, 805 | 0, 806 | "MODEL" 807 | ] 808 | ], 809 | "groups": [], 810 | "config": {}, 811 | "extra": { 812 | "ds": { 813 | "scale": 0.8390545288824196, 814 | "offset": { 815 | "0": -781.0907873516428, 816 | "1": -113.29259441450375 817 | } 818 | } 819 | }, 820 | "version": 0.4 821 | } -------------------------------------------------------------------------------- /workflow_examples/example_workflow_new_nodes.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "59457a40-c7d2-4390-9f07-01ce5d3b80f6", 3 | "revision": 0, 4 | "last_node_id": 38, 5 | "last_link_id": 78, 6 | "nodes": [ 7 | { 8 | "id": 26, 9 | "type": "LoadImage", 10 | "pos": [ 11 | -42.56291580200195, 12 | 746.2694702148438 13 | ], 14 | "size": [ 15 | 274.080078125, 16 | 314 17 | ], 18 | "flags": {}, 19 | "order": 0, 20 | "mode": 0, 21 | "inputs": [], 22 | "outputs": [ 23 | { 24 | "name": "IMAGE", 25 | "type": "IMAGE", 26 | "links": [ 27 | 44 28 | ] 29 | }, 30 | { 31 | "name": "MASK", 32 | "type": "MASK", 33 | "links": null 34 | } 35 | ], 36 | "properties": { 37 | "cnr_id": "comfy-core", 38 | "ver": "0.3.32", 39 | "Node name for S&R": "LoadImage" 40 | }, 41 | "widgets_values": [ 42 | "360_F_14894119_v4bqEYTmsztxtIlrHEyaKRuBkbN6oMQZ.webp", 43 | "image" 44 | ] 45 | }, 46 | { 47 | "id": 27, 48 | "type": "LoadImage", 49 | "pos": [ 50 | -41.83220291137695, 51 | 1126.92236328125 52 | ], 53 | "size": [ 54 | 274.080078125, 55 | 314 56 | ], 57 | "flags": {}, 58 | "order": 1, 59 | "mode": 0, 60 | "inputs": [], 61 | "outputs": [ 62 | { 63 | "name": "IMAGE", 64 | "type": "IMAGE", 65 | "links": [ 66 | 47, 67 | 62 68 | ] 69 | }, 70 | { 71 | "name": "MASK", 72 | "type": "MASK", 73 | "links": null 74 | } 75 | ], 76 | "properties": { 77 | 
"cnr_id": "comfy-core", 78 | "ver": "0.3.32", 79 | "Node name for S&R": "LoadImage" 80 | }, 81 | "widgets_values": [ 82 | "yellow_icon.png", 83 | "image" 84 | ] 85 | }, 86 | { 87 | "id": 20, 88 | "type": "LoadImage", 89 | "pos": [ 90 | -91.65433502197266, 91 | 336.6929931640625 92 | ], 93 | "size": [ 94 | 362.4837646484375, 95 | 352.2925720214844 96 | ], 97 | "flags": {}, 98 | "order": 2, 99 | "mode": 0, 100 | "inputs": [], 101 | "outputs": [ 102 | { 103 | "name": "IMAGE", 104 | "type": "IMAGE", 105 | "links": [ 106 | 43 107 | ] 108 | }, 109 | { 110 | "name": "MASK", 111 | "type": "MASK", 112 | "links": null 113 | } 114 | ], 115 | "properties": { 116 | "cnr_id": "comfy-core", 117 | "ver": "0.3.32", 118 | "Node name for S&R": "LoadImage" 119 | }, 120 | "widgets_values": [ 121 | "02702-1453193612.png", 122 | "image" 123 | ] 124 | }, 125 | { 126 | "id": 28, 127 | "type": "ImageBatch", 128 | "pos": [ 129 | 580.4439086914062, 130 | 745.9865112304688 131 | ], 132 | "size": [ 133 | 140, 134 | 46 135 | ], 136 | "flags": {}, 137 | "order": 5, 138 | "mode": 0, 139 | "inputs": [ 140 | { 141 | "name": "image1", 142 | "type": "IMAGE", 143 | "link": 48 144 | }, 145 | { 146 | "name": "image2", 147 | "type": "IMAGE", 148 | "link": 47 149 | } 150 | ], 151 | "outputs": [ 152 | { 153 | "name": "IMAGE", 154 | "type": "IMAGE", 155 | "links": [ 156 | 56, 157 | 59, 158 | 66, 159 | 70, 160 | 72, 161 | 74 162 | ] 163 | } 164 | ], 165 | "properties": { 166 | "cnr_id": "comfy-core", 167 | "ver": "0.3.32", 168 | "Node name for S&R": "ImageBatch" 169 | }, 170 | "widgets_values": [] 171 | }, 172 | { 173 | "id": 15, 174 | "type": "PreviewImage", 175 | "pos": [ 176 | 1350.9637451171875, 177 | 172.0648193359375 178 | ], 179 | "size": [ 180 | 1472.510009765625, 181 | 317.8247985839844 182 | ], 183 | "flags": {}, 184 | "order": 16, 185 | "mode": 0, 186 | "inputs": [ 187 | { 188 | "name": "images", 189 | "type": "IMAGE", 190 | "link": 73 191 | } 192 | ], 193 | "outputs": [], 194 | "properties": { 195 | "cnr_id": "comfy-core", 196 | "ver": "0.3.32", 197 | "Node name for S&R": "PreviewImage" 198 | }, 199 | "widgets_values": [] 200 | }, 201 | { 202 | "id": 35, 203 | "type": "PreviewImage", 204 | "pos": [ 205 | 1357.0638427734375, 206 | -180.43519592285156 207 | ], 208 | "size": [ 209 | 1472.510009765625, 210 | 317.8247985839844 211 | ], 212 | "flags": {}, 213 | "order": 12, 214 | "mode": 0, 215 | "inputs": [ 216 | { 217 | "name": "images", 218 | "type": "IMAGE", 219 | "link": 75 220 | } 221 | ], 222 | "outputs": [], 223 | "properties": { 224 | "cnr_id": "comfy-core", 225 | "ver": "0.3.32", 226 | "Node name for S&R": "PreviewImage" 227 | }, 228 | "widgets_values": [] 229 | }, 230 | { 231 | "id": 36, 232 | "type": "PreviewImage", 233 | "pos": [ 234 | 1333.7437744140625, 235 | 939.0889282226562 236 | ], 237 | "size": [ 238 | 1472.510009765625, 239 | 317.8247985839844 240 | ], 241 | "flags": {}, 242 | "order": 13, 243 | "mode": 0, 244 | "inputs": [ 245 | { 246 | "name": "images", 247 | "type": "IMAGE", 248 | "link": 76 249 | } 250 | ], 251 | "outputs": [], 252 | "properties": { 253 | "cnr_id": "comfy-core", 254 | "ver": "0.3.32", 255 | "Node name for S&R": "PreviewImage" 256 | }, 257 | "widgets_values": [] 258 | }, 259 | { 260 | "id": 29, 261 | "type": "PaletteTransferReinhard", 262 | "pos": [ 263 | 936.725830078125, 264 | 947.5828247070312 265 | ], 266 | "size": [ 267 | 191.90390014648438, 268 | 46 269 | ], 270 | "flags": {}, 271 | "order": 7, 272 | "mode": 0, 273 | "inputs": [ 274 | { 275 | "name": "image", 276 | "type": "IMAGE", 
277 | "link": 59 278 | }, 279 | { 280 | "name": "target_colors", 281 | "type": "COLOR_LIST", 282 | "link": 54 283 | } 284 | ], 285 | "outputs": [ 286 | { 287 | "name": "IMAGE", 288 | "type": "IMAGE", 289 | "links": [ 290 | 76 291 | ] 292 | } 293 | ], 294 | "properties": { 295 | "aux_id": "stdkoehler/ComfyUI-Color_Transfer", 296 | "ver": "68b88335054fca4f82b12d34cde8c6f16283cc41", 297 | "Node name for S&R": "PaletteTransferReinhard" 298 | }, 299 | "widgets_values": [] 300 | }, 301 | { 302 | "id": 37, 303 | "type": "PreviewImage", 304 | "pos": [ 305 | 1341.2269287109375, 306 | 553.5296020507812 307 | ], 308 | "size": [ 309 | 1472.510009765625, 310 | 317.8247985839844 311 | ], 312 | "flags": {}, 313 | "order": 14, 314 | "mode": 0, 315 | "inputs": [ 316 | { 317 | "name": "images", 318 | "type": "IMAGE", 319 | "link": 77 320 | } 321 | ], 322 | "outputs": [], 323 | "properties": { 324 | "cnr_id": "comfy-core", 325 | "ver": "0.3.32", 326 | "Node name for S&R": "PreviewImage" 327 | }, 328 | "widgets_values": [] 329 | }, 330 | { 331 | "id": 23, 332 | "type": "PalleteTransferClustering", 333 | "pos": [ 334 | 915.8627319335938, 335 | -39.150146484375 336 | ], 337 | "size": [ 338 | 304.771484375, 339 | 126 340 | ], 341 | "flags": {}, 342 | "order": 6, 343 | "mode": 0, 344 | "inputs": [ 345 | { 346 | "name": "image", 347 | "type": "IMAGE", 348 | "link": 56 349 | }, 350 | { 351 | "name": "target_colors", 352 | "type": "COLOR_LIST", 353 | "link": 57 354 | } 355 | ], 356 | "outputs": [ 357 | { 358 | "name": "IMAGE", 359 | "type": "IMAGE", 360 | "links": [ 361 | 75 362 | ] 363 | } 364 | ], 365 | "properties": { 366 | "aux_id": "stdkoehler/ComfyUI-Color_Transfer", 367 | "ver": "08dab8b0691d35783216ccf9d5e65ae043dced51", 368 | "Node name for S&R": "PalleteTransferClustering" 369 | }, 370 | "widgets_values": [ 371 | "Dense", 372 | 5, 373 | 0 374 | ] 375 | }, 376 | { 377 | "id": 33, 378 | "type": "PaletteRbfTransfer", 379 | "pos": [ 380 | 920.2803955078125, 381 | 1332.6422119140625 382 | ], 383 | "size": [ 384 | 270, 385 | 102 386 | ], 387 | "flags": {}, 388 | "order": 9, 389 | "mode": 0, 390 | "inputs": [ 391 | { 392 | "name": "image", 393 | "type": "IMAGE", 394 | "link": 70 395 | }, 396 | { 397 | "name": "target_colors", 398 | "type": "COLOR_LIST", 399 | "link": 68 400 | } 401 | ], 402 | "outputs": [ 403 | { 404 | "name": "IMAGE", 405 | "type": "IMAGE", 406 | "links": [ 407 | 78 408 | ] 409 | } 410 | ], 411 | "properties": { 412 | "aux_id": "stdkoehler/ComfyUI-Color_Transfer", 413 | "ver": "68b88335054fca4f82b12d34cde8c6f16283cc41", 414 | "Node name for S&R": "PaletteRbfTransfer" 415 | }, 416 | "widgets_values": [ 417 | "gaussian", 418 | 0.7000000000000001 419 | ] 420 | }, 421 | { 422 | "id": 38, 423 | "type": "PreviewImage", 424 | "pos": [ 425 | 1337.157470703125, 426 | 1334.32080078125 427 | ], 428 | "size": [ 429 | 1472.510009765625, 430 | 317.8247985839844 431 | ], 432 | "flags": {}, 433 | "order": 15, 434 | "mode": 0, 435 | "inputs": [ 436 | { 437 | "name": "images", 438 | "type": "IMAGE", 439 | "link": 78 440 | } 441 | ], 442 | "outputs": [], 443 | "properties": { 444 | "cnr_id": "comfy-core", 445 | "ver": "0.3.32", 446 | "Node name for S&R": "PreviewImage" 447 | }, 448 | "widgets_values": [] 449 | }, 450 | { 451 | "id": 31, 452 | "type": "ColorTransferReinhard", 453 | "pos": [ 454 | 978.1552734375, 455 | 1694.1551513671875 456 | ], 457 | "size": [ 458 | 181.77304077148438, 459 | 46 460 | ], 461 | "flags": {}, 462 | "order": 11, 463 | "mode": 0, 464 | "inputs": [ 465 | { 466 | "name": "image", 467 | 
"type": "IMAGE", 468 | "link": 74 469 | }, 470 | { 471 | "name": "image_reference", 472 | "type": "IMAGE", 473 | "link": 62 474 | } 475 | ], 476 | "outputs": [ 477 | { 478 | "name": "IMAGE", 479 | "type": "IMAGE", 480 | "links": [ 481 | 61 482 | ] 483 | } 484 | ], 485 | "properties": { 486 | "aux_id": "stdkoehler/ComfyUI-Color_Transfer", 487 | "ver": "68b88335054fca4f82b12d34cde8c6f16283cc41", 488 | "Node name for S&R": "ColorTransferReinhard" 489 | }, 490 | "widgets_values": [] 491 | }, 492 | { 493 | "id": 30, 494 | "type": "PreviewImage", 495 | "pos": [ 496 | 1354.0823974609375, 497 | 1717.6182861328125 498 | ], 499 | "size": [ 500 | 1460.6881103515625, 501 | 326.49432373046875 502 | ], 503 | "flags": {}, 504 | "order": 17, 505 | "mode": 0, 506 | "inputs": [ 507 | { 508 | "name": "images", 509 | "type": "IMAGE", 510 | "link": 61 511 | } 512 | ], 513 | "outputs": [], 514 | "properties": { 515 | "cnr_id": "comfy-core", 516 | "ver": "0.3.32", 517 | "Node name for S&R": "PreviewImage" 518 | }, 519 | "widgets_values": [] 520 | }, 521 | { 522 | "id": 25, 523 | "type": "ImageBatch", 524 | "pos": [ 525 | 336.3641357421875, 526 | 619.2080688476562 527 | ], 528 | "size": [ 529 | 140, 530 | 46 531 | ], 532 | "flags": {}, 533 | "order": 4, 534 | "mode": 0, 535 | "inputs": [ 536 | { 537 | "name": "image1", 538 | "type": "IMAGE", 539 | "link": 43 540 | }, 541 | { 542 | "name": "image2", 543 | "type": "IMAGE", 544 | "link": 44 545 | } 546 | ], 547 | "outputs": [ 548 | { 549 | "name": "IMAGE", 550 | "type": "IMAGE", 551 | "links": [ 552 | 48 553 | ] 554 | } 555 | ], 556 | "properties": { 557 | "cnr_id": "comfy-core", 558 | "ver": "0.3.32", 559 | "Node name for S&R": "ImageBatch" 560 | }, 561 | "widgets_values": [] 562 | }, 563 | { 564 | "id": 12, 565 | "type": "ColorPalette", 566 | "pos": [ 567 | -45.319950103759766, 568 | -34.96229553222656 569 | ], 570 | "size": [ 571 | 400, 572 | 200 573 | ], 574 | "flags": {}, 575 | "order": 3, 576 | "mode": 0, 577 | "inputs": [], 578 | "outputs": [ 579 | { 580 | "name": "Color palette", 581 | "type": "COLOR_LIST", 582 | "links": [ 583 | 54, 584 | 57, 585 | 65, 586 | 68, 587 | 71 588 | ] 589 | } 590 | ], 591 | "properties": { 592 | "aux_id": "stdkoehler/ComfyUI-Color_Transfer", 593 | "ver": "08dab8b0691d35783216ccf9d5e65ae043dced51", 594 | "Node name for S&R": "ColorPalette", 595 | "cnr_id": "ComfyUI-Color_Transfer" 596 | }, 597 | "widgets_values": [ 598 | "[\n (255, 0, 0),\n (103, 58, 183),\n]" 599 | ] 600 | }, 601 | { 602 | "id": 32, 603 | "type": "PalletteSoftTransfer", 604 | "pos": [ 605 | 980.4177856445312, 606 | 558.8236083984375 607 | ], 608 | "size": [ 609 | 270, 610 | 126 611 | ], 612 | "flags": {}, 613 | "order": 8, 614 | "mode": 0, 615 | "inputs": [ 616 | { 617 | "name": "image", 618 | "type": "IMAGE", 619 | "link": 66 620 | }, 621 | { 622 | "name": "target_colors", 623 | "type": "COLOR_LIST", 624 | "link": 65 625 | } 626 | ], 627 | "outputs": [ 628 | { 629 | "name": "IMAGE", 630 | "type": "IMAGE", 631 | "links": [ 632 | 77 633 | ] 634 | } 635 | ], 636 | "properties": { 637 | "aux_id": "stdkoehler/ComfyUI-Color_Transfer", 638 | "ver": "68b88335054fca4f82b12d34cde8c6f16283cc41", 639 | "Node name for S&R": "PalletteSoftTransfer" 640 | }, 641 | "widgets_values": [ 642 | "Grayscale", 643 | 0.4, 644 | 1 645 | ] 646 | }, 647 | { 648 | "id": 34, 649 | "type": "PaletteOptimalTransportTransfer", 650 | "pos": [ 651 | 910.7619018554688, 652 | 171.1381378173828 653 | ], 654 | "size": [ 655 | 316.4439697265625, 656 | 150 657 | ], 658 | "flags": {}, 659 | "order": 10, 
660 | "mode": 0, 661 | "inputs": [ 662 | { 663 | "name": "image", 664 | "type": "IMAGE", 665 | "link": 72 666 | }, 667 | { 668 | "name": "target_colors", 669 | "type": "COLOR_LIST", 670 | "link": 71 671 | } 672 | ], 673 | "outputs": [ 674 | { 675 | "name": "IMAGE", 676 | "type": "IMAGE", 677 | "links": [ 678 | 73 679 | ] 680 | } 681 | ], 682 | "properties": { 683 | "aux_id": "stdkoehler/ComfyUI-Color_Transfer", 684 | "ver": "68b88335054fca4f82b12d34cde8c6f16283cc41", 685 | "Node name for S&R": "PaletteOptimalTransportTransfer" 686 | }, 687 | "widgets_values": [ 688 | "Dense", 689 | 5, 690 | "Grayscale", 691 | 0.5 692 | ] 693 | } 694 | ], 695 | "links": [ 696 | [ 697 | 43, 698 | 20, 699 | 0, 700 | 25, 701 | 0, 702 | "IMAGE" 703 | ], 704 | [ 705 | 44, 706 | 26, 707 | 0, 708 | 25, 709 | 1, 710 | "IMAGE" 711 | ], 712 | [ 713 | 47, 714 | 27, 715 | 0, 716 | 28, 717 | 1, 718 | "IMAGE" 719 | ], 720 | [ 721 | 48, 722 | 25, 723 | 0, 724 | 28, 725 | 0, 726 | "IMAGE" 727 | ], 728 | [ 729 | 54, 730 | 12, 731 | 0, 732 | 29, 733 | 1, 734 | "COLOR_LIST" 735 | ], 736 | [ 737 | 56, 738 | 28, 739 | 0, 740 | 23, 741 | 0, 742 | "IMAGE" 743 | ], 744 | [ 745 | 57, 746 | 12, 747 | 0, 748 | 23, 749 | 1, 750 | "COLOR_LIST" 751 | ], 752 | [ 753 | 59, 754 | 28, 755 | 0, 756 | 29, 757 | 0, 758 | "IMAGE" 759 | ], 760 | [ 761 | 61, 762 | 31, 763 | 0, 764 | 30, 765 | 0, 766 | "IMAGE" 767 | ], 768 | [ 769 | 62, 770 | 27, 771 | 0, 772 | 31, 773 | 1, 774 | "IMAGE" 775 | ], 776 | [ 777 | 65, 778 | 12, 779 | 0, 780 | 32, 781 | 1, 782 | "COLOR_LIST" 783 | ], 784 | [ 785 | 66, 786 | 28, 787 | 0, 788 | 32, 789 | 0, 790 | "IMAGE" 791 | ], 792 | [ 793 | 68, 794 | 12, 795 | 0, 796 | 33, 797 | 1, 798 | "COLOR_LIST" 799 | ], 800 | [ 801 | 70, 802 | 28, 803 | 0, 804 | 33, 805 | 0, 806 | "IMAGE" 807 | ], 808 | [ 809 | 71, 810 | 12, 811 | 0, 812 | 34, 813 | 1, 814 | "COLOR_LIST" 815 | ], 816 | [ 817 | 72, 818 | 28, 819 | 0, 820 | 34, 821 | 0, 822 | "IMAGE" 823 | ], 824 | [ 825 | 73, 826 | 34, 827 | 0, 828 | 15, 829 | 0, 830 | "IMAGE" 831 | ], 832 | [ 833 | 74, 834 | 28, 835 | 0, 836 | 31, 837 | 0, 838 | "IMAGE" 839 | ], 840 | [ 841 | 75, 842 | 23, 843 | 0, 844 | 35, 845 | 0, 846 | "IMAGE" 847 | ], 848 | [ 849 | 76, 850 | 29, 851 | 0, 852 | 36, 853 | 0, 854 | "IMAGE" 855 | ], 856 | [ 857 | 77, 858 | 32, 859 | 0, 860 | 37, 861 | 0, 862 | "IMAGE" 863 | ], 864 | [ 865 | 78, 866 | 33, 867 | 0, 868 | 38, 869 | 0, 870 | "IMAGE" 871 | ] 872 | ], 873 | "groups": [], 874 | "config": {}, 875 | "extra": { 876 | "ds": { 877 | "scale": 0.7513148009015777, 878 | "offset": [ 879 | 2464.874353627938, 880 | 249.70144758588646 881 | ] 882 | }, 883 | "frontendVersion": "1.18.9" 884 | }, 885 | "version": 0.4 886 | } -------------------------------------------------------------------------------- /color_transfer.py: -------------------------------------------------------------------------------- 1 | from itertools import combinations 2 | 3 | import numpy as np 4 | import torch 5 | import ast 6 | import cv2 7 | import ot 8 | 9 | from sklearn.cluster import KMeans, MiniBatchKMeans 10 | from scipy.spatial import Delaunay 11 | from skimage import color 12 | from scipy.interpolate import Rbf 13 | 14 | from comfy.comfy_types import IO, ComfyNodeABC 15 | 16 | from .utils import ( 17 | EuclideanDistance, 18 | ManhattanDistance, 19 | CosineSimilarity, 20 | HSVColorSimilarity, 21 | RGBWeightedDistance, 22 | RGBWeightedSimilarity, 23 | Blur, 24 | ) 25 | 26 | 27 | class PaletteExtension: 28 | @staticmethod 29 | def dense_palette( 30 | base_palette: list[tuple[int, int, 
int]], 31 | points: int = 5, 32 | iterations: int = 2, 33 | extend_bw: bool = True, 34 | ) -> list[tuple[int, int, int]]: 35 | """ 36 | Interpolate N points between each pair of colors in the base_palette. 37 | Do the same for the new colors generated, for a given number of iterations to 38 | create a dense mesh of interpolated colors. 39 | """ 40 | # add black and white to the base palette 41 | if extend_bw: 42 | base_palette = set(base_palette) 43 | base_palette.add((0, 0, 0)) 44 | base_palette.add((255, 255, 255)) 45 | 46 | palette = np.array(list(base_palette), dtype=float) 47 | 48 | for _ in range(iterations): 49 | # build all combinations of two distinct indices 50 | idx_pairs = list(combinations(range(len(palette)), 2)) 51 | 52 | # precompute interpolation fractions 53 | t = np.linspace(0, 1, points + 2)[1:-1] 54 | 55 | # for each pair, generate points intermediates 56 | new_colors = [] 57 | for i, j in idx_pairs: 58 | c1, c2 = palette[i], palette[j] 59 | # broadcast interpolation in one go: 60 | inter = c1[None, :] + (c2 - c1)[None, :] * t[:, None] 61 | new_colors.append(inter) 62 | if new_colors: 63 | new_colors = np.vstack(new_colors) 64 | palette = np.vstack([palette, new_colors]) 65 | 66 | # remove duplicates 67 | palette = np.unique(np.rint(palette).astype(int), axis=0).astype(float) 68 | 69 | # final unique, integer RGB list 70 | result = [tuple(rgb.astype(int)) for rgb in palette] 71 | return result 72 | 73 | @staticmethod 74 | def edge_based_palette( 75 | base_palette: list[tuple[int, int, int]], points: int = 5, iterations: int = 2 76 | ) -> list[tuple[int, int, int]]: 77 | """ 78 | Use Delaunay triangulation to find edges between colors in the base_colors. 79 | Do the same for the new colors generated, for a given number of iterations to 80 | Create a dense mesh of interpolated colors. 81 | In contrast to dense_palette, this method uses the edges of the triangulation 82 | to generate new colors, which can lead to a more structured palette. 
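Example (illustrative values): palette = PaletteExtension.edge_based_palette([(255, 0, 0), (0, 255, 0), (0, 0, 255)], points=2, iterations=1). Black and white are added first, the five RGB points are Delaunay-triangulated, and two interpolated colors are inserted along every triangulation edge in each iteration before duplicates are removed.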
83 | """ 84 | # add black and white to the base palette 85 | base_palette = set(base_palette) 86 | base_palette.add((0, 0, 0)) 87 | base_palette.add((255, 255, 255)) 88 | 89 | palette = np.array(list(base_palette), dtype=float) 90 | 91 | for _ in range(iterations): 92 | # Triangulate current palette to find adjacency edges 93 | if len(palette) >= palette.shape[1] + 1: 94 | tri = Delaunay(palette) 95 | edges = set() 96 | for simplex in tri.simplices: 97 | # add all edges of the simplex 98 | for i in range(len(simplex)): 99 | for j in range(i + 1, len(simplex)): 100 | a, b = simplex[i], simplex[j] 101 | edges.add(tuple(sorted((a, b)))) 102 | else: 103 | # fallback to all pairs 104 | idxs = range(len(palette)) 105 | edges = set(tuple(sorted((i, j))) for i in idxs for j in idxs if i < j) 106 | 107 | new_colors = [] 108 | t = np.linspace(0, 1, points + 2)[1:-1][:, None] # shape (points,1) 109 | 110 | for i, j in edges: 111 | c1, c2 = palette[i], palette[j] 112 | inters = c1 + (c2 - c1) * t # (points,3) 113 | new_colors.append(inters) 114 | 115 | if new_colors: 116 | new_stack = np.vstack(new_colors) 117 | palette = np.vstack([palette, new_stack]) 118 | # remove duplicates 119 | palette = np.unique(np.rint(palette).astype(int), axis=0).astype(float) 120 | 121 | return [tuple(c.astype(int)) for c in palette] 122 | 123 | 124 | class ColorSpaceConvert: 125 | @staticmethod 126 | def convert_to_target_space( 127 | image: np.ndarray, target_colors: list[tuple[int, int, int]], color_space: str 128 | ) -> tuple[np.ndarray, list[tuple[int, int, int]]]: 129 | """Convert image and target colors to specified color space.""" 130 | if color_space == "RGB": 131 | return image, target_colors 132 | 133 | conversion_map = { 134 | "HSV": (cv2.COLOR_RGB2HSV, cv2.COLOR_HSV2RGB), 135 | "LAB": (cv2.COLOR_RGB2LAB, cv2.COLOR_LAB2RGB), 136 | } 137 | 138 | forward_conversion, _ = conversion_map[color_space] 139 | 140 | converted_image = cv2.cvtColor(image, forward_conversion) 141 | 142 | target_colors_array = np.array(target_colors, dtype=np.uint8).reshape(-1, 1, 3) 143 | converted_colors = cv2.cvtColor(target_colors_array, forward_conversion) 144 | converted_colors = [tuple(color[0]) for color in converted_colors] 145 | 146 | return converted_image, converted_colors 147 | 148 | @staticmethod 149 | def convert_to_rgb(image: np.ndarray, color_space: str) -> np.ndarray: 150 | """Convert image back to RGB color space.""" 151 | if color_space == "RGB": 152 | return image 153 | 154 | conversion_map = {"HSV": cv2.COLOR_HSV2RGB, "LAB": cv2.COLOR_LAB2RGB} 155 | 156 | return cv2.cvtColor(image, conversion_map[color_space]) 157 | 158 | 159 | class ColorClustering: 160 | def __init__(self, cluster_method: str): 161 | self.clustering_methods = { 162 | "Kmeans": KMeans, 163 | "Mini batch Kmeans": MiniBatchKMeans, 164 | } 165 | self.method = self.clustering_methods[cluster_method] 166 | 167 | def cluster_colors(self, image: np.ndarray, k: int) -> dict: 168 | """Perform color clustering on the image.""" 169 | img_array = image.reshape((-1, 3)) 170 | clustering_model = self.method(n_clusters=k, n_init="auto") 171 | clustering_model.fit(img_array) 172 | 173 | return { 174 | "image": image, 175 | "main_colors": clustering_model.cluster_centers_.astype(int), 176 | "model": clustering_model, 177 | } 178 | 179 | 180 | class ColorMatcher: 181 | def __init__(self, distance_method: str): 182 | self.distance_methods = { 183 | "Euclidean": EuclideanDistance, 184 | "Manhattan": ManhattanDistance, 185 | "Cosine Similarity": CosineSimilarity, 186 | 
"HSV Distance": HSVColorSimilarity, 187 | "RGB Weighted Distance": RGBWeightedDistance, 188 | "RGB Weighted Similarity": RGBWeightedSimilarity, 189 | } 190 | self.distance_func = self.distance_methods[distance_method] 191 | 192 | def match_colors( 193 | self, 194 | detected_colors: np.ndarray, 195 | target_colors: list[tuple[int, int, int]], 196 | clustering_model: KMeans | MiniBatchKMeans, 197 | image_shape: tuple[int, int, int], 198 | ) -> np.ndarray: 199 | """Match detected colors with target colors using the specified distance method.""" 200 | closest_colors = [] 201 | 202 | for color in detected_colors: 203 | distances = self.distance_func(color, target_colors) 204 | closest_color = target_colors[np.argmin(distances)] 205 | closest_colors.append(closest_color) 206 | 207 | closest_colors = np.array(closest_colors) 208 | return closest_colors[clustering_model.labels_].reshape(image_shape) 209 | 210 | 211 | class ImagePostProcessor: 212 | def __init__(self, gaussian_blur: int = 0): 213 | self.gaussian_blur = gaussian_blur 214 | 215 | def process_image(self, image: np.ndarray) -> np.ndarray: 216 | """Apply post-processing to the image.""" 217 | processed = np.array(image).astype(np.float32) 218 | 219 | if self.gaussian_blur: 220 | processed = Blur(processed, self.gaussian_blur) 221 | 222 | return processed / 255.0 223 | 224 | 225 | def process_image_with_palette( 226 | image: list[torch.Tensor], 227 | target_colors: list[tuple[int, int, int]], 228 | color_space: str, 229 | cluster_method: str, 230 | distance_method: str, 231 | gaussian_blur: int, 232 | ) -> torch.Tensor: 233 | """ 234 | Shared function to process an image with the given parameters. 235 | """ 236 | processedImages = [] 237 | 238 | # Initialize components 239 | converter = ColorSpaceConvert() 240 | clustering_engine = ColorClustering(cluster_method) 241 | color_matcher = ColorMatcher(distance_method) 242 | image_processor = ImagePostProcessor(gaussian_blur) 243 | 244 | for img_tensor in image: 245 | # Prepare image 246 | img = 255.0 * img_tensor.cpu().numpy() 247 | 248 | # Convert color space 249 | converted_img, converted_colors = converter.convert_to_target_space( 250 | img, target_colors, color_space 251 | ) 252 | 253 | # Perform clustering 254 | clustering_result = clustering_engine.cluster_colors( 255 | converted_img, len(target_colors) 256 | ) 257 | 258 | # Match colors 259 | processed = color_matcher.match_colors( 260 | clustering_result["main_colors"], 261 | converted_colors, 262 | clustering_result["model"], 263 | converted_img.shape, 264 | ) 265 | 266 | # Convert back to RGB 267 | processed = converter.convert_to_rgb(processed, color_space) 268 | 269 | # Post-process 270 | processed = image_processor.process_image(processed) 271 | processed_tensor = torch.from_numpy(processed)[None,] 272 | 273 | processedImages.append(processed_tensor) 274 | 275 | return torch.cat(processedImages, dim=0) 276 | 277 | 278 | class ReferenceTransferReinhard(ComfyNodeABC): 279 | @classmethod 280 | def INPUT_TYPES(cls) -> dict: 281 | data_in = { 282 | "required": { 283 | "image": (IO.IMAGE,), 284 | "image_reference": (IO.IMAGE,), 285 | } 286 | } 287 | return data_in 288 | 289 | RETURN_TYPES = (IO.IMAGE,) 290 | FUNCTION = "color_transfer" 291 | CATEGORY = "Color Transfer/Reference Transfer" 292 | 293 | def color_transfer( 294 | self, image: torch.Tensor, image_reference: torch.Tensor 295 | ) -> tuple[torch.Tensor]: 296 | 297 | processed_images = [] 298 | 299 | # Handle reference image: flatten if it's a batch 300 | target = 
image_reference.cpu().numpy() 301 | if len(target.shape) == 4: # If shape is (N, X, Y, 3) 302 | # Combine all N images into one big image for statistics calculation 303 | target = np.concatenate([target[i] for i in range(target.shape[0])], axis=0) 304 | 305 | for img_tensor in image: 306 | source = img_tensor.cpu().numpy() 307 | 308 | # Convert to Lab 309 | source_lab = color.rgb2lab(source) 310 | target_lab = color.rgb2lab(target) 311 | 312 | # Compute mean and std of each channel 313 | s_mean, s_std = source_lab.mean(axis=(0, 1)), source_lab.std(axis=(0, 1)) 314 | t_mean, t_std = target_lab.mean(axis=(0, 1)), target_lab.std(axis=(0, 1)) 315 | 316 | # Transfer color 317 | result_lab = (source_lab - s_mean) / s_std * t_std + t_mean 318 | result_rgb = np.clip(color.lab2rgb(result_lab), 0, 1) 319 | 320 | # Convert back to ComfyUI format 321 | # result_array = (result_rgb).astype(np.uint8) 322 | result_tensor = torch.from_numpy(result_rgb).unsqueeze( 323 | 0 324 | ) # Add batch dimension 325 | 326 | processed_images.append(result_tensor) 327 | 328 | return (torch.cat(processed_images, dim=0),) 329 | 330 | 331 | class PaletteOptimalTransportTransfer(ComfyNodeABC): 332 | @classmethod 333 | def INPUT_TYPES(cls) -> dict: 334 | data_in = { 335 | "required": { 336 | "image": (IO.IMAGE,), 337 | "target_colors": ("COLOR_LIST",), 338 | "palette_extension_method": ( 339 | ["Dense", "Edge", "None"], 340 | {"default": "None"}, 341 | ), 342 | "palette_extension_points": ( 343 | IO.INT, 344 | { 345 | "min": 2, 346 | "max": 20, 347 | "step": 1, 348 | "default": 5, 349 | }, 350 | ), 351 | "blend_mode": (["Original", "Grayscale"], {"default": "Original"}), 352 | "blend_ratio": ( 353 | IO.FLOAT, 354 | { 355 | "min": 0, 356 | "max": 1, 357 | "step": 0.1, 358 | "default": 0.5, 359 | }, 360 | ), 361 | } 362 | } 363 | return data_in 364 | 365 | RETURN_TYPES = (IO.IMAGE,) 366 | FUNCTION = "color_transfer" 367 | CATEGORY = "Color Transfer/Palette Transfer" 368 | 369 | def color_transfer( 370 | self, 371 | image: torch.Tensor, 372 | target_colors: list[tuple[int, int, int]], 373 | palette_extension_method: str, 374 | palette_extension_points: int, 375 | blend_mode: str, 376 | blend_ratio: float, 377 | ) -> tuple[torch.Tensor]: 378 | 379 | if palette_extension_method == "Dense": 380 | target_colors = PaletteExtension.dense_palette( 381 | target_colors, points=palette_extension_points 382 | ) 383 | elif palette_extension_method == "Edge": 384 | target_colors = PaletteExtension.edge_based_palette( 385 | target_colors, points=palette_extension_points 386 | ) 387 | 388 | palette = np.array(target_colors, dtype=np.float32) / 255.0 389 | n_palette = palette.shape[0] 390 | palette_weights = np.ones((n_palette,)) / n_palette 391 | 392 | processed_images = [] 393 | 394 | for img_tensor in image: 395 | source = img_tensor.cpu().numpy() 396 | h, w, _ = source.shape 397 | pixels = source.reshape(-1, 3) 398 | 399 | # KMeans clustering 400 | n_source_colors = 1000 401 | kmeans = MiniBatchKMeans(n_clusters=n_source_colors) 402 | kmeans.fit(pixels) 403 | source_centroids = kmeans.cluster_centers_ 404 | pixel_labels = kmeans.labels_ 405 | 406 | source_weights = np.bincount(pixel_labels) / len(pixel_labels) 407 | 408 | # Cost matrix (DO NOT normalize) 409 | cost_matrix = ot.dist(source_centroids, palette, metric="euclidean") ** 2 410 | 411 | # Compute OT transport plan 412 | transport_plan = ot.sinkhorn( 413 | source_weights, 414 | palette_weights, 415 | cost_matrix, 416 | reg=1e-2, 417 | numItermax=100000, 418 | ) 419 | 420 | # 
Barycentric mapping (normalize per row) 421 | mapped_centroids = np.dot(transport_plan, palette) / np.sum( 422 | transport_plan, axis=1, keepdims=True 423 | ) 424 | 425 | if blend_mode == "Original": 426 | # Blend between original and mapped colors 427 | recolored_pixels = ( 428 | 1 - blend_ratio 429 | ) * pixels + blend_ratio * mapped_centroids[pixel_labels] 430 | elif blend_mode == "Grayscale": 431 | gray = color.rgb2gray(source) 432 | gray_rgb = np.stack([gray] * 3, axis=-1) 433 | gray_pixels = gray_rgb.reshape(-1, 3) 434 | # Blend between original and mapped colors 435 | recolored_pixels = ( 436 | 1 - blend_ratio 437 | ) * gray_pixels + blend_ratio * mapped_centroids[pixel_labels] 438 | 439 | recolored_image = recolored_pixels.reshape(h, w, 3) 440 | 441 | result_tensor = torch.from_numpy(recolored_image).float().unsqueeze(0) 442 | processed_images.append(result_tensor) 443 | 444 | return (torch.cat(processed_images, dim=0),) 445 | 446 | 447 | class PaletteRbfTransfer(ComfyNodeABC): 448 | @classmethod 449 | def INPUT_TYPES(cls) -> dict: 450 | data_in = { 451 | "required": { 452 | "image": (IO.IMAGE,), 453 | "target_colors": ("COLOR_LIST",), 454 | "rbf_function": ( 455 | ["thin_plate", "multiquadric", "inverse", "gaussian"], 456 | {"default": "gaussian"}, 457 | ), 458 | "epsilon": ( 459 | IO.FLOAT, 460 | {"min": 0.01, "max": 100, "step": 0.1, "default": 1.0}, 461 | ), 462 | } 463 | } 464 | return data_in 465 | 466 | RETURN_TYPES = (IO.IMAGE,) 467 | FUNCTION = "color_transfer" 468 | CATEGORY = "Color Transfer/Palette Transfer" 469 | 470 | def color_transfer( 471 | self, 472 | image: torch.Tensor, 473 | target_colors: list[tuple[int, int, int]], 474 | rbf_function: str, 475 | epsilon: float, 476 | ) -> tuple[torch.Tensor]: 477 | """ 478 | Applies RBF interpolation to map image colors based on a given palette. 479 | 480 | Parameters: 481 | - image: Input image as a NumPy array in RGB format. 482 | - palette: List of RGB tuples representing the target palette. 483 | - rbf_function: Type of RBF ('thin_plate', 'multiquadric', 'inverse', 'gaussian', etc.). 484 | - epsilon: Adjustable constant for some RBF functions. 485 | 486 | Returns: 487 | - Recolored image as a NumPy array in RGB format. 
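Note: the three Rbf interpolators below are fit on the palette itself (the inputs are the palette's R, G and B values and the target is one of those channels), so palette colors are reproduced exactly and every other color is interpolated smoothly between them; epsilon adjusts the width of the radial basis functions for kernels such as 'gaussian'.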
488 | """ 489 | # Extract R, G, B channels from the palette 490 | 491 | palette = np.array(target_colors, dtype=np.float32) / 255 492 | r, g, b = palette[:, 0], palette[:, 1], palette[:, 2] 493 | 494 | # Create RBF interpolators for each channel 495 | rbf_r = Rbf(r, g, b, r, function=rbf_function, epsilon=epsilon) 496 | rbf_g = Rbf(r, g, b, g, function=rbf_function, epsilon=epsilon) 497 | rbf_b = Rbf(r, g, b, b, function=rbf_function, epsilon=epsilon) 498 | 499 | processed_images = [] 500 | 501 | for img_tensor in image: 502 | source = img_tensor.cpu().numpy() 503 | h, w, _ = source.shape 504 | pixels = source.reshape(-1, 3) 505 | 506 | # Apply RBF interpolation to each pixel 507 | mapped_r = rbf_r(pixels[:, 0], pixels[:, 1], pixels[:, 2]) 508 | mapped_g = rbf_g(pixels[:, 0], pixels[:, 1], pixels[:, 2]) 509 | mapped_b = rbf_b(pixels[:, 0], pixels[:, 1], pixels[:, 2]) 510 | 511 | # Stack and reshape the mapped channels 512 | mapped_pixels = np.stack((mapped_r, mapped_g, mapped_b), axis=-1) 513 | mapped_pixels = np.clip(mapped_pixels, 0, 1) 514 | recolored_image = mapped_pixels.reshape(h, w, 3) 515 | 516 | result_tensor = torch.from_numpy(recolored_image).unsqueeze(0) 517 | 518 | processed_images.append(result_tensor) 519 | 520 | return (torch.cat(processed_images, dim=0),) 521 | 522 | 523 | class PaletteSoftTransfer(ComfyNodeABC): 524 | @classmethod 525 | def INPUT_TYPES(cls) -> dict: 526 | data_in = { 527 | "required": { 528 | "image": (IO.IMAGE,), 529 | "target_colors": ("COLOR_LIST",), 530 | "blend_mode": (["Original", "Grayscale"], {"default": "Original"}), 531 | "blend_ratio": ( 532 | IO.FLOAT, 533 | { 534 | "min": 0, 535 | "max": 1, 536 | "step": 0.1, 537 | "default": 0.5, 538 | }, 539 | ), 540 | "softness": ( 541 | IO.FLOAT, 542 | { 543 | "min": 0, 544 | "max": 20, 545 | "step": 0.1, 546 | "default": 1, 547 | }, 548 | ), 549 | } 550 | } 551 | return data_in 552 | 553 | RETURN_TYPES = (IO.IMAGE,) 554 | FUNCTION = "color_transfer" 555 | CATEGORY = "Color Transfer/Palette Transfer" 556 | 557 | def color_transfer( 558 | self, 559 | image: torch.Tensor, 560 | target_colors: list[tuple[int, int, int]], 561 | blend_mode: str, 562 | blend_ratio: float, 563 | softness: float, 564 | ) -> tuple[torch.Tensor]: 565 | """ 566 | Shift image color mood towards N palette colors (soft harmonization) 567 | 568 | palette: list of N RGB colors [(R,G,B), ...] 
569 |         blend_ratio: how strongly to pull towards the palette (0 = none, 1 = full)
570 |         softness: how quickly the weights fall off with distance (higher = sharper attraction to the nearest palette color)
571 |         """
572 |         if len(target_colors) < 2:
573 |             raise ValueError("Palette must contain at least 2 colors")
574 | 
575 |         processed_images = []
576 | 
577 |         for img_tensor in image:
578 |             source = img_tensor.cpu().numpy()
579 | 
580 |             # Convert image + palette to Lab
581 |             img_lab = color.rgb2lab(source)
582 |             palette_lab = np.array(
583 |                 [color.rgb2lab(np.array([[c]]) / 255.0)[0, 0] for c in target_colors]
584 |             )
585 | 
586 |             # Flatten image to pixels
587 |             pixels = img_lab.reshape(-1, 3)
588 | 
589 |             # Compute distances to each palette color (Euclidean in Lab space)
590 |             dists = np.array(
591 |                 [np.linalg.norm(pixels - p, axis=1) for p in palette_lab]
592 |             )  # shape: (N_colors, N_pixels)
593 | 
594 |             # Convert distances to soft weights (exponential falloff, softmax-style)
595 |             weights = np.exp(-softness * dists)
596 |             weights /= weights.sum(axis=0)  # normalize to sum=1
597 | 
598 |             # Compute weighted average color for each pixel
599 |             projected = np.tensordot(
600 |                 weights.T, palette_lab, axes=(1, 0)
601 |             )  # shape: (N_pixels, 3)
602 | 
603 |             if blend_mode == "Original":
604 |                 # Blend between original and projected colors
605 |                 blended = (1 - blend_ratio) * pixels + blend_ratio * projected
606 |             elif blend_mode == "Grayscale":
607 |                 gray = color.rgb2gray(source)  # shape: (H, W)
608 |                 gray_rgb = np.stack([gray] * 3, axis=-1)  # shape: (H, W, 3)
609 |                 # Convert grayscale RGB to Lab (so all blending happens in Lab)
610 |                 gray_lab = color.rgb2lab(gray_rgb)
611 |                 gray_pixels = gray_lab.reshape(-1, 3)
612 |                 # Blend between grayscale and projected colors
613 |                 blended = (1 - blend_ratio) * gray_pixels + blend_ratio * projected
614 | 
615 |             # Reshape back and convert to RGB
616 |             blended_lab = blended.reshape(img_lab.shape)
617 |             blended_rgb = np.clip(color.lab2rgb(blended_lab), 0, 1)
618 | 
619 |             result_tensor = torch.from_numpy(blended_rgb).float().unsqueeze(
620 |                 0
621 |             )  # Add batch dimension
622 | 
623 |             processed_images.append(result_tensor)
624 | 
625 |         return (torch.cat(processed_images, dim=0),)
626 | 
627 | 
628 | class PaletteTransferReinhard(ComfyNodeABC):
629 |     @classmethod
630 |     def INPUT_TYPES(cls) -> dict:
631 |         data_in = {
632 |             "required": {
633 |                 "image": (IO.IMAGE,),
634 |                 "target_colors": ("COLOR_LIST",),
635 |             }
636 |         }
637 |         return data_in
638 | 
639 |     RETURN_TYPES = (IO.IMAGE,)
640 |     FUNCTION = "color_transfer"
641 |     CATEGORY = "Color Transfer/Palette Transfer"
642 | 
643 |     def color_transfer(
644 |         self, image: torch.Tensor, target_colors: list[tuple[int, int, int]]
645 |     ) -> tuple[torch.Tensor]:
646 |         if len(target_colors) == 0:
647 |             return (image,)
648 | 
649 |         target_colors = PaletteExtension.dense_palette(target_colors, points=3)
650 | 
651 |         def create_palette_image(
652 |             palette: list[tuple[int, int, int]], size: tuple[int, int] = (1000, 1000)
653 |         ) -> np.ndarray:
654 |             """Creates a synthetic image from a list of RGB palette colors"""
655 |             N = len(palette)
656 |             width, height = size
657 |             block_height = height // N
658 | 
659 |             img_array = np.zeros((height, width, 3), dtype=np.uint8)
660 | 
661 |             for i, palette_color in enumerate(palette):
662 |                 img_array[i * block_height : (i + 1) * block_height, :] = palette_color
663 | 
664 |             return img_array
665 | 
666 |         processed_images = []
667 | 
668 |         for img_tensor in image:
669 |             source = img_tensor.cpu().numpy()
670 | 
671 |             target = create_palette_image(target_colors) / 255.0
672 | 
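            # Reinhard-style transfer: the synthetic palette image built above
            # acts as the reference whose per-channel Lab statistics (mean and
            # std) the source image is matched to below.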
673 |             # Convert to Lab
674 |             source_lab = color.rgb2lab(source)
675 |             target_lab = color.rgb2lab(target)
676 | 
677 |             # Compute mean and std of each channel
678 |             s_mean, s_std = source_lab.mean(axis=(0, 1)), source_lab.std(axis=(0, 1))
679 |             t_mean, t_std = target_lab.mean(axis=(0, 1)), target_lab.std(axis=(0, 1))
680 | 
681 |             # Transfer color (guard against zero std from constant channels)
682 |             result_lab = (source_lab - s_mean) / np.where(s_std == 0, 1, s_std) * t_std + t_mean
683 |             result_rgb = np.clip(color.lab2rgb(result_lab), 0, 1)
684 | 
685 |             result_tensor = torch.from_numpy(result_rgb).float().unsqueeze(
686 |                 0
687 |             )  # Add batch dimension
688 | 
689 |             processed_images.append(result_tensor)
690 | 
691 |         return (torch.cat(processed_images, dim=0),)
692 | 
693 | 
694 | class PalleteTransferClustering(ComfyNodeABC):
695 |     @classmethod
696 |     def INPUT_TYPES(cls) -> dict:
697 |         data_in = {
698 |             "required": {
699 |                 "image": (IO.IMAGE,),
700 |                 "target_colors": ("COLOR_LIST",),
701 |                 "palette_extension_method": (
702 |                     ["Dense", "Edge", "None"],
703 |                     {"default": "None"},
704 |                 ),
705 |                 "palette_extension_points": (
706 |                     IO.INT,
707 |                     {
708 |                         "min": 2,
709 |                         "max": 20,
710 |                         "step": 1,
711 |                         "default": 5,
712 |                     },
713 |                 ),
714 |                 "gaussian_blur": (
715 |                     IO.INT,
716 |                     {"default": 3, "min": 0, "max": 27, "step": 1},
717 |                 ),
718 |             }
719 |         }
720 |         return data_in
721 | 
722 |     RETURN_TYPES = (IO.IMAGE,)
723 |     FUNCTION = "color_transfer"
724 |     CATEGORY = "Color Transfer/Palette Transfer"
725 | 
726 |     def color_transfer(
727 |         self,
728 |         image: torch.Tensor,
729 |         target_colors: list[tuple[int, int, int]],
730 |         palette_extension_method: str,
731 |         palette_extension_points: int,
732 |         gaussian_blur: int,
733 |     ) -> tuple[torch.Tensor]:
734 |         if len(target_colors) == 0:
735 |             return (image,)
736 | 
737 |         if palette_extension_method == "Dense":
738 |             target_colors = PaletteExtension.dense_palette(
739 |                 target_colors, points=palette_extension_points
740 |             )
741 |         elif palette_extension_method == "Edge":
742 |             target_colors = PaletteExtension.edge_based_palette(
743 |                 target_colors, points=palette_extension_points
744 |             )
745 | 
746 |         output = process_image_with_palette(
747 |             image=image,
748 |             target_colors=target_colors,
749 |             color_space="RGB",
750 |             cluster_method="Mini batch Kmeans",
751 |             distance_method="Euclidean",
752 |             gaussian_blur=gaussian_blur,
753 |         )
754 | 
755 |         return (output,)
756 | 
757 | 
758 | class PaletteTransferNode(ComfyNodeABC):
759 |     @classmethod
760 |     def INPUT_TYPES(cls) -> dict:
761 |         data_in = {
762 |             "required": {
763 |                 "image": (IO.IMAGE,),
764 |                 "target_colors": ("COLOR_LIST",),
765 |                 "color_space": (["RGB", "HSV", "LAB"], {"default": "RGB"}),
766 |                 "cluster_method": (
767 |                     ["Kmeans", "Mini batch Kmeans"],
768 |                     {"default": "Kmeans"},
769 |                 ),
770 |                 "distance_method": (
771 |                     [
772 |                         "Euclidean",
773 |                         "Manhattan",
774 |                         "Cosine Similarity",
775 |                         "HSV Distance",
776 |                         "RGB Weighted Distance",
777 |                         "RGB Weighted Similarity",
778 |                     ],
779 |                     {"default": "Euclidean"},
780 |                 ),
781 |                 "gaussian_blur": (
782 |                     IO.INT,
783 |                     {"default": 3, "min": 0, "max": 27, "step": 1},
784 |                 ),
785 |             }
786 |         }
787 |         return data_in
788 | 
789 |     RETURN_TYPES = (IO.IMAGE,)
790 |     FUNCTION = "color_transfer"
791 |     CATEGORY = "Color Transfer/Palette Transfer"
792 | 
793 |     def color_transfer(
794 |         self,
795 |         image: torch.Tensor,
796 |         target_colors: list[tuple[int, int, int]],
797 |         color_space: str,
798 |         cluster_method: str,
799 |         distance_method: str,
800 |         gaussian_blur: int,
801 |     ) -> tuple[torch.Tensor]:
802 |         if len(target_colors) == 0:
803 |             return (image,)
804 | 
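        # The shared process_image_with_palette helper clusters the image's
        # colors with the chosen method, matches each cluster to a palette
        # color using the selected distance metric, and optionally smooths
        # the result with a Gaussian blur.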
805 |         output = process_image_with_palette(
806 |             image=image,
807 |             target_colors=target_colors,
808 |             color_space=color_space,
809 |             cluster_method=cluster_method,
810 |             distance_method=distance_method,
811 |             gaussian_blur=gaussian_blur,
812 |         )
813 | 
814 |         return (output,)
815 | 
816 | 
817 | class ColorPaletteNode(ComfyNodeABC):
818 |     @classmethod
819 |     def INPUT_TYPES(cls) -> dict:
820 |         return {
821 |             "required": {
822 |                 "color_palette": (
823 |                     IO.STRING,
824 |                     {
825 |                         "default": "[(30, 32, 30), (60, 61, 55), (105, 117, 101), (236, 223, 204)]",
826 |                         "multiline": True,
827 |                     },
828 |                 ),
829 |             },
830 |         }
831 | 
832 |     RETURN_TYPES = ("COLOR_LIST",)
833 |     RETURN_NAMES = ("Color palette",)
834 |     FUNCTION = "color_list"
835 |     CATEGORY = "Color Transfer/Palette Transfer"
836 | 
837 |     def color_list(self, color_palette: str) -> tuple[list[tuple[int, int, int]]]:
838 |         return (ast.literal_eval(color_palette),)  # parse the palette string literal, e.g. "[(30, 32, 30), ...]"
839 | 
840 | 
841 | class ExtractPaletteNode(ComfyNodeABC):
842 |     @classmethod
843 |     def INPUT_TYPES(cls) -> dict:
844 |         return {
845 |             "required": {
846 |                 "image": (IO.IMAGE,),
847 |                 "num_colors": (
848 |                     IO.INT,
849 |                     {"default": 5, "min": 1, "max": 50, "step": 1},
850 |                 ),
851 |                 "cluster_method": (
852 |                     ["Kmeans", "Mini batch Kmeans"],
853 |                     {"default": "Mini batch Kmeans"},
854 |                 ),
855 |             },
856 |         }
857 | 
858 |     RETURN_TYPES = ("COLOR_LIST",)
859 |     RETURN_NAMES = ("Color palette",)
860 |     FUNCTION = "extract_palette"
861 |     CATEGORY = "Color Transfer/Palette Transfer"
862 | 
863 |     def extract_palette(
864 |         self, image: torch.Tensor, num_colors: int, cluster_method: str
865 |     ) -> tuple[list[tuple[int, int, int]]]:
866 |         """Extract dominant colors from an image using clustering."""
867 | 
868 |         img_tensor = image[0] if len(image.shape) == 4 else image  # first image of the batch
869 | 
870 |         img = (255.0 * img_tensor.cpu().numpy()).astype(np.uint8)
871 | 
872 |         clustering_engine = ColorClustering(cluster_method)
873 |         clustering_result = clustering_engine.cluster_colors(img, num_colors)
874 | 
875 |         colors = clustering_result["main_colors"]
876 |         color_list = [tuple(map(int, color)) for color in colors]
877 | 
878 |         return (color_list,)
--------------------------------------------------------------------------------