├── .github └── workflows │ └── publish.yml ├── LICENSE ├── README.md ├── __init__.py ├── example ├── IC image combination example.json └── aa.py ├── font ├── a.py └── alcatraz+script.otf ├── py ├── Image.py ├── ImageIC.py ├── ImageICAdvanced.py ├── ImageLoaderAdvanced.py ├── ImageUpscaleTiled.py ├── ImagesFromFolder.py ├── Mask.py ├── MaskBatchNode.py ├── MaskComposite.py ├── MaskCropNode.py ├── TextProcessNode.py ├── image_blend_resize.py ├── image_selector.py ├── logic.py ├── mask_smart_valley_split.py ├── mask_to_ellipse.py └── text_image_node.py ├── pyproject.toml └── requirements.txt /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish to Comfy registry 2 | on: 3 | workflow_dispatch: 4 | push: 5 | branches: 6 | - main 7 | - master 8 | paths: 9 | - "pyproject.toml" 10 | 11 | permissions: 12 | issues: write 13 | 14 | jobs: 15 | publish-node: 16 | name: Publish Custom Node to registry 17 | runs-on: ubuntu-latest 18 | if: ${{ github.repository_owner == 'yichengup' }} 19 | steps: 20 | - name: Check out code 21 | uses: actions/checkout@v4 22 | with: 23 | submodules: true 24 | - name: Publish Custom Node 25 | uses: Comfy-Org/publish-node-action@v1 26 | with: 27 | ## Add your own personal access token to your Github Repository secrets and reference it here. 
28 | personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }} 29 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 yicheng 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ComfyUI-YCNodes 2 | 3 | A collection of image processing extension nodes for ComfyUI. 
4 | 5 | ## Nodes Description 6 | ### Image mirror, image rotate, image mosaic 7 | ### ImageSelector 8 | Multiple input images, select by custom name, select the specified image output (Ten fixed inputs) 9 | ### ImageBatchSelector 10 | Image combination batch, receiving batch image input, and selecting output specified images by custom name. Theoretically, there is no limit to the number of inputs. 11 | ### Image IC 12 | Basic image combination node with the following features: 13 | - Horizontal/vertical image concatenation 14 | - Aspect ratio scaling based on reference edges 15 | - Flexible position adjustment (top/center/bottom/left/right) 16 | - Custom background color 17 | - Mask processing support 18 | 19 | ### Image IC Advanced 20 | Advanced image combination node, adding the following features to the basic version: 21 | - Overlay mode support 22 | - Precise XY coordinate control 23 | - Independent scaling control 24 | - Additional output options 25 | 26 | ### These two nodes provide advanced image combination functions, designed for IC-LoRA and Fill+Redux partial-redraw migration images. They optimize the combination of the two images, reducing the canvas size needed by Flux IC-LoRA and Fill+Redux when partially redrawing, and saving computing power. 27 | 28 | 29 | - ![image](https://github.com/user-attachments/assets/a81c8e3f-b32d-4e26-ada5-ecf145fafce6) 30 | - ![image](https://github.com/user-attachments/assets/26c561f3-4169-4bc6-8404-e7246349a82f) 31 | The basic code of the nodes ic image and ic image advanced comes from https://github.com/lrzjason/Comfyui-In-Context-Lora-Utils 32 | Based on it, I optimized two nodes: ic image and ic image advanced. 
33 | Thanks to the original author @小志Jason 34 | 35 | ## Installation 36 | 37 | Clone this repository into ComfyUI's custom_nodes directory: 38 | ```bash 39 | cd custom_nodes 40 | git clone https://github.com/yichengup/ComfyUI-YCNodes.git 41 | ``` 42 | 43 | ## 关于我 | About me 44 | 45 | Bilibili:[我的B站主页](https://space.bilibili.com/498399023?spm_id_from=333.1007.0.0) 46 | QQ号 3260561522 47 | wechat微信: DLONG189one 48 | 49 | 50 | 51 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | import importlib.util 2 | import os 3 | import sys 4 | import json 5 | 6 | NODE_CLASS_MAPPINGS = {} 7 | NODE_DISPLAY_NAME_MAPPINGS = {} 8 | 9 | python = sys.executable 10 | 11 | def get_ext_dir(subpath=None, mkdir=False): 12 |     dir = os.path.dirname(__file__) 13 |     if subpath is not None: 14 |         dir = os.path.join(dir, subpath) 15 | 16 |     dir = os.path.abspath(dir) 17 | 18 |     if mkdir and not os.path.exists(dir): 19 |         os.makedirs(dir) 20 |     return dir 21 | 22 | def serialize(obj): 23 |     if isinstance(obj, (str, int, float, bool, list, dict, type(None))): 24 |         return obj 25 |     return str(obj) 26 | 27 | # 获取py目录路径 28 | py = get_ext_dir("py") 29 | 30 | # 遍历py目录中的所有.py文件 31 | files = os.listdir(py) 32 | all_nodes = {} 33 | 34 | for file in files: 35 |     if not file.endswith(".py"): 36 |         continue 37 |     name = os.path.splitext(file)[0] 38 |     imported_module = importlib.import_module(".py.{}".format(name), __name__) 39 |     try: 40 |         NODE_CLASS_MAPPINGS = {**NODE_CLASS_MAPPINGS, **imported_module.NODE_CLASS_MAPPINGS} 41 |         NODE_DISPLAY_NAME_MAPPINGS = {**NODE_DISPLAY_NAME_MAPPINGS, **imported_module.NODE_DISPLAY_NAME_MAPPINGS} 42 |         serialized_CLASS_MAPPINGS = {k: serialize(v) for k, v in imported_module.NODE_CLASS_MAPPINGS.items()} 43 |         serialized_DISPLAY_NAME_MAPPINGS = {k: serialize(v) for k, v in imported_module.NODE_DISPLAY_NAME_MAPPINGS.items()} 44 |         all_nodes[file] = { 45 
| "NODE_CLASS_MAPPINGS": serialized_CLASS_MAPPINGS, 46 | "NODE_DISPLAY_NAME_MAPPINGS": serialized_DISPLAY_NAME_MAPPINGS 47 | } 48 | except Exception as e: 49 | print(f"Error loading {file}: {str(e)}") 50 | 51 | # 定义web目录(如果需要前端资源) 52 | WEB_DIRECTORY = "./js" 53 | 54 | __all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS", "WEB_DIRECTORY"] -------------------------------------------------------------------------------- /example/IC image combination example.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 138, 3 | "last_link_id": 247, 4 | "nodes": [ 5 | { 6 | "id": 121, 7 | "type": "MaskToImage", 8 | "pos": [ 9 | 3220, 10 | -2040 11 | ], 12 | "size": [ 13 | 210, 14 | 26 15 | ], 16 | "flags": {}, 17 | "order": 14, 18 | "mode": 0, 19 | "inputs": [ 20 | { 21 | "name": "mask", 22 | "type": "MASK", 23 | "link": 219, 24 | "label": "遮罩" 25 | } 26 | ], 27 | "outputs": [ 28 | { 29 | "name": "IMAGE", 30 | "type": "IMAGE", 31 | "links": [ 32 | 203 33 | ], 34 | "slot_index": 0, 35 | "label": "图像" 36 | } 37 | ], 38 | "properties": { 39 | "Node name for S&R": "MaskToImage" 40 | }, 41 | "widgets_values": [] 42 | }, 43 | { 44 | "id": 119, 45 | "type": "MaskToImage", 46 | "pos": [ 47 | 3220, 48 | -2130 49 | ], 50 | "size": [ 51 | 210, 52 | 26 53 | ], 54 | "flags": {}, 55 | "order": 13, 56 | "mode": 0, 57 | "inputs": [ 58 | { 59 | "name": "mask", 60 | "type": "MASK", 61 | "link": 218, 62 | "label": "遮罩" 63 | } 64 | ], 65 | "outputs": [ 66 | { 67 | "name": "IMAGE", 68 | "type": "IMAGE", 69 | "links": [ 70 | 201 71 | ], 72 | "slot_index": 0, 73 | "label": "图像" 74 | } 75 | ], 76 | "properties": { 77 | "Node name for S&R": "MaskToImage" 78 | }, 79 | "widgets_values": [] 80 | }, 81 | { 82 | "id": 127, 83 | "type": "MaskToImage", 84 | "pos": [ 85 | 3260, 86 | -1390 87 | ], 88 | "size": [ 89 | 210, 90 | 26 91 | ], 92 | "flags": {}, 93 | "order": 7, 94 | "mode": 0, 95 | "inputs": [ 96 | { 97 | "name": "mask", 98 | 
"type": "MASK", 99 | "link": 241, 100 | "label": "遮罩" 101 | } 102 | ], 103 | "outputs": [ 104 | { 105 | "name": "IMAGE", 106 | "type": "IMAGE", 107 | "links": [ 108 | 224 109 | ], 110 | "slot_index": 0, 111 | "label": "图像" 112 | } 113 | ], 114 | "properties": { 115 | "Node name for S&R": "MaskToImage" 116 | } 117 | }, 118 | { 119 | "id": 132, 120 | "type": "MaskToImage", 121 | "pos": [ 122 | 3260, 123 | -1220 124 | ], 125 | "size": [ 126 | 210, 127 | 26 128 | ], 129 | "flags": {}, 130 | "order": 9, 131 | "mode": 0, 132 | "inputs": [ 133 | { 134 | "name": "mask", 135 | "type": "MASK", 136 | "link": 243, 137 | "label": "遮罩" 138 | } 139 | ], 140 | "outputs": [ 141 | { 142 | "name": "IMAGE", 143 | "type": "IMAGE", 144 | "links": [ 145 | 222 146 | ], 147 | "slot_index": 0, 148 | "label": "图像" 149 | } 150 | ], 151 | "properties": { 152 | "Node name for S&R": "MaskToImage" 153 | } 154 | }, 155 | { 156 | "id": 133, 157 | "type": "MaskToImage", 158 | "pos": [ 159 | 3260, 160 | -1310 161 | ], 162 | "size": [ 163 | 210, 164 | 26 165 | ], 166 | "flags": {}, 167 | "order": 8, 168 | "mode": 0, 169 | "inputs": [ 170 | { 171 | "name": "mask", 172 | "type": "MASK", 173 | "link": 242, 174 | "label": "遮罩" 175 | } 176 | ], 177 | "outputs": [ 178 | { 179 | "name": "IMAGE", 180 | "type": "IMAGE", 181 | "links": [ 182 | 221 183 | ], 184 | "slot_index": 0, 185 | "label": "图像" 186 | } 187 | ], 188 | "properties": { 189 | "Node name for S&R": "MaskToImage" 190 | } 191 | }, 192 | { 193 | "id": 87, 194 | "type": "LoadImage", 195 | "pos": [ 196 | 2556.71533203125, 197 | -2070.189453125 198 | ], 199 | "size": [ 200 | 247.8831024169922, 201 | 212.69451904296875 202 | ], 203 | "flags": {}, 204 | "order": 0, 205 | "mode": 0, 206 | "inputs": [], 207 | "outputs": [ 208 | { 209 | "name": "IMAGE", 210 | "type": "IMAGE", 211 | "links": [ 212 | 213 213 | ], 214 | "slot_index": 0, 215 | "label": "图像" 216 | }, 217 | { 218 | "name": "MASK", 219 | "type": "MASK", 220 | "links": [ 221 | 215 222 | ], 223 | 
"slot_index": 1, 224 | "label": "遮罩" 225 | } 226 | ], 227 | "properties": { 228 | "Node name for S&R": "LoadImage" 229 | }, 230 | "widgets_values": [ 231 | "clipspace/clipspace-mask-731916.8000000715.png [input]", 232 | "image" 233 | ] 234 | }, 235 | { 236 | "id": 110, 237 | "type": "MaskToImage", 238 | "pos": [ 239 | 3220, 240 | -2210 241 | ], 242 | "size": [ 243 | 210, 244 | 26 245 | ], 246 | "flags": {}, 247 | "order": 12, 248 | "mode": 0, 249 | "inputs": [ 250 | { 251 | "name": "mask", 252 | "type": "MASK", 253 | "link": 217, 254 | "label": "遮罩" 255 | } 256 | ], 257 | "outputs": [ 258 | { 259 | "name": "IMAGE", 260 | "type": "IMAGE", 261 | "links": [ 262 | 170 263 | ], 264 | "slot_index": 0, 265 | "label": "图像" 266 | } 267 | ], 268 | "properties": { 269 | "Node name for S&R": "MaskToImage" 270 | }, 271 | "widgets_values": [] 272 | }, 273 | { 274 | "id": 137, 275 | "type": "ImageICAdvanced", 276 | "pos": [ 277 | 2896.51904296875, 278 | -1454.118408203125 279 | ], 280 | "size": [ 281 | 315, 282 | 346 283 | ], 284 | "flags": {}, 285 | "order": 4, 286 | "mode": 0, 287 | "inputs": [ 288 | { 289 | "name": "first_image", 290 | "type": "IMAGE", 291 | "link": 237, 292 | "label": "first_image" 293 | }, 294 | { 295 | "name": "second_image", 296 | "type": "IMAGE", 297 | "link": 238, 298 | "label": "second_image" 299 | }, 300 | { 301 | "name": "first_mask", 302 | "type": "MASK", 303 | "link": 246, 304 | "shape": 7, 305 | "label": "first_mask" 306 | }, 307 | { 308 | "name": "second_mask", 309 | "type": "MASK", 310 | "link": 247, 311 | "shape": 7, 312 | "label": "second_mask" 313 | } 314 | ], 315 | "outputs": [ 316 | { 317 | "name": "IMAGE", 318 | "type": "IMAGE", 319 | "links": [ 320 | 244 321 | ], 322 | "label": "IMAGE", 323 | "slot_index": 0 324 | }, 325 | { 326 | "name": "MASK", 327 | "type": "MASK", 328 | "links": [ 329 | 241 330 | ], 331 | "label": "MASK", 332 | "slot_index": 1 333 | }, 334 | { 335 | "name": "FIRST_MASK", 336 | "type": "MASK", 337 | "links": [ 338 | 242 
339 | ], 340 | "label": "FIRST_MASK", 341 | "slot_index": 2 342 | }, 343 | { 344 | "name": "SECOND_MASK", 345 | "type": "MASK", 346 | "links": [ 347 | 243 348 | ], 349 | "label": "SECOND_MASK", 350 | "slot_index": 3 351 | }, 352 | { 353 | "name": "MAIN_IMAGE", 354 | "type": "IMAGE", 355 | "links": [ 356 | 245 357 | ], 358 | "label": "MAIN_IMAGE", 359 | "slot_index": 4 360 | }, 361 | { 362 | "name": "first_size", 363 | "type": "TUPLE", 364 | "links": null, 365 | "label": "first_size" 366 | }, 367 | { 368 | "name": "second_size", 369 | "type": "TUPLE", 370 | "links": null, 371 | "label": "second_size" 372 | } 373 | ], 374 | "properties": { 375 | "Node name for S&R": "ImageICAdvanced" 376 | }, 377 | "widgets_values": [ 378 | "image1_width", 379 | "overlay", 380 | 0.4, 381 | "top", 382 | 74.10000000000001, 383 | 93.80000000000001, 384 | 1024, 385 | "#FFFFFF" 386 | ] 387 | }, 388 | { 389 | "id": 135, 390 | "type": "LoadImage", 391 | "pos": [ 392 | 2601.04150390625, 393 | -1304.5562744140625 394 | ], 395 | "size": [ 396 | 245.57566833496094, 397 | 223.0775146484375 398 | ], 399 | "flags": {}, 400 | "order": 1, 401 | "mode": 0, 402 | "inputs": [], 403 | "outputs": [ 404 | { 405 | "name": "IMAGE", 406 | "type": "IMAGE", 407 | "links": [ 408 | 238 409 | ], 410 | "slot_index": 0, 411 | "label": "图像" 412 | }, 413 | { 414 | "name": "MASK", 415 | "type": "MASK", 416 | "links": [ 417 | 247 418 | ], 419 | "slot_index": 1, 420 | "label": "遮罩" 421 | } 422 | ], 423 | "properties": { 424 | "Node name for S&R": "LoadImage" 425 | }, 426 | "widgets_values": [ 427 | "clipspace/clipspace-mask-731916.8000000715.png [input]", 428 | "image" 429 | ] 430 | }, 431 | { 432 | "id": 134, 433 | "type": "LoadImage", 434 | "pos": [ 435 | 2603.027587890625, 436 | -1645.15380859375 437 | ], 438 | "size": [ 439 | 247.82542419433594, 440 | 275.5860290527344 441 | ], 442 | "flags": {}, 443 | "order": 2, 444 | "mode": 0, 445 | "inputs": [], 446 | "outputs": [ 447 | { 448 | "name": "IMAGE", 449 | "type": 
"IMAGE", 450 | "links": [ 451 | 237 452 | ], 453 | "slot_index": 0, 454 | "label": "图像" 455 | }, 456 | { 457 | "name": "MASK", 458 | "type": "MASK", 459 | "links": [ 460 | 246 461 | ], 462 | "slot_index": 1, 463 | "label": "遮罩" 464 | } 465 | ], 466 | "properties": { 467 | "Node name for S&R": "LoadImage" 468 | }, 469 | "widgets_values": [ 470 | "clipspace/clipspace-mask-1674957.7999999523.png [input]", 471 | "image" 472 | ] 473 | }, 474 | { 475 | "id": 130, 476 | "type": "PreviewImage", 477 | "pos": [ 478 | 3267.51708984375, 479 | -1681.3355712890625 480 | ], 481 | "size": [ 482 | 223.61061096191406, 483 | 246 484 | ], 485 | "flags": {}, 486 | "order": 6, 487 | "mode": 0, 488 | "inputs": [ 489 | { 490 | "name": "images", 491 | "type": "IMAGE", 492 | "link": 244, 493 | "label": "图像" 494 | } 495 | ], 496 | "outputs": [], 497 | "properties": { 498 | "Node name for S&R": "PreviewImage" 499 | } 500 | }, 501 | { 502 | "id": 131, 503 | "type": "PreviewImage", 504 | "pos": [ 505 | 3520.98486328125, 506 | -1678.8648681640625 507 | ], 508 | "size": [ 509 | 182.7692413330078, 510 | 246 511 | ], 512 | "flags": {}, 513 | "order": 15, 514 | "mode": 0, 515 | "inputs": [ 516 | { 517 | "name": "images", 518 | "type": "IMAGE", 519 | "link": 224, 520 | "label": "图像" 521 | } 522 | ], 523 | "outputs": [], 524 | "properties": { 525 | "Node name for S&R": "PreviewImage" 526 | } 527 | }, 528 | { 529 | "id": 138, 530 | "type": "PreviewImage", 531 | "pos": [ 532 | 3730.328857421875, 533 | -1687.3011474609375 534 | ], 535 | "size": [ 536 | 183.0326690673828, 537 | 246 538 | ], 539 | "flags": {}, 540 | "order": 10, 541 | "mode": 0, 542 | "inputs": [ 543 | { 544 | "name": "images", 545 | "type": "IMAGE", 546 | "link": 245, 547 | "label": "图像" 548 | } 549 | ], 550 | "outputs": [], 551 | "properties": { 552 | "Node name for S&R": "PreviewImage" 553 | } 554 | }, 555 | { 556 | "id": 129, 557 | "type": "PreviewImage", 558 | "pos": [ 559 | 3746.182861328125, 560 | -1375.7838134765625 561 | ], 562 | 
"size": [ 563 | 157.5600128173828, 564 | 246 565 | ], 566 | "flags": {}, 567 | "order": 17, 568 | "mode": 0, 569 | "inputs": [ 570 | { 571 | "name": "images", 572 | "type": "IMAGE", 573 | "link": 222, 574 | "label": "图像" 575 | } 576 | ], 577 | "outputs": [], 578 | "properties": { 579 | "Node name for S&R": "PreviewImage" 580 | } 581 | }, 582 | { 583 | "id": 128, 584 | "type": "PreviewImage", 585 | "pos": [ 586 | 3527.069580078125, 587 | -1375.1636962890625 588 | ], 589 | "size": [ 590 | 187.76138305664062, 591 | 252.89476013183594 592 | ], 593 | "flags": {}, 594 | "order": 16, 595 | "mode": 0, 596 | "inputs": [ 597 | { 598 | "name": "images", 599 | "type": "IMAGE", 600 | "link": 221, 601 | "label": "图像" 602 | } 603 | ], 604 | "outputs": [], 605 | "properties": { 606 | "Node name for S&R": "PreviewImage" 607 | } 608 | }, 609 | { 610 | "id": 86, 611 | "type": "LoadImage", 612 | "pos": [ 613 | 2543.831298828125, 614 | -2356.5478515625 615 | ], 616 | "size": [ 617 | 263.0655212402344, 618 | 236.9929656982422 619 | ], 620 | "flags": {}, 621 | "order": 3, 622 | "mode": 0, 623 | "inputs": [], 624 | "outputs": [ 625 | { 626 | "name": "IMAGE", 627 | "type": "IMAGE", 628 | "links": [ 629 | 212 630 | ], 631 | "slot_index": 0, 632 | "label": "图像" 633 | }, 634 | { 635 | "name": "MASK", 636 | "type": "MASK", 637 | "links": [ 638 | 214 639 | ], 640 | "slot_index": 1, 641 | "label": "遮罩" 642 | } 643 | ], 644 | "properties": { 645 | "Node name for S&R": "LoadImage" 646 | }, 647 | "widgets_values": [ 648 | "clipspace/clipspace-mask-1759880.899999857.png [input]", 649 | "image" 650 | ] 651 | }, 652 | { 653 | "id": 124, 654 | "type": "ImageIC", 655 | "pos": [ 656 | 2830, 657 | -2150 658 | ], 659 | "size": [ 660 | 315, 661 | 278 662 | ], 663 | "flags": {}, 664 | "order": 5, 665 | "mode": 0, 666 | "inputs": [ 667 | { 668 | "name": "first_image", 669 | "type": "IMAGE", 670 | "link": 212, 671 | "label": "first_image" 672 | }, 673 | { 674 | "name": "second_image", 675 | "type": "IMAGE", 
676 | "link": 213, 677 | "label": "second_image" 678 | }, 679 | { 680 | "name": "first_mask", 681 | "type": "MASK", 682 | "link": 214, 683 | "shape": 7, 684 | "label": "first_mask" 685 | }, 686 | { 687 | "name": "second_mask", 688 | "type": "MASK", 689 | "link": 215, 690 | "shape": 7, 691 | "label": "second_mask" 692 | } 693 | ], 694 | "outputs": [ 695 | { 696 | "name": "IMAGE", 697 | "type": "IMAGE", 698 | "links": [ 699 | 216 700 | ], 701 | "label": "IMAGE", 702 | "slot_index": 0 703 | }, 704 | { 705 | "name": "MASK", 706 | "type": "MASK", 707 | "links": [ 708 | 217 709 | ], 710 | "label": "MASK", 711 | "slot_index": 1 712 | }, 713 | { 714 | "name": "FIRST_MASK", 715 | "type": "MASK", 716 | "links": [ 717 | 218 718 | ], 719 | "label": "FIRST_MASK", 720 | "slot_index": 2 721 | }, 722 | { 723 | "name": "SECOND_MASK", 724 | "type": "MASK", 725 | "links": [ 726 | 219 727 | ], 728 | "label": "SECOND_MASK", 729 | "slot_index": 3 730 | }, 731 | { 732 | "name": "first_size", 733 | "type": "TUPLE", 734 | "links": null, 735 | "label": "first_size" 736 | }, 737 | { 738 | "name": "second_size", 739 | "type": "TUPLE", 740 | "links": null, 741 | "label": "second_size" 742 | } 743 | ], 744 | "properties": { 745 | "Node name for S&R": "ImageIC" 746 | }, 747 | "widgets_values": [ 748 | "image1_width", 749 | "horizontal", 750 | 0.5, 751 | "bottom", 752 | 1024, 753 | "#FFFFFF" 754 | ] 755 | }, 756 | { 757 | "id": 111, 758 | "type": "PreviewImage", 759 | "pos": [ 760 | 3426.1474609375, 761 | -2459.13671875 762 | ], 763 | "size": [ 764 | 169.24900817871094, 765 | 194.06504821777344 766 | ], 767 | "flags": {}, 768 | "order": 18, 769 | "mode": 0, 770 | "inputs": [ 771 | { 772 | "name": "images", 773 | "type": "IMAGE", 774 | "link": 170, 775 | "label": "图像" 776 | } 777 | ], 778 | "outputs": [], 779 | "properties": { 780 | "Node name for S&R": "PreviewImage" 781 | }, 782 | "widgets_values": [] 783 | }, 784 | { 785 | "id": 88, 786 | "type": "PreviewImage", 787 | "pos": [ 788 | 
3212.453857421875, 789 | -2462.171630859375 790 | ], 791 | "size": [ 792 | 192.9071807861328, 793 | 192.274169921875 794 | ], 795 | "flags": {}, 796 | "order": 11, 797 | "mode": 0, 798 | "inputs": [ 799 | { 800 | "name": "images", 801 | "type": "IMAGE", 802 | "link": 216, 803 | "label": "图像" 804 | } 805 | ], 806 | "outputs": [], 807 | "properties": { 808 | "Node name for S&R": "PreviewImage" 809 | }, 810 | "widgets_values": [] 811 | }, 812 | { 813 | "id": 120, 814 | "type": "PreviewImage", 815 | "pos": [ 816 | 3461.54296875, 817 | -2206.09423828125 818 | ], 819 | "size": [ 820 | 151.707763671875, 821 | 136.75753784179688 822 | ], 823 | "flags": {}, 824 | "order": 19, 825 | "mode": 0, 826 | "inputs": [ 827 | { 828 | "name": "images", 829 | "type": "IMAGE", 830 | "link": 201, 831 | "label": "图像" 832 | } 833 | ], 834 | "outputs": [], 835 | "properties": { 836 | "Node name for S&R": "PreviewImage" 837 | }, 838 | "widgets_values": [] 839 | }, 840 | { 841 | "id": 122, 842 | "type": "PreviewImage", 843 | "pos": [ 844 | 3463.87353515625, 845 | -2010.6051025390625 846 | ], 847 | "size": [ 848 | 157.01315307617188, 849 | 119.74433135986328 850 | ], 851 | "flags": {}, 852 | "order": 20, 853 | "mode": 0, 854 | "inputs": [ 855 | { 856 | "name": "images", 857 | "type": "IMAGE", 858 | "link": 203, 859 | "label": "图像" 860 | } 861 | ], 862 | "outputs": [], 863 | "properties": { 864 | "Node name for S&R": "PreviewImage" 865 | }, 866 | "widgets_values": [] 867 | } 868 | ], 869 | "links": [ 870 | [ 871 | 170, 872 | 110, 873 | 0, 874 | 111, 875 | 0, 876 | "IMAGE" 877 | ], 878 | [ 879 | 201, 880 | 119, 881 | 0, 882 | 120, 883 | 0, 884 | "IMAGE" 885 | ], 886 | [ 887 | 203, 888 | 121, 889 | 0, 890 | 122, 891 | 0, 892 | "IMAGE" 893 | ], 894 | [ 895 | 212, 896 | 86, 897 | 0, 898 | 124, 899 | 0, 900 | "IMAGE" 901 | ], 902 | [ 903 | 213, 904 | 87, 905 | 0, 906 | 124, 907 | 1, 908 | "IMAGE" 909 | ], 910 | [ 911 | 214, 912 | 86, 913 | 1, 914 | 124, 915 | 2, 916 | "MASK" 917 | ], 918 | [ 919 | 
215, 920 | 87, 921 | 1, 922 | 124, 923 | 3, 924 | "MASK" 925 | ], 926 | [ 927 | 216, 928 | 124, 929 | 0, 930 | 88, 931 | 0, 932 | "IMAGE" 933 | ], 934 | [ 935 | 217, 936 | 124, 937 | 1, 938 | 110, 939 | 0, 940 | "MASK" 941 | ], 942 | [ 943 | 218, 944 | 124, 945 | 2, 946 | 119, 947 | 0, 948 | "MASK" 949 | ], 950 | [ 951 | 219, 952 | 124, 953 | 3, 954 | 121, 955 | 0, 956 | "MASK" 957 | ], 958 | [ 959 | 221, 960 | 133, 961 | 0, 962 | 128, 963 | 0, 964 | "IMAGE" 965 | ], 966 | [ 967 | 222, 968 | 132, 969 | 0, 970 | 129, 971 | 0, 972 | "IMAGE" 973 | ], 974 | [ 975 | 224, 976 | 127, 977 | 0, 978 | 131, 979 | 0, 980 | "IMAGE" 981 | ], 982 | [ 983 | 237, 984 | 134, 985 | 0, 986 | 137, 987 | 0, 988 | "IMAGE" 989 | ], 990 | [ 991 | 238, 992 | 135, 993 | 0, 994 | 137, 995 | 1, 996 | "IMAGE" 997 | ], 998 | [ 999 | 241, 1000 | 137, 1001 | 1, 1002 | 127, 1003 | 0, 1004 | "MASK" 1005 | ], 1006 | [ 1007 | 242, 1008 | 137, 1009 | 2, 1010 | 133, 1011 | 0, 1012 | "MASK" 1013 | ], 1014 | [ 1015 | 243, 1016 | 137, 1017 | 3, 1018 | 132, 1019 | 0, 1020 | "MASK" 1021 | ], 1022 | [ 1023 | 244, 1024 | 137, 1025 | 0, 1026 | 130, 1027 | 0, 1028 | "IMAGE" 1029 | ], 1030 | [ 1031 | 245, 1032 | 137, 1033 | 4, 1034 | 138, 1035 | 0, 1036 | "IMAGE" 1037 | ], 1038 | [ 1039 | 246, 1040 | 134, 1041 | 1, 1042 | 137, 1043 | 2, 1044 | "MASK" 1045 | ], 1046 | [ 1047 | 247, 1048 | 135, 1049 | 1, 1050 | 137, 1051 | 3, 1052 | "MASK" 1053 | ] 1054 | ], 1055 | "groups": [], 1056 | "config": {}, 1057 | "extra": { 1058 | "ds": { 1059 | "scale": 0.3914251301220421, 1060 | "offset": [ 1061 | -1514.3433008948414, 1062 | 2810.6052963330776 1063 | ] 1064 | } 1065 | }, 1066 | "version": 0.4 1067 | } -------------------------------------------------------------------------------- /example/aa.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /font/a.py: 
-------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /font/alcatraz+script.otf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yichengup/ComfyUI-YCNodes/fe8a728fb6f38282a0c9882671a2b34a41fe2bf4/font/alcatraz+script.otf -------------------------------------------------------------------------------- /py/Image.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | from PIL import Image 4 | 5 | class ImageMirror: 6 | """图像镜像节点 - 实现整体图像的水平或垂直镜像""" 7 | 8 | def __init__(self): 9 | pass 10 | 11 | @classmethod 12 | def INPUT_TYPES(cls): 13 | return { 14 | "required": { 15 | "image": ("IMAGE",), 16 | "mirror_type": (["horizontal", "vertical"],), 17 | } 18 | } 19 | 20 | RETURN_TYPES = ("IMAGE",) 21 | FUNCTION = "mirror_image" 22 | CATEGORY = "YCNode/Image" 23 | 24 | def mirror_image(self, image, mirror_type): 25 | # 确保输入是正确的格式 26 | if not isinstance(image, torch.Tensor): 27 | raise ValueError("输入必须是torch.Tensor类型") 28 | 29 | # 转换为numpy数组进行操作 30 | x = image.cpu().numpy() 31 | 32 | if mirror_type == "horizontal": 33 | # 水平镜像(左右翻转) 34 | x = np.flip(x, axis=2) 35 | else: 36 | # 垂直镜像(上下翻转) 37 | x = np.flip(x, axis=1) 38 | 39 | # 转回tensor并保持设备一致 40 | result = torch.from_numpy(x.copy()).to(image.device) 41 | return (result,) 42 | 43 | 44 | class ImageRotate: 45 | """图像旋转节点 - 实现90度、180度、270度旋转""" 46 | 47 | def __init__(self): 48 | pass 49 | 50 | @classmethod 51 | def INPUT_TYPES(cls): 52 | return { 53 | "required": { 54 | "image": ("IMAGE",), 55 | "angle": (["90", "180", "270"],), 56 | } 57 | } 58 | 59 | RETURN_TYPES = ("IMAGE",) 60 | FUNCTION = "rotate_image" 61 | CATEGORY = "YCNode/Image" 62 | 63 | def rotate_image(self, image, angle): 64 | # 确保输入是正确的格式 65 | if not isinstance(image, torch.Tensor): 66 | raise 
ValueError("输入必须是torch.Tensor类型") 67 | 68 | # 直接根据角度旋转 69 | if angle == "90": 70 | # 顺时针旋转90度 71 | result = torch.rot90(image, k=3, dims=[1, 2]) 72 | elif angle == "180": 73 | # 旋转180度 74 | result = torch.rot90(image, k=2, dims=[1, 2]) 75 | else: # 270度 76 | # 顺时针旋转270度 77 | result = torch.rot90(image, k=1, dims=[1, 2]) 78 | 79 | return (result,) 80 | 81 | 82 | class ImageMosaic: 83 | """图像马赛克节点 - 实现图像马赛克效果""" 84 | 85 | @classmethod 86 | def INPUT_TYPES(s): 87 | return { 88 | "required": { 89 | "image": ("IMAGE",), 90 | "mosaic_size": ("INT", { 91 | "default": 10, 92 | "min": 2, 93 | "max": 100, 94 | "step": 1, 95 | "display": "slider", 96 | }), 97 | }, 98 | } 99 | 100 | RETURN_TYPES = ("IMAGE",) 101 | FUNCTION = "apply_mosaic" 102 | CATEGORY = "YCNode/Image" 103 | 104 | def apply_mosaic(self, image, mosaic_size): 105 | # 将图像转换为numpy数组 106 | if isinstance(image, torch.Tensor): 107 | image = image.cpu().numpy() 108 | 109 | # 确保图像在0-1范围内 110 | if image.max() > 1.0: 111 | image = image / 255.0 112 | 113 | # 获取图像尺寸 114 | h, w = image.shape[1:3] 115 | 116 | # 计算每个马赛克块的尺寸 117 | block_h = h // mosaic_size 118 | block_w = w // mosaic_size 119 | 120 | # 创建新图像 121 | mosaic_image = image.copy() 122 | 123 | # 应用马赛克效果 124 | for i in range(0, h - block_h + 1, block_h): 125 | for j in range(0, w - block_w + 1, block_w): 126 | # 计算块的平均颜色 127 | block = image[:, i:i+block_h, j:j+block_w] 128 | mean_color = np.mean(block, axis=(1, 2), keepdims=True) 129 | 130 | # 将块填充为平均颜色 131 | mosaic_image[:, i:i+block_h, j:j+block_w] = mean_color 132 | 133 | # 转换回torch tensor 134 | return (torch.from_numpy(mosaic_image),) 135 | 136 | 137 | # 注册所有节点 138 | NODE_CLASS_MAPPINGS = { 139 | "ImageMirror": ImageMirror, 140 | "ImageRotate": ImageRotate, 141 | "ImageMosaic": ImageMosaic 142 | } 143 | 144 | # 显示名称映射 145 | NODE_DISPLAY_NAME_MAPPINGS = { 146 | "ImageMirror": "Image Mirror", 147 | "ImageRotate": "Image Rotate", 148 | "ImageMosaic": "Image Mosaic" 149 | } 150 | 
-------------------------------------------------------------------------------- /py/ImageIC.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | import cv2 4 | 5 | def screen_blend(mask1, mask2): 6 | """滤色模式混合两个遮罩""" 7 | return 255 - ((255 - mask1.astype(float)) * (255 - mask2.astype(float)) / 255) 8 | 9 | def resize_with_aspect_ratio(img, target_size, target_dim='width', interpolation=cv2.INTER_CUBIC): 10 | """等比例缩放图片""" 11 | h, w = img.shape[:2] 12 | if target_dim == 'width': 13 | aspect = h / w 14 | new_w = target_size 15 | new_h = int(aspect * new_w) 16 | else: 17 | aspect = w / h 18 | new_h = target_size 19 | new_w = int(aspect * new_h) 20 | return cv2.resize(img, (new_w, new_h), interpolation=interpolation) 21 | 22 | def create_canvas_with_image(canvas_size, image, position, bg_color, is_mask=False): 23 | """在画布上放置图片 24 | Args: 25 | canvas_size: (width, height) 26 | image: 输入图像 27 | position: 位置 (top/center/bottom) 28 | bg_color: 背景颜色 29 | is_mask: 是否是遮罩图像 30 | """ 31 | # 确保画布尺寸正确(宽度在前,高度在后) 32 | canvas_w, canvas_h = canvas_size 33 | 34 | # 获取图像尺寸,处理单通道或三通道情况 35 | if len(image.shape) == 2: 36 | img_h, img_w = image.shape 37 | else: 38 | img_h, img_w = image.shape[:2] 39 | 40 | # 创建画布 41 | if is_mask: 42 | canvas = np.zeros((canvas_h, canvas_w), dtype=np.uint8) 43 | else: 44 | canvas = np.full((canvas_h, canvas_w, 3), bg_color, dtype=np.uint8) 45 | 46 | # 计算水平居中位置 47 | x = (canvas_w - img_w) // 2 48 | 49 | # 根据位置计算垂直位置 50 | if position == "top": 51 | y = 0 52 | elif position == "bottom": 53 | y = canvas_h - img_h 54 | else: # center 55 | y = (canvas_h - img_h) // 2 56 | 57 | # 确保坐标不会为负 58 | x = max(0, min(x, canvas_w - img_w)) 59 | y = max(0, min(y, canvas_h - img_h)) 60 | 61 | # 放置图片 62 | try: 63 | # 确保图片数据类型正确 64 | if not is_mask: 65 | image = image.astype(np.uint8) 66 | canvas[y:y+img_h, x:x+img_w] = image 67 | except ValueError as e: 68 | print(f"Debug info: 
canvas_shape={canvas.shape}, image_shape={image.shape}, x={x}, y={y}, img_w={img_w}, img_h={img_h}, is_mask={is_mask}") 69 | raise e 70 | 71 | return canvas 72 | 73 | class ImageIC: 74 | @classmethod 75 | def INPUT_TYPES(s): 76 | return { 77 | "required": { 78 | "first_image": ("IMAGE",), 79 | "second_image": ("IMAGE",), 80 | "reference_edge": (["image1_width", "image1_height", "image2_width", "image2_height"], { 81 | "default": "image1_width", 82 | }), 83 | "combine_mode": (["horizontal", "vertical"], { 84 | "default": "horizontal", 85 | }), 86 | "second_image_scale": ("FLOAT", { 87 | "default": 1.0, 88 | "min": 0.1, 89 | "max": 2.0, 90 | "step": 0.1 91 | }), 92 | "second_image_position": (["top", "center", "bottom", "left", "right"], { 93 | "default": "center", 94 | }), 95 | "final_size": ("INT", { 96 | "default": 1024, 97 | "min": 64, 98 | "max": 8192, 99 | "step": 64 100 | }), 101 | "background_color": ("STRING", { 102 | "default": "#FFFFFF", 103 | "multiline": False, 104 | }), 105 | }, 106 | "optional": { 107 | "first_mask": ("MASK",), 108 | "second_mask": ("MASK",), 109 | } 110 | } 111 | 112 | RETURN_TYPES = ("IMAGE", "MASK", "MASK", "MASK", "TUPLE", "TUPLE") 113 | RETURN_NAMES = ("IMAGE", "MASK", "FIRST_MASK", "SECOND_MASK", "first_size", "second_size") 114 | FUNCTION = "combine_images" 115 | CATEGORY = "YCNode/Image" 116 | 117 | def combine_images(self, first_image, second_image, reference_edge, combine_mode, 118 | second_image_scale, second_image_position, final_size, background_color, 119 | first_mask=None, second_mask=None): 120 | # 获取输入图像并确保数据类型正确 121 | image1 = (first_image[0].detach().cpu().numpy() * 255).astype(np.uint8) 122 | image2 = (second_image[0].detach().cpu().numpy() * 255).astype(np.uint8) 123 | 124 | # 获取原始尺寸 125 | h1, w1 = image1.shape[:2] 126 | h2, w2 = image2.shape[:2] 127 | 128 | # 验证并处理第一个遮罩 129 | if first_mask is not None: 130 | mask1_h, mask1_w = first_mask[0].shape 131 | if mask1_h != h1 or mask1_w != w1: 132 | # 如果尺寸不匹配,创建新的空遮罩 133 
| base_mask = np.zeros((h1, w1), dtype=np.float32) 134 | else: 135 | base_mask = first_mask[0].numpy() 136 | else: 137 | base_mask = np.zeros((h1, w1), dtype=np.float32) 138 | 139 | # 验证并处理第二个遮罩 140 | if second_mask is not None: 141 | mask2_h, mask2_w = second_mask[0].shape 142 | if mask2_h != h2 or mask2_w != w2: 143 | # 如果尺寸不匹配,创建新的空遮罩 144 | second_img_mask = np.zeros((h2, w2), dtype=np.float32) 145 | else: 146 | second_img_mask = second_mask[0].numpy() 147 | else: 148 | second_img_mask = np.zeros((h2, w2), dtype=np.float32) 149 | 150 | # 确定基准图和第二张图 151 | if reference_edge.startswith('image1'): 152 | base_image = image1 153 | second_img = image2 154 | target_size = w1 if reference_edge.endswith('width') else h1 155 | else: 156 | base_image = image2 157 | # 交换遮罩 158 | base_mask, second_img_mask = second_img_mask, base_mask 159 | second_img = image1 160 | target_size = w2 if reference_edge.endswith('width') else h2 161 | 162 | # 将遮罩转换为0-255范围用于处理 163 | base_mask_255 = (base_mask * 255).astype(np.uint8) 164 | second_img_mask_255 = (second_img_mask * 255).astype(np.uint8) 165 | 166 | # 转换背景颜色 167 | if background_color.startswith('#'): 168 | bg_color = tuple(int(background_color[i:i+2], 16) for i in (5, 3, 1))[::-1] 169 | 170 | # 等比例缩放图片 171 | target_dim = 'width' if reference_edge.endswith('width') else 'height' 172 | scaled_second = resize_with_aspect_ratio(second_img, target_size, target_dim) 173 | scaled_second_mask = resize_with_aspect_ratio(second_img_mask, target_size, target_dim, cv2.INTER_NEAREST) 174 | 175 | # 第二张图片额外缩放 176 | if second_image_scale != 1.0: 177 | h, w = scaled_second.shape[:2] 178 | new_w = int(w * second_image_scale) 179 | new_h = int(h * second_image_scale) 180 | scaled_second = cv2.resize(scaled_second, (new_w, new_h)) 181 | scaled_second_mask = cv2.resize(scaled_second_mask, (new_w, new_h), interpolation=cv2.INTER_NEAREST) 182 | 183 | # 将缩放后的遮罩转换为255范围 184 | scaled_second_mask_255 = (scaled_second_mask * 255).astype(np.uint8) 185 | 186 | # 
获取最终尺寸 187 | base_h, base_w = base_image.shape[:2] 188 | second_h, second_w = scaled_second.shape[:2] 189 | 190 | # 创建最终画布 191 | if combine_mode == "horizontal": 192 | canvas_w = base_w + second_w 193 | canvas_h = max(base_h, second_h) 194 | else: 195 | canvas_w = max(base_w, second_w) 196 | canvas_h = base_h + second_h 197 | 198 | # 创建画布 199 | final_canvas = np.full((canvas_h, canvas_w, 3), bg_color, dtype=np.uint8) 200 | final_mask = np.zeros((canvas_h, canvas_w), dtype=np.uint8) 201 | 202 | # 放置基准图(总是在左边或顶部) 203 | y1 = (canvas_h - base_h) // 2 if combine_mode == "horizontal" else 0 204 | x1 = 0 205 | final_canvas[y1:y1+base_h, x1:x1+base_w] = base_image 206 | final_mask[y1:y1+base_h, x1:x1+base_w] = base_mask_255 207 | 208 | # 创建第二张图片的画布 209 | if combine_mode == "horizontal": 210 | second_canvas_size = (second_w, canvas_h) 211 | x_offset = base_w 212 | y_offset = 0 213 | else: 214 | second_canvas_size = (canvas_w, second_h) 215 | x_offset = 0 216 | y_offset = base_h 217 | 218 | # 处理左右位置选项 219 | if second_image_position in ["left", "right"]: 220 | # 如果是左右位置,需要调整水平偏移 221 | if second_image_position == "left": 222 | x = 0 223 | else: # right 224 | x = second_canvas_size[0] - scaled_second.shape[1] 225 | # 垂直居中 226 | y = (second_canvas_size[1] - scaled_second.shape[0]) // 2 227 | else: 228 | # 原有的上中下位置逻辑 229 | x = (second_canvas_size[0] - scaled_second.shape[1]) // 2 230 | if second_image_position == "top": 231 | y = 0 232 | elif second_image_position == "bottom": 233 | y = second_canvas_size[1] - scaled_second.shape[0] 234 | else: # center 235 | y = (second_canvas_size[1] - scaled_second.shape[0]) // 2 236 | 237 | # 创建第二张图片的画布 238 | second_canvas = np.full((second_canvas_size[1], second_canvas_size[0], 3), bg_color, dtype=np.uint8) 239 | second_mask_canvas = np.zeros((second_canvas_size[1], second_canvas_size[0]), dtype=np.uint8) 240 | 241 | # 放置第二张图片 242 | second_canvas[y:y+scaled_second.shape[0], x:x+scaled_second.shape[1]] = scaled_second 243 | 
second_mask_canvas[y:y+scaled_second.shape[0], x:x+scaled_second.shape[1]] = scaled_second_mask_255 244 | 245 | # 将第二张图片放入最终画布 246 | if combine_mode == "horizontal": 247 | final_canvas[:, x_offset:] = second_canvas 248 | # 使用滤色模式合并遮罩 249 | second_mask_area = np.zeros_like(final_mask) 250 | second_mask_area[:, x_offset:] = second_mask_canvas 251 | final_mask = screen_blend(final_mask, second_mask_area).astype(np.uint8) 252 | else: 253 | final_canvas[y_offset:, :] = second_canvas 254 | # 使用滤色模式合并遮罩 255 | second_mask_area = np.zeros_like(final_mask) 256 | second_mask_area[y_offset:, :] = second_mask_canvas 257 | final_mask = screen_blend(final_mask, second_mask_area).astype(np.uint8) 258 | 259 | # 最终尺寸调整 260 | if combine_mode == "horizontal": 261 | aspect = canvas_h / canvas_w 262 | new_w = final_size 263 | new_h = int(aspect * new_w) 264 | else: 265 | aspect = canvas_w / canvas_h 266 | new_h = final_size 267 | new_w = int(aspect * new_h) 268 | 269 | # 调整最终画布尺寸 270 | final_canvas = cv2.resize(final_canvas, (new_w, new_h)) 271 | final_mask = cv2.resize(final_mask, (new_w, new_h), interpolation=cv2.INTER_NEAREST) 272 | 273 | # 创建分离的遮罩 274 | # 为第一张图片创建遮罩画布 275 | first_separate_mask = np.zeros((new_h, new_w), dtype=np.uint8) 276 | # 为第二张图片创建遮罩画布 277 | second_separate_mask = np.zeros((new_h, new_w), dtype=np.uint8) 278 | 279 | # 计算缩放比例 280 | scale_w = new_w / canvas_w 281 | scale_h = new_h / canvas_h 282 | 283 | # 计算第一张图片在最终画布中的位置和尺寸 284 | x1_scaled = int(x1 * scale_w) 285 | y1_scaled = int(y1 * scale_h) 286 | w1_scaled = int(base_w * scale_w) 287 | h1_scaled = int(base_h * scale_h) 288 | 289 | # 计算第二张图片在最终画布中的位置和尺寸 290 | x2_scaled = int(x_offset * scale_w) 291 | y2_scaled = int(y_offset * scale_h) 292 | w2_scaled = int(second_canvas_size[0] * scale_w) 293 | h2_scaled = int(second_canvas_size[1] * scale_h) 294 | 295 | # 填充分离的遮罩 296 | first_separate_mask[y1_scaled:y1_scaled+h1_scaled, x1_scaled:x1_scaled+w1_scaled] = 255 297 | 
second_separate_mask[y2_scaled:y2_scaled+h2_scaled, x2_scaled:x2_scaled+w2_scaled] = 255 298 | 299 | # 转换为 torch tensor 300 | final_canvas = final_canvas.astype(np.float32) / 255.0 301 | final_canvas = torch.from_numpy(final_canvas)[None,] 302 | 303 | # 将遮罩转换为float32类型,保持0-1范围 304 | final_mask = final_mask.astype(np.float32) / 255.0 305 | first_separate_mask = first_separate_mask.astype(np.float32) / 255.0 306 | second_separate_mask = second_separate_mask.astype(np.float32) / 255.0 307 | 308 | # 转换为tensor 309 | final_mask = torch.from_numpy(final_mask)[None,] 310 | first_separate_mask = torch.from_numpy(first_separate_mask)[None,] 311 | second_separate_mask = torch.from_numpy(second_separate_mask)[None,] 312 | 313 | # 返回整合后的尺寸信息 314 | first_size = (w1, h1) 315 | second_size = (w2, h2) 316 | 317 | return (final_canvas, final_mask, first_separate_mask, second_separate_mask, first_size, second_size) 318 | 319 | NODE_CLASS_MAPPINGS = { 320 | "ImageIC": ImageIC 321 | } 322 | 323 | NODE_DISPLAY_NAME_MAPPINGS = { 324 | "ImageIC": "Image IC" 325 | } 326 | -------------------------------------------------------------------------------- /py/ImageICAdvanced.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | import cv2 4 | 5 | def resize_with_aspect_ratio(img, target_size, target_dim='width', interpolation=cv2.INTER_CUBIC): 6 | """等比例缩放图片""" 7 | h, w = img.shape[:2] 8 | if target_dim == 'width': 9 | aspect = h / w 10 | new_w = target_size 11 | new_h = int(aspect * new_w) 12 | else: 13 | aspect = w / h 14 | new_h = target_size 15 | new_w = int(aspect * new_h) 16 | return cv2.resize(img, (new_w, new_h), interpolation=interpolation) 17 | 18 | def find_content_bounds(mask): 19 | """找到内容的边界""" 20 | if not mask.any(): # 如果遮罩全为0 21 | return (0, 0, mask.shape[1], mask.shape[0]) 22 | 23 | rows = np.any(mask, axis=1) 24 | cols = np.any(mask, axis=0) 25 | y_min, y_max = np.where(rows)[0][[0, -1]] 26 | x_min, 
x_max = np.where(cols)[0][[0, -1]] 27 | return x_min, y_min, x_max + 1, y_max + 1 28 | 29 | def safe_divide(a, b, default=1): 30 | """安全除法,避免除以0""" 31 | return a / b if b != 0 else default 32 | 33 | def screen_blend(mask1, mask2): 34 | """滤色模式混合两个遮罩""" 35 | return 255 - ((255 - mask1.astype(float)) * (255 - mask2.astype(float)) / 255) 36 | 37 | class ImageICAdvanced: 38 | @classmethod 39 | def INPUT_TYPES(s): 40 | return { 41 | "required": { 42 | "first_image": ("IMAGE",), 43 | "second_image": ("IMAGE",), 44 | "reference_edge": (["image1_width", "image1_height", "image2_width", "image2_height"], { 45 | "default": "image1_width", 46 | }), 47 | "combine_mode": (["horizontal", "vertical", "overlay"], { 48 | "default": "horizontal", 49 | }), 50 | "second_image_scale": ("FLOAT", { 51 | "default": 1.0, 52 | "min": 0.1, 53 | "max": 2.0, 54 | "step": 0.1 55 | }), 56 | "position_type": (["top", "center", "bottom", "left", "right"], { 57 | "default": "center", 58 | }), 59 | "x_position": ("FLOAT", { 60 | "default": 50.0, 61 | "min": 0.0, 62 | "max": 100.0, 63 | "step": 0.1, 64 | "display": "slider" 65 | }), 66 | "y_position": ("FLOAT", { 67 | "default": 50.0, 68 | "min": 0.0, 69 | "max": 100.0, 70 | "step": 0.1, 71 | "display": "slider" 72 | }), 73 | "final_size": ("INT", { 74 | "default": 1024, 75 | "min": 64, 76 | "max": 8192, 77 | "step": 64 78 | }), 79 | "background_color": ("STRING", { 80 | "default": "#FFFFFF", 81 | "multiline": False, 82 | }), 83 | }, 84 | "optional": { 85 | "first_mask": ("MASK",), 86 | "second_mask": ("MASK",), 87 | } 88 | } 89 | 90 | RETURN_TYPES = ("IMAGE", "MASK", "MASK", "MASK", "IMAGE", "TUPLE", "TUPLE") 91 | RETURN_NAMES = ("IMAGE", "MASK", "FIRST_MASK", "SECOND_MASK", "MAIN_IMAGE", "first_size", "second_size") 92 | FUNCTION = "combine_images" 93 | CATEGORY = "YCNode/Image" 94 | 95 | def combine_images(self, first_image, second_image, reference_edge, combine_mode, 96 | second_image_scale, position_type, x_position, y_position, final_size, 
background_color, 97 | first_mask=None, second_mask=None): 98 | # 获取输入图像并确保数据类型正确 99 | image1 = (first_image[0].detach().cpu().numpy() * 255).astype(np.uint8) 100 | image2 = (second_image[0].detach().cpu().numpy() * 255).astype(np.uint8) 101 | 102 | # 获取原始尺寸 103 | h1, w1 = image1.shape[:2] 104 | h2, w2 = image2.shape[:2] 105 | 106 | # 确定主图和次图 107 | is_first_main = reference_edge.startswith('image1') 108 | main_image = image1 if is_first_main else image2 109 | second_img = image2 if is_first_main else image1 110 | main_h, main_w = main_image.shape[:2] 111 | 112 | # 验证并处理第一个遮罩 113 | if first_mask is not None: 114 | mask1_h, mask1_w = first_mask[0].shape 115 | if mask1_h != h1 or mask1_w != w1: 116 | # 如果尺寸不匹配,创建新的空遮罩 117 | first_mask = np.zeros((h1, w1), dtype=np.float32) 118 | else: 119 | first_mask = first_mask[0].numpy() 120 | else: 121 | first_mask = np.zeros((h1, w1), dtype=np.float32) 122 | 123 | # 验证并处理第二个遮罩 124 | if second_mask is not None: 125 | mask2_h, mask2_w = second_mask[0].shape 126 | if mask2_h != h2 or mask2_w != w2: 127 | # 如果尺寸不匹配,创建新的空遮罩 128 | second_mask = np.zeros((h2, w2), dtype=np.float32) 129 | else: 130 | second_mask = second_mask[0].numpy() 131 | else: 132 | second_mask = np.zeros((h2, w2), dtype=np.float32) 133 | 134 | # 确定主遮罩和次遮罩 135 | main_mask = first_mask if is_first_main else second_mask 136 | second_mask = second_mask if is_first_main else first_mask 137 | 138 | # 将遮罩转换为0-255范围用于处理 139 | first_mask_255 = (first_mask * 255).astype(np.uint8) 140 | second_mask_255 = (second_mask * 255).astype(np.uint8) 141 | 142 | # 转换背景颜色 143 | if background_color.startswith('#'): 144 | bg_color = tuple(int(background_color[i:i+2], 16) for i in (5, 3, 1))[::-1] 145 | 146 | # 根据基准边计算目标尺寸 147 | target_size = main_w if reference_edge.endswith('width') else main_h 148 | target_dim = 'width' if reference_edge.endswith('width') else 'height' 149 | 150 | # 等比例缩放第二张图片 151 | scaled_second = resize_with_aspect_ratio(second_img, target_size, target_dim) 152 | 
scaled_second_mask = resize_with_aspect_ratio(second_mask, target_size, target_dim, cv2.INTER_LINEAR) 153 | 154 | # 第二张图片额外缩放 155 | if second_image_scale != 1.0: 156 | h, w = scaled_second.shape[:2] 157 | new_w = int(w * second_image_scale) 158 | new_h = int(h * second_image_scale) 159 | scaled_second = cv2.resize(scaled_second, (new_w, new_h)) 160 | scaled_second_mask = cv2.resize(scaled_second_mask, (new_w, new_h), interpolation=cv2.INTER_LINEAR) 161 | 162 | # 将缩放后的遮罩转换为255范围 163 | scaled_second_mask_255 = (scaled_second_mask * 255).astype(np.uint8) 164 | 165 | # 创建画布 166 | if combine_mode == "overlay": 167 | canvas_w = main_w + scaled_second.shape[1] 168 | canvas_h = max(main_h, scaled_second.shape[0]) 169 | elif combine_mode == "horizontal": 170 | canvas_w = main_w + scaled_second.shape[1] 171 | canvas_h = max(main_h, scaled_second.shape[0]) 172 | else: # vertical 173 | canvas_w = max(main_w, scaled_second.shape[1]) 174 | canvas_h = main_h + scaled_second.shape[0] 175 | 176 | # 创建画布和遮罩 177 | final_canvas = np.full((canvas_h, canvas_w, 3), bg_color, dtype=np.uint8) 178 | final_mask = np.zeros((canvas_h, canvas_w), dtype=np.uint8) 179 | first_separate_mask = np.zeros((canvas_h, canvas_w), dtype=np.uint8) 180 | second_separate_mask = np.zeros((canvas_h, canvas_w), dtype=np.uint8) 181 | 182 | # 放置主图(总是在左边或顶部) 183 | y1 = (canvas_h - main_h) // 2 if combine_mode == "horizontal" else 0 184 | x1 = 0 185 | final_canvas[y1:y1+main_h, x1:x1+main_w] = main_image 186 | 187 | # 创建主图区域遮罩 188 | main_region_mask = np.zeros((canvas_h, canvas_w), dtype=np.uint8) 189 | main_region_mask[y1:y1+main_h, x1:x1+main_w] = 255 190 | if is_first_main: 191 | first_separate_mask = main_region_mask.copy() 192 | final_mask[y1:y1+main_h, x1:x1+main_w] = first_mask_255 193 | else: 194 | second_separate_mask = main_region_mask.copy() 195 | final_mask[y1:y1+main_h, x1:x1+main_w] = second_mask_255 196 | 197 | # 放置第二张图片 198 | h2, w2 = scaled_second.shape[:2] 199 | if combine_mode == "overlay": 200 | 
# 使用百分比位置 201 | x2 = int((canvas_w - w2) * x_position / 100) 202 | y2 = int((canvas_h - h2) * y_position / 100) 203 | elif combine_mode == "horizontal": 204 | x2 = main_w if position_type != "left" else 0 205 | if position_type == "top": 206 | y2 = 0 207 | elif position_type == "bottom": 208 | y2 = canvas_h - h2 209 | else: # center 210 | y2 = (canvas_h - h2) // 2 211 | else: # vertical 212 | if position_type == "left": 213 | x2 = 0 214 | elif position_type == "right": 215 | x2 = canvas_w - w2 216 | else: # center 217 | x2 = (canvas_w - w2) // 2 218 | y2 = main_h 219 | 220 | # 确保坐标不会超出画布范围 221 | x2 = max(0, min(x2, canvas_w - w2)) 222 | y2 = max(0, min(y2, canvas_h - h2)) 223 | 224 | # 将第二张图片放入画布 225 | final_canvas[y2:y2+h2, x2:x2+w2] = scaled_second 226 | 227 | # 创建第二张图区域遮罩 228 | second_region_mask = np.zeros((canvas_h, canvas_w), dtype=np.uint8) 229 | second_region_mask[y2:y2+h2, x2:x2+w2] = 255 230 | if is_first_main: 231 | second_separate_mask = second_region_mask.copy() 232 | # 使用滤色模式合并遮罩 233 | second_mask_area = np.zeros_like(final_mask) 234 | second_mask_area[y2:y2+h2, x2:x2+w2] = scaled_second_mask_255 235 | final_mask = screen_blend(final_mask, second_mask_area).astype(np.uint8) 236 | else: 237 | first_separate_mask = second_region_mask.copy() 238 | # 使用滤色模式合并遮罩 239 | first_mask_area = np.zeros_like(final_mask) 240 | first_mask_area[y2:y2+h2, x2:x2+w2] = scaled_second_mask_255 241 | final_mask = screen_blend(final_mask, first_mask_area).astype(np.uint8) 242 | 243 | # 找到有效内容区域 244 | content_mask = np.zeros((canvas_h, canvas_w), dtype=np.uint8) 245 | content_mask[final_mask > 0] = 255 # 遮罩区域 246 | content_mask[np.any(final_canvas != bg_color, axis=2)] = 255 # 非背景色区域 247 | 248 | # 获取有效区域边界 249 | x_min, y_min, x_max, y_max = find_content_bounds(content_mask) 250 | 251 | # 确保边界有效 252 | x_min = min(x_min, canvas_w - 1) 253 | y_min = min(y_min, canvas_h - 1) 254 | x_max = max(x_min + 1, min(x_max, canvas_w)) 255 | y_max = max(y_min + 1, min(y_max, canvas_h)) 256 
| 257 | # 裁剪到有效区域 258 | final_canvas = final_canvas[y_min:y_max, x_min:x_max] 259 | final_mask = final_mask[y_min:y_max, x_min:x_max] 260 | first_separate_mask = first_separate_mask[y_min:y_max, x_min:x_max] 261 | second_separate_mask = second_separate_mask[y_min:y_max, x_min:x_max] 262 | 263 | # 计算最终尺寸 264 | h, w = final_canvas.shape[:2] 265 | if w > h: 266 | aspect = safe_divide(h, w) 267 | new_w = final_size 268 | new_h = int(aspect * new_w) 269 | else: 270 | aspect = safe_divide(w, h) 271 | new_h = final_size 272 | new_w = int(aspect * new_h) 273 | 274 | # 确保最小尺寸 275 | new_w = max(1, new_w) 276 | new_h = max(1, new_h) 277 | 278 | # 计算缩放比例 279 | scale_x = safe_divide(new_w, w) 280 | scale_y = safe_divide(new_h, h) 281 | 282 | # 计算主图在最终画布中的位置和尺寸,保持原始比例 283 | main_aspect = main_h / main_w 284 | if main_w > main_h: 285 | main_final_w = min(new_w, int(new_h / main_aspect)) 286 | main_final_h = int(main_final_w * main_aspect) 287 | else: 288 | main_final_h = min(new_h, int(new_w * main_aspect)) 289 | main_final_w = int(main_final_h / main_aspect) 290 | 291 | # 计算主图位置,保持在画布内 292 | main_x = int((x1 - x_min) * scale_x) 293 | main_y = int((y1 - y_min) * scale_y) 294 | 295 | # 确保主图位置有效 296 | main_x = min(max(0, main_x), new_w - main_final_w) 297 | main_y = min(max(0, main_y), new_h - main_final_h) 298 | 299 | # 准备主图输出(保持原始比例) 300 | if main_final_w > 0 and main_final_h > 0: 301 | main_region = cv2.resize(main_image, (main_final_w, main_final_h), interpolation=cv2.INTER_LANCZOS4) 302 | main_output = np.full((new_h, new_w, 3), bg_color, dtype=np.uint8) 303 | main_output[main_y:main_y+main_final_h, main_x:main_x+main_final_w] = main_region 304 | else: 305 | main_output = np.full((new_h, new_w, 3), bg_color, dtype=np.uint8) 306 | 307 | # 调整所有图像的大小 308 | final_canvas = cv2.resize(final_canvas, (new_w, new_h)) 309 | final_mask = cv2.resize(final_mask, (new_w, new_h), interpolation=cv2.INTER_LINEAR) 310 | first_separate_mask = cv2.resize(first_separate_mask, (new_w, new_h), 
interpolation=cv2.INTER_LINEAR) 311 | second_separate_mask = cv2.resize(second_separate_mask, (new_w, new_h), interpolation=cv2.INTER_LINEAR) 312 | 313 | # 转换为tensor格式 314 | final_canvas = final_canvas.astype(np.float32) / 255.0 315 | final_canvas = torch.from_numpy(final_canvas)[None,] 316 | 317 | # 将遮罩转换为float32类型,保持0-1范围 318 | final_mask = final_mask.astype(np.float32) / 255.0 319 | first_separate_mask = first_separate_mask.astype(np.float32) / 255.0 320 | second_separate_mask = second_separate_mask.astype(np.float32) / 255.0 321 | 322 | # 转换为tensor 323 | final_mask = torch.from_numpy(final_mask)[None,] 324 | first_separate_mask = torch.from_numpy(first_separate_mask)[None,] 325 | second_separate_mask = torch.from_numpy(second_separate_mask)[None,] 326 | main_output = torch.from_numpy(main_output.astype(np.float32) / 255.0)[None,] 327 | 328 | # 返回原始尺寸信息 329 | first_size = (w1, h1) 330 | second_size = (w2, h2) 331 | 332 | return (final_canvas, final_mask, first_separate_mask, second_separate_mask, main_output, first_size, second_size) 333 | 334 | NODE_CLASS_MAPPINGS = { 335 | "ImageICAdvanced": ImageICAdvanced 336 | } 337 | 338 | NODE_DISPLAY_NAME_MAPPINGS = { 339 | "ImageICAdvanced": "Image IC Advanced" 340 | } 341 | -------------------------------------------------------------------------------- /py/ImageLoaderAdvanced.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | import cv2 4 | from PIL import Image, ImageOps, ImageSequence 5 | import os 6 | from pathlib import Path 7 | import folder_paths 8 | import hashlib 9 | 10 | class ImageLoaderAdvanced: 11 | @classmethod 12 | def get_all_files(cls): 13 | """获取所有可用文件的列表""" 14 | input_dir = folder_paths.get_input_directory() 15 | files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))] 16 | return sorted(files) 17 | 18 | @classmethod 19 | def INPUT_TYPES(s): 20 | return { 21 | "required": { 22 | "image": 
(s.get_all_files(), { 23 | "image_upload": True, 24 | }), 25 | "mask_mode": (["none", "alpha"], {"default": "alpha"}), 26 | "mask_blur": ("FLOAT", { 27 | "default": 0.0, 28 | "min": 0.0, 29 | "max": 10.0, 30 | "step": 0.1, 31 | "display": "slider" 32 | }), 33 | "mask_strength": ("FLOAT", { 34 | "default": 1.0, 35 | "min": 0.0, 36 | "max": 1.0, 37 | "step": 0.01, 38 | "display": "slider" 39 | }), 40 | }, 41 | } 42 | 43 | RETURN_TYPES = ("IMAGE", "MASK") 44 | FUNCTION = "load_image" 45 | CATEGORY = "YCNode/Image" 46 | 47 | INPUT_IS_LIST = False 48 | OUTPUT_IS_LIST = (False, False) 49 | OUTPUT_NODE = True 50 | 51 | def load_image(self, image, mask_mode="alpha", mask_blur=0.0, mask_strength=1.0): 52 | try: 53 | # 获取图片路径 54 | image_path = folder_paths.get_annotated_filepath(image) 55 | if not os.path.exists(image_path): 56 | raise FileNotFoundError(f"Image file not found: {image_path}") 57 | 58 | # 使用PIL加载图像 59 | img = Image.open(image_path) 60 | 61 | output_images = [] 62 | output_masks = [] 63 | w, h = None, None 64 | 65 | # 处理图像序列 66 | for i in ImageSequence.Iterator(img): 67 | i = ImageOps.exif_transpose(i) 68 | 69 | # 处理特殊模式 70 | if i.mode == 'I': 71 | i = i.point(lambda i: i * (1 / 255)) 72 | 73 | # 转换为RGB 74 | image = i.convert("RGB") 75 | 76 | # 记录第一帧的尺寸 77 | if len(output_images) == 0: 78 | w = image.size[0] 79 | h = image.size[1] 80 | 81 | # 确保所有帧尺寸一致 82 | if image.size[0] != w or image.size[1] != h: 83 | continue 84 | 85 | # 转换图像 86 | image = np.array(image).astype(np.float32) / 255.0 87 | image = torch.from_numpy(image)[None,] 88 | 89 | # 处理遮罩 90 | if mask_mode == "alpha" and 'A' in i.getbands(): 91 | # 获取alpha通道 92 | mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0 93 | # 应用遮罩强度 94 | mask = mask * mask_strength 95 | # 应用高斯模糊 96 | if mask_blur > 0: 97 | kernel_size = int(mask_blur * 2) * 2 + 1 98 | mask = cv2.GaussianBlur(mask, (kernel_size, kernel_size), mask_blur) 99 | else: 100 | mask = np.ones((h, w), dtype=np.float32) 101 | 102 | mask = 
torch.from_numpy(mask) 103 | 104 | output_images.append(image) 105 | output_masks.append(mask.unsqueeze(0)) 106 | 107 | # 处理多帧图像 108 | if len(output_images) > 1 and img.format not in ['MPO']: 109 | output_image = torch.cat(output_images, dim=0) 110 | output_mask = torch.cat(output_masks, dim=0) 111 | else: 112 | output_image = output_images[0] 113 | output_mask = output_masks[0] 114 | 115 | return (output_image, output_mask) 116 | 117 | except Exception as e: 118 | print(f"Error loading image {image}: {str(e)}") 119 | raise e 120 | 121 | @classmethod 122 | def VALIDATE_INPUTS(s, image): 123 | if not folder_paths.exists_annotated_filepath(image): 124 | return f"Invalid image file: {image}" 125 | return True 126 | 127 | @classmethod 128 | def IS_CHANGED(s, image): 129 | image_path = folder_paths.get_annotated_filepath(image) 130 | m = hashlib.sha256() 131 | with open(image_path, 'rb') as f: 132 | m.update(f.read()) 133 | return m.digest().hex() 134 | 135 | NODE_CLASS_MAPPINGS = { 136 | "ImageLoaderAdvanced": ImageLoaderAdvanced 137 | } 138 | 139 | NODE_DISPLAY_NAME_MAPPINGS = { 140 | "ImageLoaderAdvanced": "Load Image Advanced" 141 | } 142 | -------------------------------------------------------------------------------- /py/ImageUpscaleTiled.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | from PIL import Image 4 | import os 5 | import folder_paths 6 | from spandrel import ModelLoader 7 | import comfy.utils 8 | import model_management 9 | 10 | class ImageUpscaleTiled: 11 | @classmethod 12 | def INPUT_TYPES(s): 13 | return { 14 | "required": { 15 | "image": ("IMAGE",), 16 | "model_name": (folder_paths.get_filename_list("upscale_models"), ), 17 | "rows": ("INT", { "default": 2, "min": 1, "max": 8, "step": 1 }), 18 | "cols": ("INT", { "default": 2, "min": 1, "max": 8, "step": 1 }), 19 | "overlap": ("FLOAT", { "default": 0.1, "min": 0, "max": 0.5, "step": 0.01 }), 20 | } 21 | } 22 | 23 | 
RETURN_TYPES = ("IMAGE",) 24 | FUNCTION = "upscale_tiled" 25 | CATEGORY = "YCNode/Image" 26 | 27 | def upscale_tiled(self, image, model_name, rows, cols, overlap): 28 | device = model_management.get_torch_device() 29 | 30 | # 1. 加载放大模型 31 | model_path = folder_paths.get_full_path("upscale_models", model_name) 32 | sd = comfy.utils.load_torch_file(model_path, safe_load=True) 33 | model = ModelLoader().load_from_state_dict(sd) 34 | 35 | if not hasattr(model, 'model'): 36 | raise Exception("Invalid upscale model") 37 | 38 | upscale_model = model.model.eval() 39 | 40 | # 2. 计算分块参数 41 | h, w = image.shape[1:3] 42 | tile_h = h // rows 43 | tile_w = w // cols 44 | h = tile_h * rows 45 | w = tile_w * cols 46 | 47 | overlap_h = int(tile_h * overlap) 48 | overlap_w = int(tile_w * overlap) 49 | 50 | # 限制最大重叠为tile大小的一半 51 | overlap_h = min(tile_h // 2, overlap_h) 52 | overlap_w = min(tile_w // 2, overlap_w) 53 | 54 | if rows == 1: 55 | overlap_h = 0 56 | if cols == 1: 57 | overlap_w = 0 58 | 59 | # 3. 内存管理 60 | memory_required = model_management.module_size(upscale_model) 61 | memory_required += (tile_h * tile_w * 3) * image.element_size() * model.scale * model.scale * 2 62 | model_management.free_memory(memory_required, device) 63 | 64 | # 4. 移动模型到设备 65 | upscale_model = upscale_model.to(device) 66 | 67 | try: 68 | # 创建进度条 69 | total_tiles = rows * cols 70 | pbar = comfy.utils.ProgressBar(total_tiles) 71 | 72 | # 5. 
分块处理 73 | tiles = [] 74 | for i in range(rows): 75 | for j in range(cols): 76 | y1 = i * tile_h 77 | x1 = j * tile_w 78 | 79 | if i > 0: 80 | y1 -= overlap_h 81 | if j > 0: 82 | x1 -= overlap_w 83 | 84 | y2 = y1 + tile_h + overlap_h 85 | x2 = x1 + tile_w + overlap_w 86 | 87 | if y2 > h: 88 | y2 = h 89 | y1 = y2 - tile_h - overlap_h 90 | if x2 > w: 91 | x2 = w 92 | x1 = x2 - tile_w - overlap_w 93 | 94 | # 提取tile 95 | tile = image[:, y1:y2, x1:x2, :] 96 | 97 | # 转换格式并放大 98 | tile = tile.movedim(-1,-3).to(device) 99 | with torch.no_grad(): 100 | upscaled_tile = upscale_model(tile) 101 | tiles.append(upscaled_tile) 102 | 103 | # 更新进度条 104 | pbar.update(1) 105 | 106 | # 6. 合并tiles 107 | tiles = torch.cat(tiles, dim=0) 108 | 109 | # 7. 计算输出尺寸 110 | out_h = h * model.scale 111 | out_w = w * model.scale 112 | overlap_h_up = overlap_h * model.scale 113 | overlap_w_up = overlap_w * model.scale 114 | tile_h_up = tile_h * model.scale 115 | tile_w_up = tile_w * model.scale 116 | 117 | # 8. 创建输出tensor 118 | out = torch.zeros((1, tiles.shape[1], out_h, out_w), device=device, dtype=tiles.dtype) 119 | 120 | # 9. 
合并tiles 121 | idx = 0 122 | for i in range(rows): 123 | for j in range(cols): 124 | y1 = i * tile_h_up 125 | x1 = j * tile_w_up 126 | 127 | if i > 0: 128 | y1 -= overlap_h_up 129 | if j > 0: 130 | x1 -= overlap_w_up 131 | 132 | y2 = y1 + tile_h_up + overlap_h_up 133 | x2 = x1 + tile_w_up + overlap_w_up 134 | 135 | if y2 > out_h: 136 | y2 = out_h 137 | y1 = y2 - tile_h_up - overlap_h_up 138 | if x2 > out_w: 139 | x2 = out_w 140 | x1 = x2 - tile_w_up - overlap_w_up 141 | 142 | # 创建渐变mask 143 | mask = torch.ones((1, 1, tile_h_up+overlap_h_up, tile_w_up+overlap_w_up), device=device, dtype=tiles.dtype) 144 | if i > 0 and overlap_h_up > 0: 145 | mask[:, :, :overlap_h_up, :] *= torch.linspace(0, 1, overlap_h_up, device=device, dtype=tiles.dtype).view(1, 1, -1, 1) 146 | if j > 0 and overlap_w_up > 0: 147 | mask[:, :, :, :overlap_w_up] *= torch.linspace(0, 1, overlap_w_up, device=device, dtype=tiles.dtype).view(1, 1, 1, -1) 148 | 149 | # 应用mask 150 | tile = tiles[idx:idx+1] # 保持 NCHW 格式 151 | mask = mask.repeat(1, tile.shape[1], 1, 1) 152 | tile = tile * mask 153 | out[:, :, y1:y2, x1:x2] = out[:, :, y1:y2, x1:x2] * (1 - mask) + tile 154 | idx += 1 155 | 156 | # 最后转换回原始格式 157 | out = out.movedim(1, -1) # NCHW -> NHWC 158 | 159 | finally: 160 | # 10. 
清理资源 161 | upscale_model.to("cpu") 162 | if str(device) == 'cuda': 163 | torch.cuda.empty_cache() 164 | 165 | return (out,) 166 | 167 | NODE_CLASS_MAPPINGS = { 168 | "ImageUpscaleTiled": ImageUpscaleTiled 169 | } 170 | 171 | NODE_DISPLAY_NAME_MAPPINGS = { 172 | "ImageUpscaleTiled": "Image Upscale Tiled" 173 | } 174 | -------------------------------------------------------------------------------- /py/ImagesFromFolder.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | import numpy as np 4 | import random 5 | from PIL import Image, ImageOps 6 | import folder_paths 7 | from nodes import LoadImage 8 | import torchvision.transforms as transforms 9 | import re 10 | import comfy.utils 11 | from PIL.PngImagePlugin import PngInfo 12 | import json 13 | 14 | # 定义允许的文件扩展名 15 | ALLOWED_EXT = ['.png', '.jpg', '.jpeg', '.bmp', '.webp', '.tiff', '.gif'] 16 | 17 | class LoadImagesFromFolder: 18 | @classmethod 19 | def INPUT_TYPES(s): 20 | input_dir = folder_paths.get_input_directory() 21 | folders = [f for f in os.listdir(input_dir) if os.path.isdir(os.path.join(input_dir, f))] 22 | 23 | return {"required": { 24 | "path_type": (["input_dir", "custom_path"],), 25 | "mode": (["single", "incremental", "random", "batch"],), 26 | "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), 27 | }, 28 | "optional": { 29 | "image_index": ("INT", {"default": 0, "min": 0, "max": 99999, "step": 1}), 30 | "folder_name": ([""] + sorted(folders),), 31 | "custom_path": ("STRING", {"default": "C:/Users/Pictures"}), 32 | "label": ("STRING", {"default": "batch 001", "multiline": False}), 33 | "output_filename": (["true", "false"],), 34 | "allow_RGBA": (["false", "true"],), 35 | "image_pattern": ("STRING", {"default": "*.png;*.jpg;*.jpeg;*.webp"}), 36 | }} 37 | 38 | RETURN_TYPES = ("IMAGE", "STRING") 39 | RETURN_NAMES = ("image", "filename") 40 | FUNCTION = "load_images" 41 | CATEGORY = "YCNode/Image" 42 | 43 | # 存储递增模式的状态 
def load_images(self, path_type, mode, seed=0, image_index=0, folder_name="",
                custom_path="", image_pattern="*.png;*.jpg;*.jpeg;*.webp",
                label="batch 001", output_filename="true", allow_RGBA="false"):
    """Load images from a folder and return (image_tensor, filename_string).

    path_type:  "input_dir" scans the ComfyUI input directory (optionally a
                folder_name below it); anything else uses custom_path.
    mode:       "batch" loads every matching file; "single" uses image_index;
                "random" picks by seed; otherwise an "incremental" per-label
                index that advances on every call.
    Returns a 1x64x64x3 zero tensor and "" when nothing can be loaded.
    """
    # Resolve the directory to scan.
    if path_type == "input_dir":
        input_dir = folder_paths.get_input_directory()
        if folder_name == "":
            target_dir = input_dir
        else:
            target_dir = os.path.join(input_dir, folder_name)
        # FIX: a missing sub-folder previously crashed in os.listdir() below;
        # fail soft exactly like the custom_path branch does.
        if not os.path.isdir(target_dir):
            print(f"警告: 自定义路径 {target_dir} 不存在!")
            empty = torch.zeros((1, 64, 64, 3))
            return (empty, "")
    else:  # custom_path
        if not os.path.exists(custom_path):
            print(f"警告: 自定义路径 {custom_path} 不存在!")
            empty = torch.zeros((1, 64, 64, 3))
            return (empty, "")
        target_dir = custom_path

    # Batch state is keyed purely by the user-supplied label.
    batch_key = label

    # When filename output is disabled, drop any cached names for this label.
    # (The old dead "filename_output = ''" assignment here was removed: every
    # return path below builds its own filename value.)
    if output_filename == "false" and batch_key in self.filename_cache:
        del self.filename_cache[batch_key]

    # Collect files matching any of the ";"-separated "*.ext" patterns.
    patterns = image_pattern.split(";")
    image_files = []
    for pattern in patterns:
        pattern = pattern.strip()
        if pattern.startswith("*."):
            ext = pattern[2:].lower()
            image_files.extend([f for f in os.listdir(target_dir)
                                if os.path.isfile(os.path.join(target_dir, f)) and
                                f.lower().endswith(f".{ext}")])

    image_files = sorted(image_files)
    if not image_files:
        print(f"警告: 在 {target_dir} 中未找到匹配的图片")
        empty = torch.zeros((1, 64, 64, 3))
        return (empty, "")

    # Initialise per-label state, or reset it when path/pattern changed.
    if batch_key not in self.batch_states:
        self.batch_states[batch_key] = {
            "path": target_dir,
            "pattern": image_pattern,
            "index": 0
        }
    else:
        state = self.batch_states[batch_key]
        if state["path"] != target_dir or state["pattern"] != image_pattern:
            state["path"] = target_dir
            state["pattern"] = image_pattern
            state["index"] = 0
            print(f"标签 '{label}' 的路径或模式已更改,索引已重置。")

    def load_single_image(image_path):
        # Load one file as a (1, H, W, C) float tensor in [0, 1] plus its
        # extension-less name; (None, "") on failure.
        try:
            img = Image.open(image_path)

            # Keep the alpha channel only when explicitly allowed;
            # everything else is normalised to RGB.
            if img.mode == 'RGBA' and allow_RGBA == "true":
                img_rgb = img
            else:
                img_rgb = img.convert('RGB')

            basename = os.path.basename(image_path)
            filename = os.path.splitext(basename)[0]

            img_array = np.array(img_rgb).astype(np.float32) / 255.0
            image_tensor = torch.from_numpy(img_array)[None,]
            return image_tensor, filename
        except Exception as e:
            print(f"加载图片时出错 {image_path}: {str(e)}")
            return None, ""

    if mode == "batch":
        # Batch mode: load every matching image.
        # NOTE(review): mixing RGBA (4ch) and RGB (3ch) files with
        # allow_RGBA="true" would make torch.cat fail — confirm inputs.
        images = []
        filenames = []
        for img_file in image_files:
            image_path = os.path.join(target_dir, img_file)
            img_tensor, filename = load_single_image(image_path)
            if img_tensor is not None:
                images.append(img_tensor)
                if output_filename == "true":
                    filenames.append(filename)

        if images:
            batched_images = torch.cat(images, dim=0)
            if output_filename == "true":
                filename_output = ";".join(filenames)
                self.filename_cache[batch_key] = filename_output
            else:
                filename_output = ""
            return (batched_images, filename_output)
        else:
            empty = torch.zeros((1, 64, 64, 3))
            return (empty, "")
    else:
        # Single-image modes: single, random, incremental.
        if mode == "single":
            sel_index = min(max(0, image_index), len(image_files) - 1)
        elif mode == "random":
            random.seed(seed)
            sel_index = random.randint(0, len(image_files) - 1)
            print(f"标签 '{label}' 随机选择图片索引: {sel_index} (种子: {seed})")
        else:  # incremental
            sel_index = self.batch_states[batch_key]["index"]
            next_index = (sel_index + 1) % len(image_files)
            self.batch_states[batch_key]["index"] = next_index
            print(f"标签 '{label}' 递增图片索引: {sel_index} (下一次: {next_index})")

        image_path = os.path.join(target_dir, image_files[sel_index])
        img_tensor, filename = load_single_image(image_path)
        if img_tensor is None:
            empty = torch.zeros((1, 64, 64, 3))
            return (empty, "")

        if output_filename == "true":
            self.filename_cache[batch_key] = filename
            return (img_tensor, filename)
        else:
            return (img_tensor, "")
    def yc_save_images(self, images, output_path='', filename_prefix="ComfyUI", filename_delimiter='_',
                       extension='png', filename_number_padding=4, filename_number_start='false',
                       caption=None, caption_file_extension=".txt",
                       prompt=None, extra_pnginfo=None):
        """Save a batch of images to disk with sequentially numbered names.

        output_path: target directory; relative paths resolve under the node's
            default output dir; empty/"none"/"." means the default dir itself.
        filename_number_start: "true" puts the counter before the prefix.
        caption: optional text written next to each image with
            caption_file_extension.
        Returns the UI result dict plus (images, list of saved file names).
        NOTE(review): prompt/extra_pnginfo are accepted but never embedded in
        the image metadata (the PngInfo object stays empty) — confirm intent.
        """

        delimiter = filename_delimiter
        number_padding = filename_number_padding

        # Fixed values replacing former node parameters.
        quality = 100  # always save at maximum quality
        overwrite_mode = 'false'  # never overwrite existing files

        # Resolve the output path.
        if output_path in [None, '', "none", "."]:
            output_path = self.output_dir
        if not os.path.isabs(output_path):
            output_path = os.path.join(self.output_dir, output_path)

        # Make sure the output directory exists.
        if not os.path.exists(output_path):
            print(f"警告: 路径 {output_path} 不存在,正在创建目录。")
            os.makedirs(output_path, exist_ok=True)

        # Derive the next counter value from files already on disk.
        try:
            if filename_number_start == 'true':
                pattern = f"(\\d+){re.escape(delimiter)}{re.escape(filename_prefix)}"
            else:
                pattern = f"{re.escape(filename_prefix)}{re.escape(delimiter)}(\\d+)"

            existing_counters = []
            for filename in os.listdir(output_path):
                match = re.match(pattern, filename)
                if match:
                    try:
                        existing_counters.append(int(match.group(1)))
                    except (ValueError, IndexError):
                        pass

            existing_counters.sort(reverse=True)
            counter = existing_counters[0] + 1 if existing_counters else 1
        except Exception as e:
            print(f"警告: 计数器初始化失败: {str(e)},使用默认值1")
            counter = 1

        # Validate the extension, falling back to PNG.
        file_extension = '.' + extension
        if file_extension not in ALLOWED_EXT:
            print(f"警告: 扩展名 {extension} 无效。有效格式为: {', '.join([ext[1:] for ext in ALLOWED_EXT])}")
            file_extension = ".png"

        results = []
        output_filenames = []  # bare file names (no directory part)
        for image in images:
            # Tensor [H, W, C] in [0, 1] -> 8-bit PIL image.
            i = 255. * image.cpu().numpy()
            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))

            # Metadata container depends on the target format.
            if extension == 'webp':
                img_exif = img.getexif()
                exif_data = img_exif.tobytes()
            else:
                metadata = PngInfo()
                exif_data = metadata

            # Build the file name (overwrite_mode is fixed to 'false').
            if filename_number_start == 'true':
                file = f"{counter:0{number_padding}}{delimiter}{filename_prefix}{file_extension}"
                base_filename = f"{counter:0{number_padding}}{delimiter}{filename_prefix}"
            else:
                file = f"{filename_prefix}{delimiter}{counter:0{number_padding}}{file_extension}"
                base_filename = f"{filename_prefix}{delimiter}{counter:0{number_padding}}"

            # If the name is taken, bump the counter once and rebuild it.
            if os.path.exists(os.path.join(output_path, file)):
                counter += 1
                # Regenerate the file name with the new counter.
                if filename_number_start == 'true':
                    file = f"{counter:0{number_padding}}{delimiter}{filename_prefix}{file_extension}"
                    base_filename = f"{counter:0{number_padding}}{delimiter}{filename_prefix}"
                else:
                    file = f"{filename_prefix}{delimiter}{counter:0{number_padding}}{file_extension}"
                    base_filename = f"{filename_prefix}{delimiter}{counter:0{number_padding}}"

            # Save the image with format-appropriate options.
            try:
                output_file = os.path.abspath(os.path.join(output_path, file))
                if extension in ["jpg", "jpeg"]:
                    img.save(output_file, quality=quality)
                elif extension == 'webp':
                    img.save(output_file, quality=quality, exif=exif_data)
                elif extension == 'png':
                    img.save(output_file, pnginfo=exif_data)
                elif extension == 'bmp':
                    img.save(output_file)
                elif extension == 'tiff':
                    img.save(output_file, quality=quality)
                else:
                    img.save(output_file, pnginfo=exif_data)

                print(f"图片保存至: {output_file}")
                output_filenames.append(file)  # name only, without the path

                # Save the caption alongside the image, if provided.
                if caption is not None and caption.strip() != "":
                    txt_file = base_filename + caption_file_extension
                    txt_path = os.path.abspath(os.path.join(output_path, txt_file))
                    try:
                        with open(txt_path, 'w', encoding='utf-8') as f:
                            f.write(caption)
                        print(f"文本描述保存至: {txt_path}")
                    except Exception as e:
                        print(f"保存文本描述失败: {str(e)}")

                # Record the file for the UI preview.
                results.append({
                    "filename": file,
                    "subfolder": "",
                    "type": self.type
                })

            except OSError as e:
                print(f'保存文件失败: {output_file}, 错误: {str(e)}')
            except Exception as e:
                print(f'保存文件失败,错误: {str(e)}')

            counter += 1

        # Return file names (not full paths) alongside the pass-through images.
        return {"ui": {"images": results}, "result": (images, output_filenames,)}
class MaskSplitFilter:
    """Split a mask into one mask per outer contour and return them as a
    batch, ordered by each region's top-left-most pixel (left to right, then
    top to bottom). Holes (child contours) are subtracted from their parent."""

    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "mask": ("MASK",),
            },
        }

    RETURN_TYPES = ("MASK",)
    RETURN_NAMES = ("segmented_masks",)
    FUNCTION = "segment_mask"
    CATEGORY = "YCNode/Mask"

    def find_top_left_point(self, mask_np):
        """Return the top-left-most nonzero point of the mask: the smallest x,
        then the smallest y within that column; (inf, inf) for an empty mask."""
        y_coords, x_coords = np.nonzero(mask_np)
        if len(x_coords) == 0:
            return float('inf'), float('inf')

        min_x = np.min(x_coords)
        min_y = np.min(y_coords[x_coords == min_x])

        return min_x, min_y

    def segment_mask(self, mask):
        """Split *mask* into per-contour masks (holes removed)."""
        # Remember the original device so results are returned there.
        device = mask.device if isinstance(mask, torch.Tensor) else torch.device('cpu')

        # Normalise to a 2-D uint8 image for OpenCV.
        if isinstance(mask, torch.Tensor):
            if len(mask.shape) == 2:
                mask = mask.unsqueeze(0)
            mask_np = (mask[0] * 255).cpu().numpy().astype(np.uint8)
        else:
            mask_np = (mask * 255).astype(np.uint8)

        # RETR_TREE keeps the parent/child (hole) hierarchy.
        contours, hierarchy = cv2.findContours(
            mask_np,
            cv2.RETR_TREE,
            cv2.CHAIN_APPROX_SIMPLE
        )

        mask_info = []  # (tensor, min_x, min_y) entries used for ordering

        if hierarchy is not None and len(contours) > 0:
            hierarchy = hierarchy[0]
            contour_masks = {}

            # Rasterise every contour into its own filled mask.
            # NOTE: the name `mask` is reused here for the scratch image.
            for i, contour in enumerate(contours):
                mask = np.zeros_like(mask_np)
                cv2.drawContours(mask, [contour], -1, 255, -1)
                contour_masks[i] = mask

            # For each unprocessed contour, subtract its holes by walking the
            # sibling chain of its first child in the hierarchy.
            processed_indices = set()

            for i, (contour, h) in enumerate(zip(contours, hierarchy)):
                if i in processed_indices:
                    continue

                current_mask = contour_masks[i].copy()
                child_idx = h[2]

                if child_idx != -1:
                    while child_idx != -1:
                        current_mask = cv2.subtract(current_mask, contour_masks[child_idx])
                        processed_indices.add(child_idx)
                        child_idx = hierarchy[child_idx][0]

                # Top-left point used for the final ordering.
                min_x, min_y = self.find_top_left_point(current_mask)

                # Back to a (1, H, W) float tensor on the original device.
                mask_tensor = torch.from_numpy(current_mask).float() / 255.0
                mask_tensor = mask_tensor.unsqueeze(0)
                mask_tensor = mask_tensor.to(device)

                mask_info.append((mask_tensor, min_x, min_y))
                processed_indices.add(i)

        # No contours found: hand back the input unchanged.
        if not mask_info:
            if isinstance(mask, torch.Tensor):
                return (mask,)
            else:
                mask_tensor = torch.from_numpy(mask).float()
                if len(mask_tensor.shape) == 2:
                    mask_tensor = mask_tensor.unsqueeze(0)
                mask_tensor = mask_tensor.to(device)
                return (mask_tensor,)

        # Order regions by their top-left-most point.
        mask_info.sort(key=lambda x: (x[1], x[2]))

        # Concatenate the ordered masks into a single batch.
        result_masks = None
        for mask_tensor, _, _ in mask_info:
            if result_masks is None:
                result_masks = mask_tensor
            else:
                result_masks = torch.cat([result_masks, mask_tensor], dim=0)

        return (result_masks,)
class MaskContourFillNode:
    """Fill the interior of every contour found in a mask, discarding
    contours whose area is smaller than ``min_area``."""

    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "mask": ("MASK",),
                "min_area": ("INT", {
                    "default": 50,
                    "min": 0,
                    "max": 10000,
                    "step": 10
                }),
            },
        }

    RETURN_TYPES = ("MASK",)
    RETURN_NAMES = ("filled_mask",)
    FUNCTION = "fill_mask_contours"
    CATEGORY = "YCNode/Mask"

    def fill_mask_contours(self, mask, min_area):
        """Return a (1, H, W) mask with all large-enough contours filled."""
        # Normalise the input down to a 2-D numpy array, remembering the
        # device the result should be returned on.
        if isinstance(mask, torch.Tensor):
            device = mask.device
            work = mask
            if work.dim() == 2:
                work = work.unsqueeze(0)
            if work.dim() > 3:
                # Batched input: only the first mask is processed.
                work = work[0].unsqueeze(0)
            mask_np = work.cpu().numpy()[0]  # [H, W]
        else:
            device = torch.device('cpu')
            mask_np = mask

        # Clamp into [0, 1] and convert to an 8-bit image for OpenCV.
        mask_8bit = (np.clip(mask_np, 0.0, 1.0) * 255).astype(np.uint8)

        # RETR_LIST: every contour, no hierarchy needed.
        contours, _ = cv2.findContours(mask_8bit, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

        if contours:
            # Start from a blank canvas and fill only contours that pass
            # the area threshold.
            canvas = np.zeros_like(mask_8bit)
            for cnt in contours:
                if cv2.contourArea(cnt) >= min_area:
                    cv2.drawContours(canvas, [cnt], -1, 255, -1)
        else:
            # No contours at all: pass the input through unchanged.
            canvas = mask_8bit.copy()

        # Back to a (1, H, W) float tensor on the original device.
        result = torch.from_numpy(canvas.astype(np.float32) / 255.0)
        return (result.unsqueeze(0).to(device),)
def remap(self, mask, min, max):
    """Rescale *mask* so its peak value maps to *max* and zero maps to
    *min*, then clamp the result into [0.0, 1.0]."""
    # Normalise by the peak value, guarding against an all-zero mask
    # (which would otherwise divide by zero).
    peak = torch.max(mask)
    if not peak > 0:
        peak = 1

    normalized = mask / peak
    remapped = normalized * (max - min) + min

    # Final values must stay inside the valid mask range.
    return (torch.clamp(remapped, min=0.0, max=1.0),)
class MaskFromBatch:
    """Pick a contiguous slice of masks out of a mask batch."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "masks": ("MASK",),
                "batch_index": ("INT", {
                    "default": 0,
                    "min": 0,
                    "max": 63,
                    "step": 1
                }),
                "length": ("INT", {
                    "default": 1,
                    "min": 1,
                    "max": 64,
                    "step": 1
                }),
            },
        }

    RETURN_TYPES = ("MASK",)
    RETURN_NAMES = ("selected_masks",)
    FUNCTION = "batch_select"
    CATEGORY = "YCNode/Mask/Batch"

    def batch_select(self, masks, batch_index, length):
        """Return masks[batch_index : batch_index+length], clamped to the batch."""
        # Promote a single H x W mask to a batch of one.
        if isinstance(masks, torch.Tensor) and len(masks.shape) == 2:
            masks = masks.unsqueeze(0)

        total = masks.shape[0]
        # Clamp the window so it always stays inside the batch.
        start = min(total - 1, batch_index)
        count = min(total - start, length)

        return (masks[start:start + count].clone(),)


class MaskRepeatBatch:
    """Tile a mask batch *amount* times along the batch axis."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "masks": ("MASK",),
                "amount": ("INT", {
                    "default": 1,
                    "min": 1,
                    "max": 64,
                    "step": 1
                }),
            },
        }

    RETURN_TYPES = ("MASK",)
    RETURN_NAMES = ("repeated_masks",)
    FUNCTION = "repeat"
    CATEGORY = "YCNode/Mask/Batch"

    def repeat(self, masks, amount):
        """Return the batch repeated *amount* times along dim 0."""
        # Promote a single H x W mask to a batch of one.
        if isinstance(masks, torch.Tensor) and len(masks.shape) == 2:
            masks = masks.unsqueeze(0)

        return (masks.repeat((amount, 1, 1)),)
class MaskBatchComposite:
    """Concatenate two mask batches into a single batch along dim 0."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "masks_1": ("MASK",),
                "masks_2": ("MASK",),
            }
        }

    RETURN_TYPES = ("MASK",)
    RETURN_NAMES = ("combined_masks",)
    FUNCTION = "combine"
    CATEGORY = "YCNode/Mask/Batch"

    def combine(self, masks_1, masks_2):
        """Return masks_1 followed by masks_2 as one batch."""
        def as_batch(m):
            # A lone H x W mask becomes a batch of one.
            if isinstance(m, torch.Tensor) and len(m.shape) == 2:
                return m.unsqueeze(0)
            return m

        return (torch.cat([as_batch(masks_1), as_batch(masks_2)], dim=0),)
def combine(self, destination, source, x, y, operation, opacity=1.0, blend_power=0.5):
    """Blend *source* onto *destination* at (x, y) with the chosen operation.

    opacity scales the source before blending; blend_power steers
    "natural_blend" between a screen-like blend and a clamped sum.
    The pasted region is clipped to the destination bounds and the final
    result is clamped into [0, 1].
    """
    result = destination.reshape((-1, destination.shape[-2], destination.shape[-1])).clone()
    src = source.reshape((-1, source.shape[-2], source.shape[-1]))

    # Visible sub-rectangle of the source inside the destination.
    left, top = x, y
    right = min(left + src.shape[-1], destination.shape[-1])
    bottom = min(top + src.shape[-2], destination.shape[-2])
    vis_w = right - left
    vis_h = bottom - top

    src_part = src[:, :vis_h, :vis_w]
    dst_part = destination[:, top:bottom, left:right]

    # Opacity scales the source before any blending.
    if opacity < 1.0:
        src_part = src_part * opacity

    blended = None
    if operation == "multiply":
        blended = dst_part * src_part
    elif operation == "add":
        blended = dst_part + src_part
    elif operation == "subtract":
        blended = dst_part - src_part
    elif operation == "and":
        blended = torch.bitwise_and(dst_part.round().bool(), src_part.round().bool()).float()
    elif operation == "or":
        blended = torch.bitwise_or(dst_part.round().bool(), src_part.round().bool()).float()
    elif operation == "xor":
        blended = torch.bitwise_xor(dst_part.round().bool(), src_part.round().bool()).float()
    elif operation == "overlay":
        # Photoshop overlay: 2*d*s where d < 0.5, else 1 - 2*(1-d)*(1-s).
        dark = dst_part < 0.5
        bright = ~dark
        blended = torch.zeros_like(dst_part)
        blended[dark] = 2 * dst_part[dark] * src_part[dark]
        blended[bright] = 1 - 2 * (1 - dst_part[bright]) * (1 - src_part[bright])
    elif operation == "natural_blend":
        # Natural blend: screen-like base for soft shadows, fading towards a
        # clamped sum in bright areas so whites do not blow out.
        screenish = dst_part + src_part - dst_part * src_part
        capped_sum = torch.clamp(dst_part + src_part, 0.0, 1.0)
        # Brighter pixels lean more on the clamped sum; blend_power moves
        # the crossover point.
        brightness = (dst_part + src_part) / 2.0
        factor = torch.pow(brightness, 2.0 - blend_power)
        blended = screenish * (1.0 - factor) + capped_sum * factor

    # Unknown operations leave the destination untouched.
    if blended is not None:
        result[:, top:bottom, left:right] = blended

    return (torch.clamp(result, 0.0, 1.0),)
def crop_with_mask(self, image, crop_frame, top_padding, bottom_padding, left_padding, right_padding, round_to_multiple, invert_mask, detection_method, mask=None):
    """Crop *image* (and *mask*) to the bounding box of *crop_frame*.

    The box is grown by the four padding values, then enlarged so its width
    and height are multiples of *round_to_multiple*. Returns
    (cropped_image, cropped_mask, cropped_crop_frame, preview_image,
    (x_min, y_min, x_max, y_max)). The preview draws the final crop box in
    red and the raw (un-padded) mask bounds in green.
    """
    # Without an explicit mask, use an all-zero one of the image size.
    if mask is None:
        # FIX: image is [B, H, W, C]; the zero mask must be (1, H, W).
        # The old code used shape[2]/shape[3], i.e. (1, W, C).
        mask = torch.zeros((1, image.shape[1], image.shape[2]), device=image.device)

    # Work on numpy arrays from here on.
    if isinstance(mask, torch.Tensor):
        mask = mask.cpu().numpy()
    if isinstance(crop_frame, torch.Tensor):
        crop_frame = crop_frame.cpu().numpy()

    # Reduce batch/channel dimensions down to plain H x W.
    if len(mask.shape) == 3:
        mask = mask[0]
    elif len(mask.shape) == 4:
        mask = mask[0, 0]

    if len(crop_frame.shape) == 3:
        crop_frame = crop_frame[0]
    elif len(crop_frame.shape) == 4:
        crop_frame = crop_frame[0, 0]

    # Optionally invert the crop frame.
    if invert_mask:
        crop_frame = 1 - crop_frame

    # Binarise the crop frame.
    binary_mask = (crop_frame > 0.5).astype(np.uint8)

    # Locate the bounding box of the frame.
    if detection_method == "mask_area":
        y_indices, x_indices = np.nonzero(binary_mask)
        if len(y_indices) == 0 or len(x_indices) == 0:
            # Empty frame: return the untouched image as its own preview.
            original_img_np = image[0].cpu().numpy()  # [H, W, C]
            if original_img_np.dtype != np.float32:
                original_img_np = original_img_np.astype(np.float32)
            if original_img_np.max() > 1.0:
                original_img_np = original_img_np / 255.0

            empty_preview_tensor = torch.from_numpy(original_img_np).float()
            empty_preview_tensor = empty_preview_tensor.unsqueeze(0)  # add batch dim
            # FIX: full-frame coords are (0, 0, W, H) = shape[2], shape[1];
            # the old code returned shape[3]/shape[2] (= C and W).
            return (image, mask, crop_frame, empty_preview_tensor, (0, 0, image.shape[2], image.shape[1]))

        x_min, x_max = np.min(x_indices), np.max(x_indices)
        y_min, y_max = np.min(y_indices), np.max(y_indices)

        # Grow the box by the requested padding, clamped to the image.
        x_min = max(0, x_min - left_padding)
        x_max = min(binary_mask.shape[1], x_max + right_padding)
        y_min = max(0, y_min - top_padding)
        y_max = min(binary_mask.shape[0], y_max + bottom_padding)

        # Round the crop size up to a multiple of round_to_multiple.
        width = x_max - x_min
        height = y_max - y_min

        new_width = ((width + round_to_multiple - 1) // round_to_multiple) * round_to_multiple
        new_height = ((height + round_to_multiple - 1) // round_to_multiple) * round_to_multiple

        # Distribute the extra pixels evenly around the box.
        x_pad = new_width - width
        y_pad = new_height - height

        x_min = max(0, x_min - x_pad // 2)
        x_max = min(binary_mask.shape[1], x_max + (x_pad - x_pad // 2))
        y_min = max(0, y_min - y_pad // 2)
        y_max = min(binary_mask.shape[0], y_max + (y_pad - y_pad // 2))

        # Crop image, mask and frame.
        cropped_image = image[:, y_min:y_max, x_min:x_max, :]
        cropped_mask = mask[y_min:y_max, x_min:x_max]
        cropped_crop_frame = crop_frame[y_min:y_max, x_min:x_max]

        # Build the preview image with the crop boxes drawn on top.
        original_img_np = image[0].cpu().numpy()  # [H, W, C]

        # Normalise to float RGB in [0, 1].
        if original_img_np.dtype != np.float32:
            original_img_np = original_img_np.astype(np.float32)
        if original_img_np.max() > 1.0:
            original_img_np = original_img_np / 255.0

        # Only the boxes are drawn — no translucent overlay.
        preview_img = original_img_np.copy()

        # Bounds of the raw mask, before padding was applied.
        orig_y_indices, orig_x_indices = np.nonzero(binary_mask)
        orig_x_min, orig_x_max = np.min(orig_x_indices), np.max(orig_x_indices)
        orig_y_min, orig_y_max = np.min(orig_y_indices), np.max(orig_y_indices)

        # Integer coordinates for slicing.
        x_min_int, y_min_int = int(x_min), int(y_min)
        x_max_int, y_max_int = int(x_max), int(y_max)
        orig_x_min_int, orig_y_min_int = int(orig_x_min), int(orig_y_min)
        orig_x_max_int, orig_y_max_int = int(orig_x_max), int(orig_y_max)

        # Red marks the final crop (with padding); green the raw mask bounds.
        red_color = np.array([1.0, 0.0, 0.0])
        green_color = np.array([0.0, 1.0, 0.0])

        line_width = 2

        # Red box: final crop region (top, bottom, left, right edges).
        preview_img[y_min_int:y_min_int+line_width, x_min_int:x_max_int] = red_color
        preview_img[y_max_int-line_width:y_max_int, x_min_int:x_max_int] = red_color
        preview_img[y_min_int:y_max_int, x_min_int:x_min_int+line_width] = red_color
        preview_img[y_min_int:y_max_int, x_max_int-line_width:x_max_int] = red_color

        # Green box: raw mask region, only when it differs from the red one.
        if (orig_x_min_int != x_min_int or orig_y_min_int != y_min_int or
            orig_x_max_int != x_max_int or orig_y_max_int != y_max_int):
            preview_img[orig_y_min_int:orig_y_min_int+line_width, orig_x_min_int:orig_x_max_int] = green_color
            preview_img[orig_y_max_int-line_width:orig_y_max_int, orig_x_min_int:orig_x_max_int] = green_color
            preview_img[orig_y_min_int:orig_y_max_int, orig_x_min_int:orig_x_min_int+line_width] = green_color
            preview_img[orig_y_min_int:orig_y_max_int, orig_x_max_int-line_width:orig_x_max_int] = green_color

        # Back to a [B, H, W, C] tensor.
        preview_img_tensor = torch.from_numpy(preview_img).float()
        preview_img_tensor = preview_img_tensor.unsqueeze(0)

        # Masks back to (1, H, W) tensors.
        cropped_mask = torch.from_numpy(cropped_mask).float()
        cropped_crop_frame = torch.from_numpy(cropped_crop_frame).float()

        if len(cropped_mask.shape) == 2:
            cropped_mask = cropped_mask.unsqueeze(0)
        if len(cropped_crop_frame.shape) == 2:
            cropped_crop_frame = cropped_crop_frame.unsqueeze(0)

        # Crop coordinates as (x_min, y_min, x_max, y_max).
        crop_coords = (int(x_min), int(y_min), int(x_max), int(y_max))
        return (cropped_image, cropped_mask, cropped_crop_frame, preview_img_tensor, crop_coords)

    # Unknown detection method: return the input unchanged with a plain preview.
    original_img_np = image[0].cpu().numpy()
    if original_img_np.dtype != np.float32:
        original_img_np = original_img_np.astype(np.float32)
    if original_img_np.max() > 1.0:
        original_img_np = original_img_np / 255.0

    empty_preview_tensor = torch.from_numpy(original_img_np).float()
    empty_preview_tensor = empty_preview_tensor.unsqueeze(0)

    # FIX: same coordinate fix as above — (0, 0, W, H).
    return (image, mask, crop_frame, empty_preview_tensor, (0, 0, image.shape[2], image.shape[1]))
CATEGORY = "YCNode/Mask" 241 | 242 | def restore_crop(self, background_image, cropped_image, crop_coords, invert_mask, cropped_mask=None, crop_frame=None): 243 | x_min, y_min, x_max, y_max = crop_coords 244 | 245 | # 正确解析图像维度 246 | # 图像形状: [batch, height, width, channels] 247 | batch_size = background_image.shape[0] 248 | bg_height = background_image.shape[1] 249 | bg_width = background_image.shape[2] 250 | channels = background_image.shape[3] 251 | 252 | # 检查图像维度是否合理 253 | if cropped_image.shape[1] < 10 or cropped_image.shape[2] < 10: 254 | # 修复坐标 - 确保至少有1像素的差距 255 | if x_min >= x_max: 256 | x_max = x_min + 1 257 | if y_min >= y_max: 258 | y_max = y_min + 1 259 | 260 | # 强制确保裁剪区域有合理的高度和宽度(至少10像素) 261 | if y_max - y_min < 10: 262 | height_to_add = 10 - (y_max - y_min) 263 | y_min = max(0, y_min - height_to_add // 2) 264 | y_max = min(bg_height, y_max + (height_to_add - height_to_add // 2)) 265 | 266 | if x_max - x_min < 10: 267 | width_to_add = 10 - (x_max - x_min) 268 | x_min = max(0, x_min - width_to_add // 2) 269 | x_max = min(bg_width, x_max + (width_to_add - width_to_add // 2)) 270 | 271 | # 确保坐标在有效范围内 272 | x_min = max(0, min(x_min, bg_width - 1)) 273 | y_min = max(0, min(y_min, bg_height - 1)) 274 | x_max = max(x_min + 1, min(x_max, bg_width)) 275 | y_max = max(y_min + 1, min(y_max, bg_height)) 276 | 277 | # 创建输出图像和mask(与背景图像相同大小) 278 | output_image = background_image.clone() 279 | output_mask = torch.zeros((1, bg_height, bg_width), device=background_image.device) 280 | 281 | # 确保裁剪图像尺寸与目标区域匹配 282 | expected_height = y_max - y_min 283 | expected_width = x_max - x_min 284 | 285 | # 验证裁剪图像尺寸 286 | if cropped_image.shape[1] != expected_height or cropped_image.shape[2] != expected_width: 287 | # 调整放置策略 288 | if expected_width == 0 or expected_height == 0: 289 | # 使用安全的默认值 290 | x_min, y_min = 0, 0 291 | x_max = min(cropped_image.shape[2], bg_width) 292 | y_max = min(cropped_image.shape[1], bg_height) 293 | expected_width = x_max - x_min 294 | expected_height = 
y_max - y_min 295 | else: 296 | # 尝试调整坐标以匹配实际图像尺寸 297 | src_height, src_width = cropped_image.shape[1], cropped_image.shape[2] 298 | 299 | # 如果输入图像尺寸小于目标区域,直接使用输入图像尺寸 300 | if src_height < expected_height: 301 | y_max = y_min + src_height 302 | if src_width < expected_width: 303 | x_max = x_min + src_width 304 | 305 | # 检查调整后尺寸是否合理 306 | if y_max - y_min < 10: 307 | y_max = min(y_min + 10, bg_height) 308 | 309 | if x_max - x_min < 10: 310 | x_max = min(x_min + 10, bg_width) 311 | 312 | # 如果输入图像尺寸大于目标区域,可能需要裁剪输入图像 313 | expected_height = y_max - y_min 314 | expected_width = x_max - x_min 315 | 316 | # 将裁剪的图像放回原位置 317 | try: 318 | # 确保区域有效 319 | if expected_width <= 0 or expected_height <= 0: 320 | raise ValueError(f"无效的区域大小: 宽度={expected_width}, 高度={expected_height}") 321 | 322 | # 安全复制,考虑到输入图像可能与目标区域大小不匹配 323 | copy_height = min(expected_height, cropped_image.shape[1]) 324 | copy_width = min(expected_width, cropped_image.shape[2]) 325 | 326 | if copy_width > 0 and copy_height > 0: 327 | # 只复制有效的部分 - 修正索引 328 | output_image[:, y_min:y_min+copy_height, x_min:x_min+copy_width, :] = cropped_image[:, :copy_height, :copy_width, :] 329 | 330 | except RuntimeError: 331 | # 如果还是失败,尝试最保守的方法 332 | try: 333 | min_height = min(cropped_image.shape[1], bg_height - y_min) 334 | min_width = min(cropped_image.shape[2], bg_width - x_min) 335 | 336 | if min_height > 0 and min_width > 0: 337 | output_image[:, y_min:y_min+min_height, x_min:x_min+min_width, :] = cropped_image[:, :min_height, :min_width, :] 338 | # 更新复制区域的大小,供后面遮罩使用 339 | copy_height, copy_width = min_height, min_width 340 | except: 341 | pass 342 | 343 | # 安全处理mask 344 | try: 345 | # 确定要使用的遮罩 346 | mask_to_use = None 347 | if cropped_mask is not None: 348 | mask_to_use = cropped_mask 349 | elif crop_frame is not None: 350 | mask_to_use = crop_frame 351 | else: 352 | # 如果两个遮罩都没有提供,创建一个全1遮罩(即选中整个裁剪区域) 353 | # 使用与裁剪图像一致的尺寸 354 | height, width = cropped_image.shape[1], cropped_image.shape[2] 355 | mask_to_use = 
torch.ones((1, height, width), device=cropped_image.device) 356 | 357 | # 将裁剪的遮罩转换为正确的格式 358 | if isinstance(mask_to_use, torch.Tensor): 359 | if len(mask_to_use.shape) == 4: 360 | mask_to_use = mask_to_use[0] 361 | 362 | # 确保mask的维度至少是3维 [C, H, W] 363 | if len(mask_to_use.shape) == 2: 364 | mask_to_use = mask_to_use.unsqueeze(0) 365 | 366 | # 验证遮罩维度 367 | if len(mask_to_use.shape) != 3: 368 | return (output_image, output_mask) 369 | 370 | # 验证遮罩尺寸 371 | mask_height, mask_width = mask_to_use.shape[1], mask_to_use.shape[2] 372 | 373 | # 检查遮罩尺寸是否异常小 374 | if mask_height < 10 or mask_width < 10: 375 | # 如果遮罩高度或宽度异常小,尝试调整 376 | if expected_height > 10 and expected_width > 10: 377 | try: 378 | # 创建新的遮罩并调整大小 379 | new_mask = torch.nn.functional.interpolate( 380 | mask_to_use.unsqueeze(0) if len(mask_to_use.shape) == 3 else mask_to_use, 381 | size=(expected_height, expected_width), 382 | mode='nearest' 383 | ) 384 | mask_to_use = new_mask.squeeze(0) if len(mask_to_use.shape) == 3 else new_mask 385 | mask_height, mask_width = expected_height, expected_width 386 | except: 387 | pass 388 | 389 | # 使用已经调整过的目标区域大小 390 | copy_height = min(copy_height if 'copy_height' in locals() else expected_height, mask_height) 391 | copy_width = min(copy_width if 'copy_width' in locals() else expected_width, mask_width) 392 | 393 | # 确保copy_height和copy_width至少有10像素(防止线状遮罩) 394 | copy_height = max(10, copy_height) 395 | copy_width = max(10, copy_width) 396 | 397 | # 确保不超出背景边界 398 | if y_min + copy_height > bg_height: 399 | copy_height = bg_height - y_min 400 | if x_min + copy_width > bg_width: 401 | copy_width = bg_width - x_min 402 | 403 | if copy_width > 0 and copy_height > 0: 404 | # 确保索引有效 405 | if copy_height > mask_to_use.shape[1]: 406 | copy_height = mask_to_use.shape[1] 407 | if copy_width > mask_to_use.shape[2]: 408 | copy_width = mask_to_use.shape[2] 409 | 410 | # 安全复制遮罩 - 修正索引 411 | try: 412 | output_mask[:, y_min:y_min+copy_height, x_min:x_min+copy_width] = mask_to_use[:, 
:copy_height, :copy_width] 413 | except: 414 | try: 415 | # 强制重新调整遮罩尺寸 416 | adjusted_mask = torch.nn.functional.interpolate( 417 | mask_to_use.unsqueeze(0) if len(mask_to_use.shape) == 3 else mask_to_use, 418 | size=(copy_height, copy_width), 419 | mode='nearest' 420 | ) 421 | adjusted_mask = adjusted_mask.squeeze(0) if len(mask_to_use.shape) == 3 else adjusted_mask 422 | 423 | # 再次尝试放置 424 | output_mask[:, y_min:y_min+copy_height, x_min:x_min+copy_width] = adjusted_mask 425 | except: 426 | pass 427 | 428 | # 验证输出遮罩 429 | nonzero_count = torch.count_nonzero(output_mask) 430 | if nonzero_count == 0: 431 | # 最后尝试 - 创建一个简单的矩形遮罩 432 | try: 433 | output_mask[:, y_min:y_min+copy_height, x_min:x_min+copy_width] = 1.0 434 | except: 435 | pass 436 | except RuntimeError: 437 | # 如果还是失败,尝试最保守的方法 438 | try: 439 | if 'mask_to_use' in locals() and isinstance(mask_to_use, torch.Tensor): 440 | mask_height, mask_width = mask_to_use.shape[1], mask_to_use.shape[2] 441 | min_height = min(mask_height, bg_height - y_min) 442 | min_width = min(mask_width, bg_width - x_min) 443 | 444 | # 确保最小高度和宽度不小于10像素 445 | min_height = max(10, min_height) 446 | min_width = max(10, min_width) 447 | 448 | # 确保不超出背景边界 449 | if y_min + min_height > bg_height: 450 | min_height = bg_height - y_min 451 | if x_min + min_width > bg_width: 452 | min_width = bg_width - x_min 453 | 454 | if min_height > 0 and min_width > 0: 455 | # 确保不超出mask边界 456 | mask_part = mask_to_use[:, :min(min_height, mask_height), :min(min_width, mask_width)] 457 | # 确保目标区域足够大 458 | if y_min + mask_part.shape[1] <= bg_height and x_min + mask_part.shape[2] <= bg_width: 459 | output_mask[:, y_min:y_min+mask_part.shape[1], x_min:x_min+mask_part.shape[2]] = mask_part 460 | except: 461 | pass 462 | 463 | # 最后的尝试:创建一个简单的矩形遮罩 464 | try: 465 | valid_height = min(10, bg_height - y_min) 466 | valid_width = min(10, bg_width - x_min) 467 | if valid_height > 0 and valid_width > 0: 468 | output_mask[:, y_min:y_min+valid_height, 
x_min:x_min+valid_width] = 1.0 469 | except: 470 | pass 471 | 472 | if invert_mask: 473 | output_mask = 1 - output_mask 474 | 475 | return (output_image, output_mask) 476 | 477 | # 节点注册 478 | NODE_CLASS_MAPPINGS = { 479 | "MaskCrop_YC": MaskCrop_YC, 480 | "MaskCropRestore_YC": MaskCropRestore_YC 481 | } 482 | 483 | # 节点显示名称 484 | NODE_DISPLAY_NAME_MAPPINGS = { 485 | "MaskCrop_YC": "MaskCrop_YC", 486 | "MaskCropRestore_YC": "MaskCropRestore_YC" 487 | } -------------------------------------------------------------------------------- /py/TextProcessNode.py: -------------------------------------------------------------------------------- 1 | class TextBeforeKeyword: 2 | @classmethod 3 | def INPUT_TYPES(s): 4 | return { 5 | "required": { 6 | "text": ("STRING", { 7 | "multiline": True, # 允许多行输入 8 | "default": "Your text here" 9 | }), 10 | "keyword": ("STRING", { 11 | "multiline": False, # 关键词单行输入 12 | "default": "keyword" 13 | }), 14 | "case_sensitive": ("BOOLEAN", { 15 | "default": True, 16 | "label": "区分大小写" # 是否区分大小写 17 | }) 18 | } 19 | } 20 | 21 | RETURN_TYPES = ("STRING",) 22 | RETURN_NAMES = ("processed_text",) 23 | FUNCTION = "process_text" 24 | CATEGORY = "YCNode/Text" 25 | 26 | def process_text(self, text, keyword, case_sensitive=True): 27 | if not case_sensitive: 28 | # 如果不区分大小写,都转换为小写进行处理 29 | text_to_search = text.lower() 30 | keyword = keyword.lower() 31 | # 在小写版本中找到位置 32 | index = text_to_search.find(keyword) 33 | # 但返回原始文本的对应部分 34 | result = text[:index] if index != -1 else text 35 | else: 36 | # 区分大小写的处理 37 | index = text.find(keyword) 38 | result = text[:index] if index != -1 else text 39 | 40 | # 去除结果末尾的空白字符 41 | result = result.rstrip() 42 | 43 | return (result,) 44 | 45 | # 节点注册 46 | NODE_CLASS_MAPPINGS = { 47 | "TextBeforeKeyword": TextBeforeKeyword 48 | } 49 | 50 | # 节点显示名称 51 | NODE_DISPLAY_NAME_MAPPINGS = { 52 | "TextBeforeKeyword": "Text Before Keyword" 53 | } 54 | -------------------------------------------------------------------------------- 
/py/image_blend_resize.py:
--------------------------------------------------------------------------------
import torch
import numpy as np
from PIL import Image

# Blend-mode list exposed in the UI.
# NOTE(review): only 'normal', 'multiply', 'screen' and 'overlay' are actually
# implemented in blend_images(); all other entries fall back to 'normal'.
BLEND_MODES = [
    'normal', 'multiply', 'screen', 'overlay', 'soft_light', 'hard_light',
    'darken', 'lighten', 'color_dodge', 'color_burn', 'difference',
    'exclusion', 'hue', 'saturation', 'color', 'luminosity'
]

# Resize modes
RESIZE_MODES = [
    'contain',     # proportional scale to fit inside the target
    'cover',       # proportional scale to fully cover the target
    'fix_width',   # fixed width, height follows aspect ratio
    'fix_height'   # fixed height, width follows aspect ratio
]

# Alignment options (declared but not referenced by the node below)
ALIGN_MODES = [
    'center',
    'top_left', 'top_center', 'top_right',
    'middle_left', 'middle_right',
    'bottom_left', 'bottom_center', 'bottom_right'
]

def pil2tensor(image):
    """PIL image -> float tensor [1, H, W, C] scaled to 0..1."""
    return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)

def tensor2pil(image):
    """Float tensor (0..1) -> 8-bit PIL image."""
    return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))

def blend_images(bg, fg, mode='normal', opacity=100):
    """Core image-blend routine.

    bg, fg: PIL images (or arrays) of equal size; opacity in 0..100.
    Returns a uint8 numpy array. Unimplemented modes degrade to 'normal'.
    """
    if mode not in BLEND_MODES:
        return fg

    bg = np.array(bg).astype(float)
    fg = np.array(fg).astype(float)

    # Opacity as a 0..1 factor
    opacity = opacity / 100.0

    if mode == 'normal':
        result = fg
    elif mode == 'multiply':
        result = bg * fg / 255.0
    elif mode == 'screen':
        result = 255.0 - ((255.0 - bg) * (255.0 - fg) / 255.0)
    elif mode == 'overlay':
        # NOTE(review): this formula uses 2*(bg-128) on the bright branch,
        # which differs from the textbook overlay (2*bg-255) — confirm intent.
        mask = bg >= 128
        result = np.zeros_like(bg)
        result[mask] = 255.0 - ((255.0 - 2*(bg[mask]-128)) * (255.0 - fg[mask]) / 255.0)
        result[~mask] = (2*bg[~mask] * fg[~mask]) / 255.0
    else:
        # Remaining modes default to normal
        result = fg

    # Apply opacity
    result = bg * (1 - opacity) + result * opacity
    return np.clip(result, 0, 255).astype(np.uint8)

def resize_and_position(image, target_size, resize_mode='contain', x_pos=50, y_pos=50, rotation=0,
                        scale=100, fixed_width=None, fixed_height=None, bg_color=(0,0,0)):
    """Resize, rotate and place `image` onto a `target_size` canvas.

    x_pos/y_pos are percentages (0..100) of the free space; rotation in
    degrees; scale is an extra percentage factor on top of resize_mode.
    Returns (canvas, (x, y, w, h)) where the tuple is the pasted rect.
    """
    src_width, src_height = image.size
    target_width, target_height = target_size
    src_ratio = src_width / src_height

    # Step 1: base size according to resize_mode
    if resize_mode == 'contain':
        target_ratio = target_width / target_height
        if src_ratio > target_ratio:
            new_size = (target_width, int(target_width / src_ratio))
        else:
            new_size = (int(target_height * src_ratio), target_height)
    elif resize_mode == 'cover':
        target_ratio = target_width / target_height
        if src_ratio > target_ratio:
            new_size = (int(target_height * src_ratio), target_height)
        else:
            new_size = (target_width, int(target_width / src_ratio))
    elif resize_mode == 'fix_width':
        # Fixed width; height follows the source aspect ratio
        fixed_w = fixed_width if fixed_width is not None else target_width
        new_size = (fixed_w, int(fixed_w / src_ratio))
    elif resize_mode == 'fix_height':
        # Fixed height; width follows the source aspect ratio
        fixed_h = fixed_height if fixed_height is not None else target_height
        new_size = (int(fixed_h * src_ratio), fixed_h)
    else:
        new_size = (target_width, target_height)  # fallback

    # Step 2: apply the extra scale factor
    scale_factor = scale / 100.0
    final_size = (int(new_size[0] * scale_factor), int(new_size[1] * scale_factor))

    # Step 3: resize
    resized = image.resize(final_size, Image.LANCZOS)

    # Step 4: rotation (expand=True grows the bounding box)
    if rotation != 0:
        resized = resized.rotate(rotation, Image.BICUBIC, expand=True)
        final_size = resized.size

    # Step 5: target canvas
    result = Image.new('RGB', target_size, bg_color)

    # Step 6: final position — free space left after placing the layer
    available_width = target_size[0] - final_size[0]
    available_height = target_size[1] - final_size[1]

    # Percentage position within the free space
    x = int(available_width * (x_pos / 100))
    y = int(available_height * (y_pos / 100))

    # Clamp into the canvas
    x = max(0, min(x, available_width))
    y = max(0, min(y, available_height))

    # Snap to the edges at the extremes
    if y_pos <= 1:    # top aligned
        y = 0
    elif y_pos >= 99: # bottom aligned
        y = available_height

    if x_pos <= 1:    # left aligned
        x = 0
    elif x_pos >= 99: # right aligned
        x = available_width

    # Paste the layer
    result.paste(resized, (x, y))
    return result, (x, y, final_size[0], final_size[1])

class ImageBlendResize:
    """Blend a (resized, rotated, positioned) layer image onto a background,
    honouring an optional layer mask and per-mode opacity."""

    def __init__(self):
        self.NODE_NAME = 'ImageBlendResize'

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "background_image": ("IMAGE",),
                "layer_image": ("IMAGE",),
                "blend_mode": (BLEND_MODES,),
                "resize_mode": (RESIZE_MODES,),
                "scale": ("FLOAT", {"default": 100.0, "min": 1.0, "max": 400.0, "step": 0.1}),
                "x_pos": ("FLOAT", {"default": 50.0, "min": 0.0, "max": 100.0, "step": 0.1}),
                "y_pos": ("FLOAT", {"default": 50.0, "min": 0.0, "max": 100.0, "step": 0.1}),
                "rotation": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0, "step": 0.5}),
                "opacity": ("INT", {"default": 100, "min": 0, "max": 100, "step": 1}),
                "invert_mask": ("BOOLEAN", {"default": False}),
            },
            "optional": {
                "layer_mask": ("MASK",),
                "fixed_width": ("INT", {"default": 512, "min": 1, "max": 4096, "step": 1}),
                "fixed_height": ("INT", {"default": 512, "min": 1, "max": 4096, "step": 1}),
            }
        }

    RETURN_TYPES = ("IMAGE",)
    RETURN_NAMES = ("image",)
    FUNCTION = "blend_resize"
    CATEGORY = "YCNode/Image"

    def blend_resize(self, background_image, layer_image, blend_mode, resize_mode,
                     scale, x_pos, y_pos, rotation, opacity, invert_mask,
                     layer_mask=None, fixed_width=None, fixed_height=None):
        """Blend each layer onto each background, batch-wise.

        Shorter batches are padded by repeating their last element.
        NOTE(review): invert_mask is only applied when layer_mask is
        supplied, not to alpha-channel-derived masks — confirm intent.
        """
        b_images = []
        l_images = []
        l_masks = []
        ret_images = []

        # Background batch -> list of [1, H, W, C] tensors
        for b in background_image:
            b_images.append(torch.unsqueeze(b, 0))

        # Layer batch; derive a mask from the alpha channel when present,
        # otherwise a solid white mask
        for l in layer_image:
            l_images.append(torch.unsqueeze(l, 0))
            m = tensor2pil(l)
            if m.mode == 'RGBA':
                l_masks.append(m.split()[-1])
            else:
                l_masks.append(Image.new('L', m.size, 'white'))

        # Explicit masks override the alpha-derived ones
        if layer_mask is not None:
            if layer_mask.dim() == 2:
                layer_mask = torch.unsqueeze(layer_mask, 0)
            l_masks = []
            for m in layer_mask:
                if invert_mask:
                    m = 1 - m
                l_masks.append(tensor2pil(torch.unsqueeze(m, 0)).convert('L'))

        # Largest batch drives the loop
        max_batch = max(len(b_images), len(l_images), len(l_masks))

        for i in range(max_batch):
            # Pick this batch's items, repeating the last when exhausted
            bg_tensor = b_images[i] if i < len(b_images) else b_images[-1]
            fg_tensor = l_images[i] if i < len(l_images) else l_images[-1]
            curr_mask = l_masks[i] if i < len(l_masks) else l_masks[-1]

            # To PIL
            bg_pil = tensor2pil(bg_tensor)
            fg_pil = tensor2pil(fg_tensor)

            # Resize/rotate/position the layer on a canvas the size of bg
            fg_resized, (x, y, w, h) = resize_and_position(
                fg_pil, bg_pil.size, resize_mode, x_pos, y_pos, rotation, scale,
                fixed_width, fixed_height
            )

            # Keep the mask in step with the layer transform
            curr_mask = curr_mask.resize(fg_pil.size, Image.LANCZOS)
            if rotation != 0:
                curr_mask = curr_mask.rotate(rotation, Image.BICUBIC, expand=True)
            curr_mask = curr_mask.resize((w, h), Image.LANCZOS)

            # Full-canvas mask with the layer mask pasted at its position
            full_mask = Image.new('L', bg_pil.size, 0)
            full_mask.paste(curr_mask, (x, y))

            # Blend, then composite through the mask
            result_array = blend_images(bg_pil, fg_resized, blend_mode, opacity)
            result_pil = Image.fromarray(result_array)

            # Apply the mask
            bg_pil.paste(result_pil, (0, 0), mask=full_mask)

            # Back to tensor
            ret_images.append(pil2tensor(bg_pil))

        # Concatenate the batch
        return (torch.cat(ret_images, dim=0),)

# Node registration
NODE_CLASS_MAPPINGS = {
    "ImageBlendResize": ImageBlendResize
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "ImageBlendResize": "Image Blend Resize"
}
--------------------------------------------------------------------------------
/py/image_selector.py:
--------------------------------------------------------------------------------
import torch
import numpy as np
import random

class ImageSelector:
    """Select one of up to ten named image inputs by name.

    Ten fixed image/name port pairs; the output is chosen via
    `selected_name`, which can be typed in or fed from another node.
    """

    def __init__(self):
        self._valid_names = []  # cache of names with a connected image

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {},
            "optional": {
                # Name of the image to output; direct entry or input link
                "selected_name": ("STRING", {"default": "image1"}),
                "image1": ("IMAGE",),
                "name1": ("STRING", {"default": "image1"}),
                "image2": ("IMAGE",),
                "name2": ("STRING", {"default": "image2"}),
                "image3": ("IMAGE",),
                "name3": ("STRING", {"default": "image3"}),
                "image4": ("IMAGE",),
                "name4": ("STRING", {"default": "image4"}),
                "image5": ("IMAGE",),
                "name5": ("STRING", {"default": "image5"}),
                "image6": ("IMAGE",),
                "name6": ("STRING", {"default": "image6"}),
                "image7": ("IMAGE",),
                "name7": ("STRING", {"default": "image7"}),
                "image8": ("IMAGE",),
                "name8": ("STRING", {"default": "image8"}),
                "image9": ("IMAGE",),
                "name9": ("STRING", {"default": "image9"}),
                "image10": ("IMAGE",),
                "name10": ("STRING", {"default": "image10"})
            }
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "select_image"
    CATEGORY = "YCNode/Image"

    def select_image(self, **kwargs) -> tuple:
        """Return the image whose paired name matches `selected_name`."""
        try:
            # Map name -> image for every connected port
            images = {}
            self._valid_names = []

            for i in range(1, 11):
                image_key = f"image{i}"
                name_key = f"name{i}"
                if image_key in kwargs and name_key in kwargs and kwargs[image_key] is not None:
                    name = kwargs[name_key].strip()
                    images[name] = kwargs[image_key]
                    self._valid_names.append(name)

            # At least one image must be connected
            if not images:
                raise ValueError("没有有效的图像输入")

            # Default to the first valid name when none is given
            selected_name = kwargs.get("selected_name")
            if selected_name is None or not selected_name.strip():
                selected_name = self._valid_names[0]
            else:
                selected_name = selected_name.strip()

            # The requested name must exist
            if selected_name not in images:
                raise ValueError(f"未找到名称: {selected_name}")

            # Return the chosen image, batched
            selected_image = images[selected_name]
            if len(selected_image.shape) == 3:  # single image: add batch dim
                selected_image = selected_image.unsqueeze(0)
            return (selected_image,)

        except Exception as e:
            raise ValueError(f"图像选择失败: {str(e)}")

    @classmethod
    def IS_CHANGED(cls, **kwargs):
        return float("NaN")  # always re-run

    @classmethod
    def VALIDATE_INPUTS(cls, **kwargs):
        """Pre-flight check: selected_name must be among the provided names."""
        valid_names = []
        for i in range(1, 11):
            name_key = f"name{i}"
            if name_key in kwargs:
                valid_names.append(kwargs[name_key].strip())

        # No image inputs at all
        if not valid_names:
            return "至少需要一个有效的图像输入"

        # Validate selected_name when present
        if "selected_name" in kwargs and kwargs["selected_name"]:
            selected_name = kwargs["selected_name"].strip()
            if selected_name and selected_name not in valid_names:
                return f"选择的名称 '{selected_name}' 不在有效名称列表中"

        return True

class ImageBatchSelector:
    """Pick a single image out of a batch by a comma-separated name list.

    `names` must contain exactly one name per batch entry; `selected_name`
    chooses which entry is output.
    """

    def __init__(self):
        self._name_list = []  # cached parsed name list

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "images": ("IMAGE",),  # batched image input
                # Comma-separated names, one per batch entry
                "names": ("STRING", {"multiline": True, "default": "image1,image2,image3"}),
            },
            "optional": {
                # Name of the entry to output; direct entry or input link
                "selected_name": ("STRING", {"default": "image1"})
            }
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "select_image"
    CATEGORY = "YCNode/Image"

    def select_image(self, images: torch.Tensor, names: str, selected_name: str = None) -> tuple:
        """Return the batch entry whose name equals `selected_name`."""
        try:
            # Parse the comma-separated name list
            self._name_list = [name.strip() for name in names.split(",") if name.strip()]

            # Validate inputs
            if len(self._name_list) == 0:
                raise ValueError("名称列表不能为空")

            if len(self._name_list) != images.shape[0]:
                raise ValueError(f"图像数量({images.shape[0]})与名称数量({len(self._name_list)})不匹配")

            # Default to the first name when none is supplied
            if selected_name is None or not selected_name.strip():
                selected_name = self._name_list[0]

            # Map name -> batch entry
            image_dict = {name: images[i] for i, name in enumerate(self._name_list)}

            # The requested name must exist
            if selected_name.strip() not in image_dict:
                raise ValueError(f"未找到名称: {selected_name}")

            # Return the chosen image, batched
            selected_image = image_dict[selected_name.strip()]
            if len(selected_image.shape) == 3:  # single image: add batch dim
                selected_image = selected_image.unsqueeze(0)
            return (selected_image,)

        except Exception as e:
            raise ValueError(f"图像选择失败: {str(e)}")

    @classmethod
    def IS_CHANGED(cls, **kwargs):
        return float("NaN")  # always re-run

    @classmethod
    def VALIDATE_INPUTS(cls, **kwargs):
        """Pre-flight check: names non-empty and selected_name among them."""
        if not kwargs["names"].strip():
            return "名称列表不能为空"

        # Validate selected_name when present
        if "selected_name" in kwargs and kwargs["selected_name"]:
            name_list = [name.strip() for name in kwargs["names"].split(",") if name.strip()]
            selected_name = kwargs["selected_name"].strip()
            if selected_name and selected_name not in name_list:
                return f"选择的名称 '{selected_name}' 不在名称列表中"

        return True

class AdvancedImageSelector:
    """Advanced image selector with random / cyclic / index modes.

    Supports a seed for reproducible random picks and a per-instance
    counter for the cyclic modes; ten optional image inputs.
    """

    def __init__(self):
        self._valid_images = []  # cache of (index, image) pairs
        self._counter = 0        # counter for the cyclic modes (per instance)

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                # Modes: random / increment cycle / decrement cycle / by index
                "mode": (["随机", "递增循环", "递减循环", "索引选择"], {"default": "随机"}),
                "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                "index": ("INT", {"default": 0, "min": 0, "max": 9}),
            },
            "optional": {
                "image1": ("IMAGE",),
                "image2": ("IMAGE",),
                "image3": ("IMAGE",),
                "image4": ("IMAGE",),
                "image5": ("IMAGE",),
                "image6": ("IMAGE",),
                "image7": ("IMAGE",),
                "image8": ("IMAGE",),
                "image9": ("IMAGE",),
                "image10": ("IMAGE",),
            }
        }

    RETURN_TYPES = ("IMAGE", "INT", "STRING")
    RETURN_NAMES = ("image", "selected_index", "info")
    FUNCTION = "select_image"
    CATEGORY = "YCNode/Image"

    def select_image(self, mode, seed, index, **kwargs) -> tuple:
        """Select an image per `mode`; returns (image, index, info string)."""
        try:
            # Collect connected images as (0-based index, image) pairs
            self._valid_images = []
            for i in range(1, 11):
                image_key = f"image{i}"
                if image_key in kwargs and kwargs[image_key] is not None:
                    self._valid_images.append((i-1, kwargs[image_key]))

            # At least one image must be connected
            if not self._valid_images:
                raise ValueError("没有有效的图像输入")

            # Dispatch on mode
            if mode == "随机":
                # Seeded RNG for reproducibility
                rng = random.Random(seed)
                selected_idx, selected_image = rng.choice(self._valid_images)
                info = f"随机选择 - 索引:{selected_idx}, 种子:{seed}"

            elif mode == "递增循环":
                # Next available image
                selected_idx, selected_image = self._valid_images[self._counter % len(self._valid_images)]
                self._counter = (self._counter + 1) % len(self._valid_images)
                info = f"递增循环 - 索引:{selected_idx}, 计数:{self._counter}"

            elif mode == "递减循环":
                # Previous available image
                selected_idx, selected_image = self._valid_images[self._counter % len(self._valid_images)]
                self._counter = (self._counter - 1) % len(self._valid_images)
                # NOTE(review): Python's % never yields a negative here, so
                # this guard looks unreachable — confirm.
                if self._counter < 0:
                    self._counter = len(self._valid_images) - 1
                info = f"递减循环 - 索引:{selected_idx}, 计数:{self._counter}"

            elif mode == "索引选择":
                # Direct selection by index
                matching_images = [img for idx, img in self._valid_images if idx == index]
                if not matching_images:
                    # Requested index missing: pick the nearest valid one
                    distances = [abs(idx - index) for idx, _ in self._valid_images]
                    min_distance_idx = distances.index(min(distances))
                    selected_idx, selected_image = self._valid_images[min_distance_idx]
                    info = f"索引选择(近似) - 请求:{index}, 实际:{selected_idx}"
                else:
                    selected_image = matching_images[0]
                    selected_idx = index
                    info = f"索引选择 - 索引:{selected_idx}"

            # Return the chosen image, batched
            if len(selected_image.shape) == 3:  # single image: add batch dim
                selected_image = selected_image.unsqueeze(0)
            return (selected_image, selected_idx, info)

        except Exception as e:
            raise ValueError(f"图像选择失败: {str(e)}")

    @classmethod
    def IS_CHANGED(cls, **kwargs):
        # In random mode, re-run only when the seed changes
        if kwargs.get("mode", "") == "随机":
            return kwargs.get("seed", 0)
        return float("NaN")  # other modes always re-run

# Node class mappings
NODE_CLASS_MAPPINGS = {
    "ImageSelector": ImageSelector,
    "ImageBatchSelector": ImageBatchSelector,
    "AdvancedImageSelector": AdvancedImageSelector
}

# Node display-name mappings
NODE_DISPLAY_NAME_MAPPINGS = {
    "ImageSelector": "Image Selector",
    "ImageBatchSelector": "Image Batch Selector",
    "AdvancedImageSelector": "AD Image Selector"
}
--------------------------------------------------------------------------------
/py/logic.py:
--------------------------------------------------------------------------------
import torch
import os
import json
import numpy as np
from PIL import Image
import folder_paths
import re
import inspect

MAX_FLOW_NUM = 12  # extended to 12 input ports
lazy_options =
{"lazy": True} # 懒加载选项 12 | 13 | # 创建通用类型处理容器 14 | class AllContainer: 15 | def __contains__(self, item): 16 | return True 17 | 18 | def __getitem__(self, key): 19 | if key.startswith("input"): 20 | return "*", {"lazy": True, "forceInput": True} 21 | elif key == "cascade_input": 22 | return "*", {"lazy": True, "forceInput": True, "tooltip": "级联输入,通常连接到上一个选择器的输出"} 23 | elif key == "chain_input": 24 | return "*", {"lazy": True, "forceInput": True, "tooltip": "来自下一个选择器的链接输入"} 25 | else: 26 | return "*", {"lazy": True} 27 | 28 | # 检查是否支持高级模型执行 29 | def is_advanced_model_supported(): 30 | try: 31 | stack = inspect.stack() 32 | if stack[2].function == 'get_input_info': 33 | return True 34 | return False 35 | except: 36 | return False 37 | 38 | class textIndexSwitch: 39 | def __init__(self): 40 | pass 41 | 42 | @classmethod 43 | def INPUT_TYPES(cls): 44 | inputs = { 45 | "required": { 46 | "index": ("INT", {"default": 0, "min": 0, "max": MAX_FLOW_NUM-1, "step": 1}), 47 | }, 48 | "optional": { 49 | } 50 | } 51 | # 动态添加12个文本输入端口 52 | for i in range(MAX_FLOW_NUM): 53 | inputs["optional"]["text%d" % i] = ("STRING", {**lazy_options, "forceInput": True}) 54 | return inputs 55 | 56 | RETURN_TYPES = ("STRING",) 57 | RETURN_NAMES = ("text",) 58 | FUNCTION = "index_switch" 59 | 60 | CATEGORY = "YCNode/Logic" 61 | 62 | def check_lazy_status(self, index, **kwargs): 63 | key = "text%d" % index 64 | if kwargs.get(key, None) is None: 65 | return [key] 66 | 67 | def index_switch(self, index, **kwargs): 68 | key = "text%d" % index 69 | # 如果对应索引的输入端口未连接,返回空字符串 70 | return (kwargs.get(key, ""),) 71 | 72 | class TextConditionSwitch: 73 | """ 74 | 文本条件判断器 - 根据输入文本是否与预设文本相匹配来选择输出A或B图像 75 | """ 76 | 77 | @classmethod 78 | def INPUT_TYPES(cls): 79 | return { 80 | "required": { 81 | "input_text": ("STRING", {"default": "", "multiline": False, "tooltip": "输入文本,将与预设文本比较"}), 82 | "preset_text": ("STRING", {"default": "", "multiline": False, "tooltip": "预设文本,用于与输入文本比较"}), 83 | "case_sensitive": 
("BOOLEAN", {"default": True, "tooltip": "是否区分大小写"}), 84 | }, 85 | "optional": { 86 | "image_a": ("IMAGE", {"tooltip": "当输入文本与预设文本匹配时输出的图像"}), 87 | "image_b": ("IMAGE", {"tooltip": "当输入文本与预设文本不匹配时输出的图像"}), 88 | } 89 | } 90 | 91 | RETURN_TYPES = ("IMAGE", "BOOLEAN",) 92 | RETURN_NAMES = ("image", "is_matched",) 93 | FUNCTION = "condition_switch" 94 | CATEGORY = "YCNode/Logic" 95 | 96 | def condition_switch(self, input_text, preset_text, case_sensitive, image_a=None, image_b=None): 97 | # 判断文本是否匹配 98 | if case_sensitive: 99 | is_matched = input_text == preset_text 100 | else: 101 | is_matched = input_text.lower() == preset_text.lower() 102 | 103 | # 根据匹配结果选择输出图像 104 | if is_matched: 105 | output_image = image_a if image_a is not None else None 106 | else: 107 | output_image = image_b if image_b is not None else None 108 | 109 | # 如果没有图像可用,返回空图像 110 | if output_image is None: 111 | # 创建1x1的黑色图像 112 | empty_image = torch.zeros(1, 1, 1, 3) 113 | return (empty_image, is_matched) 114 | 115 | return (output_image, is_matched) 116 | 117 | class extractNumberFromText: 118 | @classmethod 119 | def INPUT_TYPES(cls): 120 | return { 121 | "required": { 122 | "text": ("STRING", {"default": "", "multiline": False}), 123 | "default_value": ("INT", {"default": 0}), # 当未找到数字时的默认值 124 | } 125 | } 126 | 127 | RETURN_TYPES = ("INT",) 128 | RETURN_NAMES = ("number",) 129 | FUNCTION = "extract" 130 | CATEGORY = "YCNode/Logic" 131 | 132 | def extract(self, text, default_value): 133 | # 使用正则表达式匹配文本开头的数字 134 | match = re.match(r'^\d+', text.strip()) 135 | if match: 136 | # 如果找到数字,转换为整数返回 137 | return (int(match.group()),) 138 | else: 139 | # 如果未找到数字,返回默认值 140 | return (default_value,) 141 | 142 | class SuperIndexSelector: 143 | """ 144 | LoRA选择器 - 专为LoRA切换设计的专用选择器 145 | 146 | 特点: 147 | - 直观的"模式"选择,比索引值更容易理解 148 | - 只有10个LoRA输入端口,设计清晰 149 | - 专门的级联输入端口,支持串联多个选择器 150 | - 惰性加载,只加载被选择的LoRA,节省内存 151 | 152 | 使用方法: 153 | 1. 模式="选择LoRA"时,可以使用index选择input0-input9连接的LoRA 154 | 2. 
模式="使用级联"时,会使用cascade_input作为输出 155 | 3. 将第一个选择器的输出连接到第二个选择器的cascade_input 156 | 4. 第一个选择器设为"选择LoRA",第二个设为"使用级联" 157 | 158 | 可以无限级联多个选择器,管理大量LoRA模型 159 | """ 160 | 161 | def __init__(self): 162 | pass 163 | 164 | @classmethod 165 | def INPUT_TYPES(cls): 166 | inputs = { 167 | "required": { 168 | "mode": (["选择LoRA", "使用级联"], {"default": "选择LoRA", "tooltip": "选择模式:选择LoRA使用index选择输入端口,使用级联则从级联输入获取LoRA"}), 169 | "index": ("INT", {"default": 0, "min": 0, "max": 9, "step": 1, "tooltip": "在'选择LoRA'模式下,选择0-9号LoRA"}), 170 | }, 171 | "optional": {} 172 | } 173 | 174 | # 使用通用容器绕过类型验证 175 | if is_advanced_model_supported(): 176 | inputs["optional"] = AllContainer() 177 | else: 178 | # 添加10个普通输入端口和级联输入端口 179 | inputs["optional"]["cascade_input"] = ("*", {"forceInput": True, "tooltip": "级联输入,通常连接到上一个选择器的输出"}) 180 | for i in range(10): 181 | inputs["optional"]["input%d" % i] = ("*", {**lazy_options, "forceInput": True, "tooltip": f"LoRA输入 #{i+1}"}) 182 | 183 | return inputs 184 | 185 | RETURN_TYPES = ("*",) 186 | RETURN_NAMES = ("selected_lora",) 187 | FUNCTION = "select_lora" 188 | CATEGORY = "YCNode/LoRA" 189 | 190 | def check_lazy_status(self, mode, index, **kwargs): 191 | if mode == "使用级联": 192 | if kwargs.get("cascade_input", None) is None: 193 | return ["cascade_input"] 194 | else: # 选择LoRA模式 195 | key = "input%d" % index 196 | if kwargs.get(key, None) is None: 197 | return [key] 198 | 199 | def select_lora(self, mode, index, **kwargs): 200 | if mode == "使用级联": 201 | if "cascade_input" in kwargs and kwargs["cascade_input"] is not None: 202 | print(f"使用级联LoRA") 203 | return (kwargs["cascade_input"],) 204 | else: 205 | print(f"警告:级联模式已选择但级联输入未连接") 206 | return (None,) 207 | else: # 选择LoRA模式 208 | key = "input%d" % index 209 | if key not in kwargs or kwargs[key] is None: 210 | print(f"警告:索引 {index} 未连接任何LoRA") 211 | return (None,) 212 | print(f"已选择LoRA #{index}") 213 | return (kwargs[key],) 214 | 215 | @classmethod 216 | def IS_CHANGED(cls, **kwargs): 217 | return float("NaN") 
class DynamicThreshold:
    """Map an integer input to a preset FLOAT threshold via a step function.

    Each row of ``_THRESHOLDS`` holds the inclusive upper bound of a range
    and the value returned for inputs in that range; ranges are contiguous
    starting at 1.  Inputs above the last bound return ``_FALLBACK``.
    """

    # (inclusive upper bound, output value).  The table extends past the
    # widget's max of 1000 because the original logic handled up to 1600+.
    _THRESHOLDS = (
        (150, 7.0), (200, 5.3), (250, 4.5), (300, 3.8), (350, 3.3),
        (400, 2.9), (450, 2.8), (500, 2.6), (550, 2.2), (600, 2.2),
        (650, 1.8), (700, 1.7), (750, 1.6), (800, 1.5), (900, 1.4),
        (950, 1.3), (1000, 1.2), (1100, 1.1), (1200, 1.0), (1400, 0.9),
        (1500, 0.8), (1600, 0.75),
    )
    _FALLBACK = 0.6  # anything above the last bound

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "input_value": ("INT", {"default": 1, "min": 1, "max": 1000, "step": 1}),
            }
        }

    RETURN_TYPES = ("FLOAT",)
    FUNCTION = "compare"
    CATEGORY = "YCNode/Logic"

    def compare(self, input_value):
        """Return the threshold for ``input_value`` as a 1-tuple of float.

        Bug fix: the 1101-1200 range previously returned the int ``1``
        even though RETURN_TYPES declares FLOAT; it now returns ``1.0``.
        """
        for upper_bound, value in self._THRESHOLDS:
            if input_value <= upper_bound:
                return (value,)
        return (self._FALLBACK,)
class MaskConditionSwitch:
    """Mask condition switch.

    Compares ``input_text`` against ``preset_text`` and routes ``mask_a``
    (on match) or ``mask_b`` (on mismatch) to the output, together with
    the boolean comparison result.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "input_text": ("STRING", {"default": "", "multiline": False, "tooltip": "输入文本,将与预设文本比较"}),
                "preset_text": ("STRING", {"default": "", "multiline": False, "tooltip": "预设文本,用于与输入文本比较"}),
                "case_sensitive": ("BOOLEAN", {"default": True, "tooltip": "是否区分大小写"}),
            },
            "optional": {
                "mask_a": ("MASK", {"tooltip": "当输入文本与预设文本匹配时输出的遮罩"}),
                "mask_b": ("MASK", {"tooltip": "当输入文本与预设文本不匹配时输出的遮罩"}),
            }
        }

    RETURN_TYPES = ("MASK", "BOOLEAN",)
    RETURN_NAMES = ("mask", "is_matched",)
    FUNCTION = "condition_switch"
    CATEGORY = "YCNode/Logic"

    def condition_switch(self, input_text, preset_text, case_sensitive, mask_a=None, mask_b=None):
        """Return ``(mask, is_matched)``.

        On a match ``mask_a`` is selected, otherwise ``mask_b``.  If the
        selected input is not connected, a blank mask is returned so that
        downstream nodes still receive a valid MASK.
        """
        # Text comparison, optionally case-insensitive.
        if case_sensitive:
            is_matched = input_text == preset_text
        else:
            is_matched = input_text.lower() == preset_text.lower()

        # Route the mask according to the comparison result.
        output_mask = mask_a if is_matched else mask_b

        if output_mask is None:
            # Bug fix: ComfyUI MASK tensors are 3-D (batch, height, width);
            # the previous torch.zeros(1, 1, 1, 1) placeholder was 4-D and
            # broke downstream mask consumers.  Use the conventional
            # 1x64x64 empty mask instead.
            empty_mask = torch.zeros(1, 64, 64)
            return (empty_mask, is_matched)

        return (output_mask, is_matched)
| return { 353 | "required": { 354 | "input_text": ("STRING", {"default": "", "multiline": False, "tooltip": "输入文本,将与预设文本比较"}), 355 | "preset_text": ("STRING", {"default": "", "multiline": False, "tooltip": "预设文本,用于与输入文本比较"}), 356 | "case_sensitive": ("BOOLEAN", {"default": True, "tooltip": "是否区分大小写"}), 357 | "invert": ("BOOLEAN", {"default": False, "tooltip": "反转逻辑:选择True时,不匹配则通过;选择False时,匹配则通过"}), 358 | }, 359 | "optional": { 360 | "input": ("*", {"tooltip": "任意类型的输入,当条件匹配时将被传递到输出"}) 361 | } 362 | } 363 | 364 | RETURN_TYPES = ("*", "BOOLEAN",) 365 | RETURN_NAMES = ("output", "is_matched",) 366 | FUNCTION = "gate_control" 367 | CATEGORY = "YCNode/Logic" 368 | 369 | def check_lazy_status(self, input_text, preset_text, case_sensitive, invert, **kwargs): 370 | # 检查输入端口的连接状态 371 | if "input" not in kwargs or kwargs["input"] is None: 372 | return ["input"] 373 | return None 374 | 375 | def gate_control(self, input_text, preset_text, case_sensitive, invert, **kwargs): 376 | # 判断文本是否匹配 377 | if case_sensitive: 378 | is_matched = input_text == preset_text 379 | else: 380 | is_matched = input_text.lower() == preset_text.lower() 381 | 382 | # 如果设置了反转,则翻转匹配结果 383 | should_pass = is_matched if not invert else not is_matched 384 | 385 | # 检查是否有输入 386 | if "input" not in kwargs or kwargs["input"] is None: 387 | print("警告:通用条件开关 - 未连接输入") 388 | # 返回None和匹配状态 389 | return (None, is_matched) 390 | 391 | # 根据匹配结果决定是否传递输入 392 | if should_pass: 393 | return (kwargs["input"], is_matched) 394 | else: 395 | # 不匹配时返回None 396 | return (None, is_matched) 397 | 398 | # 节点映射 399 | NODE_CLASS_MAPPINGS = { 400 | "YC Text Index Switch": textIndexSwitch, 401 | "YC Extract Number": extractNumberFromText, 402 | "YC Super Selector": SuperIndexSelector, 403 | "DynamicThreshold": DynamicThreshold, 404 | "YC Text Condition Switch": TextConditionSwitch, 405 | "YC Mask Condition Switch": MaskConditionSwitch, 406 | "YC Universal Gate": UniversalConditionGate, 407 | } 408 | 409 | # 节点显示名称映射 410 | 
class MaskSmartValleySplit:
    """Smart mask splitter based on valley detection and pattern analysis.

    Finds the "valley" (narrow column gap) between two dominant white
    regions of a mask and cuts the mask apart there by zeroing a thin
    vertical strip.  If the mask is already disconnected it is returned
    unchanged.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "mask": ("MASK",),
                # Relative depth a dip must reach to count as a valley.
                # NOTE(review): currently threaded through but unused by
                # detect_valley — see below.
                "valley_depth_threshold": ("FLOAT", {"default": 0.5, "min": 0.1, "max": 1.0, "step": 0.1}),
                # Window used when analysing the valley's shape.
                # NOTE(review): currently unused by analyze_pattern.
                "pattern_check_range": ("INT", {"default": 10, "min": 5, "max": 50, "step": 1}),
                # Width (in pixels) of the vertical strip zeroed at the cut.
                "cut_width": ("INT", {"default": 2, "min": 1, "max": 10, "step": 1}),
            }
        }

    RETURN_TYPES = ("MASK",)
    FUNCTION = "smart_split"
    CATEGORY = "YCNode/Mask"

    def check_connectivity(self, mask):
        """Return True if the mask already contains 2+ separate blobs."""
        mask_np = mask.cpu().numpy()
        # Ensure the mask is 2D.
        if len(mask_np.shape) > 2:
            mask_np = mask_np.squeeze()
        # connectedComponents counts the background as label 0, so one
        # foreground blob yields 2 labels; > 2 means already separated.
        num_labels, _ = cv2.connectedComponents(mask_np.astype(np.uint8))
        return num_labels > 2

    def find_major_regions(self, mask_np):
        """Find the column ranges of the two largest white regions.

        Returns ``[region1, region2]`` as (start, end) column index pairs,
        or ``None`` when fewer than two significant regions exist or they
        are closer than 2 columns apart.
        """
        # Ensure the mask is 2D.
        if len(mask_np.shape) > 2:
            mask_np = mask_np.squeeze()

        # Per-column sum of pixel values.
        col_sums = np.sum(mask_np, axis=0)

        # Threshold at 30% of the tallest column to find significant ones.
        threshold = np.max(col_sums) * 0.3
        significant_cols = col_sums > threshold

        # Collect runs of consecutive significant columns.
        regions = []
        start = None
        for i, is_significant in enumerate(significant_cols):
            if is_significant and start is None:
                start = i
            elif not is_significant and start is not None:
                regions.append((start, i))
                start = None

        if start is not None:
            regions.append((start, len(significant_cols)))

        # Sort by width and keep the two largest regions.
        if not regions:
            return None

        regions.sort(key=lambda x: x[1] - x[0], reverse=True)
        if len(regions) < 2:
            return None

        # Require at least a 2-pixel gap between the two regions.
        region1, region2 = regions[:2]
        if abs(region1[1] - region2[0]) < 2:
            return None

        return [region1, region2]

    def find_largest_valley(self, col_sums):
        """Find the most prominent dip between two peaks of ``col_sums``.

        Returns a (start, end) index pair into the smoothed profile, or
        ``None`` if fewer than two qualifying peaks exist.
        """
        # Smooth with a sliding window to suppress noise.
        window_size = 5
        smoothed_sums = np.convolve(col_sums, np.ones(window_size)/window_size, mode='valid')

        # Global statistics of the smoothed profile.
        global_max = np.max(smoothed_sums)
        global_min = np.min(smoothed_sums)
        global_mean = np.mean(smoothed_sums)

        # Collect all local maxima that rise above half the mean.
        peaks = []
        for i in range(1, len(smoothed_sums)-1):
            if smoothed_sums[i] > smoothed_sums[i-1] and smoothed_sums[i] > smoothed_sums[i+1]:
                # Deliberately low bar so shallow peaks still qualify.
                if smoothed_sums[i] > global_mean * 0.5:
                    peaks.append((i, smoothed_sums[i]))

        if len(peaks) < 2:
            return None

        # Tallest peaks first.
        peaks.sort(key=lambda x: x[1], reverse=True)

        # Score the region between every pair of peaks and keep the best.
        max_valley_score = 0
        max_valley_range = None

        for i in range(len(peaks)):
            for j in range(i+1, len(peaks)):
                left_peak = peaks[i]
                right_peak = peaks[j]

                # Span between the two peaks (in smoothed-index space).
                start_idx = min(left_peak[0], right_peak[0])
                end_idx = max(left_peak[0], right_peak[0])

                region = smoothed_sums[start_idx:end_idx]
                if len(region) == 0:  # skip empty spans only
                    continue

                region_min = np.min(region)
                region_min_idx = start_idx + np.argmin(region)

                # 1. Depth score: how deep the dip is vs the lower peak.
                peak_height = min(left_peak[1], right_peak[1])
                depth = peak_height - region_min
                depth_score = depth / global_max

                # 2. Contrast score: dip vs the mean of the two peaks.
                contrast = (left_peak[1] + right_peak[1])/2 - region_min
                contrast_score = contrast / global_max

                # Weighted combination favouring depth over contrast.
                valley_score = depth_score * 3.0 + contrast_score * 2.0

                # Bonus multiplier for clearly pronounced dips.
                if depth_score > 0.15:
                    valley_score *= 1.5

                if valley_score > max_valley_score:
                    max_valley_score = valley_score
                    max_valley_range = (start_idx, end_idx)

        return max_valley_range

    def detect_valley(self, mask, threshold):
        """Detect the valley between the two major regions of ``mask``.

        Tries the region-based approach first, then falls back to the
        peak-pair valley search.  ``threshold`` is accepted but not used
        by the current implementation.
        """
        mask_np = mask.cpu().numpy()
        if len(mask_np.shape) > 2:
            mask_np = mask_np.squeeze()

        # Preferred path: gap between the two dominant regions.
        major_regions = self.find_major_regions(mask_np)
        if major_regions:
            # Sort regions left-to-right by start column.
            major_regions.sort(key=lambda x: x[0])
            valley_start = major_regions[0][1]  # end of the first region
            valley_end = major_regions[1][0]    # start of the second region

            # Accept only a valid gap of at least 2 columns.
            if valley_end > valley_start and valley_end - valley_start >= 2:
                return valley_start, valley_end

        # Fallback: search for the most prominent dip in the profile.
        col_sums = np.sum(mask_np, axis=0)
        valley_range = self.find_largest_valley(col_sums)

        return valley_range

    def analyze_pattern(self, mask, valley_range, check_range):
        """Analyse the valley's shape and pick the cut column.

        Returns ``(is_valley_pattern, cut_pos)``.  If the valley profile
        is low-high-low, the cut goes at its centre; otherwise at the
        narrowest column.  ``check_range`` is accepted but not used by
        the current implementation.
        """
        if valley_range is None:
            return False, None

        start, end = valley_range
        if end <= start or end - start < 2:
            return False, None

        mask_np = mask.cpu().numpy()
        if len(mask_np.shape) > 2:
            mask_np = mask_np.squeeze()

        # Column profile restricted to the valley.
        col_sums = np.sum(mask_np[:, start:end], axis=0)
        if len(col_sums) < 3:  # need enough data to analyse
            return False, None

        # Smoothing window clamped to 3..7 and to the valley width.
        window_size = max(3, min((end - start) // 5, 7))
        if len(col_sums) < window_size:  # too few points: shrink window
            window_size = len(col_sums)
            if window_size % 2 == 0:
                window_size -= 1
            if window_size < 3:
                return False, start + np.argmin(col_sums)  # cut at the minimum

        smoothed_sums = np.convolve(col_sums, np.ones(window_size)/window_size, mode='valid')
        if len(smoothed_sums) < 3:  # not enough data after smoothing
            return False, start + np.argmin(col_sums)

        # Split the smoothed profile into thirds to judge its trend.
        third = max(1, len(smoothed_sums) // 3)
        first_third = smoothed_sums[:third]
        middle_third = smoothed_sums[third:2*third]
        last_third = smoothed_sums[2*third:]

        # All three parts must be non-empty.
        if len(first_third) == 0 or len(middle_third) == 0 or len(last_third) == 0:
            return False, start + np.argmin(col_sums)

        # Low-high-low pattern: the middle rises above both ends.
        is_valley_pattern = (np.mean(middle_third) > np.mean(first_third) and
                            np.mean(middle_third) > np.mean(last_third))

        if is_valley_pattern:
            # Cut at the centre of the valley.
            cut_pos = start + len(col_sums) // 2
        else:
            # Cut at the narrowest column.
            cut_pos = start + np.argmin(col_sums)

        return is_valley_pattern, cut_pos

    def execute_cut(self, mask, cut_position, cut_width):
        """Zero a vertical strip of width ``cut_width`` at ``cut_position``."""
        if cut_position is None:
            return mask

        result = mask.clone()
        half_width = cut_width // 2
        cut_start = max(0, cut_position - half_width)
        cut_end = min(mask.shape[1], cut_position + half_width + 1)
        result[:, cut_start:cut_end] = 0
        return result

    def smart_split(self, mask, valley_depth_threshold, pattern_check_range, cut_width):
        """Main entry point: split the mask at its detected valley."""
        # Drop a leading batch dimension of size 1 so we work in 2D.
        if len(mask.shape) == 3 and mask.shape[0] == 1:
            mask = mask.squeeze(0)

        # Binarise the mask.
        binary_mask = (mask > 0).float()

        # Already disconnected: nothing to do.
        if self.check_connectivity(binary_mask):
            return (binary_mask,)

        # Locate the valley between the two regions.
        valley_range = self.detect_valley(binary_mask, valley_depth_threshold)

        # No valley found: return the original mask.
        if valley_range is None:
            return (binary_mask,)

        # Analyse the valley and choose the cut column.
        has_pattern, cut_position = self.analyze_pattern(
            binary_mask, valley_range, pattern_check_range
        )

        # Perform the cut.
        result = self.execute_cut(binary_mask, cut_position, cut_width)

        # Restore the batch dimension expected by downstream nodes.
        if len(result.shape) == 2:
            result = result.unsqueeze(0)

        return (result,)
    def convert_to_ellipse(self, mask, keep_region="largest", fill_holes="enable", smooth_edges="enable",
                          smoothing_kernel_size=5, output_mode="ellipse", expand_mask=0, blur_amount=0.0):
        """Convert an irregular mask into an ellipse / convex hull / filled shape.

        Per batch element: optionally keep only one region (by size or
        position), fill interior holes, fit the requested output shape to
        the largest contour, then optionally dilate/erode, smooth and
        blur the result.  Returns a 1-tuple with the float mask batch.
        """
        # Morphology/blur kernels must be odd-sized.
        if smoothing_kernel_size % 2 == 0:
            smoothing_kernel_size += 1

        # Flatten any extra leading dims into a batch of 2D masks.
        mask_np = mask.reshape((-1, mask.shape[-2], mask.shape[-1])).cpu().numpy()
        result_masks = []

        for m in mask_np:
            # Convert to an 8-bit grayscale image for OpenCV.
            mask_8bit = (m * 255).astype(np.uint8)

            # Region filtering (if requested).
            if keep_region != "all":
                # Find all external contours.
                all_contours, _ = cv2.findContours(mask_8bit, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

                if len(all_contours) > 1:  # only filter when several regions exist
                    filtered_mask = np.zeros_like(mask_8bit)

                    if keep_region == "largest":
                        # Keep the contour with the largest area.
                        selected_contour = max(all_contours, key=cv2.contourArea)
                        cv2.drawContours(filtered_mask, [selected_contour], 0, 255, -1)
                    else:
                        # Select a contour by its position.
                        if keep_region == "top":
                            # Topmost contour (smallest y).
                            selected_contour = min(all_contours, key=lambda c: cv2.boundingRect(c)[1])
                        elif keep_region == "bottom":
                            # Bottommost contour (largest y + height).
                            selected_contour = max(all_contours, key=lambda c: cv2.boundingRect(c)[1] + cv2.boundingRect(c)[3])
                        elif keep_region == "left":
                            # Leftmost contour (smallest x).
                            selected_contour = min(all_contours, key=lambda c: cv2.boundingRect(c)[0])
                        elif keep_region == "right":
                            # Rightmost contour (largest x + width).
                            selected_contour = max(all_contours, key=lambda c: cv2.boundingRect(c)[0] + cv2.boundingRect(c)[2])

                        cv2.drawContours(filtered_mask, [selected_contour], 0, 255, -1)

                    # Replace the original mask with the filtered one.
                    mask_8bit = filtered_mask

            # Fill interior holes (if enabled).
            if fill_holes == "enable":
                # Morphological hole filling via scipy.ndimage.
                mask_filled = ndimage.binary_fill_holes(mask_8bit > 127).astype(np.uint8) * 255
            else:
                mask_filled = mask_8bit

            # Find contours of the (possibly filled) mask.
            contours, _ = cv2.findContours(mask_filled, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

            # Start from a black output mask.
            result_mask = np.zeros_like(mask_filled)

            # Draw the requested shape if any contour was found.
            if contours and len(contours) > 0:
                # Use the largest contour.
                largest_contour = max(contours, key=cv2.contourArea)

                if output_mode == "ellipse":
                    # cv2.fitEllipse requires at least 5 points.
                    if len(largest_contour) >= 5:
                        ellipse = cv2.fitEllipse(largest_contour)
                        # Draw the filled ellipse.
                        result_mask = cv2.ellipse(result_mask, ellipse, 255, -1)
                    else:
                        # Too few points: fall back to the bounding box.
                        x, y, w, h = cv2.boundingRect(largest_contour)
                        result_mask = cv2.rectangle(result_mask, (x, y), (x+w, y+h), 255, -1)

                elif output_mode == "convex_hull":
                    # Fill the convex hull of the contour.
                    hull = cv2.convexHull(largest_contour)
                    cv2.drawContours(result_mask, [hull], 0, 255, -1)

                elif output_mode == "filled_original":
                    # Fill the original contour directly.
                    cv2.drawContours(result_mask, [largest_contour], 0, 255, -1)

            # Grow or shrink the mask.
            if expand_mask != 0:
                # Kernel size proportional to the requested pixel change.
                kernel_size = abs(expand_mask) * 2 + 1
                kernel = np.ones((kernel_size, kernel_size), np.uint8)

                if expand_mask > 0:
                    # Dilate to expand.
                    result_mask = cv2.dilate(result_mask, kernel, iterations=1)
                else:
                    # Erode to shrink.
                    result_mask = cv2.erode(result_mask, kernel, iterations=1)

            # Edge smoothing (open + close + light Gaussian blur).
            if smooth_edges == "enable" and smoothing_kernel_size > 1:
                kernel = np.ones((smoothing_kernel_size, smoothing_kernel_size), np.uint8)
                result_mask = cv2.morphologyEx(result_mask, cv2.MORPH_OPEN, kernel)
                result_mask = cv2.morphologyEx(result_mask, cv2.MORPH_CLOSE, kernel)
                result_mask = cv2.GaussianBlur(result_mask, (smoothing_kernel_size, smoothing_kernel_size), 0)

            # Extra Gaussian blur (if requested).
            if blur_amount > 0:
                # Blur kernel must be odd and at least 3.
                blur_kernel_size = max(3, int(blur_amount * 2) * 2 + 1)
                result_mask = cv2.GaussianBlur(result_mask, (blur_kernel_size, blur_kernel_size), blur_amount)

            # Back to float in [0, 1].
            result_mask = result_mask.astype(np.float32) / 255.0
            result_masks.append(result_mask)

        # Re-stack the batch as a torch tensor.
        result_tensor = torch.from_numpy(np.stack(result_masks))

        return (result_tensor,)
    def hex_to_rgb(self, hex_color):
        """Convert a '#rrggbb' hex colour string to an (r, g, b) tuple."""
        hex_color = hex_color.lstrip('#')
        return tuple(int(hex_color[i:i+2], 16) for i in (0, 2, 4))

    def generate_text_image(self, text, canvas_width, canvas_height, font_size,
                           orientation, alignment, vertical_alignment,
                           x_position, y_position, background_color, text_color,
                           background_alpha, text_alpha, font,
                           letter_spacing, word_spacing, line_spacing):
        """Render ``text`` onto a canvas and return (IMAGE, MASK) tensors.

        Supports horizontal and vertical layout, per-character letter
        spacing, word spacing, line spacing, alignment, and alpha for
        both background and text.  The mask is white where text is drawn.
        """

        # Convert colours from hex to RGB tuples.
        bg_color = self.hex_to_rgb(background_color)
        txt_color = self.hex_to_rgb(text_color)

        # Create the RGBA canvas with the requested background alpha.
        bg_color_with_alpha = bg_color + (int(background_alpha * 255),)
        image = Image.new('RGBA', (canvas_width, canvas_height), bg_color_with_alpha)
        draw = ImageDraw.Draw(image)

        # Create the text mask image (black background, white text).
        mask_image = Image.new('L', (canvas_width, canvas_height), 0)
        mask_draw = ImageDraw.Draw(mask_image)

        # Load the font from the bundled "font" directory.
        font_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "font", font)
        if not os.path.exists(font_path):
            # Font file missing: try treating the name as a system font.
            try:
                font_obj = ImageFont.truetype(font, font_size)
            except:
                font_obj = ImageFont.load_default()
                font_size = 12  # the default bitmap font is small
        else:
            font_obj = ImageFont.truetype(font_path, font_size)

        # Split the input into lines.
        lines = text.splitlines()

        # Measure text and compute positions.
        if orientation == "horizontal":
            # --- Horizontal layout ---
            line_heights = []
            line_widths = []

            # Letter spacing in pixels (relative to font size).
            letter_spacing_px = font_size * letter_spacing
            # Word spacing in pixels (extra space beyond one space glyph).
            space_width = draw.textlength(" ", font=font_obj)
            word_spacing_px = space_width * (word_spacing - 1)

            for line in lines:
                # Measure the line width including letter/word spacing.
                if any(ord(c) < 128 for c in line):  # contains ASCII characters
                    # Latin-style text: measure per word with word spacing.
                    total_width = 0
                    words = line.split()

                    for i, word in enumerate(words):
                        # Characters within a word.
                        word_width = 0
                        for j, char in enumerate(word):
                            char_width = draw.textlength(char, font=font_obj)
                            word_width += char_width
                            if j < len(word) - 1:  # not the last character
                                word_width += letter_spacing_px

                        total_width += word_width

                        # Space plus extra word spacing between words.
                        if i < len(words) - 1:  # not the last word
                            total_width += space_width + word_spacing_px

                    line_width = total_width
                else:
                    # CJK-style text: letter spacing between all characters.
                    total_width = 0
                    for i, char in enumerate(line):
                        char_width = draw.textlength(char, font=font_obj)
                        total_width += char_width
                        if i < len(line) - 1:  # not the last character
                            total_width += letter_spacing_px

                    line_width = total_width

                bbox = font_obj.getbbox(line)
                line_height = bbox[3] - bbox[1]

                line_heights.append(line_height)
                line_widths.append(line_width)

            # Total block size including line spacing.
            line_spacing_px = font_size * line_spacing
            total_height = sum(line_heights) + (len(lines) - 1) * line_spacing_px
            max_width = max(line_widths) if line_widths else 0

            # Horizontal start position from the alignment setting.
            if alignment == "left":
                x_start = x_position
            elif alignment == "center":
                x_start = x_position + (canvas_width - max_width) // 2
            else:  # right
                x_start = x_position + canvas_width - max_width

            # Vertical start position from the vertical alignment setting.
            if vertical_alignment == "top":
                y_start = y_position
            elif vertical_alignment == "middle":
                y_start = y_position + (canvas_height - total_height) // 2
            else:  # bottom
                y_start = y_position + canvas_height - total_height

            # Draw the text line by line.
            y = y_start
            for i, line in enumerate(lines):
                x = x_start

                if any(ord(c) < 128 for c in line):  # contains ASCII characters
                    # Latin-style text: draw per word and per character.
                    words = line.split()
                    for word_idx, word in enumerate(words):
                        # Draw each character of the word.
                        for j, char in enumerate(word):
                            char_width = draw.textlength(char, font=font_obj)

                            # Draw onto both the image and the mask.
                            txt_color_with_alpha = txt_color + (int(text_alpha * 255),)
                            draw.text((x, y), char, fill=txt_color_with_alpha, font=font_obj)
                            mask_draw.text((x, y), char, fill=255, font=font_obj)

                            x += char_width
                            if j < len(word) - 1:  # not the last character
                                x += letter_spacing_px

                        # Advance by the word gap.
                        if word_idx < len(words) - 1:  # not the last word
                            x += space_width + word_spacing_px
                else:
                    # CJK-style text: draw character by character.
                    for j, char in enumerate(line):
                        # Draw onto both the image and the mask.
                        txt_color_with_alpha = txt_color + (int(text_alpha * 255),)
                        draw.text((x, y), char, fill=txt_color_with_alpha, font=font_obj)
                        mask_draw.text((x, y), char, fill=255, font=font_obj)

                        # Advance to the next character position.
                        char_width = draw.textlength(char, font=font_obj)
                        x += char_width

                        # Letter spacing.
                        if j < len(line) - 1:  # not the last character
                            x += letter_spacing_px

                # Next line position (with the custom line spacing).
                y += line_heights[i] + line_spacing_px

        else:  # vertical
            # --- Vertical layout ---
            char_heights = []
            max_char_width = 0

            # Flatten all lines into a single character stream with
            # explicit newline markers between lines.
            all_chars = []
            for line in lines:
                all_chars.extend(list(line))
                if line != lines[-1]:  # not the last line: add a newline
                    all_chars.append('\n')

            # Measure every character.
            for char in all_chars:
                if char == '\n':
                    # A newline consumes one line-spacing worth of height.
                    char_heights.append(font_size * line_spacing)
                    continue

                bbox = font_obj.getbbox(char)
                char_height = bbox[3] - bbox[1]
                char_width = bbox[2] - bbox[0]

                char_heights.append(char_height)
                max_char_width = max(max_char_width, char_width)

            # Letter spacing applies vertically in this mode.
            letter_spacing_px = font_size * letter_spacing
            total_height = sum(char_heights) + (len(all_chars) - 1) * letter_spacing_px

            # Horizontal start position from the alignment setting.
            if alignment == "left":
                x_start = x_position
            elif alignment == "center":
                x_start = x_position + (canvas_width - max_char_width) // 2
            else:  # right
                x_start = x_position + canvas_width - max_char_width

            # Vertical start position from the vertical alignment setting.
            if vertical_alignment == "top":
                y_start = y_position
            elif vertical_alignment == "middle":
                y_start = y_position + (canvas_height - total_height) // 2
            else:  # bottom
                y_start = y_position + canvas_height - total_height

            # Draw the characters top to bottom.
            y = y_start
            prev_char = None
            for char in all_chars:
                if char == '\n':
                    # Newline: skip one line spacing.
                    y += font_size * line_spacing
                    prev_char = None
                    continue

                # Extra gap after a space to simulate word spacing.
                extra_spacing = 0
                if prev_char == ' ' and char != ' ':
                    extra_spacing = (word_spacing - 1) * font_size * 0.5

                # Draw onto both the image and the mask.
                txt_color_with_alpha = txt_color + (int(text_alpha * 255),)
                draw.text((x_start, y + extra_spacing), char, fill=txt_color_with_alpha, font=font_obj)
                mask_draw.text((x_start, y + extra_spacing), char, fill=255, font=font_obj)

                # Advance to the next character position.
                bbox = font_obj.getbbox(char)
                char_height = bbox[3] - bbox[1]
                y += char_height + letter_spacing_px

                prev_char = char

        # Composite the RGBA image onto black to obtain an RGB image.
        rgb_image = Image.new('RGB', image.size, (0, 0, 0))
        rgb_image.paste(image, mask=image.split()[3])  # alpha channel as the paste mask

        # Convert to ComfyUI tensor format (float in [0, 1], batch dim first).
        img_tensor = torch.from_numpy(np.array(rgb_image).astype(np.float32) / 255.0).unsqueeze(0)
        mask_tensor = torch.from_numpy(np.array(mask_image).astype(np.float32) / 255.0).unsqueeze(0)

        return (img_tensor, mask_tensor)