├── .github
│   └── workflows
│       └── publish.yml
├── .gitignore
├── LICENSE
├── README.md
├── __init__.py
├── examples
│   ├── ComfyUI_00200_.png
│   ├── ComfyUI_00201_.png
│   ├── ComfyUI_00227_.png
│   ├── ComfyUI_00229_.png
│   └── ComfyUI_00245_.png
├── images
│   ├── comfy_screenshot.png
│   ├── comfy_screenshot_2.png
│   ├── comfy_screenshot_3.png
│   └── comfy_screenshot_4.png
├── node.py
└── pyproject.toml

/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | name: Publish to Comfy registry
2 | on:
3 |   workflow_dispatch:
4 |   push:
5 |     branches:
6 |       - main
7 |       - master
8 |     paths:
9 |       - "pyproject.toml"
10 | 
11 | permissions:
12 |   issues: write
13 | 
14 | jobs:
15 |   publish-node:
16 |     name: Publish Custom Node to registry
17 |     runs-on: ubuntu-latest
18 |     if: ${{ github.repository_owner == 'attashe' }}
19 |     steps:
20 |       - name: Check out code
21 |         uses: actions/checkout@v4
22 |         with:
23 |           submodules: true
24 |       - name: Publish Custom Node
25 |         uses: Comfy-Org/publish-node-action@v1
26 |         with:
27 |           ## Add your own personal access token to your Github Repository secrets and reference it here.
28 |           personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }}
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | 
6 | # C extensions
7 | *.so
8 | 
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 | 
29 | # PyInstaller
30 | #  Usually these files are written by a python script from a template
31 | #  before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 | 
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 | 
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 | 
54 | # Translations
55 | *.mo
56 | *.pot
57 | 
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 | 
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 | 
68 | # Scrapy stuff:
69 | .scrapy
70 | 
71 | # Sphinx documentation
72 | docs/_build/
73 | 
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 | 
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 | 
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 | 
85 | # pyenv
86 | #   For a library or package, you might want to ignore these files since the code is
87 | #   intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 | 
90 | # pipenv
91 | #   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | #   However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | #   having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | #   install all needed dependencies.
95 | #Pipfile.lock
96 | 
97 | # poetry
98 | #   Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | #   This is especially recommended for binary packages to ensure reproducibility, and is more
100 | #   commonly ignored for libraries.
101 | #   https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 | 
104 | # pdm
105 | #   Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | #   pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | #   in version control.
109 | #   https://pdm.fming.dev/latest/usage/project/#working-with-version-control
110 | .pdm.toml
111 | .pdm-python
112 | .pdm-build/
113 | 
114 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
115 | __pypackages__/
116 | 
117 | # Celery stuff
118 | celerybeat-schedule
119 | celerybeat.pid
120 | 
121 | # SageMath parsed files
122 | *.sage.py
123 | 
124 | # Environments
125 | .env
126 | .venv
127 | env/
128 | venv/
129 | ENV/
130 | env.bak/
131 | venv.bak/
132 | 
133 | # Spyder project settings
134 | .spyderproject
135 | .spyproject
136 | 
137 | # Rope project settings
138 | .ropeproject
139 | 
140 | # mkdocs documentation
141 | /site
142 | 
143 | # mypy
144 | .mypy_cache/
145 | .dmypy.json
146 | dmypy.json
147 | 
148 | # Pyre type checker
149 | .pyre/
150 | 
151 | # pytype static type analyzer
152 | .pytype/
153 | 
154 | # Cython debug symbols
155 | cython_debug/
156 | 
157 | # PyCharm
158 | #  JetBrains specific template is maintained in a separate JetBrains.gitignore that can
159 | #  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
160 | #  and can be added to the global gitignore or merged into this file.  For a more nuclear
161 | #  option (not recommended) you can uncomment the following to ignore the entire idea folder.
162 | #.idea/
163 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |                                  Apache License
2 |                            Version 2.0, January 2004
3 |                         http://www.apache.org/licenses/
4 | 
5 |    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 | 
7 |    1. Definitions.
8 | 
9 |       "License" shall mean the terms and conditions for use, reproduction,
10 |       and distribution as defined by Sections 1 through 9 of this document.
11 | 
12 |       "Licensor" shall mean the copyright owner or entity authorized by
13 |       the copyright owner that is granting the License.
14 | 
15 |       "Legal Entity" shall mean the union of the acting entity and all
16 |       other entities that control, are controlled by, or are under common
17 |       control with that entity. For the purposes of this definition,
18 |       "control" means (i) the power, direct or indirect, to cause the
19 |       direction or management of such entity, whether by contract or
20 |       otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 |       outstanding shares, or (iii) beneficial ownership of such entity.
22 | 
23 |       "You" (or "Your") shall mean an individual or Legal Entity
24 |       exercising permissions granted by this License.
25 | 
26 |       "Source" form shall mean the preferred form for making modifications,
27 |       including but not limited to software source code, documentation
28 |       source, and configuration files.
29 | 
30 |       "Object" form shall mean any form resulting from mechanical
31 |       transformation or translation of a Source form, including but
32 |       not limited to compiled object code, generated documentation,
33 |       and conversions to other media types.
34 | 
35 |       "Work" shall mean the work of authorship, whether in Source or
36 |       Object form, made available under the License, as indicated by a
37 |       copyright notice that is included in or attached to the work
38 |       (an example is provided in the Appendix below).
39 | 
40 |       "Derivative Works" shall mean any work, whether in Source or Object
41 |       form, that is based on (or derived from) the Work and for which the
42 |       editorial revisions, annotations, elaborations, or other modifications
43 |       represent, as a whole, an original work of authorship. For the purposes
44 |       of this License, Derivative Works shall not include works that remain
45 |       separable from, or merely link (or bind by name) to the interfaces of,
46 |       the Work and Derivative Works thereof.
47 | 
48 |       "Contribution" shall mean any work of authorship, including
49 |       the original version of the Work and any modifications or additions
50 |       to that Work or Derivative Works thereof, that is intentionally
51 |       submitted to Licensor for inclusion in the Work by the copyright owner
52 |       or by an individual or Legal Entity authorized to submit on behalf of
53 |       the copyright owner. For the purposes of this definition, "submitted"
54 |       means any form of electronic, verbal, or written communication sent
55 |       to the Licensor or its representatives, including but not limited to
56 |       communication on electronic mailing lists, source code control systems,
57 |       and issue tracking systems that are managed by, or on behalf of, the
58 |       Licensor for the purpose of discussing and improving the Work, but
59 |       excluding communication that is conspicuously marked or otherwise
60 |       designated in writing by the copyright owner as "Not a Contribution."
61 | 
62 |       "Contributor" shall mean Licensor and any individual or Legal Entity
63 |       on behalf of whom a Contribution has been received by Licensor and
64 |       subsequently incorporated within the Work.
65 | 
66 |    2. Grant of Copyright License. Subject to the terms and conditions of
67 |       this License, each Contributor hereby grants to You a perpetual,
68 |       worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 |       copyright license to reproduce, prepare Derivative Works of,
70 |       publicly display, publicly perform, sublicense, and distribute the
71 |       Work and such Derivative Works in Source or Object form.
72 | 
73 |    3. Grant of Patent License. Subject to the terms and conditions of
74 |       this License, each Contributor hereby grants to You a perpetual,
75 |       worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 |       (except as stated in this section) patent license to make, have made,
77 |       use, offer to sell, sell, import, and otherwise transfer the Work,
78 |       where such license applies only to those patent claims licensable
79 |       by such Contributor that are necessarily infringed by their
80 |       Contribution(s) alone or by combination of their Contribution(s)
81 |       with the Work to which such Contribution(s) was submitted. If You
82 |       institute patent litigation against any entity (including a
83 |       cross-claim or counterclaim in a lawsuit) alleging that the Work
84 |       or a Contribution incorporated within the Work constitutes direct
85 |       or contributory patent infringement, then any patent licenses
86 |       granted to You under this License for that Work shall terminate
87 |       as of the date such litigation is filed.
88 | 
89 |    4. Redistribution. You may reproduce and distribute copies of the
90 |       Work or Derivative Works thereof in any medium, with or without
91 |       modifications, and in Source or Object form, provided that You
92 |       meet the following conditions:
93 | 
94 |       (a) You must give any other recipients of the Work or
95 |           Derivative Works a copy of this License; and
96 | 
97 |       (b) You must cause any modified files to carry prominent notices
98 |           stating that You changed the files; and
99 | 
100 |       (c) You must retain, in the Source form of any Derivative Works
101 |           that You distribute, all copyright, patent, trademark, and
102 |           attribution notices from the Source form of the Work,
103 |           excluding those notices that do not pertain to any part of
104 |           the Derivative Works; and
105 | 
106 |       (d) If the Work includes a "NOTICE" text file as part of its
107 |           distribution, then any Derivative Works that You distribute must
108 |           include a readable copy of the attribution notices contained
109 |           within such NOTICE file, excluding those notices that do not
110 |           pertain to any part of the Derivative Works, in at least one
111 |           of the following places: within a NOTICE text file distributed
112 |           as part of the Derivative Works; within the Source form or
113 |           documentation, if provided along with the Derivative Works; or,
114 |           within a display generated by the Derivative Works, if and
115 |           wherever such third-party notices normally appear. The contents
116 |           of the NOTICE file are for informational purposes only and
117 |           do not modify the License. You may add Your own attribution
118 |           notices within Derivative Works that You distribute, alongside
119 |           or as an addendum to the NOTICE text from the Work, provided
120 |           that such additional attribution notices cannot be construed
121 |           as modifying the License.
122 | 
123 |       You may add Your own copyright statement to Your modifications and
124 |       may provide additional or different license terms and conditions
125 |       for use, reproduction, or distribution of Your modifications, or
126 |       for any such Derivative Works as a whole, provided Your use,
127 |       reproduction, and distribution of the Work otherwise complies with
128 |       the conditions stated in this License.
129 | 
130 |    5. Submission of Contributions. Unless You explicitly state otherwise,
131 |       any Contribution intentionally submitted for inclusion in the Work
132 |       by You to the Licensor shall be under the terms and conditions of
133 |       this License, without any additional terms or conditions.
134 |       Notwithstanding the above, nothing herein shall supersede or modify
135 |       the terms of any separate license agreement you may have executed
136 |       with Licensor regarding such Contributions.
137 | 
138 |    6. Trademarks. This License does not grant permission to use the trade
139 |       names, trademarks, service marks, or product names of the Licensor,
140 |       except as required for reasonable and customary use in describing the
141 |       origin of the Work and reproducing the content of the NOTICE file.
142 | 
143 |    7. Disclaimer of Warranty. Unless required by applicable law or
144 |       agreed to in writing, Licensor provides the Work (and each
145 |       Contributor provides its Contributions) on an "AS IS" BASIS,
146 |       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 |       implied, including, without limitation, any warranties or conditions
148 |       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 |       PARTICULAR PURPOSE. You are solely responsible for determining the
150 |       appropriateness of using or redistributing the Work and assume any
151 |       risks associated with Your exercise of permissions under this License.
152 | 
153 |    8. Limitation of Liability. In no event and under no legal theory,
154 |       whether in tort (including negligence), contract, or otherwise,
155 |       unless required by applicable law (such as deliberate and grossly
156 |       negligent acts) or agreed to in writing, shall any Contributor be
157 |       liable to You for damages, including any direct, indirect, special,
158 |       incidental, or consequential damages of any character arising as a
159 |       result of this License or out of the use or inability to use the
160 |       Work (including but not limited to damages for loss of goodwill,
161 |       work stoppage, computer failure or malfunction, or any and all
162 |       other commercial damages or losses), even if such Contributor
163 |       has been advised of the possibility of such damages.
164 | 
165 |    9. Accepting Warranty or Additional Liability. While redistributing
166 |       the Work or Derivative Works thereof, You may choose to offer,
167 |       and charge a fee for, acceptance of support, warranty, indemnity,
168 |       or other liability obligations and/or rights consistent with this
169 |       License. However, in accepting such obligations, You may act only
170 |       on Your own behalf and on Your sole responsibility, not on behalf
171 |       of any other Contributor, and only if You agree to indemnify,
172 |       defend, and hold each Contributor harmless for any liability
173 |       incurred by, or claims asserted against, such Contributor by reason
174 |       of your accepting any such warranty or additional liability.
175 | 
176 |    END OF TERMS AND CONDITIONS
177 | 
178 |    APPENDIX: How to apply the Apache License to your work.
179 | 
180 |       To apply the Apache License to your work, attach the following
181 |       boilerplate notice, with the fields enclosed by brackets "[]"
182 |       replaced with your own identifying information. (Don't include
183 |       the brackets!)  The text should be enclosed in the appropriate
184 |       comment syntax for the file format. We also recommend that a
185 |       file or class name and description of purpose be included on the
186 |       same "printed page" as the copyright notice for easier
187 |       identification within third-party archives.
188 | 
189 |    Copyright [yyyy] [name of copyright owner]
190 | 
191 |    Licensed under the Apache License, Version 2.0 (the "License");
192 |    you may not use this file except in compliance with the License.
193 |    You may obtain a copy of the License at
194 | 
195 |        http://www.apache.org/licenses/LICENSE-2.0
196 | 
197 |    Unless required by applicable law or agreed to in writing, software
198 |    distributed under the License is distributed on an "AS IS" BASIS,
199 |    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 |    See the License for the specific language governing permissions and
201 |    limitations under the License.
202 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ComfyUI-FluxRegionAttention
2 | 
3 | Implements Region Attention for the Flux model. It adds a RegionAttention node that takes regions, each consisting of a mask plus a condition; the mask can be set from ComfyUI masks or from a bounding box created with the FluxRegionBBOX node. This code is not optimized and has a memory leak: if you hit an out-of-memory (OOM) error, just try running the query again. It works on my RTX 3080.
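4 | 
5 | Each REGION input is a small dictionary that bundles a conditioning with either a mask or a normalized bounding box. Below is a minimal sketch of what the FluxRegionMask and FluxRegionBBOX nodes return (matching their `create_region` methods in `node.py`); the `condition` and `mask` variables here are placeholders for your own inputs:
6 | 
7 | ```python
8 | # REGION from FluxRegionMask: a ComfyUI MASK plus its conditioning
9 | region_from_mask = {
10 |     "condition": condition,  # CONDITIONING from a text encode node
11 |     "mask": mask,            # MASK tensor (batch x height x width)
12 | }
13 | 
14 | # REGION from FluxRegionBBOX: normalized [x1, y1, x2, y2] coordinates
15 | region_from_bbox = {
16 |     "condition": condition,
17 |     "bbox": [0.0, 0.0, 0.5, 0.5],  # fractions of the image width/height
18 | }
19 | ```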
20 | 
21 | For generation, it uses a regular prompt that influences the whole picture plus regions that each have their own prompt. The base prompt is good for setting up the background and the overall style of the image.
22 | 
23 | This is a training-free technique and the results are not always stable; sometimes you need to try several seeds or change the prompt.
24 | 
25 | ## Examples
26 | 
27 | Workflows are embedded in the example images' metadata.
28 | 
29 | ![clown in the forest](./examples/ComfyUI_00227_.png)
30 | ![black swan](./examples/ComfyUI_00229_.png)
31 | ![colored brick wall](./examples/ComfyUI_00245_.png)
32 | 
33 | ## Node screenshots
34 | 
35 | 1. Region attention influences only the t5_xxl embeddings; for the clip_l embeddings we can use either the concatenated prompt (stronger regional conditioning) or only the common prompt (weaker conditioning).
36 | 
37 | ![concat prompt to clip_l](./images/comfy_screenshot_2.png)
38 | ![background prompt to clip_l](./images/comfy_screenshot_3.png)
39 | 
40 | 2. Pipeline
41 | 
42 | ![colored wall](./images/comfy_screenshot_4.png)
43 | ![black swan](./images/comfy_screenshot.png)
44 | 
45 | 
46 | ## Acknowledgements
47 | 
48 | This repository is based on the following repositories:
49 | 
50 | @Misc{omost,
51 |   author = {Omost Team},
52 |   title  = {Omost GitHub Page},
53 |   year   = {2024},
54 | }
55 | 
56 | [Gligen-GUI](https://github.com/mut-ex/gligen-gui)
57 | 
58 | [black-forest-labs](https://github.com/black-forest-labs/flux)
59 | 
60 | [lucidrains attention implementation](https://github.com/lucidrains/memory-efficient-attention-pytorch)
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
1 | from .node import RegionAttention, CLIPDebug, FluxRegionMask, FluxRegionBBOX
2 | 
3 | NODE_CLASS_MAPPINGS = {
4 |     "RegionAttention": RegionAttention,
5 |     "CLIPDebug": CLIPDebug,
6 |     "FluxRegionMask": FluxRegionMask,
7 |     "FluxRegionBBOX": FluxRegionBBOX,
8 |     # "BoundingBoxNode": BoundingBoxNode,
9 |     # "VisualizeBBoxesNode": VisualizeBBoxesNode,
10 |     # "BBoxToMaskNode": BBoxToMaskNode,
11 | }
12 | 
13 | NODE_DISPLAY_NAME_MAPPINGS = {
14 |     "RegionAttention": "Region Attention",
15 |     "FluxRegionMask": "Region Mask",
16 |     "FluxRegionBBOX": "Region bbox",
17 |     "CLIPDebug": "CLIP debug",
18 |     # "VisualizeBBoxesNode": "Visualize Bounding Boxes",
19 |     # "BBoxToMaskNode": "Bounding Boxes to Mask",
20 | }
21 | 
22 | __all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS']
--------------------------------------------------------------------------------
/examples/ComfyUI_00200_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/attashe/ComfyUI-FluxRegionAttention/0ab8a2252ca0f88bc329e002ee30136918df2740/examples/ComfyUI_00200_.png
--------------------------------------------------------------------------------
/examples/ComfyUI_00201_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/attashe/ComfyUI-FluxRegionAttention/0ab8a2252ca0f88bc329e002ee30136918df2740/examples/ComfyUI_00201_.png
--------------------------------------------------------------------------------
/examples/ComfyUI_00227_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/attashe/ComfyUI-FluxRegionAttention/0ab8a2252ca0f88bc329e002ee30136918df2740/examples/ComfyUI_00227_.png
--------------------------------------------------------------------------------
/examples/ComfyUI_00229_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/attashe/ComfyUI-FluxRegionAttention/0ab8a2252ca0f88bc329e002ee30136918df2740/examples/ComfyUI_00229_.png
--------------------------------------------------------------------------------
/examples/ComfyUI_00245_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/attashe/ComfyUI-FluxRegionAttention/0ab8a2252ca0f88bc329e002ee30136918df2740/examples/ComfyUI_00245_.png
--------------------------------------------------------------------------------
/images/comfy_screenshot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/attashe/ComfyUI-FluxRegionAttention/0ab8a2252ca0f88bc329e002ee30136918df2740/images/comfy_screenshot.png
--------------------------------------------------------------------------------
/images/comfy_screenshot_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/attashe/ComfyUI-FluxRegionAttention/0ab8a2252ca0f88bc329e002ee30136918df2740/images/comfy_screenshot_2.png
--------------------------------------------------------------------------------
/images/comfy_screenshot_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/attashe/ComfyUI-FluxRegionAttention/0ab8a2252ca0f88bc329e002ee30136918df2740/images/comfy_screenshot_3.png
--------------------------------------------------------------------------------
/images/comfy_screenshot_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/attashe/ComfyUI-FluxRegionAttention/0ab8a2252ca0f88bc329e002ee30136918df2740/images/comfy_screenshot_4.png
--------------------------------------------------------------------------------
/node.py:
--------------------------------------------------------------------------------
1 | import gc
2 | import torch
3 | import torch.nn.functional as F
4 | import numpy as np
5 | from torch import Tensor
6 | from comfy.ldm.modules import attention as comfy_attention
7 | from comfy.ldm.flux import math as flux_math
8 | from comfy.ldm.flux import layers as flux_layers
9 | from comfy import model_management
10 | 
11 | from PIL import Image
12 | from typing import List, Dict, Optional
13 | from functools import partial
14 | from einops import rearrange
15 | 
16 | import matplotlib.pyplot as plt
17 | 
18 | orig_attention = comfy_attention.optimized_attention
19 | 
20 | 
21 | def masked_attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor,
22 |                      attn_mask: Optional[Tensor] = None, q_scale=None, k_scale=None, **kwargs) -> Tensor:
23 |     """
24 |     Implementation using PyTorch's scaled_dot_product_attention
25 | 
26 |     Args:
27 |         q: Query tensor of shape [B, H, L, D]
28 |         k: Key tensor of shape [B, H, L, D]
29 |         v: Value tensor of shape [B, H, L, D]
30 |         pe: Positional encoding tensor
31 |         attn_mask: Optional attention mask
32 |         q_scale: Optional query scaling factor
33 |         k_scale: Optional key scaling factor
34 | 
35 |     Returns:
36 |         Output tensor of shape [B, L, H*D]
37 |     """
38 |     # Apply rotary positional encoding
39 |     q, k = flux_math.apply_rope(q, k, pe)
40 | 
41 |     # PyTorch's scaled_dot_product_attention
42 |     # The scaling is handled internally by the function
43 |     x = F.scaled_dot_product_attention(
44 |         q, k, v,
45 |         attn_mask=attn_mask,
46 |         dropout_p=0.0,    # Assuming no dropout in the original
47 |         is_causal=False   # Assuming not causal, adjust if needed
48 |     )
49 | 
50 |     # Reshape back to the expected output format
51 |     x = rearrange(x, "B H L D -> B L (H D)")
52 | 
53 |     return x
54 | 
55 | def prepare_attention_mask(lin_masks: List[Image.Image], reg_embeds: List[Tensor],
56 |                            Nx: int, emb_size: int, emb_len: int,):
57 |     cross_mask = torch.zeros(emb_len + Nx, emb_len + Nx)  # 1 marks query/key pairs to block (converted to -inf later)
58 |     q_scale = torch.ones(emb_len + Nx)
59 |     k_scale = torch.ones(emb_len + Nx)
60 | 
61 |     n_regs = len(lin_masks)
62 |     emb_cum_idx = 0
63 | 
64 |     # Block attention between the main prompt and the regional subprompts
65 |     for j in range(n_regs):
66 |         t1, t2 = emb_cum_idx + (j+1) * emb_size, emb_cum_idx + (j+2) * emb_size
67 |         p1, p2 = emb_cum_idx, emb_cum_idx + emb_size
68 |         print(t1, t2, p1, p2)
69 | 
70 |         cross_mask[t1 : t2, p1 : p2] = 1
71 |         cross_mask[p1 : p2, t1 : t2] = 1
72 | 
73 |     emb_cum_idx += emb_size
74 | 
75 |     for i, (m, emb) in enumerate(zip(lin_masks, reg_embeds)):
76 |         # Block attention between the text blocks of different regions
77 |         for j in range(1, n_regs - i):
78 |             t1, t2 = emb_cum_idx + j * emb_size, emb_cum_idx + (j+1) * emb_size
79 |             p1, p2 = emb_cum_idx, emb_cum_idx + emb_size
80 |             print(t1, t2, p1, p2)
81 | 
82 |             cross_mask[t1 : t2, p1 : p2] = 1
83 |             cross_mask[p1 : p2, t1 : t2] = 1
84 | 
85 |         scale = m.sum() / Nx
86 |         print('m: ', m.shape, scale)
87 |         if scale > 1e-5:
88 |             q_scale[emb_cum_idx : emb_cum_idx+emb_size] = 1 / scale
89 |             k_scale[emb_cum_idx : emb_cum_idx+emb_size] = 1 / scale
90 | 
91 |         # m (4096) -> (N_text * 256 + 4096)
92 |         m = torch.cat([torch.ones(emb_size * (n_regs+1)), m])
93 |         print(m.shape)
94 | 
95 |         mb = m > 0.5
96 |         cross_mask[~mb, emb_cum_idx : emb_cum_idx + emb_size] = 1
97 |         cross_mask[emb_cum_idx : emb_cum_idx + emb_size, ~mb] = 1
98 |         emb_cum_idx += emb_size
99 | 
100 |     # Image self-attention: block attention between different regions.
101 |     # Calculate pairwise masks between different areas with the Kronecker product.
102 |     for i in range(n_regs):
103 |         for j in range(i+1, n_regs):
104 |             # We need two Kronecker products to preserve the symmetry of the matrix
105 |             kron1 = torch.kron(lin_masks[i].unsqueeze(0), lin_masks[j].unsqueeze(-1))
106 |             kron2 = torch.kron(lin_masks[j].unsqueeze(0), lin_masks[i].unsqueeze(-1))
107 |             # cross_mask[emb_cum_idx:, emb_cum_idx:] += kron1 + kron2
108 | 
109 |             # Select the intersecting regions and zero out their rows and columns
110 | 
111 |             # Get the intersecting regions
112 |             intersect_idx = torch.logical_and(lin_masks[i] > 0.5, lin_masks[j] > 0.5)
113 |             # Set the intersecting regions to 0
114 |             kron_sum = kron1 + kron2
115 |             kron_sum[intersect_idx, :] = 0
116 |             kron_sum[:, intersect_idx] = 0
117 | 
118 |             # kron_sum[intersect_idx, intersect_idx] = 0
119 | 
120 |             # Add the Kronecker product to the cross mask
121 |             cross_mask[emb_cum_idx:, emb_cum_idx:] += kron_sum
122 | 
123 |     # Clean up the diagonal
124 |     cross_mask.fill_diagonal_(0)
125 | 
126 |     q_scale = q_scale.reshape(1, 1, -1, 1).cuda()
127 |     k_scale = k_scale.reshape(1, 1, -1, 1).cuda()
128 | 
129 |     return cross_mask, q_scale, k_scale
130 | 
131 | 
132 | test_payload = {
133 |     'prompt': {
134 |         'positive': 'An Italian cafe',
135 |         'width': 1024,
136 |         'height': 1024,
137 |         'bboxes': [
138 |             {
139 |                 'caption': 'An Asian man with a sombrero',
140 |                 'x': 100,
141 |                 'y': 200,
142 |                 'width': 300,
143 |                 'height': 700,
144 |             },
145 |             {
146 |                 'caption': 'A red-haired woman',
147 |                 'x': 500,
148 |                 'y': 200,
149 |                 'width': 300,
150 |                 'height': 700,
151 |             }
152 |         ],
153 |     }
154 | }
155 | 
156 | 
157 | def process_payload(payload):
158 |     bboxes = payload['prompt']['bboxes']
159 |     masks = []
160 |     subprompts = []
161 | 
162 |     for i, bbox in enumerate(bboxes):
163 |         mask = Image.new('L', (payload['prompt']['width'], payload['prompt']['height']), 0)
164 |         mask_arr = np.array(mask)
165 | 
166 |         # Draw the bounding box
167 |         mask_arr[bbox['y']:bbox['y']+bbox['height'], bbox['x']:bbox['x']+bbox['width']] = 255
168 |         mask = Image.fromarray(mask_arr)
169 | 
170 |         # Debug save the mask
171 |         mask.save(f'mask_{i}.png')
172 | 
173 |         masks.append(mask)
174 |         subprompts.append(bbox['caption'])
175 | 
176 |     return masks, subprompts
177 | 
178 | 
179 | def generate_test_mask(masks, height, width):
180 |     hH, hW = int(height) // 16, int(width) // 16
181 |     print(height, width, '->', hH, hW)
182 | 
183 |     lin_masks = []
184 |     for mask in masks:
185 |         mask = mask.convert('L')
186 |         mask = torch.tensor(np.array(mask)).unsqueeze(0).unsqueeze(0) / 255
187 |         # Linearize mask
188 |         mask = torch.nn.functional.interpolate(mask, (hH, hW), mode='nearest-exact').flatten()
189 |         lin_masks.append(mask)
190 | 
191 |     return lin_masks, hH, hW
192 | 
193 | def generate_region_mask(region, width, height):
194 |     if region.get('bbox') is not None:
195 |         x1, y1, x2, y2 = region['bbox']
196 |         mask = Image.new('L', (width, height), 0)
197 |         mask_arr = np.array(mask)
198 | 
199 |         print(f'Generating masks with {width}x{height} and [{x1}, {y1}, {x2}, {y2}]')
200 | 
201 |         # Draw the bounding box
202 |         mask_arr[int(y1*height):int(y2*height), int(x1*width):int(x2*width)] = 255
203 |         mask = Image.fromarray(mask_arr)
204 | 
205 |         return mask
206 |     elif region.get('mask') is not None:
207 |         mask = region['mask']  # ComfyUI mask is tensor (bs x height x width)
208 |         print('MASK: ', mask)
209 |         mask = mask[0].cpu().numpy()
210 |         mask = (mask * 255).astype(np.uint8)
211 |         mask = Image.fromarray(mask)
212 |         mask = mask.resize((width, height))
213 | 
214 |         return mask
215 |     else:
216 |         raise Exception('Unknown region type')
217 | 
218 | 
219 | class RegionAttention:
220 |     RETURN_TYPES = ("MODEL", "CONDITIONING")
221 |     RETURN_NAMES = ("model", "condition")
222 |     FUNCTION = "go"
223 |     CATEGORY = "model_patches"
224 | 
225 |     @classmethod
226 |     def INPUT_TYPES(cls):
227 |         return {
228 |             "required": {
229 |                 "model": ("MODEL",),
230 |                 # "clip": ("CLIP", {"tooltip": "The CLIP model used for encoding the text."}),
231 |                 "condition": ("CONDITIONING",),
232 |                 "samples": ("LATENT",),
233 |                 "region1": ("REGION",),
234 |                 "enabled": ("BOOLEAN", {"default": True},),
235 |             },
236 |             "optional": {
237 |                 "region2": ("REGION",),
238 |                 "region3": ("REGION",),
239 |                 "region4": ("REGION",),
240 |             }
241 |         }
242 | 
243 |     @classmethod
244 |     def go(cls, *, model: object, condition, samples, region1, enabled: bool,
245 |            region2=None, region3=None, region4=None):
246 |         print(f'Region attention Node enabled: {enabled}, model: {model}')
247 |         # masks, payload = process_payload(test_payload)
248 | 
249 |         latent = samples['samples']
250 |         print('latent.shape', latent.shape)
251 |         bs_l, n_ch, lH, lW = latent.shape
252 |         text_emb = condition[0][0].clone()
253 |         clip_emb = condition[0][1]['pooled_output'].clone()
254 |         bs, emb_size, emb_dim = text_emb.shape
255 |         iH, iW = lH * 8, lW * 8
256 | 
257 |         subprompts_embeds, masks = [region1['condition'][0][0],], [generate_region_mask(region1, iW, iH),]
258 |         masks[-1].save(f'mask_1.png')
259 |         if region2 is not None:
260 |             print('append region2')
261 |             sub_emb2 = region2['condition'][0][0]
262 |             masks.append(generate_region_mask(region2, iW, iH))
263 |             subprompts_embeds.append(sub_emb2)
264 |             masks[-1].save(f'mask_2.png')
265 |         if region3 is not None:
266 |             print('append region3')
267 |             sub_emb3 = region3['condition'][0][0]
268 |             masks.append(generate_region_mask(region3, iW, iH))
269 |             subprompts_embeds.append(sub_emb3)
270 |         if region4 is not None:
271 |             print('append region4')
272 |             sub_emb4 = region4['condition'][0][0]
273 |             masks.append(generate_region_mask(region4, iW, iH))
274 |             subprompts_embeds.append(sub_emb4)
275 | 
276 |         lin_masks, hH, hW = generate_test_mask(masks, lH * 8, lW * 8)
277 |         Nx = int(hH * hW)
278 |         emb_len = (len(subprompts_embeds) + 1) * emb_size
279 |         extended_condition = torch.cat([text_emb, *subprompts_embeds], dim=1) if enabled else text_emb
280 | 
281 |         attn_mask, q_scale, k_scale = prepare_attention_mask(lin_masks, subprompts_embeds, Nx, emb_size, emb_len)
282 | 
283 |         # Visualize and save the attention mask
284 |         # fig, ax = plt.subplots(1, 1, figsize=(10, 10))
285 |         # ax.imshow(attn_mask.cpu().numpy())
286 |         # plt.savefig('attention_mask_1.png')
287 | 
288 |         # Pad mask for xformers to reduce allocations during inference
289 |         device = torch.device('cuda')
290 |         attn_dtype = torch.bfloat16 if model_management.should_use_bf16(device=device) else torch.float16
291 |         if attn_mask is not None:
292 |             print(f'Applying attention masks: {attn_mask.shape}')
293 |             L, _ = attn_mask.shape
294 |             H = 24  # 24 heads for FLUX models
295 |             pad = 8 - L % 8
296 | 
297 |             # print(f'Attention mask memory padded by: {pad}')
298 |             if pad != 8:
299 |                 # Allocate in the computation dtype chosen above (bf16 if supported, else fp16)
300 |                 mask_out = torch.empty([bs, H, L + pad, L + pad],
301 |                                        dtype=attn_dtype, device=device)
302 |                 mask_out[:, :, :L, :L] = attn_mask
303 |                 # print(f'Attention mask memory padded to: {mask_out.shape}')
304 |                 attn_mask = mask_out[:, :, :L, :L]
305 |             else:
306 |                 mask_out = torch.empty([bs, H, L, L],
307 |                                        dtype=attn_dtype, device=device)
308 |                 mask_out[:, :, :, :] = attn_mask
309 |                 attn_mask = mask_out
310 | 
311 |             attn_mask_bool = attn_mask > 0.5  # entries equal to 1 mark pairs to block
312 |             attn_mask.masked_fill_(attn_mask_bool, float('-inf'))  # additive mask: -inf removes attention
313 | 
314 |         attn_mask_arg: Optional[Tensor] = attn_mask if enabled else None
315 | 
316 |         # if attn_mask_arg is not None:
317 |         #     fig, ax = plt.subplots(1, 1, figsize=(10, 10))
318 |         #     ax.imshow(attn_mask[0][0].float().cpu().numpy())
319 |         #     plt.savefig('attention_mask_2.png')
320 | 
321 |         def region_attention(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False):  # unused alternative path
322 |             print(q.shape, k.shape, v.shape)
323 | 
324 |             res = orig_attention(q, k, v, heads, mask=attn_mask, attn_precision=attn_precision, skip_reshape=skip_reshape)
325 | 
326 |             return res
327 | 
328 |         # comfy_attention.optimized_attention = orig_attention if not enabled else region_attention
329 | 
330 |         def override_attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor) -> Tensor:  # superseded by the partial below
331 | 
332 |             q, k = flux_math.apply_rope(q, k, pe)
333 | 
334 |             heads = q.shape[1]
335 |             x = region_attention(q, k, v, heads, skip_reshape=True)
336 |             return x
337 | 
338 |         override_attention = partial(masked_attention, attn_mask=attn_mask_arg)
339 | 
340 |         flux_math.attention = override_attention
341 |         flux_layers.attention = override_attention
342 | 
343 |         del condition
344 |         gc.collect()
345 |         torch.cuda.empty_cache()
346 |         new_condition = [[
347 |             extended_condition,
348 |             {'pooled_output': clip_emb},
349 |         ]]
350 | 
351 |         return (model, new_condition)
352 | 
353 | 
354 | class FluxRegionMask:
355 | 
356 |     @classmethod
357 |     def INPUT_TYPES(s):
358 |         return {
359 |             "required": {
360 |                 "mask": ("MASK",),
361 |                 "condition": ("CONDITIONING",),
362 |             }
363 |         }
364 | 
365 |     RETURN_TYPES = ("REGION",)
366 |     FUNCTION = "create_region"
367 | 
368 |     def create_region(self, mask, condition):
369 |         return ({
370 |             "condition": condition,
371 |             "mask": mask,
372 |         },)
373 | 
374 | 
375 | class FluxRegionBBOX:
376 | 
377 |     @classmethod
378 |     def INPUT_TYPES(s):
379 |         return {
380 |             "required": {
381 |                 "x1": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0}),
382 |                 "y1": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0}),
383 |                 "x2": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0}),
384 |                 "y2": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0}),
385 |                 "condition": ("CONDITIONING",),
386 |             }
387 |         }
388 | 
389 |     RETURN_TYPES = ("REGION",)
390 |     FUNCTION = "create_region"
391 | 
392 |     def create_region(self, x1, y1, x2, y2, condition):
393 |         return ({
394 |             "condition": condition,
395 |             "bbox": [x1, y1, x2, y2],
396 |         },)
397 | 
398 | 
399 | class CLIPDebug:
400 |     @classmethod
401 |     def INPUT_TYPES(s):
402 |         return {
403 |             "required": {
404 |                 "clip": ("CLIP", {"tooltip": "The CLIP model used for encoding the text."}),
405 |                 "condition": ("CONDITIONING",),
406 |             }
407 |         }
408 |     RETURN_TYPES = ("CONDITIONING",)
409 |     OUTPUT_TOOLTIPS = ("A conditioning containing the embedded text used to guide the diffusion model.",)
410 |     FUNCTION = "debug"
411 | 
412 |     def debug(self, clip, condition):
413 |         # print(clip)
414 |         print('len(condition)', len(condition))
415 |         print('len(condition[0])', len(condition[0]))
416 |         print('type(condition[0][1])', type(condition[0][1]))
417 |         print('condition[0][0].shape', condition[0][0].shape)
418 |         print('list(condition[0][1].keys())', list(condition[0][1].keys()))
419 |         print("condition[0][1]['pooled_output'].shape", condition[0][1]['pooled_output'].shape)
420 | 
421 |         return (condition,)
422 | 
423 | # numpy and PIL.Image are already imported at the top of the file; only ImageDraw is new here
424 | from PIL import ImageDraw
425 | 
426 | class RegionBbox:
427 |     @classmethod
428 |     def INPUT_TYPES(cls):
429 |         return {
430 |             "required": {
431 |                 "image_width": ("INT", {"default": 512, "min": 64, "max": 2048}),
432 |                 "image_height": ("INT", {"default": 512, "min": 64, "max": 2048}),
433 |                 "x1": ("FLOAT", {"default": 0.1, "min": 0.0, "max": 1.0}),
434 |                 "y1": ("FLOAT", {"default": 0.1, "min": 0.0, "max": 1.0}),
435 |                 "x2": ("FLOAT", {"default": 0.9, "min": 0.0, "max": 1.0}),
436 |                 "y2": ("FLOAT", {"default": 0.9, "min": 0.0, "max": 1.0}),
437 |             }
438 |         }
439 | 
440 |     RETURN_TYPES = ("BBOX",)
441 |     FUNCTION = "create_bbox"
442 | 
443 |     def create_bbox(self, image_width, image_height, x1, y1, x2, y2):
444 |         bbox = {
445 |             "x1": int(x1 * image_width),
446 |             "y1": int(y1 * image_height),
447 |             "x2": int(x2 * image_width),
448 |             "y2": int(y2 * image_height),
449 |         }
450 |         return (bbox,)
451 | 
452 | class VisualizeBBoxesNode:
453 |     @classmethod
454 |     def INPUT_TYPES(cls):
455 |         return {
456 |             "required": {
457 |                 "image": ("IMAGE",),
458 |                 "bboxes": ("BBOX",),
459 |                 "color": ("COLOR", {"default": "#FF0000"}),
460 |                 "width": ("INT", {"default": 2, "min": 1, "max": 10}),
461 |             }
462 |         }
463 | 
464 |     RETURN_TYPES = ("IMAGE",)
465 |     FUNCTION = "visualize_bboxes"
466 | 
467 |     def visualize_bboxes(self, image, bboxes, color, width):
468 |         # Convert the PyTorch tensor to a PIL Image
469 |         pil_image = Image.fromarray((image[0].permute(1, 2, 0) * 255).byte().cpu().numpy())
470 |         draw = ImageDraw.Draw(pil_image)
471 | 
472 |         for bbox in bboxes:
473 |             draw.rectangle([bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]], outline=color, width=width)
474 | 
475 |         # Convert back to PyTorch tensor
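476 |         # NOTE (added caveat): ComfyUI IMAGE tensors are laid out as [B, H, W, C]; the permute
477 |         # calls above and below assume [C, H, W], so this unregistered node may need adjusting.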
478 |         tensor_image = torch.from_numpy(np.array(pil_image)).float() / 255.0
479 |         tensor_image = tensor_image.permute(2, 0, 1).unsqueeze(0)
480 | 
481 |         return (tensor_image,)
482 | 
483 | class BBoxToMaskNode:
484 |     @classmethod
485 |     def INPUT_TYPES(cls):
486 |         return {
487 |             "required": {
488 |                 "image_width": ("INT", {"default": 512, "min": 64, "max": 2048}),
489 |                 "image_height": ("INT", {"default": 512, "min": 64, "max": 2048}),
490 |                 "bbox": ("BBOX",),
491 |             }
492 |         }
493 | 
494 |     RETURN_TYPES = ("MASK",)
495 |     FUNCTION = "create_mask"
496 | 
497 |     def create_mask(self, image_width, image_height, bbox):
498 |         mask = torch.zeros((1, image_height, image_width))
499 | 
500 |         mask[0, bbox["y1"]:bbox["y2"], bbox["x1"]:bbox["x2"]] = 1.0
501 | 
502 |         return (mask,)
503 | 
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "fluxregionattention"
3 | description = "Implements Region Attention for the Flux model. Adds a RegionAttention node that takes regions (a mask plus a condition); the mask can be set from ComfyUI masks or from a bounding box in the FluxRegionBBOX node.\nThis code is not optimized and has a memory leak; if you hit an OOM error, just try running the query again (works on an RTX 3080). For generation it uses a regular prompt that influences the whole picture plus regions that each have their own prompt.\nThe base prompt is good for setting up the background and style of the image. This is a training-free technique and results are not always stable; sometimes you need to try several seeds or change the prompt."
4 | version = "1.0.0"
5 | license = {file = "LICENSE"}
6 | dependencies = [
7 |     "torch",
8 |     "xformers",
9 |     # einops and matplotlib are imported unconditionally by node.py
10 |     "einops",
11 |     "matplotlib",
12 | ]
13 | 
14 | [project.urls]
15 | Repository = "https://github.com/attashe/ComfyUI-FluxRegionAttention"
16 | # Used by Comfy Registry https://comfyregistry.org
17 | 
18 | [tool.comfy]
19 | PublisherId = ""
20 | DisplayName = "ComfyUI-FluxRegionAttention"
21 | Icon = ""
--------------------------------------------------------------------------------