├── .github
│   └── workflows
│       └── publish.yml
├── .gitignore
├── LICENSE
├── README.md
├── __init__.py
├── examples
│   ├── infinite_you_workflow.json
│   ├── multi_id_infinite_you_workflow.json
│   ├── multi_id_workflow.jpg
│   ├── teaser.jpg
│   └── workflow.jpg
├── infuse_net.py
├── nodes.py
├── pyproject.toml
├── requirements.txt
├── resampler.py
└── utils.py
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | name: Publish to Comfy registry
2 | on:
3 |   workflow_dispatch:
4 |   push:
5 |     branches:
6 |       - main
7 |       - master
8 |     paths:
9 |       - "pyproject.toml"
10 | 
11 | permissions:
12 |   issues: write
13 | 
14 | jobs:
15 |   publish-node:
16 |     name: Publish Custom Node to registry
17 |     runs-on: ubuntu-latest
18 |     if: ${{ github.repository_owner == 'bytedance' }}
19 |     steps:
20 |       - name: Check out code
21 |         uses: actions/checkout@v4
22 |         with:
23 |           submodules: true
24 |       - name: Publish Custom Node
25 |         uses: Comfy-Org/publish-node-action@v1
26 |         with:
27 |           ## Add your own personal access token to your GitHub repository secrets and reference it here.
28 |           personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }}
29 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | .idea
3 | .ipynb_checkpoints
4 | .gradio
5 | *.swp
6 | *.pyc
7 | __pycache__
8 | *.tar*
9 | *.zip
10 | *.pkl
11 | *.pyc
12 | *.bak
13 | *.png
14 | *.deb
15 |
16 | .isort.cfg
17 | .pre-commit-config.yaml
18 |
19 | dataset_stats
20 | debug*
21 | locks
22 | checkpoints
23 | pretrained_checkpoint
24 | ./models
25 | models
26 | results
27 | wandb
28 | tmp*
29 | env*
30 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## Official ComfyUI Support - InfiniteYou: Flexible Photo Recrafting While Preserving Your Identity
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 | This repository provides the official ComfyUI native node for [**InfiniteYou**](https://github.com/bytedance/InfiniteYou) with FLUX.
14 |
15 | ![InfiniteYou teaser](./examples/teaser.jpg)
16 |
17 |
18 | **Abstract**
19 |
20 | > *Achieving flexible and high-fidelity identity-preserved image generation remains formidable, particularly with advanced Diffusion Transformers (DiTs) like FLUX. We introduce **InfiniteYou (InfU)**, one of the earliest robust frameworks leveraging DiTs for this task. InfU addresses significant issues of existing methods, such as insufficient identity similarity, poor text-image alignment, and low generation quality and aesthetics. Central to InfU is InfuseNet, a component that injects identity features into the DiT base model via residual connections, enhancing identity similarity while maintaining generation capabilities. A multi-stage training strategy, including pretraining and supervised fine-tuning (SFT) with synthetic single-person-multiple-sample (SPMS) data, further improves text-image alignment, ameliorates image quality, and alleviates face copy-pasting. Extensive experiments demonstrate that InfU achieves state-of-the-art performance, surpassing existing baselines. In addition, the plug-and-play design of InfU ensures compatibility with various existing methods, offering a valuable contribution to the broader community.*
21 |
22 |
23 |
24 |
25 | ## 🛠️ Workflow Example
26 |
27 | This node adds InfiniteYou‑FLUX support to ComfyUI. In [infinite_you_workflow.json](./examples/infinite_you_workflow.json), you can find a simple workflow demonstrating its usage with either an empty face‑pose control image or a real face‑pose control image, configured to run at FLUX FP8 precision. It also includes an example of running the node with FLUX.1-schnell.
28 |
29 | ![Workflow example](./examples/workflow.jpg)
30 |
31 | **Extension:** We also provide an example [multi-ID workflow](examples/multi_id_infinite_you_workflow.json) for identity-preserved image generation of two people. It applies the single-ID InfiniteYou‑FLUX models to multiple masked regions with masked residual blending (sketched below) and is provided for reference only.
32 |
33 | ![Multi-ID workflow example](./examples/multi_id_workflow.jpg)
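
Conceptually, masked residual blending confines each identity's InfuseNet residual to its own region before the residuals are added back to the DiT stream; in the workflow this corresponds to chaining two `InfuseNetApply` nodes, each fed its own `id_embedding` and `control_mask`. A minimal sketch of the idea (the tensor layout and helper below are illustrative assumptions, not the node's actual implementation):

```python
import torch

def blend_masked_residuals(hidden, residuals, masks):
    # hidden:    base hidden states (e.g., B x C x H x W) -- assumed layout
    # residuals: per-identity InfuseNet residuals, same shape as hidden
    # masks:     per-identity region masks in [0, 1], broadcastable to hidden
    out = hidden
    for res, mask in zip(residuals, masks):
        out = out + mask * res  # each identity only affects its masked region
    return out
```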
34 |
35 |
36 | ## 🔧 Requirements and Installation
37 |
38 | ### Dependencies
39 |
40 | 1. Install [ComfyUI](https://github.com/comfyanonymous/ComfyUI?tab=readme-ov-file#get-started).
41 |
42 | 2. Clone this repo under `ComfyUI/custom_nodes` and install the dependencies:
43 | ```
44 | cd ComfyUI/custom_nodes
45 | git clone https://github.com/bytedance/ComfyUI_InfiniteYou.git
46 |
47 | cd ComfyUI_InfiniteYou
48 | pip install -r requirements.txt
49 | ```
50 |
51 | * Our InfiniteYou node has been added to the official Comfy Registry to simplify installation: https://registry.comfy.org/publishers/yuminjia/nodes/infiniteyou. You can therefore also install this official node by searching for `ComfyUI_InfiniteYou` in the ComfyUI Manager.
52 |
53 |
54 | ### Memory Requirements
55 |
56 | Full-performance BF16 model inference requires around **43GB** of peak VRAM. Running at FP8 precision requires around **24GB**.
57 |
58 |
59 | ## 💡 Usage
60 |
61 | 1. Restart ComfyUI.
62 |
63 | 2. Import the [workflow](examples/infinite_you_workflow.json) from the [examples folder](./examples). Use the [multi-ID workflow](examples/multi_id_infinite_you_workflow.json) if you need multiple identities.
64 |
65 | * Some [important usage tips](https://github.com/bytedance/InfiniteYou?tab=readme-ov-file#-important-usage-tips) can be found in our main InfiniteYou repository.
66 |
67 |
68 | ## 🏰 Required Models
69 |
70 | ### InfiniteYou and InsightFace Detection Models
71 |
72 | This node automatically downloads the following models at runtime if they do not already exist. Alternatively, you may download them manually into the locations below. For InfiniteYou, you need at least `image_proj_model.bin` and the `infusenet_*.safetensors` file of the corresponding model version.
73 |
74 | | Model | Location |
75 | | ---- | ---- |
76 | | [InfiniteYou](https://huggingface.co/ByteDance/InfiniteYou/tree/main/infu_flux_v1.0) | `ComfyUI/models/infinite_you/` |
77 | | [InsightFace AntelopeV2](https://huggingface.co/ByteDance/InfiniteYou/tree/main/supports/insightface/models/antelopev2) | `ComfyUI/models/insightface/models/antelopev2` |
78 |
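
If you prefer scripted downloads, here is a minimal sketch using `huggingface_hub` (the file patterns and target directories are assumptions based on the table above; `snapshot_download` preserves repo-relative paths, so move files into the exact locations listed if they end up nested differently):

```python
from huggingface_hub import snapshot_download

# InfiniteYou weights (image projector + InfuseNet) for one model version.
snapshot_download(
    repo_id="ByteDance/InfiniteYou",
    allow_patterns=["infu_flux_v1.0/aes_stage2/*"],
    local_dir="ComfyUI/models/infinite_you",
)

# InsightFace AntelopeV2 face detection/recognition models.
snapshot_download(
    repo_id="ByteDance/InfiniteYou",
    allow_patterns=["supports/insightface/models/antelopev2/*"],
    local_dir="ComfyUI/models/insightface",
)
```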
79 | You may follow [ComfyUI FLUX examples](https://comfyanonymous.github.io/ComfyUI_examples/flux/) to download FLUX and other models for full-performance inference.
80 |
81 |
82 | ### Other Required Models for Running FP8 Precision
83 |
84 | The FP8 InfiniteYou model can also be downloaded automatically or manually as described above. To run at FP8 precision, the following additional models must be downloaded manually into these locations.
85 |
86 | | Model | Location |
87 | | ---- | ---- |
88 | | [FLUX FP8](https://huggingface.co/Kijai/flux-fp8/tree/main) | `ComfyUI/models/diffusion_models` |
89 | | [FLUX VAE](https://huggingface.co/black-forest-labs/FLUX.1-schnell/blob/main/ae.safetensors) | `ComfyUI/models/vae` |
90 | | [Text Encoders FP8](https://huggingface.co/comfyanonymous/flux_text_encoders/tree/main) | `ComfyUI/models/text_encoders` |
91 |
92 |
93 | ## 📜 Disclaimer and Licenses
94 |
95 | The images used in this repository and related demos are sourced from consented subjects or generated by the models. These pictures are intended solely to showcase the capabilities of our research. If you have any concerns, please feel free to contact us, and we will promptly remove any inappropriate content.
96 |
97 | The use of the released code, model, and demo must strictly adhere to the respective licenses. Our code is released under the [Apache License 2.0](./LICENSE), and our model is released under the [Creative Commons Attribution-NonCommercial 4.0 International Public License](https://huggingface.co/ByteDance/InfiniteYou/blob/main/LICENSE) for academic research purposes only. Any manual or automatic downloading of the face models from [InsightFace](https://github.com/deepinsight/insightface), the [FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev) base model, LoRAs ([Realism](https://civitai.com/models/631986?modelVersionId=706528) and [Anti-blur](https://civitai.com/models/675581/anti-blur-flux-lora)), *etc.*, must follow their original licenses and be used only for academic research purposes.
98 |
99 | This research aims to positively impact the field of Generative AI. Any usage of this method must be responsible and comply with local laws. The developers do not assume any responsibility for any potential misuse.
100 |
101 |
102 | ## 📖 Citation
103 |
104 | If you find InfiniteYou useful for your research or applications, please cite our paper:
105 |
106 | ```bibtex
107 | @article{jiang2025infiniteyou,
108 | title={{InfiniteYou}: Flexible Photo Recrafting While Preserving Your Identity},
109 | author={Jiang, Liming and Yan, Qing and Jia, Yumin and Liu, Zichuan and Kang, Hao and Lu, Xin},
110 | journal={arXiv preprint},
111 | volume={arXiv:2503.16418},
112 | year={2025}
113 | }
114 | ```
115 |
116 | We also appreciate it if you could give a star :star: to this repository and our [main repository](https://github.com/bytedance/InfiniteYou). Thanks a lot!
117 |
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2025 Bytedance Ltd. and/or its affiliates. All rights reserved.
2 |
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 |
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from .nodes import *
16 |
17 | __all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS"]
18 |
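
For context, `nodes.py` populates these two dictionaries following the standard ComfyUI custom-node convention. A minimal sketch of that convention with a hypothetical node (the real classes, such as `InfuseNetApply`, live in `nodes.py`):

```python
# Hypothetical example of the ComfyUI node convention that nodes.py follows.
class ExampleNode:
    @classmethod
    def INPUT_TYPES(cls):
        # Declares the inputs/widgets ComfyUI renders for the node.
        return {"required": {"image": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "run"          # method ComfyUI calls when the node executes
    CATEGORY = "InfiniteYou"  # menu category in the node browser

    def run(self, image):
        return (image,)

NODE_CLASS_MAPPINGS = {"ExampleNode": ExampleNode}
NODE_DISPLAY_NAME_MAPPINGS = {"ExampleNode": "Example Node"}
```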
--------------------------------------------------------------------------------
/examples/infinite_you_workflow.json:
--------------------------------------------------------------------------------
1 | {
2 | "id": "c1a3d0d1-f0cb-4369-83ae-273696df248c",
3 | "revision": 0,
4 | "last_node_id": 116,
5 | "last_link_id": 185,
6 | "nodes": [
7 | {
8 | "id": 52,
9 | "type": "CLIPTextEncode",
10 | "pos": [
11 | -568.8687133789062,
12 | 687.748046875
13 | ],
14 | "size": [
15 | 397.7880859375,
16 | 150.98748779296875
17 | ],
18 | "flags": {
19 | "collapsed": true
20 | },
21 | "order": 11,
22 | "mode": 0,
23 | "inputs": [
24 | {
25 | "name": "clip",
26 | "type": "CLIP",
27 | "link": 94
28 | }
29 | ],
30 | "outputs": [
31 | {
32 | "name": "CONDITIONING",
33 | "shape": 3,
34 | "type": "CONDITIONING",
35 | "slot_index": 0,
36 | "links": [
37 | 65,
38 | 132,
39 | 159
40 | ]
41 | }
42 | ],
43 | "title": "CLIP Text Encode (EMPTY)",
44 | "properties": {
45 | "Node name for S&R": "CLIPTextEncode"
46 | },
47 | "widgets_values": [
48 | ""
49 | ]
50 | },
51 | {
52 | "id": 110,
53 | "type": "ExtractIDEmbedding",
54 | "pos": [
55 | -565.7846069335938,
56 | 778.5155639648438
57 | ],
58 | "size": [
59 | 367.79998779296875,
60 | 86
61 | ],
62 | "flags": {},
63 | "order": 13,
64 | "mode": 0,
65 | "inputs": [
66 | {
67 | "name": "face_detector",
68 | "type": "MODEL",
69 | "link": 172
70 | },
71 | {
72 | "name": "arcface_model",
73 | "type": "MODEL",
74 | "link": 173
75 | },
76 | {
77 | "name": "image_proj_model",
78 | "type": "MODEL",
79 | "link": 174
80 | },
81 | {
82 | "name": "image",
83 | "type": "IMAGE",
84 | "link": 175
85 | }
86 | ],
87 | "outputs": [
88 | {
89 | "name": "CONDITIONING",
90 | "type": "CONDITIONING",
91 | "links": [
92 | 176
93 | ]
94 | }
95 | ],
96 | "properties": {
97 | "Node name for S&R": "ExtractIDEmbedding"
98 | },
99 | "widgets_values": []
100 | },
101 | {
102 | "id": 45,
103 | "type": "SaveImage",
104 | "pos": [
105 | 668.8138427734375,
106 | 288.35491943359375
107 | ],
108 | "size": [
109 | 688.4625244140625,
110 | 863.3844604492188
111 | ],
112 | "flags": {
113 | "collapsed": false
114 | },
115 | "order": 15,
116 | "mode": 0,
117 | "inputs": [
118 | {
119 | "name": "images",
120 | "type": "IMAGE",
121 | "link": 60
122 | }
123 | ],
124 | "outputs": [],
125 | "properties": {
126 | "Node name for S&R": "SaveImage"
127 | },
128 | "widgets_values": [
129 | "ComfyUI",
130 | ""
131 | ]
132 | },
133 | {
134 | "id": 44,
135 | "type": "VAEDecode",
136 | "pos": [
137 | 315.2570495605469,
138 | 300.6652526855469
139 | ],
140 | "size": [
141 | 210,
142 | 46
143 | ],
144 | "flags": {
145 | "collapsed": true
146 | },
147 | "order": 14,
148 | "mode": 0,
149 | "inputs": [
150 | {
151 | "name": "samples",
152 | "type": "LATENT",
153 | "link": 58
154 | },
155 | {
156 | "name": "vae",
157 | "type": "VAE",
158 | "link": 59
159 | }
160 | ],
161 | "outputs": [
162 | {
163 | "name": "IMAGE",
164 | "type": "IMAGE",
165 | "slot_index": 0,
166 | "links": [
167 | 60
168 | ]
169 | }
170 | ],
171 | "properties": {
172 | "Node name for S&R": "VAEDecode"
173 | },
174 | "widgets_values": []
175 | },
176 | {
177 | "id": 109,
178 | "type": "InfuseNetApply",
179 | "pos": [
180 | 278.14422607421875,
181 | 803.1834716796875
182 | ],
183 | "size": [
184 | 315,
185 | 206
186 | ],
187 | "flags": {},
188 | "order": 17,
189 | "mode": 0,
190 | "inputs": [
191 | {
192 | "name": "positive",
193 | "type": "CONDITIONING",
194 | "link": 169
195 | },
196 | {
197 | "name": "negative",
198 | "type": "CONDITIONING",
199 | "link": 159
200 | },
201 | {
202 | "name": "id_embedding",
203 | "type": "CONDITIONING",
204 | "link": 176
205 | },
206 | {
207 | "name": "control_net",
208 | "type": "CONTROL_NET",
209 | "link": 166
210 | },
211 | {
212 | "name": "image",
213 | "shape": 7,
214 | "type": "IMAGE",
215 | "link": 180
216 | },
217 | {
218 | "name": "vae",
219 | "shape": 7,
220 | "type": "VAE",
221 | "link": 170
222 | }
223 | ],
224 | "outputs": [
225 | {
226 | "name": "positive",
227 | "type": "CONDITIONING",
228 | "links": [
229 | 163
230 | ]
231 | },
232 | {
233 | "name": "negative",
234 | "type": "CONDITIONING",
235 | "links": [
236 | 164
237 | ]
238 | }
239 | ],
240 | "properties": {
241 | "Node name for S&R": "InfuseNetApply"
242 | },
243 | "widgets_values": [
244 | 1,
245 | 0,
246 | 1
247 | ]
248 | },
249 | {
250 | "id": 43,
251 | "type": "EmptyLatentImage",
252 | "pos": [
253 | -138.02438354492188,
254 | 463.6690368652344
255 | ],
256 | "size": [
257 | 242.7444610595703,
258 | 106
259 | ],
260 | "flags": {
261 | "collapsed": false
262 | },
263 | "order": 0,
264 | "mode": 0,
265 | "inputs": [],
266 | "outputs": [
267 | {
268 | "name": "LATENT",
269 | "type": "LATENT",
270 | "slot_index": 0,
271 | "links": [
272 | 66
273 | ]
274 | }
275 | ],
276 | "properties": {
277 | "Node name for S&R": "EmptyLatentImage"
278 | },
279 | "widgets_values": [
280 | 864,
281 | 1152,
282 | 1
283 | ]
284 | },
285 | {
286 | "id": 92,
287 | "type": "PrimitiveNode",
288 | "pos": [
289 | -140.0155487060547,
290 | 623.6337890625
291 | ],
292 | "size": [
293 | 268.72747802734375,
294 | 82
295 | ],
296 | "flags": {
297 | "collapsed": false
298 | },
299 | "order": 1,
300 | "mode": 0,
301 | "inputs": [],
302 | "outputs": [
303 | {
304 | "name": "INT",
305 | "type": "INT",
306 | "widget": {
307 | "name": "seed"
308 | },
309 | "links": [
310 | 127
311 | ]
312 | }
313 | ],
314 | "title": "seed",
315 | "properties": {
316 | "Run widget replace on values": false
317 | },
318 | "widgets_values": [
319 | 1117450117680779,
320 | "randomize"
321 | ]
322 | },
323 | {
324 | "id": 106,
325 | "type": "ExtractFacePoseImage",
326 | "pos": [
327 | -125.12596130371094,
328 | 1233.2313232421875
329 | ],
330 | "size": [
331 | 315,
332 | 102
333 | ],
334 | "flags": {},
335 | "order": 12,
336 | "mode": 0,
337 | "inputs": [
338 | {
339 | "name": "face_detector",
340 | "type": "MODEL",
341 | "link": 150
342 | },
343 | {
344 | "name": "image",
345 | "type": "IMAGE",
346 | "link": 151
347 | }
348 | ],
349 | "outputs": [
350 | {
351 | "name": "IMAGE",
352 | "type": "IMAGE",
353 | "links": [
354 | 156
355 | ]
356 | }
357 | ],
358 | "properties": {
359 | "Node name for S&R": "ExtractFacePoseImage"
360 | },
361 | "widgets_values": [
362 | 864,
363 | 1152
364 | ]
365 | },
366 | {
367 | "id": 105,
368 | "type": "EmptyImage",
369 | "pos": [
370 | -125.57537841796875,
371 | 1033.876220703125
372 | ],
373 | "size": [
374 | 315,
375 | 130
376 | ],
377 | "flags": {
378 | "collapsed": false
379 | },
380 | "order": 2,
381 | "mode": 0,
382 | "inputs": [],
383 | "outputs": [
384 | {
385 | "name": "IMAGE",
386 | "type": "IMAGE",
387 | "links": [
388 | 178
389 | ]
390 | }
391 | ],
392 | "title": "Empty Pose Image",
393 | "properties": {
394 | "Node name for S&R": "EmptyImage"
395 | },
396 | "widgets_values": [
397 | 864,
398 | 1152,
399 | 1,
400 | 0
401 | ]
402 | },
403 | {
404 | "id": 46,
405 | "type": "VAELoader",
406 | "pos": [
407 | -128.3144073486328,
408 | 269.4505615234375
409 | ],
410 | "size": [
411 | 210,
412 | 58
413 | ],
414 | "flags": {},
415 | "order": 3,
416 | "mode": 0,
417 | "inputs": [],
418 | "outputs": [
419 | {
420 | "name": "VAE",
421 | "shape": 3,
422 | "type": "VAE",
423 | "slot_index": 0,
424 | "links": [
425 | 59,
426 | 137,
427 | 161,
428 | 170
429 | ]
430 | }
431 | ],
432 | "properties": {
433 | "Node name for S&R": "VAELoader"
434 | },
435 | "widgets_values": [
436 | "ae.safetensors"
437 | ]
438 | },
439 | {
440 | "id": 51,
441 | "type": "KSampler",
442 | "pos": [
443 | 278.04345703125,
444 | 418.0480041503906
445 | ],
446 | "size": [
447 | 210,
448 | 238
449 | ],
450 | "flags": {},
451 | "order": 16,
452 | "mode": 0,
453 | "inputs": [
454 | {
455 | "name": "model",
456 | "type": "MODEL",
457 | "link": 185
458 | },
459 | {
460 | "name": "positive",
461 | "type": "CONDITIONING",
462 | "link": 163
463 | },
464 | {
465 | "name": "negative",
466 | "type": "CONDITIONING",
467 | "link": 164
468 | },
469 | {
470 | "name": "latent_image",
471 | "type": "LATENT",
472 | "link": 66
473 | },
474 | {
475 | "name": "seed",
476 | "type": "INT",
477 | "widget": {
478 | "name": "seed"
479 | },
480 | "link": 127
481 | }
482 | ],
483 | "outputs": [
484 | {
485 | "name": "LATENT",
486 | "shape": 3,
487 | "type": "LATENT",
488 | "slot_index": 0,
489 | "links": [
490 | 58
491 | ]
492 | }
493 | ],
494 | "properties": {
495 | "Node name for S&R": "KSampler"
496 | },
497 | "widgets_values": [
498 | 1117450117680779,
499 | "fixed",
500 | 4,
501 | 1,
502 | "euler",
503 | "beta",
504 | 1
505 | ]
506 | },
507 | {
508 | "id": 48,
509 | "type": "DualCLIPLoader",
510 | "pos": [
511 | -987.113525390625,
512 | 476.66717529296875
513 | ],
514 | "size": [
515 | 356.2766418457031,
516 | 130
517 | ],
518 | "flags": {},
519 | "order": 4,
520 | "mode": 0,
521 | "inputs": [],
522 | "outputs": [
523 | {
524 | "name": "CLIP",
525 | "shape": 3,
526 | "type": "CLIP",
527 | "slot_index": 0,
528 | "links": [
529 | 93,
530 | 94
531 | ]
532 | }
533 | ],
534 | "properties": {
535 | "Node name for S&R": "DualCLIPLoader"
536 | },
537 | "widgets_values": [
538 | "t5xxl_fp8_e4m3fn.safetensors",
539 | "clip_l.safetensors",
540 | "flux",
541 | "default"
542 | ]
543 | },
544 | {
545 | "id": 108,
546 | "type": "InfuseNetLoader",
547 | "pos": [
548 | -128.15025329589844,
549 | 920.6913452148438
550 | ],
551 | "size": [
552 | 315,
553 | 58
554 | ],
555 | "flags": {},
556 | "order": 5,
557 | "mode": 0,
558 | "inputs": [],
559 | "outputs": [
560 | {
561 | "name": "CONTROL_NET",
562 | "type": "CONTROL_NET",
563 | "links": [
564 | 157,
565 | 166
566 | ]
567 | }
568 | ],
569 | "properties": {
570 | "Node name for S&R": "InfuseNetLoader"
571 | },
572 | "widgets_values": [
573 | "aes_stage2/infusenet_aes_fp8e4m3fn.safetensors"
574 | ]
575 | },
576 | {
577 | "id": 97,
578 | "type": "LoadImage",
579 | "pos": [
580 | -985.9622802734375,
581 | 981.4930419921875
582 | ],
583 | "size": [
584 | 315,
585 | 314
586 | ],
587 | "flags": {},
588 | "order": 6,
589 | "mode": 0,
590 | "inputs": [],
591 | "outputs": [
592 | {
593 | "name": "IMAGE",
594 | "type": "IMAGE",
595 | "links": [
596 | 139,
597 | 144,
598 | 167,
599 | 175
600 | ]
601 | },
602 | {
603 | "name": "MASK",
604 | "type": "MASK",
605 | "links": null
606 | }
607 | ],
608 | "title": "Load ID Image",
609 | "properties": {
610 | "Node name for S&R": "LoadImage"
611 | },
612 | "widgets_values": [
613 | "woman.jpg",
614 | "image",
615 | ""
616 | ]
617 | },
618 | {
619 | "id": 107,
620 | "type": "LoadImage",
621 | "pos": [
622 | -558.5816650390625,
623 | 982.09228515625
624 | ],
625 | "size": [
626 | 315,
627 | 314
628 | ],
629 | "flags": {},
630 | "order": 7,
631 | "mode": 0,
632 | "inputs": [],
633 | "outputs": [
634 | {
635 | "name": "IMAGE",
636 | "type": "IMAGE",
637 | "links": [
638 | 151
639 | ]
640 | },
641 | {
642 | "name": "MASK",
643 | "type": "MASK",
644 | "links": null
645 | }
646 | ],
647 | "title": "Load Control Image",
648 | "properties": {
649 | "Node name for S&R": "LoadImage"
650 | },
651 | "widgets_values": [
652 | "woman.jpg",
653 | "image",
654 | ""
655 | ]
656 | },
657 | {
658 | "id": 102,
659 | "type": "IDEmbeddingModelLoader",
660 | "pos": [
661 | -999.3194580078125,
662 | 746.9200439453125
663 | ],
664 | "size": [
665 | 374.6639709472656,
666 | 170.36888122558594
667 | ],
668 | "flags": {
669 | "collapsed": false
670 | },
671 | "order": 8,
672 | "mode": 0,
673 | "inputs": [],
674 | "outputs": [
675 | {
676 | "name": "FACE_DETECTOR",
677 | "type": "MODEL",
678 | "links": [
679 | 140,
680 | 150,
681 | 172
682 | ]
683 | },
684 | {
685 | "name": "ARCFACE_MODEL",
686 | "type": "MODEL",
687 | "links": [
688 | 141,
689 | 173
690 | ]
691 | },
692 | {
693 | "name": "IMAGE_PROJ_MODEL",
694 | "type": "MODEL",
695 | "links": [
696 | 142,
697 | 174
698 | ]
699 | }
700 | ],
701 | "properties": {
702 | "Node name for S&R": "IDEmbeddingModelLoader"
703 | },
704 | "widgets_values": [
705 | "aes_stage2/image_proj_model.bin",
706 | 8,
707 | "CUDA",
708 | "AUTO"
709 | ]
710 | },
711 | {
712 | "id": 50,
713 | "type": "CLIPTextEncodeFlux",
714 | "pos": [
715 | -572.113525390625,
716 | 265.6330871582031
717 | ],
718 | "size": [
719 | 357.9499816894531,
720 | 361.7095031738281
721 | ],
722 | "flags": {},
723 | "order": 10,
724 | "mode": 0,
725 | "inputs": [
726 | {
727 | "name": "clip",
728 | "type": "CLIP",
729 | "link": 93
730 | }
731 | ],
732 | "outputs": [
733 | {
734 | "name": "CONDITIONING",
735 | "shape": 3,
736 | "type": "CONDITIONING",
737 | "slot_index": 0,
738 | "links": [
739 | 64,
740 | 133,
741 | 169
742 | ]
743 | }
744 | ],
745 | "properties": {
746 | "Node name for S&R": "CLIPTextEncodeFlux"
747 | },
748 | "widgets_values": [
749 | "A young woman holding a sign with the text \"InfiniteYou\", \"Infinite\" in black and \"You\" in red, pure background",
750 | "A young woman holding a sign with the text \"InfiniteYou\", \"Infinite\" in black and \"You\" in red, pure background",
751 | 3.5
752 | ]
753 | },
754 | {
755 | "id": 47,
756 | "type": "UNETLoader",
757 | "pos": [
758 | -980.1280517578125,
759 | 264.6694030761719
760 | ],
761 | "size": [
762 | 270,
763 | 82
764 | ],
765 | "flags": {
766 | "collapsed": false
767 | },
768 | "order": 9,
769 | "mode": 0,
770 | "inputs": [],
771 | "outputs": [
772 | {
773 | "name": "MODEL",
774 | "shape": 3,
775 | "type": "MODEL",
776 | "slot_index": 0,
777 | "links": [
778 | 149,
779 | 184,
780 | 185
781 | ]
782 | }
783 | ],
784 | "properties": {
785 | "Node name for S&R": "UNETLoader"
786 | },
787 | "widgets_values": [
788 | "flux1-schnell-fp8-e4m3fn.safetensors",
789 | "fp8_e4m3fn_fast"
790 | ]
791 | }
792 | ],
793 | "links": [
794 | [
795 | 58,
796 | 51,
797 | 0,
798 | 44,
799 | 0,
800 | "LATENT"
801 | ],
802 | [
803 | 59,
804 | 46,
805 | 0,
806 | 44,
807 | 1,
808 | "VAE"
809 | ],
810 | [
811 | 60,
812 | 44,
813 | 0,
814 | 45,
815 | 0,
816 | "IMAGE"
817 | ],
818 | [
819 | 66,
820 | 43,
821 | 0,
822 | 51,
823 | 3,
824 | "LATENT"
825 | ],
826 | [
827 | 93,
828 | 48,
829 | 0,
830 | 50,
831 | 0,
832 | "CLIP"
833 | ],
834 | [
835 | 94,
836 | 48,
837 | 0,
838 | 52,
839 | 0,
840 | "CLIP"
841 | ],
842 | [
843 | 127,
844 | 92,
845 | 0,
846 | 51,
847 | 4,
848 | "INT"
849 | ],
850 | [
851 | 150,
852 | 102,
853 | 0,
854 | 106,
855 | 0,
856 | "MODEL"
857 | ],
858 | [
859 | 151,
860 | 107,
861 | 0,
862 | 106,
863 | 1,
864 | "IMAGE"
865 | ],
866 | [
867 | 159,
868 | 52,
869 | 0,
870 | 109,
871 | 1,
872 | "CONDITIONING"
873 | ],
874 | [
875 | 163,
876 | 109,
877 | 0,
878 | 51,
879 | 1,
880 | "CONDITIONING"
881 | ],
882 | [
883 | 164,
884 | 109,
885 | 1,
886 | 51,
887 | 2,
888 | "CONDITIONING"
889 | ],
890 | [
891 | 166,
892 | 108,
893 | 0,
894 | 109,
895 | 3,
896 | "CONTROL_NET"
897 | ],
898 | [
899 | 169,
900 | 50,
901 | 0,
902 | 109,
903 | 0,
904 | "CONDITIONING"
905 | ],
906 | [
907 | 170,
908 | 46,
909 | 0,
910 | 109,
911 | 5,
912 | "VAE"
913 | ],
914 | [
915 | 172,
916 | 102,
917 | 0,
918 | 110,
919 | 0,
920 | "MODEL"
921 | ],
922 | [
923 | 173,
924 | 102,
925 | 1,
926 | 110,
927 | 1,
928 | "MODEL"
929 | ],
930 | [
931 | 174,
932 | 102,
933 | 2,
934 | 110,
935 | 2,
936 | "MODEL"
937 | ],
938 | [
939 | 175,
940 | 97,
941 | 0,
942 | 110,
943 | 3,
944 | "IMAGE"
945 | ],
946 | [
947 | 176,
948 | 110,
949 | 0,
950 | 109,
951 | 2,
952 | "CONDITIONING"
953 | ],
954 | [
955 | 180,
956 | 105,
957 | 0,
958 | 109,
959 | 4,
960 | "IMAGE"
961 | ],
962 | [
963 | 184,
964 | 47,
965 | 0,
966 | 51,
967 | 0,
968 | "MODEL"
969 | ],
970 | [
971 | 185,
972 | 47,
973 | 0,
974 | 51,
975 | 0,
976 | "MODEL"
977 | ]
978 | ],
979 | "groups": [],
980 | "config": {},
981 | "extra": {
982 | "ds": {
983 | "scale": 0.7400249944258194,
984 | "offset": [
985 | 1149.8946398425187,
986 | 572.9878718971356
987 | ]
988 | }
989 | },
990 | "version": 0.4
991 | }
--------------------------------------------------------------------------------
/examples/multi_id_infinite_you_workflow.json:
--------------------------------------------------------------------------------
1 | {
2 | "id": "c1a3d0d1-f0cb-4369-83ae-273696df248c",
3 | "revision": 0,
4 | "last_node_id": 165,
5 | "last_link_id": 320,
6 | "nodes": [
7 | {
8 | "id": 158,
9 | "type": "EmptySD3LatentImage",
10 | "pos": [
11 | -437.91448974609375,
12 | 287.0752258300781
13 | ],
14 | "size": [
15 | 315,
16 | 106
17 | ],
18 | "flags": {},
19 | "order": 0,
20 | "mode": 0,
21 | "inputs": [],
22 | "outputs": [
23 | {
24 | "label": "LATENT",
25 | "name": "LATENT",
26 | "type": "LATENT",
27 | "slot_index": 0,
28 | "links": [
29 | 266
30 | ]
31 | }
32 | ],
33 | "properties": {
34 | "Node name for S&R": "EmptySD3LatentImage"
35 | },
36 | "widgets_values": [
37 | 864,
38 | 1152,
39 | 1
40 | ]
41 | },
42 | {
43 | "id": 159,
44 | "type": "RandomNoise",
45 | "pos": [
46 | -448.7533874511719,
47 | 448.651611328125
48 | ],
49 | "size": [
50 | 315,
51 | 82
52 | ],
53 | "flags": {},
54 | "order": 1,
55 | "mode": 0,
56 | "inputs": [],
57 | "outputs": [
58 | {
59 | "label": "NOISE",
60 | "name": "NOISE",
61 | "type": "NOISE",
62 | "links": [
63 | 262
64 | ]
65 | }
66 | ],
67 | "properties": {
68 | "Node name for S&R": "RandomNoise"
69 | },
70 | "widgets_values": [
71 | 145190283448493,
72 | "randomize"
73 | ],
74 | "color": "#2a363b",
75 | "bgcolor": "#3f5159"
76 | },
77 | {
78 | "id": 154,
79 | "type": "DualCLIPLoader",
80 | "pos": [
81 | -874.8045043945312,
82 | 63.95768737792969
83 | ],
84 | "size": [
85 | 315,
86 | 130
87 | ],
88 | "flags": {},
89 | "order": 2,
90 | "mode": 0,
91 | "inputs": [],
92 | "outputs": [
93 | {
94 | "label": "CLIP",
95 | "name": "CLIP",
96 | "type": "CLIP",
97 | "slot_index": 0,
98 | "links": [
99 | 269
100 | ]
101 | }
102 | ],
103 | "properties": {
104 | "Node name for S&R": "DualCLIPLoader"
105 | },
106 | "widgets_values": [
107 | "t5xxl_fp8_e4m3fn.safetensors",
108 | "clip_l.safetensors",
109 | "flux",
110 | "default"
111 | ]
112 | },
113 | {
114 | "id": 152,
115 | "type": "VAEDecode",
116 | "pos": [
117 | 911.7118530273438,
118 | 82.94072723388672
119 | ],
120 | "size": [
121 | 210,
122 | 46
123 | ],
124 | "flags": {},
125 | "order": 24,
126 | "mode": 0,
127 | "inputs": [
128 | {
129 | "label": "samples",
130 | "name": "samples",
131 | "type": "LATENT",
132 | "link": 315
133 | },
134 | {
135 | "label": "vae",
136 | "name": "vae",
137 | "type": "VAE",
138 | "link": 268
139 | }
140 | ],
141 | "outputs": [
142 | {
143 | "label": "IMAGE",
144 | "name": "IMAGE",
145 | "type": "IMAGE",
146 | "slot_index": 0,
147 | "links": [
148 | 276
149 | ]
150 | }
151 | ],
152 | "properties": {
153 | "Node name for S&R": "VAEDecode"
154 | },
155 | "widgets_values": []
156 | },
157 | {
158 | "id": 155,
159 | "type": "VAELoader",
160 | "pos": [
161 | 635.755615234375,
162 | 728.4552612304688
163 | ],
164 | "size": [
165 | 311.81634521484375,
166 | 60.429901123046875
167 | ],
168 | "flags": {},
169 | "order": 3,
170 | "mode": 0,
171 | "inputs": [],
172 | "outputs": [
173 | {
174 | "label": "VAE",
175 | "name": "VAE",
176 | "type": "VAE",
177 | "slot_index": 0,
178 | "links": [
179 | 268,
180 | 296,
181 | 301
182 | ]
183 | }
184 | ],
185 | "properties": {
186 | "Node name for S&R": "VAELoader"
187 | },
188 | "widgets_values": [
189 | "ae.safetensors"
190 | ]
191 | },
192 | {
193 | "id": 150,
194 | "type": "BasicGuider",
195 | "pos": [
196 | 344.4640197753906,
197 | 730.0437622070312
198 | ],
199 | "size": [
200 | 222.3482666015625,
201 | 46
202 | ],
203 | "flags": {},
204 | "order": 22,
205 | "mode": 0,
206 | "inputs": [
207 | {
208 | "label": "model",
209 | "name": "model",
210 | "type": "MODEL",
211 | "link": 320
212 | },
213 | {
214 | "label": "conditioning",
215 | "name": "conditioning",
216 | "type": "CONDITIONING",
217 | "link": 304
218 | }
219 | ],
220 | "outputs": [
221 | {
222 | "label": "GUIDER",
223 | "name": "GUIDER",
224 | "type": "GUIDER",
225 | "slot_index": 0,
226 | "links": [
227 | 263
228 | ]
229 | }
230 | ],
231 | "properties": {
232 | "Node name for S&R": "BasicGuider"
233 | },
234 | "widgets_values": []
235 | },
236 | {
237 | "id": 160,
238 | "type": "KSamplerSelect",
239 | "pos": [
240 | -433.69488525390625,
241 | 610.5037231445312
242 | ],
243 | "size": [
244 | 315,
245 | 58
246 | ],
247 | "flags": {},
248 | "order": 4,
249 | "mode": 0,
250 | "inputs": [],
251 | "outputs": [
252 | {
253 | "label": "SAMPLER",
254 | "name": "SAMPLER",
255 | "type": "SAMPLER",
256 | "links": [
257 | 264
258 | ]
259 | }
260 | ],
261 | "properties": {
262 | "Node name for S&R": "KSamplerSelect"
263 | },
264 | "widgets_values": [
265 | "euler"
266 | ]
267 | },
268 | {
269 | "id": 120,
270 | "type": "LoadImage",
271 | "pos": [
272 | 965.3735961914062,
273 | 1622.5902099609375
274 | ],
275 | "size": [
276 | 315,
277 | 314
278 | ],
279 | "flags": {},
280 | "order": 5,
281 | "mode": 0,
282 | "inputs": [],
283 | "outputs": [
284 | {
285 | "name": "IMAGE",
286 | "type": "IMAGE",
287 | "links": [
288 | 187
289 | ]
290 | },
291 | {
292 | "name": "MASK",
293 | "type": "MASK",
294 | "links": null
295 | }
296 | ],
297 | "title": "Load ID Image",
298 | "properties": {
299 | "Node name for S&R": "LoadImage"
300 | },
301 | "widgets_values": [
302 | "man.jpg",
303 | "image"
304 | ]
305 | },
306 | {
307 | "id": 134,
308 | "type": "LoadImageMask",
309 | "pos": [
310 | 1344.5068359375,
311 | 1615.87109375
312 | ],
313 | "size": [
314 | 304.0878601074219,
315 | 330
316 | ],
317 | "flags": {},
318 | "order": 6,
319 | "mode": 0,
320 | "inputs": [],
321 | "outputs": [
322 | {
323 | "name": "MASK",
324 | "type": "MASK",
325 | "links": [
326 | 302,
327 | 306
328 | ]
329 | }
330 | ],
331 | "properties": {
332 | "Node name for S&R": "LoadImageMask"
333 | },
334 | "widgets_values": [
335 | "mask_right_half.png",
336 | "red",
337 | "image"
338 | ]
339 | },
340 | {
341 | "id": 119,
342 | "type": "ExtractFacePoseImage",
343 | "pos": [
344 | 1716.77392578125,
345 | 1481.45556640625
346 | ],
347 | "size": [
348 | 210,
349 | 122
350 | ],
351 | "flags": {},
352 | "order": 16,
353 | "mode": 0,
354 | "inputs": [
355 | {
356 | "name": "face_detector",
357 | "type": "MODEL",
358 | "link": 203
359 | },
360 | {
361 | "name": "image",
362 | "type": "IMAGE",
363 | "link": 307
364 | },
365 | {
366 | "name": "mask",
367 | "shape": 7,
368 | "type": "MASK",
369 | "link": 306
370 | }
371 | ],
372 | "outputs": [
373 | {
374 | "name": "IMAGE",
375 | "type": "IMAGE",
376 | "links": [
377 | 300
378 | ]
379 | }
380 | ],
381 | "properties": {
382 | "Node name for S&R": "ExtractFacePoseImage"
383 | },
384 | "widgets_values": [
385 | 864,
386 | 1152
387 | ]
388 | },
389 | {
390 | "id": 121,
391 | "type": "ExtractIDEmbedding",
392 | "pos": [
393 | 1324.1436767578125,
394 | 1391.9034423828125
395 | ],
396 | "size": [
397 | 367.79998779296875,
398 | 86
399 | ],
400 | "flags": {
401 | "collapsed": true
402 | },
403 | "order": 14,
404 | "mode": 0,
405 | "inputs": [
406 | {
407 | "name": "face_detector",
408 | "type": "MODEL",
409 | "link": 188
410 | },
411 | {
412 | "name": "arcface_model",
413 | "type": "MODEL",
414 | "link": 189
415 | },
416 | {
417 | "name": "image_proj_model",
418 | "type": "MODEL",
419 | "link": 190
420 | },
421 | {
422 | "name": "image",
423 | "type": "IMAGE",
424 | "link": 187
425 | }
426 | ],
427 | "outputs": [
428 | {
429 | "name": "CONDITIONING",
430 | "type": "CONDITIONING",
431 | "links": [
432 | 298
433 | ]
434 | }
435 | ],
436 | "properties": {
437 | "Node name for S&R": "ExtractIDEmbedding"
438 | },
439 | "widgets_values": []
440 | },
441 | {
442 | "id": 110,
443 | "type": "ExtractIDEmbedding",
444 | "pos": [
445 | -742.9761962890625,
446 | 1403.471435546875
447 | ],
448 | "size": [
449 | 367.79998779296875,
450 | 86
451 | ],
452 | "flags": {
453 | "collapsed": true
454 | },
455 | "order": 18,
456 | "mode": 0,
457 | "inputs": [
458 | {
459 | "name": "face_detector",
460 | "type": "MODEL",
461 | "link": 172
462 | },
463 | {
464 | "name": "arcface_model",
465 | "type": "MODEL",
466 | "link": 173
467 | },
468 | {
469 | "name": "image_proj_model",
470 | "type": "MODEL",
471 | "link": 174
472 | },
473 | {
474 | "name": "image",
475 | "type": "IMAGE",
476 | "link": 175
477 | }
478 | ],
479 | "outputs": [
480 | {
481 | "name": "CONDITIONING",
482 | "type": "CONDITIONING",
483 | "links": [
484 | 293
485 | ]
486 | }
487 | ],
488 | "properties": {
489 | "Node name for S&R": "ExtractIDEmbedding"
490 | },
491 | "widgets_values": []
492 | },
493 | {
494 | "id": 142,
495 | "type": "LoadImageMask",
496 | "pos": [
497 | -563.2556762695312,
498 | 1623.5535888671875
499 | ],
500 | "size": [
501 | 330,
502 | 318
503 | ],
504 | "flags": {},
505 | "order": 7,
506 | "mode": 0,
507 | "inputs": [],
508 | "outputs": [
509 | {
510 | "name": "MASK",
511 | "type": "MASK",
512 | "links": [
513 | 297,
514 | 305
515 | ]
516 | }
517 | ],
518 | "properties": {
519 | "Node name for S&R": "LoadImageMask"
520 | },
521 | "widgets_values": [
522 | "mask_left_half.png",
523 | "red",
524 | "image"
525 | ]
526 | },
527 | {
528 | "id": 106,
529 | "type": "ExtractFacePoseImage",
530 | "pos": [
531 | -142.10414123535156,
532 | 1576.8348388671875
533 | ],
534 | "size": [
535 | 210,
536 | 122
537 | ],
538 | "flags": {},
539 | "order": 15,
540 | "mode": 0,
541 | "inputs": [
542 | {
543 | "name": "face_detector",
544 | "type": "MODEL",
545 | "link": 150
546 | },
547 | {
548 | "name": "image",
549 | "type": "IMAGE",
550 | "link": 151
551 | },
552 | {
553 | "name": "mask",
554 | "shape": 7,
555 | "type": "MASK",
556 | "link": 305
557 | }
558 | ],
559 | "outputs": [
560 | {
561 | "name": "IMAGE",
562 | "type": "IMAGE",
563 | "links": [
564 | 156,
565 | 295
566 | ]
567 | }
568 | ],
569 | "properties": {
570 | "Node name for S&R": "ExtractFacePoseImage"
571 | },
572 | "widgets_values": [
573 | 864,
574 | 1152
575 | ]
576 | },
577 | {
578 | "id": 108,
579 | "type": "InfuseNetLoader",
580 | "pos": [
581 | 569.8663940429688,
582 | 1030.9078369140625
583 | ],
584 | "size": [
585 | 463.8862609863281,
586 | 58
587 | ],
588 | "flags": {},
589 | "order": 8,
590 | "mode": 0,
591 | "inputs": [],
592 | "outputs": [
593 | {
594 | "name": "CONTROL_NET",
595 | "type": "CONTROL_NET",
596 | "links": [
597 | 157,
598 | 294,
599 | 299
600 | ]
601 | }
602 | ],
603 | "properties": {
604 | "Node name for S&R": "InfuseNetLoader"
605 | },
606 | "widgets_values": [
607 | "aes_stage2/infusenet_aes_fp8e4m3fn.safetensors"
608 | ]
609 | },
610 | {
611 | "id": 102,
612 | "type": "IDEmbeddingModelLoader",
613 | "pos": [
614 | -869.2985229492188,
615 | 977.8091430664062
616 | ],
617 | "size": [
618 | 530.486328125,
619 | 170
620 | ],
621 | "flags": {
622 | "collapsed": false
623 | },
624 | "order": 9,
625 | "mode": 0,
626 | "inputs": [],
627 | "outputs": [
628 | {
629 | "name": "FACE_DETECTOR",
630 | "type": "MODEL",
631 | "links": [
632 | 140,
633 | 150,
634 | 172,
635 | 188,
636 | 203
637 | ]
638 | },
639 | {
640 | "name": "ARCFACE_MODEL",
641 | "type": "MODEL",
642 | "links": [
643 | 141,
644 | 173,
645 | 189
646 | ]
647 | },
648 | {
649 | "name": "IMAGE_PROJ_MODEL",
650 | "type": "MODEL",
651 | "links": [
652 | 142,
653 | 174,
654 | 190
655 | ]
656 | }
657 | ],
658 | "properties": {
659 | "Node name for S&R": "IDEmbeddingModelLoader"
660 | },
661 | "widgets_values": [
662 | "aes_stage2/image_proj_model.bin",
663 | 8,
664 | "CUDA",
665 | "AUTO"
666 | ]
667 | },
668 | {
669 | "id": 149,
670 | "type": "FluxGuidance",
671 | "pos": [
672 | 8.051407814025879,
673 | 69.1958236694336
674 | ],
675 | "size": [
676 | 317.4000244140625,
677 | 58
678 | ],
679 | "flags": {},
680 | "order": 19,
681 | "mode": 0,
682 | "inputs": [
683 | {
684 | "label": "conditioning",
685 | "name": "conditioning",
686 | "type": "CONDITIONING",
687 | "link": 259
688 | }
689 | ],
690 | "outputs": [
691 | {
692 | "label": "CONDITIONING",
693 | "name": "CONDITIONING",
694 | "type": "CONDITIONING",
695 | "slot_index": 0,
696 | "links": [
697 | 292
698 | ]
699 | }
700 | ],
701 | "properties": {
702 | "Node name for S&R": "FluxGuidance"
703 | },
704 | "widgets_values": [
705 | 3.5
706 | ],
707 | "color": "#233",
708 | "bgcolor": "#355"
709 | },
710 | {
711 | "id": 153,
712 | "type": "CLIPTextEncode",
713 | "pos": [
714 | -481.558837890625,
715 | 68.3829574584961
716 | ],
717 | "size": [
718 | 422.84503173828125,
719 | 164.31304931640625
720 | ],
721 | "flags": {},
722 | "order": 13,
723 | "mode": 0,
724 | "inputs": [
725 | {
726 | "label": "clip",
727 | "name": "clip",
728 | "type": "CLIP",
729 | "link": 269
730 | }
731 | ],
732 | "outputs": [
733 | {
734 | "label": "CONDITIONING",
735 | "name": "CONDITIONING",
736 | "type": "CONDITIONING",
737 | "slot_index": 0,
738 | "links": [
739 | 259
740 | ]
741 | }
742 | ],
743 | "title": "CLIP Text Encode (Positive Prompt)",
744 | "properties": {
745 | "Node name for S&R": "CLIPTextEncode"
746 | },
747 | "widgets_values": [
748 | "A couple in an classroom, The woman, with long wavy brown hair, wears a white graphic tee, denim mini skirt, black boots, the woman is laughing. The man with short black hair, sports a black bomber jacket, white tee, and headphones, and black pants. Both appear casual and stylish. both are looking at camera"
749 | ],
750 | "color": "#232",
751 | "bgcolor": "#353"
752 | },
753 | {
754 | "id": 107,
755 | "type": "LoadImage",
756 | "pos": [
757 | 527.5704956054688,
758 | 1645.22998046875
759 | ],
760 | "size": [
761 | 315,
762 | 314.0001220703125
763 | ],
764 | "flags": {},
765 | "order": 10,
766 | "mode": 0,
767 | "inputs": [],
768 | "outputs": [
769 | {
770 | "name": "IMAGE",
771 | "type": "IMAGE",
772 | "links": [
773 | 151,
774 | 307
775 | ]
776 | },
777 | {
778 | "name": "MASK",
779 | "type": "MASK",
780 | "links": null
781 | }
782 | ],
783 | "title": "Load Control Image",
784 | "properties": {
785 | "Node name for S&R": "LoadImage"
786 | },
787 | "widgets_values": [
788 | "ComfyUI_00050_.png",
789 | "image"
790 | ]
791 | },
792 | {
793 | "id": 163,
794 | "type": "InfuseNetApply",
795 | "pos": [
796 | 136.95687866210938,
797 | 1326.953369140625
798 | ],
799 | "size": [
800 | 315,
801 | 226
802 | ],
803 | "flags": {},
804 | "order": 20,
805 | "mode": 0,
806 | "inputs": [
807 | {
808 | "name": "positive",
809 | "type": "CONDITIONING",
810 | "link": 292
811 | },
812 | {
813 | "name": "id_embedding",
814 | "type": "CONDITIONING",
815 | "link": 293
816 | },
817 | {
818 | "name": "control_net",
819 | "type": "CONTROL_NET",
820 | "link": 294
821 | },
822 | {
823 | "name": "image",
824 | "type": "IMAGE",
825 | "link": 295
826 | },
827 | {
828 | "name": "negative",
829 | "shape": 7,
830 | "type": "CONDITIONING",
831 | "link": null
832 | },
833 | {
834 | "name": "vae",
835 | "shape": 7,
836 | "type": "VAE",
837 | "link": 296
838 | },
839 | {
840 | "name": "control_mask",
841 | "shape": 7,
842 | "type": "MASK",
843 | "link": 297
844 | }
845 | ],
846 | "outputs": [
847 | {
848 | "name": "positive",
849 | "type": "CONDITIONING",
850 | "links": [
851 | 303
852 | ]
853 | },
854 | {
855 | "name": "negative",
856 | "type": "CONDITIONING",
857 | "links": null
858 | }
859 | ],
860 | "properties": {
861 | "Node name for S&R": "InfuseNetApply"
862 | },
863 | "widgets_values": [
864 | 1.0000000000000002,
865 | 0,
866 | 1
867 | ]
868 | },
869 | {
870 | "id": 164,
871 | "type": "InfuseNetApply",
872 | "pos": [
873 | 2013.8388671875,
874 | 1340.6917724609375
875 | ],
876 | "size": [
877 | 315,
878 | 226
879 | ],
880 | "flags": {},
881 | "order": 21,
882 | "mode": 0,
883 | "inputs": [
884 | {
885 | "name": "positive",
886 | "type": "CONDITIONING",
887 | "link": 303
888 | },
889 | {
890 | "name": "id_embedding",
891 | "type": "CONDITIONING",
892 | "link": 298
893 | },
894 | {
895 | "name": "control_net",
896 | "type": "CONTROL_NET",
897 | "link": 299
898 | },
899 | {
900 | "name": "image",
901 | "type": "IMAGE",
902 | "link": 300
903 | },
904 | {
905 | "name": "negative",
906 | "shape": 7,
907 | "type": "CONDITIONING",
908 | "link": null
909 | },
910 | {
911 | "name": "vae",
912 | "shape": 7,
913 | "type": "VAE",
914 | "link": 301
915 | },
916 | {
917 | "name": "control_mask",
918 | "shape": 7,
919 | "type": "MASK",
920 | "link": 302
921 | }
922 | ],
923 | "outputs": [
924 | {
925 | "name": "positive",
926 | "type": "CONDITIONING",
927 | "links": [
928 | 304
929 | ]
930 | },
931 | {
932 | "name": "negative",
933 | "type": "CONDITIONING",
934 | "links": null
935 | }
936 | ],
937 | "properties": {
938 | "Node name for S&R": "InfuseNetApply"
939 | },
940 | "widgets_values": [
941 | 1.0000000000000002,
942 | 0,
943 | 1
944 | ]
945 | },
946 | {
947 | "id": 151,
948 | "type": "SamplerCustomAdvanced",
949 | "pos": [
950 | 506.7764587402344,
951 | 102.62291717529297
952 | ],
953 | "size": [
954 | 272.3617858886719,
955 | 124.53733825683594
956 | ],
957 | "flags": {},
958 | "order": 23,
959 | "mode": 0,
960 | "inputs": [
961 | {
962 | "label": "noise",
963 | "name": "noise",
964 | "type": "NOISE",
965 | "link": 262
966 | },
967 | {
968 | "label": "guider",
969 | "name": "guider",
970 | "type": "GUIDER",
971 | "link": 263
972 | },
973 | {
974 | "label": "sampler",
975 | "name": "sampler",
976 | "type": "SAMPLER",
977 | "link": 264
978 | },
979 | {
980 | "label": "sigmas",
981 | "name": "sigmas",
982 | "type": "SIGMAS",
983 | "link": 311
984 | },
985 | {
986 | "label": "latent_image",
987 | "name": "latent_image",
988 | "type": "LATENT",
989 | "link": 266
990 | }
991 | ],
992 | "outputs": [
993 | {
994 | "label": "output",
995 | "name": "output",
996 | "type": "LATENT",
997 | "slot_index": 0,
998 | "links": [
999 | 315
1000 | ]
1001 | },
1002 | {
1003 | "label": "denoised_output",
1004 | "name": "denoised_output",
1005 | "type": "LATENT",
1006 | "links": []
1007 | }
1008 | ],
1009 | "properties": {
1010 | "Node name for S&R": "SamplerCustomAdvanced"
1011 | },
1012 | "widgets_values": []
1013 | },
1014 | {
1015 | "id": 148,
1016 | "type": "UNETLoader",
1017 | "pos": [
1018 | -876.4181518554688,
1019 | 267.08319091796875
1020 | ],
1021 | "size": [
1022 | 315,
1023 | 82
1024 | ],
1025 | "flags": {},
1026 | "order": 11,
1027 | "mode": 0,
1028 | "inputs": [],
1029 | "outputs": [
1030 | {
1031 | "label": "MODEL",
1032 | "name": "MODEL",
1033 | "type": "MODEL",
1034 | "slot_index": 0,
1035 | "links": [
1036 | 319,
1037 | 320
1038 | ]
1039 | }
1040 | ],
1041 | "properties": {
1042 | "Node name for S&R": "UNETLoader"
1043 | },
1044 | "widgets_values": [
1045 | "flux1-schnell-fp8-e4m3fn.safetensors",
1046 | "default"
1047 | ],
1048 | "color": "#223",
1049 | "bgcolor": "#335"
1050 | },
1051 | {
1052 | "id": 97,
1053 | "type": "LoadImage",
1054 | "pos": [
1055 | -920.3511962890625,
1056 | 1624.9892578125
1057 | ],
1058 | "size": [
1059 | 315,
1060 | 314
1061 | ],
1062 | "flags": {},
1063 | "order": 12,
1064 | "mode": 0,
1065 | "inputs": [],
1066 | "outputs": [
1067 | {
1068 | "name": "IMAGE",
1069 | "type": "IMAGE",
1070 | "links": [
1071 | 139,
1072 | 144,
1073 | 167,
1074 | 175
1075 | ]
1076 | },
1077 | {
1078 | "name": "MASK",
1079 | "type": "MASK",
1080 | "links": null
1081 | }
1082 | ],
1083 | "title": "Load ID Image",
1084 | "properties": {
1085 | "Node name for S&R": "LoadImage"
1086 | },
1087 | "widgets_values": [
1088 | "woman.jpg",
1089 | "image"
1090 | ]
1091 | },
1092 | {
1093 | "id": 45,
1094 | "type": "SaveImage",
1095 | "pos": [
1096 | 1508.172119140625,
1097 | 26.20151138305664
1098 | ],
1099 | "size": [
1100 | 688.4625244140625,
1101 | 863.3844604492188
1102 | ],
1103 | "flags": {
1104 | "collapsed": false
1105 | },
1106 | "order": 25,
1107 | "mode": 0,
1108 | "inputs": [
1109 | {
1110 | "name": "images",
1111 | "type": "IMAGE",
1112 | "link": 276
1113 | }
1114 | ],
1115 | "outputs": [],
1116 | "properties": {
1117 | "Node name for S&R": "SaveImage"
1118 | },
1119 | "widgets_values": [
1120 | "ComfyUI"
1121 | ]
1122 | },
1123 | {
1124 | "id": 161,
1125 | "type": "BasicScheduler",
1126 | "pos": [
1127 | 83.75202941894531,
1128 | 511.52001953125
1129 | ],
1130 | "size": [
1131 | 315,
1132 | 106
1133 | ],
1134 | "flags": {},
1135 | "order": 17,
1136 | "mode": 0,
1137 | "inputs": [
1138 | {
1139 | "label": "model",
1140 | "name": "model",
1141 | "type": "MODEL",
1142 | "link": 319
1143 | }
1144 | ],
1145 | "outputs": [
1146 | {
1147 | "label": "SIGMAS",
1148 | "name": "SIGMAS",
1149 | "type": "SIGMAS",
1150 | "links": [
1151 | 311
1152 | ]
1153 | }
1154 | ],
1155 | "properties": {
1156 | "Node name for S&R": "BasicScheduler"
1157 | },
1158 | "widgets_values": [
1159 | "beta",
1160 | 4,
1161 | 1
1162 | ]
1163 | }
1164 | ],
1165 | "links": [
1166 | [
1167 | 150,
1168 | 102,
1169 | 0,
1170 | 106,
1171 | 0,
1172 | "MODEL"
1173 | ],
1174 | [
1175 | 151,
1176 | 107,
1177 | 0,
1178 | 106,
1179 | 1,
1180 | "IMAGE"
1181 | ],
1182 | [
1183 | 172,
1184 | 102,
1185 | 0,
1186 | 110,
1187 | 0,
1188 | "MODEL"
1189 | ],
1190 | [
1191 | 173,
1192 | 102,
1193 | 1,
1194 | 110,
1195 | 1,
1196 | "MODEL"
1197 | ],
1198 | [
1199 | 174,
1200 | 102,
1201 | 2,
1202 | 110,
1203 | 2,
1204 | "MODEL"
1205 | ],
1206 | [
1207 | 175,
1208 | 97,
1209 | 0,
1210 | 110,
1211 | 3,
1212 | "IMAGE"
1213 | ],
1214 | [
1215 | 187,
1216 | 120,
1217 | 0,
1218 | 121,
1219 | 3,
1220 | "IMAGE"
1221 | ],
1222 | [
1223 | 188,
1224 | 102,
1225 | 0,
1226 | 121,
1227 | 0,
1228 | "MODEL"
1229 | ],
1230 | [
1231 | 189,
1232 | 102,
1233 | 1,
1234 | 121,
1235 | 1,
1236 | "MODEL"
1237 | ],
1238 | [
1239 | 190,
1240 | 102,
1241 | 2,
1242 | 121,
1243 | 2,
1244 | "MODEL"
1245 | ],
1246 | [
1247 | 203,
1248 | 102,
1249 | 0,
1250 | 119,
1251 | 0,
1252 | "MODEL"
1253 | ],
1254 | [
1255 | 259,
1256 | 153,
1257 | 0,
1258 | 149,
1259 | 0,
1260 | "CONDITIONING"
1261 | ],
1262 | [
1263 | 262,
1264 | 159,
1265 | 0,
1266 | 151,
1267 | 0,
1268 | "NOISE"
1269 | ],
1270 | [
1271 | 263,
1272 | 150,
1273 | 0,
1274 | 151,
1275 | 1,
1276 | "GUIDER"
1277 | ],
1278 | [
1279 | 264,
1280 | 160,
1281 | 0,
1282 | 151,
1283 | 2,
1284 | "SAMPLER"
1285 | ],
1286 | [
1287 | 266,
1288 | 158,
1289 | 0,
1290 | 151,
1291 | 4,
1292 | "LATENT"
1293 | ],
1294 | [
1295 | 268,
1296 | 155,
1297 | 0,
1298 | 152,
1299 | 1,
1300 | "VAE"
1301 | ],
1302 | [
1303 | 269,
1304 | 154,
1305 | 0,
1306 | 153,
1307 | 0,
1308 | "CLIP"
1309 | ],
1310 | [
1311 | 276,
1312 | 152,
1313 | 0,
1314 | 45,
1315 | 0,
1316 | "IMAGE"
1317 | ],
1318 | [
1319 | 292,
1320 | 149,
1321 | 0,
1322 | 163,
1323 | 0,
1324 | "CONDITIONING"
1325 | ],
1326 | [
1327 | 293,
1328 | 110,
1329 | 0,
1330 | 163,
1331 | 1,
1332 | "CONDITIONING"
1333 | ],
1334 | [
1335 | 294,
1336 | 108,
1337 | 0,
1338 | 163,
1339 | 2,
1340 | "CONTROL_NET"
1341 | ],
1342 | [
1343 | 295,
1344 | 106,
1345 | 0,
1346 | 163,
1347 | 3,
1348 | "IMAGE"
1349 | ],
1350 | [
1351 | 296,
1352 | 155,
1353 | 0,
1354 | 163,
1355 | 5,
1356 | "VAE"
1357 | ],
1358 | [
1359 | 297,
1360 | 142,
1361 | 0,
1362 | 163,
1363 | 6,
1364 | "MASK"
1365 | ],
1366 | [
1367 | 298,
1368 | 121,
1369 | 0,
1370 | 164,
1371 | 1,
1372 | "CONDITIONING"
1373 | ],
1374 | [
1375 | 299,
1376 | 108,
1377 | 0,
1378 | 164,
1379 | 2,
1380 | "CONTROL_NET"
1381 | ],
1382 | [
1383 | 300,
1384 | 119,
1385 | 0,
1386 | 164,
1387 | 3,
1388 | "IMAGE"
1389 | ],
1390 | [
1391 | 301,
1392 | 155,
1393 | 0,
1394 | 164,
1395 | 5,
1396 | "VAE"
1397 | ],
1398 | [
1399 | 302,
1400 | 134,
1401 | 0,
1402 | 164,
1403 | 6,
1404 | "MASK"
1405 | ],
1406 | [
1407 | 303,
1408 | 163,
1409 | 0,
1410 | 164,
1411 | 0,
1412 | "CONDITIONING"
1413 | ],
1414 | [
1415 | 304,
1416 | 164,
1417 | 0,
1418 | 150,
1419 | 1,
1420 | "CONDITIONING"
1421 | ],
1422 | [
1423 | 305,
1424 | 142,
1425 | 0,
1426 | 106,
1427 | 2,
1428 | "MASK"
1429 | ],
1430 | [
1431 | 306,
1432 | 134,
1433 | 0,
1434 | 119,
1435 | 2,
1436 | "MASK"
1437 | ],
1438 | [
1439 | 307,
1440 | 107,
1441 | 0,
1442 | 119,
1443 | 1,
1444 | "IMAGE"
1445 | ],
1446 | [
1447 | 311,
1448 | 161,
1449 | 0,
1450 | 151,
1451 | 3,
1452 | "SIGMAS"
1453 | ],
1454 | [
1455 | 315,
1456 | 151,
1457 | 0,
1458 | 152,
1459 | 0,
1460 | "LATENT"
1461 | ],
1462 | [
1463 | 319,
1464 | 148,
1465 | 0,
1466 | 161,
1467 | 0,
1468 | "MODEL"
1469 | ],
1470 | [
1471 | 320,
1472 | 148,
1473 | 0,
1474 | 150,
1475 | 0,
1476 | "MODEL"
1477 | ]
1478 | ],
1479 | "groups": [
1480 | {
1481 | "id": 1,
1482 | "title": "ID 1",
1483 | "bounding": [
1484 | -938.527099609375,
1485 | 1232.126953125,
1486 | 1409.8516845703125,
1487 | 723.2393798828125
1488 | ],
1489 | "color": "#3f789e",
1490 | "font_size": 24,
1491 | "flags": {}
1492 | },
1493 | {
1494 | "id": 2,
1495 | "title": "ID 2",
1496 | "bounding": [
1497 | 941.7040405273438,
1498 | 1250.2364501953125,
1499 | 1486.1617431640625,
1500 | 706.6896362304688
1501 | ],
1502 | "color": "#3f789e",
1503 | "font_size": 24,
1504 | "flags": {}
1505 | },
1506 | {
1507 | "id": 4,
1508 | "title": "Flux",
1509 | "bounding": [
1510 | -898.5185546875,
1511 | -25.370464324951172,
1512 | 2022.9615478515625,
1513 | 921.326416015625
1514 | ],
1515 | "color": "#3f789e",
1516 | "font_size": 24,
1517 | "flags": {}
1518 | }
1519 | ],
1520 | "config": {},
1521 | "extra": {
1522 | "ds": {
1523 | "scale": 0.32349184307607415,
1524 | "offset": [
1525 | 1020.3511962890625,
1526 | 73.79848861694336
1527 | ]
1528 | },
1529 | "frontendVersion": "1.20.4",
1530 | "groupNodes": {}
1531 | },
1532 | "version": 0.4
1533 | }
--------------------------------------------------------------------------------
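Each element of the `links` array in the workflow above is a flat ComfyUI link record. A minimal decoding sketch (plain Python, no ComfyUI dependency), assuming the standard frontend layout `[link_id, source_node, source_slot, target_node, target_slot, type]`:

```python
# Decode one entry of a ComfyUI workflow "links" array.
# Assumed layout: [link_id, source_node_id, source_slot,
#                  target_node_id, target_slot, type]
from typing import NamedTuple

class Link(NamedTuple):
    link_id: int
    source_node: int
    source_slot: int
    target_node: int
    target_slot: int
    type: str

# Entry 304 above: InfuseNetApply (node 164, output 0) feeds the guider's
# "positive" conditioning input (node 150, slot 1).
link = Link(*[304, 164, 0, 150, 1, "CONDITIONING"])
print(f"{link.type}: node {link.source_node}[{link.source_slot}] -> "
      f"node {link.target_node}[{link.target_slot}]")
```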
/examples/multi_id_workflow.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bytedance/ComfyUI_InfiniteYou/fb507e133b5e6e4a86a9c63a9b58f562aed4906a/examples/multi_id_workflow.jpg
--------------------------------------------------------------------------------
/examples/teaser.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bytedance/ComfyUI_InfiniteYou/fb507e133b5e6e4a86a9c63a9b58f562aed4906a/examples/teaser.jpg
--------------------------------------------------------------------------------
/examples/workflow.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bytedance/ComfyUI_InfiniteYou/fb507e133b5e6e4a86a9c63a9b58f562aed4906a/examples/workflow.jpg
--------------------------------------------------------------------------------
/infuse_net.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2025 Bytedance Ltd. and/or its affiliates. All rights reserved.
2 |
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 |
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import torch
16 | import math
17 | from torch import Tensor, nn
18 | from einops import rearrange, repeat
19 |
20 | import comfy
21 | from comfy.controlnet import controlnet_config, controlnet_load_state_dict, ControlNet, StrengthType
22 | from comfy.ldm.flux.model import Flux
23 | from comfy.ldm.flux.layers import (timestep_embedding)
24 | import comfy.ldm.common_dit
25 | from comfy.ldm.flux.controlnet import MistolineControlnetBlock, MistolineCondDownsamplBlock  # used by the mistoline branches below
26 | class InfuseNet(ControlNet):
27 | def __init__(self,
28 | control_model=None,
29 | id_embedding = None,
30 | global_average_pooling=False,
31 | compression_ratio=8,
32 | latent_format=None,
33 | load_device=None,
34 | manual_cast_dtype=None,
35 | extra_conds=["y"],
36 | strength_type=StrengthType.CONSTANT,
37 | concat_mask=False,
38 | preprocess_image=lambda a: a):
39 | super().__init__(control_model=control_model,
40 | global_average_pooling=global_average_pooling,
41 | compression_ratio=compression_ratio,
42 | latent_format=latent_format,
43 | load_device=load_device,
44 | manual_cast_dtype=manual_cast_dtype,
45 | extra_conds=extra_conds,
46 | strength_type=strength_type,
47 | concat_mask=concat_mask,
48 | preprocess_image=preprocess_image)
49 | self.id_embedding = id_embedding
50 |
51 | def copy(self):
52 | c = InfuseNet(None, global_average_pooling=self.global_average_pooling, load_device=self.load_device, manual_cast_dtype=self.manual_cast_dtype)
53 | c.control_model = self.control_model
54 | c.control_model_wrapped = self.control_model_wrapped
55 | c.id_embedding = self.id_embedding
56 | self.copy_to(c)
57 | return c
58 |
59 | def get_control(self, x_noisy, t, cond, batched_number, transformer_options):
60 | cond = cond.copy()
61 | cond['crossattn_controlnet'] = self.id_embedding
62 | cond['c_crossattn'] = self.id_embedding
63 | return super().get_control(x_noisy, t, cond, batched_number, transformer_options)
64 |
65 | class InfuseNetFlux(Flux):
66 | def __init__(self, latent_input=False, num_union_modes=0, mistoline=False, control_latent_channels=None, image_model=None, dtype=None, device=None, operations=None, **kwargs):
67 | super().__init__(final_layer=False, dtype=dtype, device=device, operations=operations, **kwargs)
68 |
69 | self.main_model_double = 19
70 | self.main_model_single = 38
71 |
72 | self.mistoline = mistoline
73 | # add ControlNet blocks
74 | if self.mistoline:
75 | control_block = lambda : MistolineControlnetBlock(self.hidden_size, dtype=dtype, device=device, operations=operations)
76 | else:
77 | control_block = lambda : operations.Linear(self.hidden_size, self.hidden_size, dtype=dtype, device=device)
78 |
79 | self.controlnet_blocks = nn.ModuleList([])
80 | for _ in range(self.params.depth):
81 | self.controlnet_blocks.append(control_block())
82 |
83 | self.controlnet_single_blocks = nn.ModuleList([])
84 | for _ in range(self.params.depth_single_blocks):
85 | self.controlnet_single_blocks.append(control_block())
86 |
87 | self.num_union_modes = num_union_modes
88 | self.controlnet_mode_embedder = None
89 | if self.num_union_modes > 0:
90 | self.controlnet_mode_embedder = operations.Embedding(self.num_union_modes, self.hidden_size, dtype=dtype, device=device)
91 |
92 | self.gradient_checkpointing = False
93 | self.latent_input = latent_input
94 | if control_latent_channels is None:
95 | control_latent_channels = self.in_channels
96 | else:
97 |             control_latent_channels *= 2 * 2  # 2x2 patchify folds each patch's pixels into the channel dim
98 |
99 | self.pos_embed_input = operations.Linear(control_latent_channels, self.hidden_size, bias=True, dtype=dtype, device=device)
100 | if not self.latent_input:
101 | if self.mistoline:
102 | self.input_cond_block = MistolineCondDownsamplBlock(dtype=dtype, device=device, operations=operations)
103 | else:
104 | self.input_hint_block = nn.Sequential(
105 | operations.Conv2d(3, 16, 3, padding=1, dtype=dtype, device=device),
106 | nn.SiLU(),
107 | operations.Conv2d(16, 16, 3, padding=1, dtype=dtype, device=device),
108 | nn.SiLU(),
109 | operations.Conv2d(16, 16, 3, padding=1, stride=2, dtype=dtype, device=device),
110 | nn.SiLU(),
111 | operations.Conv2d(16, 16, 3, padding=1, dtype=dtype, device=device),
112 | nn.SiLU(),
113 | operations.Conv2d(16, 16, 3, padding=1, stride=2, dtype=dtype, device=device),
114 | nn.SiLU(),
115 | operations.Conv2d(16, 16, 3, padding=1, dtype=dtype, device=device),
116 | nn.SiLU(),
117 | operations.Conv2d(16, 16, 3, padding=1, stride=2, dtype=dtype, device=device),
118 | nn.SiLU(),
119 | operations.Conv2d(16, 16, 3, padding=1, dtype=dtype, device=device)
120 | )
121 |
122 | def forward_orig(
123 | self,
124 | img: Tensor,
125 | img_ids: Tensor,
126 | controlnet_cond: Tensor,
127 | txt: Tensor,
128 | txt_ids: Tensor,
129 | timesteps: Tensor,
130 | y: Tensor,
131 | guidance: Tensor = None,
132 | control_type: Tensor = None,
133 | out_mask: Tensor = None
134 | ) -> Tensor:
135 | if img.ndim != 3 or txt.ndim != 3:
136 | raise ValueError("Input img and txt tensors must have 3 dimensions.")
137 |
138 | # running on sequences img
139 | img = self.img_in(img)
140 |
141 | controlnet_cond = self.pos_embed_input(controlnet_cond)
142 | img = img + controlnet_cond
143 | vec = self.time_in(timestep_embedding(timesteps, 256))
144 | if self.params.guidance_embed:
145 | vec = vec + self.guidance_in(timestep_embedding(guidance, 256))
146 | vec = vec + self.vector_in(y)
147 | txt = self.txt_in(txt)
148 |
149 | if self.controlnet_mode_embedder is not None and len(control_type) > 0:
150 | control_cond = self.controlnet_mode_embedder(torch.tensor(control_type, device=img.device), out_dtype=img.dtype).unsqueeze(0).repeat((txt.shape[0], 1, 1))
151 | txt = torch.cat([control_cond, txt], dim=1)
152 | txt_ids = torch.cat([txt_ids[:,:1], txt_ids], dim=1)
153 |
154 | ids = torch.cat((txt_ids, img_ids), dim=1)
155 | pe = self.pe_embedder(ids)
156 |
157 | controlnet_double = ()
158 |
159 | for i in range(len(self.double_blocks)):
160 | img, txt = self.double_blocks[i](img=img, txt=txt, vec=vec, pe=pe)
161 | controlnet_double = controlnet_double + (self.controlnet_blocks[i](img),)
162 |
163 | img = torch.cat((txt, img), 1)
164 |
165 | controlnet_single = ()
166 |
167 | for i in range(len(self.single_blocks)):
168 | img = self.single_blocks[i](img, vec=vec, pe=pe)
169 | controlnet_single = controlnet_single + (self.controlnet_single_blocks[i](img[:, txt.shape[1] :, ...]),)
170 |
171 | repeat = math.ceil(self.main_model_double / len(controlnet_double))
172 | if self.latent_input:
173 | out_input = ()
174 | for x in controlnet_double:
175 | if out_mask is not None:
176 | out_input += (x * out_mask,) * repeat
177 | else:
178 | out_input += (x,) * repeat
179 | else:
180 | out_input = (controlnet_double * repeat)
181 |
182 | out = {"input": out_input[:self.main_model_double]}
183 | if len(controlnet_single) > 0:
184 | repeat = math.ceil(self.main_model_single / len(controlnet_single))
185 | out_output = ()
186 | if self.latent_input:
187 | for x in controlnet_single:
188 | if out_mask is not None:
189 | out_output += (x * out_mask,) * repeat
190 | else:
191 | out_output += (x,) * repeat
192 | else:
193 | out_output = (controlnet_single * repeat)
194 | out["output"] = out_output[:self.main_model_single]
195 | return out
196 |
197 | def forward(self, x, timesteps, context, y, guidance=None, hint=None, **kwargs):
198 | patch_size = 2
199 | if self.latent_input:
200 | hint = comfy.ldm.common_dit.pad_to_patch_size(hint, (patch_size, patch_size))
201 | elif self.mistoline:
202 | hint = hint * 2.0 - 1.0
203 | hint = self.input_cond_block(hint)
204 | else:
205 | hint = hint * 2.0 - 1.0
206 | hint = self.input_hint_block(hint)
207 |
208 | hint = rearrange(hint, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=patch_size, pw=patch_size)
209 |
210 | bs, c, h, w = x.shape
211 | x = comfy.ldm.common_dit.pad_to_patch_size(x, (patch_size, patch_size))
212 |
213 | img = rearrange(x, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=patch_size, pw=patch_size)
214 |
215 | control_mask = kwargs.get("control_mask", None)
216 | out_mask = None
217 | if control_mask is not None:
218 | in_mask = comfy.sampler_helpers.prepare_mask(control_mask, (bs, c, h, w), img.device)
219 | in_mask = comfy.ldm.common_dit.pad_to_patch_size(in_mask, (patch_size, patch_size))
220 | in_mask = rearrange(in_mask, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=patch_size, pw=patch_size)
221 |             # broadcast the 1-channel mask across the token feature dim: (b, seq_len, 1) -> (b, seq_len, patch_dim)
222 | in_mask = in_mask[..., 0].unsqueeze(-1).repeat(1, 1, img.shape[-1]).to(dtype=img.dtype)
223 | img = img * in_mask
224 |
225 | out_mask = comfy.sampler_helpers.prepare_mask(control_mask, (bs,
226 | self.hidden_size // (patch_size * patch_size), h, w), img.device)
227 | out_mask = comfy.ldm.common_dit.pad_to_patch_size(out_mask, (patch_size, patch_size))
228 | out_mask = rearrange(out_mask, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=patch_size, pw=patch_size)
229 |
230 | h_len = ((h + (patch_size // 2)) // patch_size)
231 | w_len = ((w + (patch_size // 2)) // patch_size)
232 | img_ids = torch.zeros((h_len, w_len, 3), device=x.device, dtype=x.dtype)
233 | img_ids[..., 1] = img_ids[..., 1] + torch.linspace(0, h_len - 1, steps=h_len, device=x.device, dtype=x.dtype)[:, None]
234 | img_ids[..., 2] = img_ids[..., 2] + torch.linspace(0, w_len - 1, steps=w_len, device=x.device, dtype=x.dtype)[None, :]
235 | img_ids = repeat(img_ids, "h w c -> b (h w) c", b=bs)
236 |
237 | txt_ids = torch.zeros((bs, context.shape[1], 3), device=x.device, dtype=x.dtype)
238 | return self.forward_orig(img, img_ids, hint, context, txt_ids, timesteps, y, guidance, control_type=kwargs.get("control_type", []), out_mask=out_mask)
239 |
240 | def load_infuse_net_flux(ckpt_path, model_options={}):
241 | sd = comfy.utils.load_torch_file(ckpt_path, safe_load=True)
242 | new_sd = comfy.model_detection.convert_diffusers_mmdit(sd, "")
243 | model_config, operations, load_device, unet_dtype, manual_cast_dtype, offload_device = controlnet_config(new_sd, model_options=model_options)
244 | for k in sd:
245 | new_sd[k] = sd[k]
246 |
247 | num_union_modes = 0
248 | union_cnet = "controlnet_mode_embedder.weight"
249 | if union_cnet in new_sd:
250 | num_union_modes = new_sd[union_cnet].shape[0]
251 |
252 | control_latent_channels = new_sd.get("pos_embed_input.weight").shape[1] // 4
253 | concat_mask = False
254 | if control_latent_channels == 17:
255 | concat_mask = True
256 |
257 | control_model = InfuseNetFlux(latent_input=True, num_union_modes=num_union_modes, control_latent_channels=control_latent_channels, operations=operations, device=offload_device, dtype=unet_dtype, **model_config.unet_config)
258 | control_model = controlnet_load_state_dict(control_model, new_sd)
259 |
260 | latent_format = comfy.latent_formats.Flux()
261 | extra_conds = ['y', 'guidance']
262 | control = InfuseNet(control_model, compression_ratio=1, latent_format=latent_format, concat_mask=concat_mask, load_device=load_device, manual_cast_dtype=manual_cast_dtype, extra_conds=extra_conds)
263 | return control
264 |
--------------------------------------------------------------------------------
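A note on the residual broadcasting in `forward_orig`: the InfuseNet may have fewer transformer blocks than the 19 double / 38 single blocks of the main Flux model, so each per-block residual is repeated `ceil(main_depth / controlnet_depth)` times and the resulting tuple is truncated. A standalone sketch of the `latent_input` branch of that arithmetic (the 5-block controlnet here is hypothetical):

```python
import math

def broadcast_residuals(residuals, main_depth):
    # Repeat each per-block controlnet residual consecutively, then truncate,
    # mirroring the latent_input branch of InfuseNetFlux.forward_orig above.
    rep = math.ceil(main_depth / len(residuals))
    out = ()
    for x in residuals:
        out += (x,) * rep
    return out[:main_depth]

# A hypothetical controlnet with 5 double blocks driving the 19 double blocks
# of the full Flux model: each residual covers ceil(19/5) = 4 consecutive
# blocks, and the last run is truncated to 3.
print(broadcast_residuals(("r0", "r1", "r2", "r3", "r4"), 19))
```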
/nodes.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2025 Bytedance Ltd. and/or its affiliates. All rights reserved.
2 |
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 |
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import os
16 | import torch
17 | import folder_paths
18 | import cv2
19 | import numpy as np
20 | from PIL import Image
21 | import comfy
22 | from huggingface_hub import snapshot_download, hf_hub_download
23 | import shutil
24 | import glob
25 |
26 | from facexlib.recognition import init_recognition_model
27 | from insightface.app import FaceAnalysis
28 |
29 | from .utils import extract_arcface_bgr_embedding, tensor_to_np_image, np_image_to_tensor, resize_and_pad_pil_image, draw_kps, escape_path_for_url
30 | from .infuse_net import load_infuse_net_flux
31 | from .resampler import Resampler
32 |
33 | folder_paths.add_model_folder_path("infinite_you", os.path.join(folder_paths.models_dir, "infinite_you"))
34 |
35 | class FaceDetector:
36 | def __init__(self,
37 | det_sizes,
38 | root_dir,
39 | providers) -> None:
40 | self.apps = []
41 | for det_size in det_sizes:
42 | app = FaceAnalysis(name="antelopev2", root=root_dir, providers=providers)
43 | app.prepare(ctx_id=0, det_size=(det_size, det_size))
44 | self.apps.append(app)
45 |
46 | def __call__(self, np_image_bgr):
47 | for app in self.apps:
48 | faces = app.get(np_image_bgr)
49 | if len(faces) > 0:
50 | return faces
51 | return []
52 |
53 | class IDEmbeddingModelLoader:
54 | @classmethod
55 | def INPUT_TYPES(s):
56 | return {
57 | "required": {
58 | "image_proj_model_name": (IDEmbeddingModelLoader.get_image_proj_names(), ),
59 | 'image_proj_num_tokens': ([8, 16], ),
60 | 'face_analysis_provider': (['CUDA', 'CPU'], ),
61 | 'face_analysis_det_size': (["AUTO", "640", "320", "160"], )
62 | },
63 | }
64 |
65 | RETURN_NAMES = ("FACE_DETECTOR", "ARCFACE_MODEL", "IMAGE_PROJ_MODEL")
66 | RETURN_TYPES = ("MODEL", "MODEL", "MODEL")
67 |
68 | FUNCTION = "load_insightface"
69 | CATEGORY = "infinite_you"
70 |
71 | def get_image_proj_names():
72 | names = [
73 | os.path.join("sim_stage1", "image_proj_model.bin"),
74 | os.path.join("aes_stage2", "image_proj_model.bin"),
75 | *folder_paths.get_filename_list("infinite_you"),
76 | ]
77 |         return sorted(name for name in set(names) if name.endswith(".bin"))
78 |
79 | def load_insightface(self, image_proj_model_name, image_proj_num_tokens, face_analysis_provider, face_analysis_det_size):
80 | insight_facedir = os.path.join(folder_paths.models_dir, "insightface")
81 |
82 | # Download insightface models
83 | antelopev2_dir = os.path.join(insight_facedir, 'models', 'antelopev2')
84 | if not os.path.exists(antelopev2_dir) or len(glob.glob(os.path.join(antelopev2_dir, "*.onnx"))) == 0:
85 | os.makedirs(antelopev2_dir, exist_ok=True)
86 | snapshot_download(repo_id="MonsterMMORPG/tools", allow_patterns="*.onnx", local_dir=antelopev2_dir)
87 |
88 | # Download infinite you models
89 | infinite_you_dir = os.path.join(folder_paths.models_dir, "infinite_you")
90 | image_proj_model_path = os.path.join(infinite_you_dir, image_proj_model_name)
91 | if not os.path.exists(image_proj_model_path):
92 | dst_dir = os.path.dirname(image_proj_model_path)
93 | os.makedirs(dst_dir, exist_ok=True)
94 |
95 | downloaded_file = hf_hub_download(repo_id="ByteDance/InfiniteYou",
96 | filename=escape_path_for_url(os.path.join("infu_flux_v1.0", image_proj_model_name)),
97 | local_dir=infinite_you_dir)
98 | shutil.move(downloaded_file, image_proj_model_path)
99 |
100 | provider = 'CPUExecutionProvider'
101 | if face_analysis_provider == 'CUDA':
102 | provider = 'CUDAExecutionProvider'
103 | det_sizes = []
104 | if face_analysis_det_size == 'AUTO':
105 | det_sizes = [640, 320, 160]
106 | else:
107 | det_sizes = [int(face_analysis_det_size)]
108 | face_detector = FaceDetector(det_sizes=det_sizes, root_dir=insight_facedir, providers=[provider])
109 |
110 | device = comfy.model_management.get_torch_device()
111 |
112 | # Load arcface model
113 | arcface_model = init_recognition_model('arcface', device=device)
114 |
115 | # Load image proj model
116 | image_emb_dim = 512
117 | image_proj_model = Resampler(
118 | dim=1280,
119 | depth=4,
120 | dim_head=64,
121 | heads=20,
122 | num_queries=image_proj_num_tokens,
123 | embedding_dim=image_emb_dim,
124 | output_dim=4096,
125 | ff_mult=4,
126 | )
127 | ipm_state_dict = torch.load(image_proj_model_path, map_location="cpu")
128 | image_proj_model.load_state_dict(ipm_state_dict['image_proj'])
129 | del ipm_state_dict
130 | image_proj_model.to(device, torch.bfloat16)
131 | image_proj_model.eval()
132 |
133 | return (face_detector, arcface_model, image_proj_model)
134 |
135 | class ExtractFacePoseImage:
136 | @classmethod
137 | def INPUT_TYPES(s):
138 | return {
139 | "required": {
140 | "face_detector": ("MODEL", ),
141 | "image": ("IMAGE", ),
142 | "width": ("INT", {"default": 864, "min": 0, "max": 2048, "step": 1}),
143 | "height": ("INT", {"default": 1152, "min": 0, "max": 2048, "step": 1}),
144 | },
145 | "optional": {
146 | "mask": ("MASK", ),
147 | }
148 | }
149 |
150 | RETURN_TYPES = ("IMAGE",)
151 | FUNCTION = "extract_face_pose"
152 | CATEGORY = "infinite_you"
153 |
154 | def extract_face_pose(self, face_detector, image, width, height, mask = None):
155 | np_image = tensor_to_np_image(image)[0]
156 | if mask is not None:
157 |             np_mask = tensor_to_np_image(mask)[0]
158 |             np_mask = cv2.resize(np_mask, (np_image.shape[1], np_image.shape[0]), interpolation=cv2.INTER_NEAREST)
159 |             mask_3ch = np.expand_dims(np_mask, axis=-1).astype(np.float32) / 255.0  # rescale to [0, 1]; uint8 * uint8 would overflow
160 |             mask_3ch = np.repeat(mask_3ch, 3, axis=-1) # Shape: (H, W, 3)
161 |             np_image = (np_image * mask_3ch).astype(np.uint8)
162 |
163 | pil_image = resize_and_pad_pil_image(Image.fromarray(np_image), (width, height))
164 | face_info = face_detector(cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR))
165 | if len(face_info) == 0:
166 | raise ValueError('No face detected in the input pose image')
167 |
168 | face_info = sorted(face_info, key=lambda x:(x['bbox'][2]-x['bbox'][0])*(x['bbox'][3]-x['bbox'][1]))[-1] # only use the maximum face
169 | pil_image = draw_kps(pil_image, face_info['kps'])
170 |
171 | return (np_image_to_tensor(np.array(pil_image)).unsqueeze(0), )
172 |
173 | class ExtractIDEmbedding:
174 | @classmethod
175 | def INPUT_TYPES(s):
176 | return {
177 | "required": {
178 | "face_detector": ("MODEL", ),
179 | "arcface_model": ("MODEL", ),
180 | "image_proj_model": ("MODEL", ),
181 | "image": ("IMAGE", ),
182 | }
183 | }
184 |
185 | RETURN_TYPES = ("CONDITIONING",)
186 | FUNCTION = "extract_id_embedding"
187 | CATEGORY = "infinite_you"
188 |
189 | def extract_id_embedding(self, face_detector, arcface_model, image_proj_model, image):
190 | np_image = tensor_to_np_image(image)
191 | id_image_cv2 = cv2.cvtColor(np_image[0], cv2.COLOR_RGB2BGR)
192 | face_info = face_detector(id_image_cv2)
193 | if len(face_info) == 0:
194 | raise ValueError('No face detected in the input ID image')
195 |
196 | device = comfy.model_management.get_torch_device()
197 |
198 | face_info = sorted(face_info, key=lambda x:(x['bbox'][2]-x['bbox'][0])*(x['bbox'][3]-x['bbox'][1]))[-1] # only use the maximum face
199 | landmark = face_info['kps']
200 | id_embed = extract_arcface_bgr_embedding(id_image_cv2, landmark, arcface_model)
201 | id_embed = id_embed.clone().unsqueeze(0).float().to(device)
202 | id_embed = id_embed.reshape([1, -1, 512])
203 | id_embed = id_embed.to(device=device, dtype=torch.bfloat16)
204 | with torch.no_grad():
205 | id_embed = image_proj_model(id_embed)
206 | bs_embed, seq_len, _ = id_embed.shape
207 |             id_embed = id_embed.repeat(1, 1, 1)  # no-op repeat for a single embedding
208 | id_embed = id_embed.view(bs_embed * 1, seq_len, -1)
209 | id_embed = id_embed.to(device=device, dtype=torch.bfloat16)
210 |
211 | return ({'id_embedding': id_embed}, )
212 |
213 | class InfuseNetLoader:
214 | @classmethod
215 | def INPUT_TYPES(s):
216 | return {"required": { "controlnet_name": (InfuseNetLoader.get_controlnet_names(), )}}
217 |
218 | def get_controlnet_names():
219 | names = [
220 | os.path.join("sim_stage1", "infusenet_sim_bf16.safetensors"),
221 | os.path.join("sim_stage1", "infusenet_sim_fp8e4m3fn.safetensors"),
222 | os.path.join("aes_stage2", "infusenet_aes_bf16.safetensors"),
223 | os.path.join("aes_stage2", "infusenet_aes_fp8e4m3fn.safetensors"),
224 | *folder_paths.get_filename_list("infinite_you"),
225 | ]
226 |         return sorted(name for name in set(names) if name.endswith(".safetensors"))
227 |
228 | RETURN_TYPES = ("CONTROL_NET",)
229 | FUNCTION = "load_controlnet"
230 |
231 | CATEGORY = "infinite_you"
232 |
233 | def load_controlnet(self, controlnet_name):
234 | infinite_you_dir = os.path.join(folder_paths.models_dir, "infinite_you")
235 | controlnet_path = os.path.join(infinite_you_dir, controlnet_name)
236 |
237 | if not os.path.exists(controlnet_path):
238 | dst_dir = os.path.dirname(controlnet_path)
239 | os.makedirs(dst_dir, exist_ok=True)
240 | downloaded_file = hf_hub_download(repo_id="ByteDance/InfiniteYou",
241 | filename=escape_path_for_url(os.path.join("infu_flux_v1.0", controlnet_name)),
242 | local_dir=infinite_you_dir)
243 |
244 | shutil.move(downloaded_file, controlnet_path)
245 |
246 | controlnet = load_infuse_net_flux(controlnet_path)
247 | return (controlnet,)
248 |
249 | class InfuseNetApply:
250 | @classmethod
251 | def INPUT_TYPES(s):
252 | return {"required": {"positive": ("CONDITIONING", ),
253 | "id_embedding": ("CONDITIONING", ),
254 | "control_net": ("CONTROL_NET", ),
255 | "image": ("IMAGE", ),
256 | "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
257 | "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
258 | "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001})
259 | },
260 | "optional": {
261 | "negative": ("CONDITIONING", ),
262 | "vae": ("VAE", ),
263 | "control_mask": ("MASK", ),
264 | }
265 | }
266 |
267 | RETURN_TYPES = ("CONDITIONING","CONDITIONING")
268 | RETURN_NAMES = ("positive", "negative")
269 | FUNCTION = "apply_controlnet"
270 |
271 | CATEGORY = "infinite_you"
272 |
273 | def apply_controlnet(self, positive, id_embedding, control_net, image, strength, start_percent, end_percent, negative = None, vae=None, control_mask=None, extra_concat=[]):
274 | if strength == 0:
275 | return (positive, negative)
276 |
277 | if control_mask is not None:
278 | if control_mask.dim() > 3:
279 | control_mask = control_mask.squeeze(-1)
280 | elif control_mask.dim() < 3:
281 | control_mask = control_mask.unsqueeze(0)
282 |
283 | control_hint = image.movedim(-1,1)
284 | cnets = {}
285 |
286 | out = []
287 | for conditioning in [positive, negative]:
288 | c = []
289 | if conditioning is None:
290 | out.append(None)
291 | continue
292 |
293 | for t in conditioning:
294 | d = t[1].copy()
295 |
296 | prev_cnet = d.get('control', None)
297 | if prev_cnet in cnets:
298 | c_net = cnets[prev_cnet]
299 | else:
300 | c_net = control_net.copy().set_cond_hint(control_hint, strength, (start_percent, end_percent), vae=vae, extra_concat=extra_concat)
301 | c_net.id_embedding = id_embedding['id_embedding']
302 | c_net.set_previous_controlnet(prev_cnet)
303 | c_net.set_extra_arg("control_mask", control_mask)
304 | cnets[prev_cnet] = c_net
305 |
306 | d['control'] = c_net
307 | d['control_apply_to_uncond'] = False
308 | n = [t[0], d]
309 | c.append(n)
310 | out.append(c)
311 | return (out[0], out[1])
312 |
313 | NODE_CLASS_MAPPINGS = {
314 | "IDEmbeddingModelLoader": IDEmbeddingModelLoader,
315 | "ExtractIDEmbedding": ExtractIDEmbedding,
316 | "ExtractFacePoseImage": ExtractFacePoseImage,
317 | "InfuseNetApply": InfuseNetApply,
318 | "InfuseNetLoader": InfuseNetLoader,
319 | }
320 |
321 | NODE_DISPLAY_NAME_MAPPINGS = {
322 | "IDEmbeddingModelLoader": "ID Embedding Model Loader",
323 | "ExtractIDEmbedding": "Extract ID Embedding",
324 | "ExtractFacePoseImage": "Extract Face Pose Image",
325 | "InfuseNetApply": "Apply InfuseNet",
326 | "InfuseNetLoader": "Load InfuseNet",
327 | }
328 |
--------------------------------------------------------------------------------
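For orientation, a hedged sketch of driving these node classes headlessly, mirroring the workflow JSON above. The import path, model file names, and the random placeholder image are assumptions; it presumes a working ComfyUI install (so `comfy` and `folder_paths` are importable) and network access for the model downloads:

```python
# Hypothetical headless driver for the nodes above. Paths and the placeholder
# image are illustrative only; assumes ComfyUI is on sys.path and this repo
# lives under custom_nodes/ComfyUI_InfiniteYou.
import torch
from ComfyUI_InfiniteYou.nodes import (
    IDEmbeddingModelLoader, ExtractIDEmbedding, InfuseNetLoader)

face_detector, arcface_model, image_proj_model = \
    IDEmbeddingModelLoader().load_insightface(
        image_proj_model_name="aes_stage2/image_proj_model.bin",
        image_proj_num_tokens=8,
        face_analysis_provider="CPU",
        face_analysis_det_size="AUTO",
    )

# A ComfyUI IMAGE tensor is (batch, height, width, channels) in [0, 1].
id_image = torch.rand(1, 1152, 864, 3)  # placeholder -- substitute a real face
                                        # photo, else ExtractIDEmbedding raises
(id_cond,) = ExtractIDEmbedding().extract_id_embedding(
    face_detector, arcface_model, image_proj_model, id_image)

(infuse_net,) = InfuseNetLoader().load_controlnet(
    "aes_stage2/infusenet_aes_bf16.safetensors")
# id_cond and infuse_net then feed InfuseNetApply.apply_controlnet(...),
# whose outputs replace the positive/negative conditioning in the sampler.
```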
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "infiniteyou"
3 | description = "Official ComfyUI Support - InfiniteYou: Flexible Photo Recrafting While Preserving Your Identity"
4 | version = "1.0.1"
5 | license = {file = "LICENSE"}
6 | dependencies = ["facexlib>=0.3.0", "onnxruntime>=1.19.2", "insightface>=0.7.3", "opencv-python>=4.11.0.86", "huggingface_hub"]
7 |
8 | [project.urls]
9 | Repository = "https://github.com/bytedance/ComfyUI_InfiniteYou"
10 | # Used by Comfy Registry https://comfyregistry.org
11 |
12 | [tool.comfy]
13 | PublisherId = "yuminjia"
14 | DisplayName = "ComfyUI_InfiniteYou"
15 | Icon = ""
16 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | facexlib>=0.3.0
2 | onnxruntime>=1.19.2
3 | insightface>=0.7.3
4 | opencv-python>=4.11.0.86
5 | huggingface_hub
6 |
--------------------------------------------------------------------------------
/resampler.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2023 Anas Awadalla, Irena Gao, Joshua Gardner, Jack Hessel, Yusuf Hanafy,
2 | # Wanrong Zhu, Kalyani Marathe, Yonatan Bitton, Samir Gadre, Jenia Jitsev, Simon Kornblith,
3 | # Pang Wei Koh, Gabriel Ilharco, Mitchell Wortsman, Ludwig Schmidt.
4 | # SPDX-License-Identifier: MIT
5 |
6 | # Modified from https://github.com/mlfoundations/open_flamingo/blob/main/open_flamingo/src/helpers.py
7 |
8 | import math
9 |
10 | import torch
11 | import torch.nn as nn
12 |
13 |
14 | # FFN
15 | def FeedForward(dim, mult=4):
16 | inner_dim = int(dim * mult)
17 | return nn.Sequential(
18 | nn.LayerNorm(dim),
19 | nn.Linear(dim, inner_dim, bias=False),
20 | nn.GELU(),
21 | nn.Linear(inner_dim, dim, bias=False),
22 | )
23 |
24 |
25 | def reshape_tensor(x, heads):
26 | bs, length, width = x.shape
27 | #(bs, length, width) --> (bs, length, n_heads, dim_per_head)
28 | x = x.view(bs, length, heads, -1)
29 | # (bs, length, n_heads, dim_per_head) --> (bs, n_heads, length, dim_per_head)
30 | x = x.transpose(1, 2)
31 |     # the reshape is effectively a no-op here; shape stays (bs, n_heads, length, dim_per_head)
32 | x = x.reshape(bs, heads, length, -1)
33 | return x
34 |
35 |
36 | class PerceiverAttention(nn.Module):
37 | def __init__(self, *, dim, dim_head=64, heads=8):
38 | super().__init__()
39 | self.scale = dim_head**-0.5
40 | self.dim_head = dim_head
41 | self.heads = heads
42 | inner_dim = dim_head * heads
43 |
44 | self.norm1 = nn.LayerNorm(dim)
45 | self.norm2 = nn.LayerNorm(dim)
46 |
47 | self.to_q = nn.Linear(dim, inner_dim, bias=False)
48 | self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
49 | self.to_out = nn.Linear(inner_dim, dim, bias=False)
50 |
51 | def forward(self, x, latents):
52 | """
53 | Args:
54 | x (torch.Tensor): image features
55 | shape (b, n1, D)
56 |             latents (torch.Tensor): latent features
57 | shape (b, n2, D)
58 | """
59 | x = self.norm1(x)
60 | latents = self.norm2(latents)
61 |
62 | b, l, _ = latents.shape
63 |
64 | q = self.to_q(latents)
65 | kv_input = torch.cat((x, latents), dim=-2)
66 | k, v = self.to_kv(kv_input).chunk(2, dim=-1)
67 |
68 | q = reshape_tensor(q, self.heads)
69 | k = reshape_tensor(k, self.heads)
70 | v = reshape_tensor(v, self.heads)
71 |
72 | # attention
73 | scale = 1 / math.sqrt(math.sqrt(self.dim_head))
74 | weight = (q * scale) @ (k * scale).transpose(-2, -1) # More stable with f16 than dividing afterwards
75 | weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
76 | out = weight @ v
77 |
78 | out = out.permute(0, 2, 1, 3).reshape(b, l, -1)
79 |
80 | return self.to_out(out)
81 |
82 |
83 | class Resampler(nn.Module):
84 | def __init__(
85 | self,
86 | dim=1024,
87 | depth=8,
88 | dim_head=64,
89 | heads=16,
90 | num_queries=8,
91 | embedding_dim=768,
92 | output_dim=1024,
93 | ff_mult=4,
94 | ):
95 | super().__init__()
96 |
97 | self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim**0.5)
98 |
99 | self.proj_in = nn.Linear(embedding_dim, dim)
100 |
101 | self.proj_out = nn.Linear(dim, output_dim)
102 | self.norm_out = nn.LayerNorm(output_dim)
103 |
104 | self.layers = nn.ModuleList([])
105 | for _ in range(depth):
106 | self.layers.append(
107 | nn.ModuleList(
108 | [
109 | PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),
110 | FeedForward(dim=dim, mult=ff_mult),
111 | ]
112 | )
113 | )
114 |
115 | def forward(self, x):
116 |
117 | latents = self.latents.repeat(x.size(0), 1, 1)
118 |
119 | x = self.proj_in(x)
120 |
121 | for attn, ff in self.layers:
122 | latents = attn(x, latents) + latents
123 | latents = ff(latents) + latents
124 |
125 | latents = self.proj_out(latents)
126 | return self.norm_out(latents)
127 |
--------------------------------------------------------------------------------
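The Resampler is a compact Perceiver-style head: learned query latents repeatedly cross-attend to the projected input and are then mapped to the output width. A quick shape check using the hyperparameters nodes.py passes (512-d ArcFace input, 8 query tokens, 4096-d output to match the Flux text context):

```python
import torch
from resampler import Resampler  # the class defined above

proj = Resampler(dim=1280, depth=4, dim_head=64, heads=20,
                 num_queries=8, embedding_dim=512, output_dim=4096, ff_mult=4)
proj.eval()

arcface_embedding = torch.randn(1, 1, 512)  # one 512-d identity embedding
with torch.no_grad():
    tokens = proj(arcface_embedding)
print(tokens.shape)  # torch.Size([1, 8, 4096])
```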
/utils.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2025 Bytedance Ltd. and/or its affiliates. All rights reserved.
2 |
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 |
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import torch
16 | import numpy as np
17 | from insightface.utils import face_align
18 | from PIL import Image
19 | import math
20 | import cv2
21 |
22 | def extract_arcface_bgr_embedding(in_image, landmark, arcface_model, in_settings=None):
23 | kps = landmark
24 | arc_face_image = face_align.norm_crop(in_image, landmark=np.array(kps), image_size=112)
25 | arc_face_image = torch.from_numpy(arc_face_image).unsqueeze(0).permute(0,3,1,2) / 255.
26 | arc_face_image = 2 * arc_face_image - 1
27 |     arc_face_image = arc_face_image.to(next(arcface_model.parameters()).device).contiguous()  # follow the model's device rather than assuming CUDA
28 | face_emb = arcface_model(arc_face_image)[0] # [512], normalized
29 | return face_emb
30 |
31 | def tensor_to_np_image(tensor):
32 | return tensor.mul(255).clamp(0, 255).byte().cpu().numpy()
33 |
34 | def np_image_to_tensor(image):
35 | return torch.clamp(torch.from_numpy(image).float() / 255., 0, 1)
36 |
37 | def resize_and_pad_pil_image(source_img, target_img_size):
38 | # Get original and target sizes
39 | source_img_size = source_img.size
40 | target_width, target_height = target_img_size
41 |
42 | # Determine the new size based on the shorter side of target_img
43 | if target_width <= target_height:
44 | new_width = target_width
45 | new_height = int(target_width * (source_img_size[1] / source_img_size[0]))
46 | else:
47 | new_height = target_height
48 | new_width = int(target_height * (source_img_size[0] / source_img_size[1]))
49 |
50 | # Resize the source image using LANCZOS interpolation for high quality
51 | resized_source_img = source_img.resize((new_width, new_height), Image.LANCZOS)
52 |
53 | # Compute padding to center resized image
54 | pad_left = (target_width - new_width) // 2
55 | pad_top = (target_height - new_height) // 2
56 |
57 | # Create a new image with white background
58 | padded_img = Image.new("RGB", target_img_size, (255, 255, 255))
59 | padded_img.paste(resized_source_img, (pad_left, pad_top))
60 |
61 | return padded_img
62 |
63 | # modified from https://github.com/instantX-research/InstantID/blob/main/pipeline_stable_diffusion_xl_instantid.py
64 | def draw_kps(image_pil, kps, color_list=[(255,0,0), (0,255,0), (0,0,255), (255,255,0), (255,0,255)]):
65 | stickwidth = 4
66 | limbSeq = np.array([[0, 2], [1, 2], [3, 2], [4, 2]])
67 | kps = np.array(kps)
68 |
69 | w, h = image_pil.size
70 | out_img = np.zeros([h, w, 3])
71 |
72 | for i in range(len(limbSeq)):
73 | index = limbSeq[i]
74 | color = color_list[index[0]]
75 |
76 | x = kps[index][:, 0]
77 | y = kps[index][:, 1]
78 | length = ((x[0] - x[1]) ** 2 + (y[0] - y[1]) ** 2) ** 0.5
79 | angle = math.degrees(math.atan2(y[0] - y[1], x[0] - x[1]))
80 | polygon = cv2.ellipse2Poly((int(np.mean(x)), int(np.mean(y))), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
81 | out_img = cv2.fillConvexPoly(out_img.copy(), polygon, color)
82 | out_img = (out_img * 0.6).astype(np.uint8)
83 |
84 | for idx_kp, kp in enumerate(kps):
85 | color = color_list[idx_kp]
86 | x, y = kp
87 | out_img = cv2.circle(out_img.copy(), (int(x), int(y)), 10, color, -1)
88 |
89 | out_img_pil = Image.fromarray(out_img.astype(np.uint8))
90 | return out_img_pil
91 |
92 | def escape_path_for_url(path):
93 | return path.replace("\\", "/")
94 |
--------------------------------------------------------------------------------
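A quick check of the letterboxing in `resize_and_pad_pil_image` above: the source is scaled to fit the target's shorter side, centered, and padded with white, so aspect ratio is always preserved:

```python
from PIL import Image
from utils import resize_and_pad_pil_image  # the helper defined above

src = Image.new("RGB", (400, 300), (0, 0, 0))     # 4:3 black source
out = resize_and_pad_pil_image(src, (864, 1152))  # portrait target

print(out.size)  # (864, 1152): output is always exactly the target size
# width (864) is the shorter target side, so the source scales by 864/400
# to 864x648 and gets (1152-648)//2 = 252 rows of white padding top and bottom
print(out.getpixel((0, 0)))      # (255, 255, 255): padding
print(out.getpixel((432, 576)))  # (0, 0, 0): resized source content
```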