├── .gitignore ├── LICENSE ├── README.md ├── app.py ├── assets ├── comparative_results.jpg ├── examples │ ├── man.jpg │ ├── man_pose.jpg │ └── woman.jpg ├── plug_and_play.jpg └── teaser.jpg ├── pipelines ├── __init__.py ├── pipeline_flux_infusenet.py ├── pipeline_infu_flux.py └── resampler.py ├── requirements.txt └── test.py /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .idea 3 | .ipynb_checkpoints 4 | .gradio 5 | *.swp 6 | *.pyc 7 | __pycache__ 8 | *.tar* 9 | *.zip 10 | *.pkl 11 | *.pyc 12 | *.bak 13 | *.png 14 | *.deb 15 | 16 | .isort.cfg 17 | .pre-commit-config.yaml 18 | 19 | dataset_stats 20 | debug* 21 | locks 22 | checkpoints 23 | pretrained_checkpoint 24 | ./models 25 | models 26 | results 27 | wandb 28 | tmp* 29 | env* 30 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
2 | 3 | ## InfiniteYou: Flexible Photo Recrafting While Preserving Your Identity 4 | 5 | [**Liming Jiang**](https://liming-jiang.com/)     6 | [**Qing Yan**](https://scholar.google.com/citations?user=0TIYjPAAAAAJ)     7 | [**Yumin Jia**](https://www.linkedin.com/in/yuminjia/)     8 | [**Zichuan Liu**](https://scholar.google.com/citations?user=-H18WY8AAAAJ)     9 | [**Hao Kang**](https://scholar.google.com/citations?user=VeTCSyEAAAAJ)     10 | [**Xin Lu**](https://scholar.google.com/citations?user=mFC0wp8AAAAJ)
11 | ByteDance Intelligent Creation 12 | 13 |   14 |   15 |   16 |   17 |   18 | 19 |
20 | 21 | ![teaser](./assets/teaser.jpg) 22 | 23 | > **Abstract:** *Achieving flexible and high-fidelity identity-preserved image generation remains formidable, particularly with advanced Diffusion Transformers (DiTs) like FLUX. We introduce **InfiniteYou (InfU)**, one of the earliest robust frameworks leveraging DiTs for this task. InfU addresses significant issues of existing methods, such as insufficient identity similarity, poor text-image alignment, and low generation quality and aesthetics. Central to InfU is InfuseNet, a component that injects identity features into the DiT base model via residual connections, enhancing identity similarity while maintaining generation capabilities. A multi-stage training strategy, including pretraining and supervised fine-tuning (SFT) with synthetic single-person-multiple-sample (SPMS) data, further improves text-image alignment, ameliorates image quality, and alleviates face copy-pasting. Extensive experiments demonstrate that InfU achieves state-of-the-art performance, surpassing existing baselines. In addition, the plug-and-play design of InfU ensures compatibility with various existing methods, offering a valuable contribution to the broader community.* 24 | 25 | 26 | ## 🔥 News 27 | 28 | - [04/2025] 🔥 The official [ComfyUI node](https://github.com/bytedance/ComfyUI_InfiniteYou) is released. Unofficial [ComfyUI contributions](https://github.com/bytedance/InfiniteYou#comfyui-nodes) are appreciated. 29 | 30 | - [04/2025] 🔥 Quantization and offloading [options](https://github.com/bytedance/InfiniteYou#memory-requirements) are provided to reduce the memory requirements for InfiniteYou-FLUX v1.0. 31 | 32 | - [03/2025] 🔥 The [code](https://github.com/bytedance/InfiniteYou), [model](https://huggingface.co/ByteDance/InfiniteYou), and [demo](https://huggingface.co/spaces/ByteDance/InfiniteYou-FLUX) of InfiniteYou-FLUX v1.0 are released. 33 | 34 | - [03/2025] 🔥 The [project page](https://bytedance.github.io/InfiniteYou) of InfiniteYou is created. 35 | 36 | - [03/2025] 🔥 The [paper](https://arxiv.org/abs/2503.16418) of InfiniteYou is released on arXiv. 37 | 38 | 39 | ## 💡 Important Usage Tips 40 | 41 | - We released two model variants of InfiniteYou-FLUX v1.0: [aes_stage2](https://huggingface.co/ByteDance/InfiniteYou/tree/main/infu_flux_v1.0/aes_stage2) and [sim_stage1](https://huggingface.co/ByteDance/InfiniteYou/tree/main/infu_flux_v1.0/sim_stage1). The `aes_stage2` is our model after SFT, which is used by default for better text-image alignment and aesthetics. For higher ID similarity, please try `sim_stage1` (using `--model_version` to switch). More details can be found in our [paper](https://arxiv.org/abs/2503.16418). 42 | 43 | - To better fit specific personal needs, we find that two arguments are highly useful to adjust:
`--infusenet_conditioning_scale` (default: `1.0`) and `--infusenet_guidance_start` (default: `0.0`). Usually, you may NOT need to adjust them. If necessary, start by trying a slightly larger `--infusenet_guidance_start` (*e.g.*, `0.1`) only (especially helpful for `sim_stage1`). If still not satisfactory, then try a slightly smaller `--infusenet_conditioning_scale` (*e.g.*, `0.9`). 44 | 45 | - We also provide two LoRAs ([Realism](https://civitai.com/models/631986?modelVersionId=706528) and [Anti-blur](https://civitai.com/models/675581/anti-blur-flux-lora)) for additional usage flexibility. If needed, try `Realism` only first. They are *entirely optional* examples to try and were NOT used in our paper. 46 | 47 | - If the generated gender does not align with your preferences, try adding specific words to the text prompt, such as 'a man', 'a woman', *etc*. We encourage users to use inclusive and respectful language. 48 | 49 | 50 | ## :european_castle: Model Zoo 51 | 52 | | InfiniteYou Version | Model Version | Base Model Trained with | Description | 53 | | :---: | :---: | :---: | :---: | 54 | | [InfiniteYou-FLUX v1.0](https://huggingface.co/ByteDance/InfiniteYou) | [aes_stage2](https://huggingface.co/ByteDance/InfiniteYou/tree/main/infu_flux_v1.0/aes_stage2) | [FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev) | Stage-2 model after SFT. Better text-image alignment and aesthetics. | 55 | | [InfiniteYou-FLUX v1.0](https://huggingface.co/ByteDance/InfiniteYou) | [sim_stage1](https://huggingface.co/ByteDance/InfiniteYou/tree/main/infu_flux_v1.0/sim_stage1) | [FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev) | Stage-1 model before SFT. Higher identity similarity. | 56 | 57 | 58 | ## 🔧 Requirements and Installation 59 | 60 | ### Dependencies 61 | 62 | Simply run this one-line command to install (feel free to create a `python3` virtual environment before you run): 63 | 64 | ```bash 65 | pip install -r requirements.txt 66 | ``` 67 | 68 | ### Memory Requirements 69 | 70 | - **Full-performance**: The original `bf16` model inference requires a **peak VRAM** of around **43GB**. 71 | 72 | - **Fast CPU offloading**: By specifying only `--cpu_offload` in [test.py](https://github.com/bytedance/InfiniteYou/blob/main/test.py#L44), the **peak VRAM** is reduced to around **30GB** with **NO** performance degradation. 73 | 74 | - **8-bit quantization**: By specifying only `--quantize_8bit` in [test.py](https://github.com/bytedance/InfiniteYou/blob/main/test.py#L44), the **peak VRAM** is reduced to around **24GB** with performance remaining very similar. 75 | 76 | - **Combining fast CPU offloading and 8-bit quantization**: By specifying both `--cpu_offload` and
`--quantize_8bit`, the **peak VRAM** is further reduced to around **16GB** with performance remaining very similar. 77 | 78 | If you want to use our models but only have a GPU with even less VRAM, please further refer to [Diffusers memory reduction tips](https://huggingface.co/docs/diffusers/en/optimization/memory), where some more aggressive strategies may be helpful. Community contributions are also welcome. 79 | 80 | 81 | ## ⚡️ Quick Inference 82 | 83 | ### Local Inference Script 84 | 85 | ```bash 86 | python test.py --id_image ./assets/examples/man.jpg --prompt "A man, portrait, cinematic" --out_results_dir ./results 87 | ``` 88 | 89 |
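If you are VRAM-constrained, the command above can be combined with the memory-reduction options described in the Memory Requirements section. The following is a minimal sketch, not an additional official entry point: it simply reuses the documented `--cpu_offload` and `--quantize_8bit` flags of `test.py` together with the bundled example assets.

```bash
# Same example as above, with fast CPU offloading and 8-bit quantization
# enabled to target a peak VRAM of roughly 16GB (flags documented below).
python test.py \
  --id_image ./assets/examples/man.jpg \
  --prompt "A man, portrait, cinematic" \
  --out_results_dir ./results \
  --cpu_offload \
  --quantize_8bit
```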
<details> 90 | <summary>Explanation of all the arguments (click to expand!)</summary> 91 | 92 | - Input and output: 93 | - `--id_image (str)`: The path to the input identity (ID) image. Default: `./assets/examples/man.jpg`. 94 | - `--prompt (str)`: The text prompt for image generation. Default: `A man, portrait, cinematic`. 95 | - `--out_results_dir (str)`: The path to the output directory to save the generated results. Default: `./results`. 96 | - `--control_image (str or None)`: The path to the control image \[*optional*\] to extract five facial keypoints to control the generation. Default: `None`. 97 | - `--base_model_path (str)`: The Hugging Face or local path to the base model. Default: `black-forest-labs/FLUX.1-dev`. 98 | - `--model_dir (str)`: The path to the InfiniteYou model directory. Default: `ByteDance/InfiniteYou`. 99 | - Version control: 100 | - `--infu_flux_version (str)`: InfiniteYou-FLUX version: currently only `v1.0` is supported. Default: `v1.0`. 101 | - `--model_version (str)`: The model variant to use: `aes_stage2` | `sim_stage1`. Default: `aes_stage2`. 102 | - General inference arguments: 103 | - `--cuda_device (int)`: The CUDA device ID to use. Default: `0`. 104 | - `--seed (int)`: The seed for reproducibility (0 for random). Default: `0`. 105 | - `--guidance_scale (float)`: The guidance scale for the diffusion process. Default: `3.5`. 106 | - `--num_steps (int)`: The number of inference steps. Default: `30`. 107 | - InfiniteYou-specific arguments: 108 | - `--infusenet_conditioning_scale (float)`: The scale for the InfuseNet conditioning. Default: `1.0`. 109 | - `--infusenet_guidance_start (float)`: The start point for the InfuseNet guidance injection. Default: `0.0`. 110 | - `--infusenet_guidance_end (float)`: The end point for the InfuseNet guidance injection. Default: `1.0`. 111 | - Optional LoRAs: 112 | - `--enable_realism_lora (store_true)`: Whether to enable the Realism LoRA. Default: `False`. 113 | - `--enable_anti_blur_lora (store_true)`: Whether to enable the Anti-blur LoRA. Default: `False`. 114 | - Memory reduction options: 115 | - `--quantize_8bit (store_true)`: Whether to quantize the model to the 8-bit format. Default: `False`. 116 | - `--cpu_offload (store_true)`: Whether to use fast CPU offloading. Default: `False`. 117 | 118 | </details>
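Building on the usage tips above, a minimal sketch of a tuning run might look like the following. It is illustrative only: the argument names are exactly those documented in the list above, and the values (`sim_stage1`, `0.1`, `0.9`) are the starting points suggested in the usage tips rather than recommended settings for every input.

```bash
# Hypothetical tuning run: higher-ID-similarity model variant plus a slightly
# delayed and slightly weaker InfuseNet injection, per the usage tips.
python test.py \
  --id_image ./assets/examples/woman.jpg \
  --prompt "A woman, portrait, cinematic" \
  --out_results_dir ./results \
  --model_version sim_stage1 \
  --infusenet_guidance_start 0.1 \
  --infusenet_conditioning_scale 0.9
```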
119 | 120 | 121 | ### Local Gradio Demo 122 | 123 | ```bash 124 | python app.py 125 | ``` 126 | 127 | ### Online Hugging Face Demo 128 | 129 | We appreciate the GPU grant from the Hugging Face team. 130 | You can also try our [InfiniteYou-FLUX Hugging Face demo](https://huggingface.co/spaces/ByteDance/InfiniteYou-FLUX) online. 131 | 132 | ### ComfyUI Nodes 133 | 134 | - **Official ComfyUI native node implementation** 135 | - [bytedance/ComfyUI_InfiniteYou](https://github.com/bytedance/ComfyUI_InfiniteYou) 136 | 137 | - **Unofficial contributions** 138 | - [ZenAI-Vietnam/ComfyUI_InfiniteYou](https://github.com/ZenAI-Vietnam/ComfyUI_InfiniteYou) 139 | - [katalist-ai/ComfyUI-InfiniteYou](https://github.com/katalist-ai/ComfyUI-InfiniteYou) 140 | - [niknah/ComfyUI-InfiniteYou](https://github.com/niknah/ComfyUI-InfiniteYou) 141 | - [game4d/ComfyUI-BDsInfiniteYou](https://github.com/game4d/ComfyUI-BDsInfiniteYou) 142 | - [GGUF version](https://civitai.com/models/1424364?modelVersionId=1617144) (16GB VRAM) and [Christmas Toy LoRA](https://civitai.com/models/1466015?modelVersionId=1658038) by [@MegaCocos](https://github.com/MegaCocos) 143 | 144 | 145 | ## 🆚 Comparison with State-of-the-Art Relevant Methods 146 | 147 | ![comparative_results](./assets/comparative_results.jpg) 148 | 149 | Qualitative comparison results of InfU with the state-of-the-art baselines, FLUX.1-dev IP-Adapter and PuLID-FLUX. The identity similarity and text-image alignment of the results generated by FLUX.1-dev IP-Adapter (IPA) are inadequate. PuLID-FLUX generates images with decent identity similarity. However, it suffers from poor text-image alignment (Columns 1, 2, 4), and the image quality (e.g., bad hands in Column 5) and aesthetic appeal are degraded. In addition, the face copy-paste issue of PuLID-FLUX is evident (Column 5). In comparison, the proposed InfU outperforms the baselines across all dimensions. 150 | 151 | 152 | ## ⚙️ Plug-and-Play Property with Off-the-Shelf Popular Approaches 153 | 154 | ![plug_and_play](./assets/plug_and_play.jpg) 155 | 156 | InfU features a desirable plug-and-play design, compatible with many existing methods. It naturally supports base model replacement with any variants of FLUX.1-dev, such as FLUX.1-schnell for more efficient generation (e.g., in 4 steps). The compatibility with ControlNets and LoRAs provides more controllability and flexibility for customized tasks. Notably, the compatibility with OminiControl extends our potential for multi-concept personalization, such as interacted identity (ID) and object personalized generation. InfU is also compatible with IP-Adapter (IPA) for stylization of personalized images, producing decent results when injecting style references via IPA. Our plug-and-play feature may extend to even more approaches, providing valuable contributions to the broader community. 157 | 158 | 159 | ## 📜 Disclaimer and Licenses 160 | 161 | The images used in this repository and related demos are sourced from consented subjects or generated by the models. These pictures are intended solely to showcase the capabilities of our research. If you have any concerns, please feel free to contact us, and we will promptly remove any inappropriate content. 162 | 163 | The use of the released code, model, and demo must strictly adhere to the respective licenses. 
Our code is released under the [Apache License 2.0](./LICENSE), and our model is released under the [Creative Commons Attribution-NonCommercial 4.0 International Public License](https://huggingface.co/ByteDance/InfiniteYou/blob/main/LICENSE) for academic research purposes only. Any manual or automatic downloading of the face models from [InsightFace](https://github.com/deepinsight/insightface), the [FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev) base model, LoRAs ([Realism](https://civitai.com/models/631986?modelVersionId=706528) and [Anti-blur](https://civitai.com/models/675581/anti-blur-flux-lora)), *etc.*, must follow their original licenses and be used only for academic research purposes. 164 | 165 | This research aims to positively impact the field of Generative AI. Any usage of this method must be responsible and comply with local laws. The developers do not assume any responsibility for any potential misuse. 166 | 167 | 168 | ## 🤗 Acknowledgments 169 | 170 | We sincerely acknowledge the insightful discussions from Stathi Fotiadis, Min Jin Chong, Xiao Yang, Tiancheng Zhi, Jing Liu, and Xiaohui Shen. We genuinely appreciate the help from Jincheng Liang and Lu Guo with our user study and qualitative evaluation. 171 | 172 | 173 | ## 📖 Citation 174 | 175 | If you find InfiniteYou useful for your research or applications, please cite our paper: 176 | 177 | ```bibtex 178 | @article{jiang2025infiniteyou, 179 | title={{InfiniteYou}: Flexible Photo Recrafting While Preserving Your Identity}, 180 | author={Jiang, Liming and Yan, Qing and Jia, Yumin and Liu, Zichuan and Kang, Hao and Lu, Xin}, 181 | journal={arXiv preprint}, 182 | volume={arXiv:2503.16418}, 183 | year={2025} 184 | } 185 | ``` 186 | 187 | We also appreciate it if you could give a star :star: to this repository. Thanks a lot! 188 | -------------------------------------------------------------------------------- /app.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2025 Bytedance Ltd. and/or its affiliates. All rights reserved. 2 | 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | import gc 16 | 17 | import gradio as gr 18 | import pillow_avif 19 | import torch 20 | from huggingface_hub import snapshot_download 21 | from pillow_heif import register_heif_opener 22 | 23 | from pipelines.pipeline_infu_flux import InfUFluxPipeline 24 | 25 | 26 | # Register HEIF support for Pillow 27 | register_heif_opener() 28 | 29 | class ModelVersion: 30 | STAGE_1 = "sim_stage1" 31 | STAGE_2 = "aes_stage2" 32 | 33 | DEFAULT_VERSION = STAGE_2 34 | 35 | ENABLE_ANTI_BLUR_DEFAULT = False 36 | ENABLE_REALISM_DEFAULT = False 37 | 38 | loaded_pipeline_config = { 39 | "model_version": "aes_stage2", 40 | "enable_realism": False, 41 | "enable_anti_blur": False, 42 | 'pipeline': None 43 | } 44 | 45 | 46 | def download_models(): 47 | snapshot_download(repo_id='ByteDance/InfiniteYou', local_dir='./models/InfiniteYou', local_dir_use_symlinks=False) 48 | try: 49 | snapshot_download(repo_id='black-forest-labs/FLUX.1-dev', local_dir='./models/FLUX.1-dev', local_dir_use_symlinks=False) 50 | except Exception as e: 51 | print(e) 52 | print('\nYou are downloading `black-forest-labs/FLUX.1-dev` to `./models/FLUX.1-dev` but failed. ' 53 | 'Please accept the agreement and obtain access at https://huggingface.co/black-forest-labs/FLUX.1-dev. ' 54 | 'Then, use `huggingface-cli login` and your access tokens at https://huggingface.co/settings/tokens to authenticate. ' 55 | 'After that, run the code again.') 56 | print('\nYou can also download it manually from HuggingFace and put it in `./models/InfiniteYou`, ' 57 | 'or you can modify `base_model_path` in `app.py` to specify the correct path.') 58 | exit() 59 | 60 | 61 | def prepare_pipeline(model_version, enable_realism, enable_anti_blur): 62 | if ( 63 | loaded_pipeline_config['pipeline'] is not None 64 | and loaded_pipeline_config["enable_realism"] == enable_realism 65 | and loaded_pipeline_config["enable_anti_blur"] == enable_anti_blur 66 | and model_version == loaded_pipeline_config["model_version"] 67 | ): 68 | return loaded_pipeline_config['pipeline'] 69 | 70 | loaded_pipeline_config["enable_realism"] = enable_realism 71 | loaded_pipeline_config["enable_anti_blur"] = enable_anti_blur 72 | loaded_pipeline_config["model_version"] = model_version 73 | 74 | pipeline = loaded_pipeline_config['pipeline'] 75 | if pipeline is None or pipeline.model_version != model_version: 76 | print(f'Switching model to {model_version}') 77 | del pipeline 78 | del loaded_pipeline_config['pipeline'] 79 | gc.collect() 80 | torch.cuda.empty_cache() 81 | 82 | model_path = f'./models/InfiniteYou/infu_flux_v1.0/{model_version}' 83 | print(f'Loading model from {model_path}') 84 | 85 | pipeline = InfUFluxPipeline( 86 | base_model_path='./models/FLUX.1-dev', 87 | infu_model_path=model_path, 88 | insightface_root_path='./models/InfiniteYou/supports/insightface', 89 | image_proj_num_tokens=8, 90 | infu_flux_version='v1.0', 91 | model_version=model_version, 92 | ) 93 | 94 | loaded_pipeline_config['pipeline'] = pipeline 95 | 96 | pipeline.pipe.delete_adapters(['realism', 'anti_blur']) 97 | loras = [] 98 | if enable_realism: 99 | loras.append(['./models/InfiniteYou/supports/optional_loras/flux_realism_lora.safetensors', 'realism', 1.0]) 100 | if enable_anti_blur: 101 | loras.append(['./models/InfiniteYou/supports/optional_loras/flux_anti_blur_lora.safetensors', 'anti_blur', 1.0]) 102 | pipeline.load_loras(loras) 103 | 104 | return pipeline 105 | 106 | 107 | def generate_image( 108 | input_image, 109 | control_image, 110 | prompt, 111 | seed, 112 | width, 113 | height, 114 | 
guidance_scale, 115 | num_steps, 116 | infusenet_conditioning_scale, 117 | infusenet_guidance_start, 118 | infusenet_guidance_end, 119 | enable_realism, 120 | enable_anti_blur, 121 | model_version 122 | ): 123 | pipeline = prepare_pipeline(model_version=model_version, enable_realism=enable_realism, enable_anti_blur=enable_anti_blur) 124 | 125 | if seed == 0: 126 | seed = torch.seed() & 0xFFFFFFFF 127 | 128 | try: 129 | image = pipeline( 130 | id_image=input_image, 131 | prompt=prompt, 132 | control_image=control_image, 133 | seed=seed, 134 | width=width, 135 | height=height, 136 | guidance_scale=guidance_scale, 137 | num_steps=num_steps, 138 | infusenet_conditioning_scale=infusenet_conditioning_scale, 139 | infusenet_guidance_start=infusenet_guidance_start, 140 | infusenet_guidance_end=infusenet_guidance_end, 141 | ) 142 | except Exception as e: 143 | print(e) 144 | gr.Error(f"An error occurred: {e}") 145 | return gr.update() 146 | 147 | return gr.update(value = image, label=f"Generated Image, seed = {seed}") 148 | 149 | 150 | def generate_examples(id_image, control_image, prompt_text, seed, enable_realism, enable_anti_blur, model_version): 151 | return generate_image(id_image, control_image, prompt_text, seed, 864, 1152, 3.5, 30, 1.0, 0.0, 1.0, enable_realism, enable_anti_blur, model_version) 152 | 153 | 154 | sample_list = [ 155 | ['./assets/examples/man.jpg', None, 'A sophisticated gentleman exuding confidence. He is dressed in a 1990s brown plaid jacket with a high collar, paired with a dark grey turtleneck. His trousers are tailored and charcoal in color, complemented by a sleek leather belt. The background showcases an elegant library with bookshelves, a marble fireplace, and warm lighting, creating a refined and cozy atmosphere. His relaxed posture and casual hand-in-pocket stance add to his composed and stylish demeanor', 666, False, False, 'aes_stage2'], 156 | ['./assets/examples/man.jpg', './assets/examples/man_pose.jpg', 'A man, portrait, cinematic', 42, True, False, 'aes_stage2'], 157 | ['./assets/examples/man.jpg', None, 'A man, portrait, cinematic', 12345, False, False, 'sim_stage1'], 158 | ['./assets/examples/woman.jpg', './assets/examples/woman.jpg', 'A woman, portrait, cinematic', 1621695706, False, False, 'sim_stage1'], 159 | ['./assets/examples/woman.jpg', None, 'A young woman holding a sign with the text "InfiniteYou", "Infinite" in black and "You" in red, pure background', 3724009365, False, False, 'aes_stage2'], 160 | ['./assets/examples/woman.jpg', None, 'A photo of an elegant Javanese bride in traditional attire, with long hair styled into intricate a braid made of many fresh flowers, wearing a delicate headdress made from sequins and beads. She\'s holding flowers, light smiling at the camera, against a backdrop adorned with orchid blooms. The scene captures her grace as she stands amidst soft pastel colors, adding to its dreamy atmosphere', 42, True, False, 'aes_stage2'], 161 | ['./assets/examples/woman.jpg', None, 'A photo of an elegant Javanese bride in traditional attire, with long hair styled into intricate a braid made of many fresh flowers, wearing a delicate headdress made from sequins and beads. She\'s holding flowers, light smiling at the camera, against a backdrop adorned with orchid blooms. 
The scene captures her grace as she stands amidst soft pastel colors, adding to its dreamy atmosphere', 42, False, False, 'sim_stage1'], 162 | ] 163 | 164 | with gr.Blocks() as demo: 165 | session_state = gr.State({}) 166 | default_model_version = "v1.0" 167 | 168 | gr.HTML(""" 169 |
<div style="text-align: center;"> 170 | <h1>InfiniteYou-FLUX</h1> 171 | <p>Official Gradio Demo for <b>InfiniteYou: Flexible Photo Recrafting While Preserving Your Identity</b></p> 172 | <a href="https://bytedance.github.io/InfiniteYou">[Project Page]</a>&nbsp; 173 | <a href="https://arxiv.org/abs/2503.16418">[Paper]</a>&nbsp; 174 | <a href="https://github.com/bytedance/InfiniteYou">[Code]</a>&nbsp; 175 | <a href="https://huggingface.co/ByteDance/InfiniteYou">[Model]</a> 176 | </div>
177 | """) 178 | 179 | gr.Markdown(""" 180 | ### 💡 How to Use This Demo: 181 | 1. **Upload an identity (ID) image containing a human face.** For multiple faces, only the largest face will be detected. The face should ideally be clear and large enough, without significant occlusions or blur. 182 | 2. **Enter the text prompt to describe the generated image and select the model version.** Please refer to **important usage tips** under the Generated Image field. 183 | 3. *[Optional] Upload a control image containing a human face.* Only five facial keypoints will be extracted to control the generation. If not provided, we use a black control image, indicating no control. 184 | 4. *[Optional] Adjust advanced hyperparameters or apply optional LoRAs to meet personal needs.* Please refer to **important usage tips** under the Generated Image field. 185 | 5. **Click the "Generate" button to generate an image.** Enjoy! 186 | """) 187 | 188 | with gr.Row(): 189 | with gr.Column(scale=3): 190 | with gr.Row(): 191 | ui_id_image = gr.Image(label="Identity Image", type="pil", scale=3, height=370, min_width=100) 192 | 193 | with gr.Column(scale=2, min_width=100): 194 | ui_control_image = gr.Image(label="Control Image [Optional]", type="pil", height=370, min_width=100) 195 | 196 | ui_prompt_text = gr.Textbox(label="Prompt", value="Portrait, 4K, high quality, cinematic") 197 | ui_model_version = gr.Dropdown( 198 | label="Model Version", 199 | choices=[ModelVersion.STAGE_1, ModelVersion.STAGE_2], 200 | value=ModelVersion.DEFAULT_VERSION, 201 | ) 202 | 203 | ui_btn_generate = gr.Button("Generate") 204 | with gr.Accordion("Advanced", open=False): 205 | with gr.Row(): 206 | ui_num_steps = gr.Number(label="num steps", value=30) 207 | ui_seed = gr.Number(label="seed (0 for random)", value=0) 208 | with gr.Row(): 209 | ui_width = gr.Number(label="width", value=864) 210 | ui_height = gr.Number(label="height", value=1152) 211 | ui_guidance_scale = gr.Number(label="guidance scale", value=3.5, step=0.5) 212 | ui_infusenet_conditioning_scale = gr.Slider(minimum=0.0, maximum=1.0, value=1.0, step=0.05, label="infusenet conditioning scale") 213 | with gr.Row(): 214 | ui_infusenet_guidance_start = gr.Slider(minimum=0.0, maximum=1.0, value=0.0, step=0.05, label="infusenet guidance start") 215 | ui_infusenet_guidance_end = gr.Slider(minimum=0.0, maximum=1.0, value=1.0, step=0.05, label="infusenet guidance end") 216 | 217 | with gr.Accordion("LoRAs [Optional]", open=True): 218 | with gr.Row(): 219 | ui_enable_realism = gr.Checkbox(label="Enable realism LoRA", value=ENABLE_REALISM_DEFAULT) 220 | ui_enable_anti_blur = gr.Checkbox(label="Enable anti-blur LoRA", value=ENABLE_ANTI_BLUR_DEFAULT) 221 | 222 | with gr.Column(scale=2): 223 | image_output = gr.Image(label="Generated Image", interactive=False, height=550, format='png') 224 | gr.Markdown( 225 | """ 226 | ### ❗️ Important Usage Tips: 227 | - **Model Version**: `aes_stage2` is used by default for better text-image alignment and aesthetics. For higher ID similarity, try `sim_stage1`. 228 | - **Useful Hyperparameters**: Usually, there is NO need to adjust too much. If necessary, try a slightly larger `--infusenet_guidance_start` (*e.g.*, `0.1`) only (especially helpful for `sim_stage1`). If still not satisfactory, then try a slightly smaller `--infusenet_conditioning_scale` (*e.g.*, `0.9`). 229 | - **Optional LoRAs**: `realism` and `anti-blur`. To enable them, please check the corresponding boxes. If needed, try `realism` only first. 
They are optional and were NOT used in our paper. 230 | - **Gender Prompt**: If the generated gender is not preferred, add specific words in the prompt, such as 'a man', 'a woman', *etc*. We encourage using inclusive and respectful language. 231 | """ 232 | ) 233 | 234 | gr.Examples( 235 | sample_list, 236 | inputs=[ui_id_image, ui_control_image, ui_prompt_text, ui_seed, ui_enable_realism, ui_enable_anti_blur, ui_model_version], 237 | outputs=[image_output], 238 | fn=generate_examples, 239 | cache_examples=True, 240 | ) 241 | 242 | ui_btn_generate.click( 243 | generate_image, 244 | inputs=[ 245 | ui_id_image, 246 | ui_control_image, 247 | ui_prompt_text, 248 | ui_seed, 249 | ui_width, 250 | ui_height, 251 | ui_guidance_scale, 252 | ui_num_steps, 253 | ui_infusenet_conditioning_scale, 254 | ui_infusenet_guidance_start, 255 | ui_infusenet_guidance_end, 256 | ui_enable_realism, 257 | ui_enable_anti_blur, 258 | ui_model_version 259 | ], 260 | outputs=[image_output], 261 | concurrency_id="gpu" 262 | ) 263 | 264 | with gr.Accordion("Local Gradio Demo for Developers", open=False): 265 | gr.Markdown( 266 | 'Please refer to our GitHub repository to [run the InfiniteYou-FLUX gradio demo locally](https://github.com/bytedance/InfiniteYou#local-gradio-demo).' 267 | ) 268 | 269 | gr.Markdown( 270 | """ 271 | --- 272 | ### 📜 Disclaimer and Licenses 273 | The images used in this demo are sourced from consented subjects or generated by the models. These pictures are intended solely to show the capabilities of our research. If you have any concerns, please contact us, and we will promptly remove any inappropriate content. 274 | 275 | The use of the released code, model, and demo must strictly adhere to the respective licenses. 276 | Our code is released under the [Apache 2.0 License](https://github.com/bytedance/InfiniteYou/blob/main/LICENSE), 277 | and our model is released under the [Creative Commons Attribution-NonCommercial 4.0 International Public License](https://huggingface.co/ByteDance/InfiniteYou/blob/main/LICENSE) 278 | for academic research purposes only. Any manual or automatic downloading of the face models from [InsightFace](https://github.com/deepinsight/insightface), 279 | the [FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev) base model, LoRAs, *etc.*, must follow their original licenses and be used only for academic research purposes. 280 | 281 | This research aims to positively impact the field of Generative AI. Any usage of this method must be responsible and comply with local laws. The developers do not assume any responsibility for any potential misuse. 282 | """ 283 | ) 284 | 285 | gr.Markdown( 286 | """ 287 | ### 📖 Citation 288 | 289 | If you find InfiniteYou useful for your research or applications, please cite our paper: 290 | 291 | ```bibtex 292 | @article{jiang2025infiniteyou, 293 | title={{InfiniteYou}: Flexible Photo Recrafting While Preserving Your Identity}, 294 | author={Jiang, Liming and Yan, Qing and Jia, Yumin and Liu, Zichuan and Kang, Hao and Lu, Xin}, 295 | journal={arXiv preprint}, 296 | volume={arXiv:2503.16418}, 297 | year={2025} 298 | } 299 | ``` 300 | 301 | We also appreciate it if you could give a star ⭐ to our [Github repository](https://github.com/bytedance/InfiniteYou). Thanks a lot! 
302 | """ 303 | ) 304 | 305 | download_models() 306 | 307 | prepare_pipeline(model_version=ModelVersion.DEFAULT_VERSION, enable_realism=ENABLE_REALISM_DEFAULT, enable_anti_blur=ENABLE_ANTI_BLUR_DEFAULT) 308 | 309 | demo.queue() 310 | demo.launch(server_name='0.0.0.0') # IPv4 311 | # demo.launch(server_name='[::]') # IPv6 312 | -------------------------------------------------------------------------------- /assets/comparative_results.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bytedance/InfiniteYou/7e7faf38ad9c95dcd1ae22ad1f8f838c0d0581be/assets/comparative_results.jpg -------------------------------------------------------------------------------- /assets/examples/man.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bytedance/InfiniteYou/7e7faf38ad9c95dcd1ae22ad1f8f838c0d0581be/assets/examples/man.jpg -------------------------------------------------------------------------------- /assets/examples/man_pose.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bytedance/InfiniteYou/7e7faf38ad9c95dcd1ae22ad1f8f838c0d0581be/assets/examples/man_pose.jpg -------------------------------------------------------------------------------- /assets/examples/woman.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bytedance/InfiniteYou/7e7faf38ad9c95dcd1ae22ad1f8f838c0d0581be/assets/examples/woman.jpg -------------------------------------------------------------------------------- /assets/plug_and_play.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bytedance/InfiniteYou/7e7faf38ad9c95dcd1ae22ad1f8f838c0d0581be/assets/plug_and_play.jpg -------------------------------------------------------------------------------- /assets/teaser.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bytedance/InfiniteYou/7e7faf38ad9c95dcd1ae22ad1f8f838c0d0581be/assets/teaser.jpg -------------------------------------------------------------------------------- /pipelines/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bytedance/InfiniteYou/7e7faf38ad9c95dcd1ae22ad1f8f838c0d0581be/pipelines/__init__.py -------------------------------------------------------------------------------- /pipelines/pipeline_flux_infusenet.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2025 Bytedance Ltd. and/or its affiliates. 2 | # Copyright (c) 2024 Black Forest Labs, The HuggingFace Team and The InstantX Team. All rights reserved. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | import inspect 17 | from typing import Any, Callable, Dict, List, Optional, Tuple, Union 18 | 19 | import numpy as np 20 | import torch 21 | from diffusers import FluxControlNetPipeline 22 | from diffusers.models.controlnet_flux import FluxControlNetModel, FluxMultiControlNetModel 23 | from diffusers.image_processor import PipelineImageInput 24 | from diffusers.pipelines.flux.pipeline_output import FluxPipelineOutput 25 | from diffusers.utils import replace_example_docstring, is_torch_xla_available, logging 26 | 27 | 28 | if is_torch_xla_available(): 29 | import torch_xla.core.xla_model as xm 30 | 31 | XLA_AVAILABLE = True 32 | else: 33 | XLA_AVAILABLE = False 34 | 35 | logger = logging.get_logger(__name__) # pylint: disable=invalid-name 36 | 37 | 38 | # Copied from diffusers.pipelines.flux.pipeline_flux.calculate_shift 39 | def calculate_shift( 40 | image_seq_len, 41 | base_seq_len: int = 256, 42 | max_seq_len: int = 4096, 43 | base_shift: float = 0.5, 44 | max_shift: float = 1.16, 45 | ): 46 | m = (max_shift - base_shift) / (max_seq_len - base_seq_len) 47 | b = base_shift - m * base_seq_len 48 | mu = image_seq_len * m + b 49 | return mu 50 | 51 | 52 | # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps 53 | def retrieve_timesteps( 54 | scheduler, 55 | num_inference_steps: Optional[int] = None, 56 | device: Optional[Union[str, torch.device]] = None, 57 | timesteps: Optional[List[int]] = None, 58 | sigmas: Optional[List[float]] = None, 59 | **kwargs, 60 | ): 61 | r""" 62 | Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles 63 | custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. 64 | 65 | Args: 66 | scheduler (`SchedulerMixin`): 67 | The scheduler to get timesteps from. 68 | num_inference_steps (`int`): 69 | The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` 70 | must be `None`. 71 | device (`str` or `torch.device`, *optional*): 72 | The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. 73 | timesteps (`List[int]`, *optional*): 74 | Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, 75 | `num_inference_steps` and `sigmas` must be `None`. 76 | sigmas (`List[float]`, *optional*): 77 | Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, 78 | `num_inference_steps` and `timesteps` must be `None`. 79 | 80 | Returns: 81 | `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the 82 | second element is the number of inference steps. 83 | """ 84 | if timesteps is not None and sigmas is not None: 85 | raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") 86 | if timesteps is not None: 87 | accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) 88 | if not accepts_timesteps: 89 | raise ValueError( 90 | f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" 91 | f" timestep schedules. Please check whether you are using the correct scheduler." 
92 | ) 93 | scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) 94 | timesteps = scheduler.timesteps 95 | num_inference_steps = len(timesteps) 96 | elif sigmas is not None: 97 | accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) 98 | if not accept_sigmas: 99 | raise ValueError( 100 | f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" 101 | f" sigmas schedules. Please check whether you are using the correct scheduler." 102 | ) 103 | scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) 104 | timesteps = scheduler.timesteps 105 | num_inference_steps = len(timesteps) 106 | else: 107 | scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) 108 | timesteps = scheduler.timesteps 109 | return timesteps, num_inference_steps 110 | 111 | 112 | class FluxInfuseNetPipeline(FluxControlNetPipeline): 113 | @torch.no_grad() 114 | def __call__( 115 | self, 116 | prompt: Union[str, List[str]] = None, 117 | prompt_2: Optional[Union[str, List[str]]] = None, 118 | height: Optional[int] = None, 119 | width: Optional[int] = None, 120 | num_inference_steps: int = 28, 121 | timesteps: List[int] = None, 122 | guidance_scale: float = 3.5, 123 | controlnet_guidance_scale: float = 1.0, 124 | control_guidance_start: Union[float, List[float]] = 0.0, 125 | control_guidance_end: Union[float, List[float]] = 1.0, 126 | control_image: PipelineImageInput = None, 127 | control_mode: Optional[Union[int, List[int]]] = None, 128 | controlnet_conditioning_scale: Union[float, List[float]] = 1.0, 129 | num_images_per_prompt: Optional[int] = 1, 130 | generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, 131 | latents: Optional[torch.FloatTensor] = None, 132 | prompt_embeds: Optional[torch.FloatTensor] = None, 133 | pooled_prompt_embeds: Optional[torch.FloatTensor] = None, 134 | output_type: Optional[str] = "pil", 135 | return_dict: bool = True, 136 | joint_attention_kwargs: Optional[Dict[str, Any]] = None, 137 | callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, 138 | callback_on_step_end_tensor_inputs: List[str] = ["latents"], 139 | max_sequence_length: int = 512, 140 | 141 | # ID-specific parameters 142 | controlnet_prompt_embeds: Optional[torch.FloatTensor] = None, 143 | 144 | # True CFG parameters 145 | true_guidance_scale: float = 1.0, 146 | negative_prompt: Optional[Union[str, List[str]]] = None, 147 | negative_prompt_2: Optional[Union[str, List[str]]] = None, 148 | negative_prompt_embeds: Optional[torch.FloatTensor] = None, 149 | negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, 150 | 151 | # Memory reduction parameters 152 | cpu_offload: bool = False, 153 | ): 154 | r""" 155 | Function invoked when calling the pipeline for generation. 156 | 157 | Args: 158 | prompt (`str` or `List[str]`, *optional*): 159 | The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. 160 | instead. 161 | prompt_2 (`str` or `List[str]`, *optional*): 162 | The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is 163 | will be used instead 164 | height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): 165 | The height in pixels of the generated image. This is set to 1024 by default for the best results. 
166 | width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): 167 | The width in pixels of the generated image. This is set to 1024 by default for the best results. 168 | num_inference_steps (`int`, *optional*, defaults to 28): 169 | The number of denoising steps. More denoising steps usually lead to a higher quality image at the 170 | expense of slower inference. 171 | timesteps (`List[int]`, *optional*): 172 | Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument 173 | in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is 174 | passed will be used. Must be in descending order. 175 | guidance_scale (`float`, *optional*, defaults to 3.5): 176 | Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). 177 | `guidance_scale` is defined as `w` of equation 2. of [Imagen 178 | Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 179 | 1`. A higher guidance scale encourages the model to generate images closely linked to the text `prompt`, 180 | usually at the expense of lower image quality. 181 | controlnet_guidance_scale (`float`, *optional*, defaults to 1.0): 182 | Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). 183 | `controlnet_guidance_scale` is defined as `w` of equation 2. of [Imagen 184 | Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 185 | 1`. A higher guidance scale encourages the model to generate images closely linked to the text `prompt`, 186 | usually at the expense of lower image quality. 187 | control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): 188 | The percentage of total steps at which the ControlNet starts applying. 189 | control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): 190 | The percentage of total steps at which the ControlNet stops applying. 191 | control_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`, 192 | `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): 193 | The ControlNet input condition to provide guidance to the `unet` for generation. If the type is 194 | specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted 195 | as an image. The dimensions of the output image default to `image`'s dimensions. If height and/or 196 | width are passed, `image` is resized accordingly. If multiple ControlNets are specified in `init`, 197 | images must be passed as a list such that each element of the list can be correctly batched for input 198 | to a single ControlNet. 199 | controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): 200 | The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added 201 | to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set 202 | the corresponding scale as a list. 203 | control_mode (`int` or `List[int]`, *optional*, defaults to None): 204 | The control mode when applying ControlNet-Union. 205 | num_images_per_prompt (`int`, *optional*, defaults to 1): 206 | The number of images to generate per prompt.
207 | generator (`torch.Generator` or `List[torch.Generator]`, *optional*): 208 | One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) 209 | to make generation deterministic. 210 | latents (`torch.FloatTensor`, *optional*): 211 | Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image 212 | generation. Can be used to tweak the same generation with different prompts. If not provided, a latents 213 | tensor will be generated by sampling using the supplied random `generator`. 214 | prompt_embeds (`torch.FloatTensor`, *optional*): 215 | Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not 216 | provided, text embeddings will be generated from `prompt` input argument. 217 | pooled_prompt_embeds (`torch.FloatTensor`, *optional*): 218 | Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. 219 | If not provided, pooled text embeddings will be generated from `prompt` input argument. 220 | output_type (`str`, *optional*, defaults to `"pil"`): 221 | The output format of the generated image. Choose between 222 | [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. 223 | return_dict (`bool`, *optional*, defaults to `True`): 224 | Whether or not to return a [`~pipelines.flux.FluxPipelineOutput`] instead of a plain tuple. 225 | joint_attention_kwargs (`dict`, *optional*): 226 | A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under 227 | `self.processor` in 228 | [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). 229 | callback_on_step_end (`Callable`, *optional*): 230 | A function that is called at the end of each denoising step during the inference. The function is called 231 | with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, 232 | callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by 233 | `callback_on_step_end_tensor_inputs`. 234 | callback_on_step_end_tensor_inputs (`List`, *optional*): 235 | The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list 236 | will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the 237 | `._callback_tensor_inputs` attribute of your pipeline class. 238 | max_sequence_length (`int` defaults to 512): Maximum sequence length to use with the `prompt`. 239 | controlnet_prompt_embeds (`torch.FloatTensor`, *optional*): 240 | Pre-generated embeddings for the InfuseNet. Can be used to easily tweak inputs, *e.g.* image embeddings. 241 | If not provided, embeddings will be generated from `prompt` or `prompt_embeds` input arguments. 242 | true_guidance_scale (`float`, *optional*, defaults to 1.0): 243 | True CFG scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). 244 | negative_prompt (`str` or `List[str]`, *optional*): 245 | The negative prompt or negative prompts to guide the image generation. If not defined, one has to pass 246 | `negative_prompt_embeds` instead. 247 | negative_prompt_2 (`str` or `List[str]`, *optional*): 248 | The negative prompt or negative prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, 249 | `negative_prompt` will be used instead.
250 | negative_prompt_embeds (`torch.FloatTensor`, *optional*): 251 | Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt 252 | weighting. If not provided, negative text embeddings will be generated from `negative_prompt` input 253 | argument. 254 | negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): 255 | Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt 256 | weighting. If not provided, negative pooled text embeddings will be generated from 257 | `negative_prompt` input argument. 258 | cpu_offload (`bool`, *optional*, defaults to `False`): 259 | Whether to offload the models to CPU to save memory. 260 | 261 | Examples: 262 | 263 | Returns: 264 | [`~pipelines.flux.FluxPipelineOutput`] or `tuple`: [`~pipelines.flux.FluxPipelineOutput`] if `return_dict` 265 | is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated 266 | images. 267 | """ 268 | 269 | height = height or self.default_sample_size * self.vae_scale_factor 270 | width = width or self.default_sample_size * self.vae_scale_factor 271 | 272 | if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): 273 | control_guidance_start = len(control_guidance_end) * [control_guidance_start] 274 | elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): 275 | control_guidance_end = len(control_guidance_start) * [control_guidance_end] 276 | elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): 277 | mult = len(self.controlnet.nets) if isinstance(self.controlnet, FluxMultiControlNetModel) else 1 278 | control_guidance_start, control_guidance_end = ( 279 | mult * [control_guidance_start], 280 | mult * [control_guidance_end], 281 | ) 282 | 283 | # 1. Check inputs. Raise error if not correct 284 | self.check_inputs( 285 | prompt, 286 | prompt_2, 287 | height, 288 | width, 289 | prompt_embeds=prompt_embeds, 290 | pooled_prompt_embeds=pooled_prompt_embeds, 291 | callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, 292 | max_sequence_length=max_sequence_length, 293 | ) 294 | 295 | self._guidance_scale = guidance_scale 296 | self._controlnet_guidance_scale = controlnet_guidance_scale 297 | self._true_guidance_scale = true_guidance_scale 298 | self._joint_attention_kwargs = joint_attention_kwargs 299 | self._interrupt = False 300 | 301 | # 2. 
Define call parameters 302 | if prompt is not None and isinstance(prompt, str): 303 | batch_size = 1 304 | elif prompt is not None and isinstance(prompt, list): 305 | batch_size = len(prompt) 306 | else: 307 | batch_size = prompt_embeds.shape[0] 308 | 309 | device = self._execution_device if not cpu_offload else 'cuda' 310 | dtype = self.transformer.dtype 311 | 312 | if cpu_offload: 313 | # Move VAE, Transformer, InfuseNet to CPU 314 | self.vae.cpu() 315 | self.transformer.cpu() 316 | self.controlnet.cpu() 317 | torch.cuda.empty_cache() 318 | 319 | # Move CLIP and T5 to GPU 320 | self.text_encoder.to(device) 321 | self.text_encoder_2.to(device) 322 | 323 | lora_scale = ( 324 | self.joint_attention_kwargs.get("scale", None) if self.joint_attention_kwargs is not None else None 325 | ) 326 | ( 327 | prompt_embeds, 328 | pooled_prompt_embeds, 329 | text_ids, 330 | ) = self.encode_prompt( 331 | prompt=prompt, 332 | prompt_2=prompt_2, 333 | prompt_embeds=prompt_embeds, 334 | pooled_prompt_embeds=pooled_prompt_embeds, 335 | device=device, 336 | num_images_per_prompt=num_images_per_prompt, 337 | max_sequence_length=max_sequence_length, 338 | lora_scale=lora_scale, 339 | ) 340 | if negative_prompt is not None or (negative_prompt_embeds is not None and negative_pooled_prompt_embeds is not None): 341 | ( 342 | negative_prompt_embeds, 343 | negative_pooled_prompt_embeds, 344 | negative_text_ids, 345 | ) = self.encode_prompt( 346 | prompt=negative_prompt, 347 | prompt_2=negative_prompt_2, 348 | prompt_embeds=negative_prompt_embeds, 349 | pooled_prompt_embeds=negative_pooled_prompt_embeds, 350 | device=device, 351 | num_images_per_prompt=num_images_per_prompt, 352 | max_sequence_length=max_sequence_length, 353 | lora_scale=lora_scale, 354 | ) 355 | 356 | if controlnet_prompt_embeds is None: 357 | controlnet_prompt_embeds = prompt_embeds 358 | ( 359 | controlnet_prompt_embeds, 360 | pooled_prompt_embeds, 361 | controlnet_text_ids, 362 | ) = self.encode_prompt( 363 | prompt=prompt, 364 | prompt_2=prompt_2, 365 | prompt_embeds=controlnet_prompt_embeds, 366 | pooled_prompt_embeds=pooled_prompt_embeds, 367 | device=device, 368 | num_images_per_prompt=num_images_per_prompt, 369 | max_sequence_length=max_sequence_length, 370 | lora_scale=lora_scale, 371 | ) 372 | 373 | if cpu_offload: 374 | # Move CLIP and T5 to CPU 375 | self.text_encoder.cpu() 376 | self.text_encoder_2.cpu() 377 | torch.cuda.empty_cache() 378 | 379 | # Move VAE, InfuseNet to GPU 380 | self.vae.to(device) 381 | self.controlnet.to(device) 382 | 383 | # 3. 
Prepare control image 384 | num_channels_latents = self.transformer.config.in_channels // 4 385 | if isinstance(self.controlnet, FluxControlNetModel): 386 | control_image = self.prepare_image( 387 | image=control_image, 388 | width=width, 389 | height=height, 390 | batch_size=batch_size * num_images_per_prompt, 391 | num_images_per_prompt=num_images_per_prompt, 392 | device=device, 393 | dtype=self.vae.dtype, 394 | ) 395 | height, width = control_image.shape[-2:] 396 | 397 | # xlab controlnet has a input_hint_block and instantx controlnet does not 398 | controlnet_blocks_repeat = False if self.controlnet.input_hint_block is None else True 399 | if self.controlnet.input_hint_block is None: 400 | # vae encode 401 | control_image = self.vae.encode(control_image).latent_dist.sample() 402 | control_image = (control_image - self.vae.config.shift_factor) * self.vae.config.scaling_factor 403 | 404 | # pack 405 | height_control_image, width_control_image = control_image.shape[2:] 406 | control_image = self._pack_latents( 407 | control_image, 408 | batch_size * num_images_per_prompt, 409 | num_channels_latents, 410 | height_control_image, 411 | width_control_image, 412 | ) 413 | 414 | # Here we ensure that `control_mode` has the same length as the control_image. 415 | if control_mode is not None: 416 | if not isinstance(control_mode, int): 417 | raise ValueError(" For `FluxControlNet`, `control_mode` should be an `int` or `None`") 418 | control_mode = torch.tensor(control_mode).to(device, dtype=torch.long) 419 | control_mode = control_mode.view(-1, 1).expand(control_image.shape[0], 1) 420 | 421 | elif isinstance(self.controlnet, FluxMultiControlNetModel): 422 | control_images = [] 423 | # xlab controlnet has a input_hint_block and instantx controlnet does not 424 | controlnet_blocks_repeat = False if self.controlnet.nets[0].input_hint_block is None else True 425 | for i, control_image_ in enumerate(control_image): 426 | control_image_ = self.prepare_image( 427 | image=control_image_, 428 | width=width, 429 | height=height, 430 | batch_size=batch_size * num_images_per_prompt, 431 | num_images_per_prompt=num_images_per_prompt, 432 | device=device, 433 | dtype=self.vae.dtype, 434 | ) 435 | height, width = control_image_.shape[-2:] 436 | 437 | if self.controlnet.nets[0].input_hint_block is None: 438 | # vae encode 439 | control_image_ = self.vae.encode(control_image_).latent_dist.sample() 440 | control_image_ = (control_image_ - self.vae.config.shift_factor) * self.vae.config.scaling_factor 441 | 442 | # pack 443 | height_control_image, width_control_image = control_image_.shape[2:] 444 | control_image_ = self._pack_latents( 445 | control_image_, 446 | batch_size * num_images_per_prompt, 447 | num_channels_latents, 448 | height_control_image, 449 | width_control_image, 450 | ) 451 | control_images.append(control_image_) 452 | 453 | control_image = control_images 454 | 455 | # Here we ensure that `control_mode` has the same length as the control_image. 
456 | if isinstance(control_mode, list) and len(control_mode) != len(control_image): 457 | raise ValueError( 458 | "For Multi-ControlNet, `control_mode` must be a list of the same " 459 | + " length as the number of controlnets (control images) specified" 460 | ) 461 | if not isinstance(control_mode, list): 462 | control_mode = [control_mode] * len(control_image) 463 | # set control mode 464 | control_modes = [] 465 | for cmode in control_mode: 466 | if cmode is None: 467 | cmode = -1 468 | control_mode = torch.tensor(cmode).expand(control_images[0].shape[0]).to(device, dtype=torch.long) 469 | control_modes.append(control_mode) 470 | control_mode = control_modes 471 | 472 | # 4. Prepare latent variables 473 | num_channels_latents = self.transformer.config.in_channels // 4 474 | latents, latent_image_ids = self.prepare_latents( 475 | batch_size * num_images_per_prompt, 476 | num_channels_latents, 477 | height, 478 | width, 479 | prompt_embeds.dtype, 480 | device, 481 | generator, 482 | latents, 483 | ) 484 | 485 | # 5. Prepare timesteps 486 | sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) 487 | image_seq_len = latents.shape[1] 488 | mu = calculate_shift( 489 | image_seq_len, 490 | self.scheduler.config.base_image_seq_len, 491 | self.scheduler.config.max_image_seq_len, 492 | self.scheduler.config.base_shift, 493 | self.scheduler.config.max_shift, 494 | ) 495 | timesteps, num_inference_steps = retrieve_timesteps( 496 | self.scheduler, 497 | num_inference_steps, 498 | device, 499 | timesteps, 500 | sigmas, 501 | mu=mu, 502 | ) 503 | 504 | num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) 505 | self._num_timesteps = len(timesteps) 506 | 507 | # 6. Create tensor stating which controlnets to keep 508 | controlnet_keep = [] 509 | for i in range(len(timesteps)): 510 | keeps = [ 511 | 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) 512 | for s, e in zip(control_guidance_start, control_guidance_end) 513 | ] 514 | controlnet_keep.append(keeps[0] if isinstance(self.controlnet, FluxControlNetModel) else keeps) 515 | 516 | if cpu_offload: 517 | # Move VAE to CPU 518 | self.vae.cpu() 519 | torch.cuda.empty_cache() 520 | 521 | # Move Transformer to GPU 522 | self.transformer.to(device) 523 | 524 | # 7. 
Denoising loop 525 | with self.progress_bar(total=num_inference_steps) as progress_bar: 526 | for i, t in enumerate(timesteps): 527 | if self.interrupt: 528 | continue 529 | 530 | # broadcast to batch dimension in a way that's compatible with ONNX/Core ML 531 | timestep = t.expand(latents.shape[0]).to(latents.dtype) 532 | 533 | if isinstance(self.controlnet, FluxMultiControlNetModel): 534 | use_guidance = self.controlnet.nets[0].config.guidance_embeds 535 | else: 536 | use_guidance = self.controlnet.config.guidance_embeds 537 | 538 | guidance = torch.tensor([controlnet_guidance_scale], device=device) if use_guidance else None 539 | guidance = guidance.expand(latents.shape[0]) if guidance is not None else None 540 | 541 | if isinstance(controlnet_keep[i], list): 542 | cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] 543 | else: 544 | controlnet_cond_scale = controlnet_conditioning_scale 545 | if isinstance(controlnet_cond_scale, list): 546 | controlnet_cond_scale = controlnet_cond_scale[0] 547 | cond_scale = controlnet_cond_scale * controlnet_keep[i] 548 | 549 | # controlnet 550 | controlnet_block_samples, controlnet_single_block_samples = self.controlnet( 551 | hidden_states=latents, 552 | controlnet_cond=control_image, 553 | controlnet_mode=control_mode, 554 | conditioning_scale=cond_scale, 555 | timestep=timestep / 1000, 556 | guidance=guidance, 557 | pooled_projections=pooled_prompt_embeds, 558 | encoder_hidden_states=controlnet_prompt_embeds, 559 | txt_ids=controlnet_text_ids, 560 | img_ids=latent_image_ids, 561 | joint_attention_kwargs=self.joint_attention_kwargs, 562 | return_dict=False, 563 | ) 564 | 565 | guidance = ( 566 | torch.tensor([guidance_scale], device=device) if self.transformer.config.guidance_embeds else None 567 | ) 568 | guidance = guidance.expand(latents.shape[0]) if guidance is not None else None 569 | 570 | noise_pred = self.transformer( 571 | hidden_states=latents, 572 | timestep=timestep / 1000, 573 | guidance=guidance, 574 | pooled_projections=pooled_prompt_embeds, 575 | encoder_hidden_states=prompt_embeds, 576 | controlnet_block_samples=controlnet_block_samples, 577 | controlnet_single_block_samples=controlnet_single_block_samples, 578 | txt_ids=text_ids, 579 | img_ids=latent_image_ids, 580 | joint_attention_kwargs=self.joint_attention_kwargs, 581 | return_dict=False, 582 | controlnet_blocks_repeat=controlnet_blocks_repeat, 583 | )[0] 584 | 585 | # perform true CFG 586 | if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is not None and negative_text_ids is not None: 587 | noise_pred_uncond = self.transformer( 588 | hidden_states=latents, 589 | timestep=timestep / 1000, 590 | guidance=guidance, 591 | pooled_projections=negative_pooled_prompt_embeds, 592 | encoder_hidden_states=negative_prompt_embeds, 593 | controlnet_block_samples=None, 594 | controlnet_single_block_samples=None, 595 | txt_ids=negative_text_ids, 596 | img_ids=latent_image_ids, 597 | joint_attention_kwargs=self.joint_attention_kwargs, 598 | return_dict=False, 599 | controlnet_blocks_repeat=controlnet_blocks_repeat, 600 | )[0] 601 | 602 | noise_pred = noise_pred_uncond + true_guidance_scale * (noise_pred - noise_pred_uncond) 603 | 604 | # compute the previous noisy sample x_t -> x_t-1 605 | latents_dtype = latents.dtype 606 | latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] 607 | 608 | if latents.dtype != latents_dtype: 609 | if torch.backends.mps.is_available(): 610 | # some platforms (eg. 
apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 611 | latents = latents.to(latents_dtype) 612 | 613 | if callback_on_step_end is not None: 614 | callback_kwargs = {} 615 | for k in callback_on_step_end_tensor_inputs: 616 | callback_kwargs[k] = locals()[k] 617 | callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) 618 | 619 | latents = callback_outputs.pop("latents", latents) 620 | prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) 621 | 622 | # call the callback, if provided 623 | if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): 624 | progress_bar.update() 625 | 626 | if XLA_AVAILABLE: 627 | xm.mark_step() 628 | 629 | if cpu_offload: 630 | # Move InfuseNet to CPU 631 | self.controlnet.cpu() 632 | torch.cuda.empty_cache() 633 | 634 | # Move VAE to GPU 635 | self.vae.to(device) 636 | 637 | if output_type == "latent": 638 | image = latents 639 | 640 | else: 641 | latents = self._unpack_latents(latents, height, width, self.vae_scale_factor) 642 | latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor 643 | 644 | image = self.vae.decode(latents, return_dict=False)[0] 645 | image = self.image_processor.postprocess(image, output_type=output_type) 646 | 647 | # Offload all models 648 | self.maybe_free_model_hooks() 649 | 650 | if not return_dict: 651 | return (image,) 652 | 653 | return FluxPipelineOutput(images=image) 654 | -------------------------------------------------------------------------------- /pipelines/pipeline_infu_flux.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2025 Bytedance Ltd. and/or its affiliates. All rights reserved. 2 | 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | import math 16 | import os 17 | import random 18 | from typing import Optional 19 | 20 | import cv2 21 | import numpy as np 22 | import torch 23 | from diffusers import FluxControlNetModel, FluxTransformer2DModel 24 | from facexlib.recognition import init_recognition_model 25 | from huggingface_hub import snapshot_download 26 | from insightface.app import FaceAnalysis 27 | from insightface.utils import face_align 28 | from PIL import Image 29 | from optimum.quanto import freeze, qint8, quantize 30 | from transformers import T5EncoderModel 31 | 32 | from .pipeline_flux_infusenet import FluxInfuseNetPipeline 33 | from .resampler import Resampler 34 | 35 | 36 | def seed_everything(seed, deterministic=False): 37 | """Set random seed. 38 | 39 | Args: 40 | seed (int): Seed to be used. 41 | deterministic (bool): Whether to set the deterministic option for 42 | CUDNN backend, i.e., set `torch.backends.cudnn.deterministic` 43 | to True and `torch.backends.cudnn.benchmark` to False. 44 | Default: False. 
45 | """ 46 | random.seed(seed) 47 | np.random.seed(seed) 48 | torch.manual_seed(seed) 49 | torch.cuda.manual_seed(seed) 50 | torch.cuda.manual_seed_all(seed) 51 | os.environ['PYTHONHASHSEED'] = str(seed) 52 | if deterministic: 53 | torch.backends.cudnn.deterministic = True 54 | torch.backends.cudnn.benchmark = False 55 | 56 | 57 | # modified from https://github.com/instantX-research/InstantID/blob/main/pipeline_stable_diffusion_xl_instantid.py 58 | def draw_kps(image_pil, kps, color_list=[(255,0,0), (0,255,0), (0,0,255), (255,255,0), (255,0,255)]): 59 | stickwidth = 4 60 | limbSeq = np.array([[0, 2], [1, 2], [3, 2], [4, 2]]) 61 | kps = np.array(kps) 62 | 63 | w, h = image_pil.size 64 | out_img = np.zeros([h, w, 3]) 65 | 66 | for i in range(len(limbSeq)): 67 | index = limbSeq[i] 68 | color = color_list[index[0]] 69 | 70 | x = kps[index][:, 0] 71 | y = kps[index][:, 1] 72 | length = ((x[0] - x[1]) ** 2 + (y[0] - y[1]) ** 2) ** 0.5 73 | angle = math.degrees(math.atan2(y[0] - y[1], x[0] - x[1])) 74 | polygon = cv2.ellipse2Poly((int(np.mean(x)), int(np.mean(y))), (int(length / 2), stickwidth), int(angle), 0, 360, 1) 75 | out_img = cv2.fillConvexPoly(out_img.copy(), polygon, color) 76 | out_img = (out_img * 0.6).astype(np.uint8) 77 | 78 | for idx_kp, kp in enumerate(kps): 79 | color = color_list[idx_kp] 80 | x, y = kp 81 | out_img = cv2.circle(out_img.copy(), (int(x), int(y)), 10, color, -1) 82 | 83 | out_img_pil = Image.fromarray(out_img.astype(np.uint8)) 84 | return out_img_pil 85 | 86 | 87 | def extract_arcface_bgr_embedding(in_image, landmark, arcface_model=None, in_settings=None): 88 | kps = landmark 89 | arc_face_image = face_align.norm_crop(in_image, landmark=np.array(kps), image_size=112) 90 | arc_face_image = torch.from_numpy(arc_face_image).unsqueeze(0).permute(0,3,1,2) / 255. 
91 | arc_face_image = 2 * arc_face_image - 1 92 | arc_face_image = arc_face_image.cuda().contiguous() 93 | if arcface_model is None: 94 | arcface_model = init_recognition_model('arcface', device='cuda') 95 | face_emb = arcface_model(arc_face_image)[0] # [512], normalized 96 | return face_emb 97 | 98 | 99 | def resize_and_pad_image(source_img, target_img_size): 100 | # Get original and target sizes 101 | source_img_size = source_img.size 102 | target_width, target_height = target_img_size 103 | 104 | # Determine the new size based on the shorter side of target_img 105 | if target_width <= target_height: 106 | new_width = target_width 107 | new_height = int(target_width * (source_img_size[1] / source_img_size[0])) 108 | else: 109 | new_height = target_height 110 | new_width = int(target_height * (source_img_size[0] / source_img_size[1])) 111 | 112 | # Resize the source image using LANCZOS interpolation for high quality 113 | resized_source_img = source_img.resize((new_width, new_height), Image.LANCZOS) 114 | 115 | # Compute padding to center resized image 116 | pad_left = (target_width - new_width) // 2 117 | pad_top = (target_height - new_height) // 2 118 | 119 | # Create a new image with white background 120 | padded_img = Image.new("RGB", target_img_size, (255, 255, 255)) 121 | padded_img.paste(resized_source_img, (pad_left, pad_top)) 122 | 123 | return padded_img 124 | 125 | 126 | class InfUFluxPipeline: 127 | def __init__( 128 | self, 129 | base_model_path, 130 | infu_model_path, 131 | insightface_root_path = './', 132 | image_proj_num_tokens=8, 133 | infu_flux_version='v1.0', 134 | model_version='aes_stage2', 135 | quantize_8bit=False, 136 | cpu_offload=False, 137 | ): 138 | 139 | self.infu_flux_version = infu_flux_version 140 | self.model_version = model_version 141 | 142 | # Load pipeline 143 | try: 144 | infusenet_path = os.path.join(infu_model_path, 'InfuseNetModel') 145 | self.infusenet = FluxControlNetModel.from_pretrained(infusenet_path, torch_dtype=torch.bfloat16) 146 | except: 147 | print("No InfiniteYou model found. 
Downloading from HuggingFace `ByteDance/InfiniteYou` to `./models/InfiniteYou` ...") 148 | snapshot_download(repo_id='ByteDance/InfiniteYou', local_dir='./models/InfiniteYou', local_dir_use_symlinks=False) 149 | infu_model_path = os.path.join('./models/InfiniteYou', f'infu_flux_{infu_flux_version}', model_version) 150 | infusenet_path = os.path.join(infu_model_path, 'InfuseNetModel') 151 | self.infusenet = FluxControlNetModel.from_pretrained(infusenet_path, torch_dtype=torch.bfloat16) 152 | insightface_root_path = './models/InfiniteYou/supports/insightface' 153 | if quantize_8bit: 154 | quantize(self.infusenet, weights=qint8) 155 | freeze(self.infusenet) 156 | try: 157 | transformer = FluxTransformer2DModel.from_pretrained(base_model_path, subfolder="transformer", torch_dtype=torch.bfloat16) 158 | text_encoder_2 = T5EncoderModel.from_pretrained(base_model_path, subfolder="text_encoder_2", torch_dtype=torch.bfloat16) 159 | if quantize_8bit: 160 | quantize(transformer, weights=qint8) 161 | freeze(transformer) 162 | quantize(text_encoder_2, weights=qint8) 163 | freeze(text_encoder_2) 164 | pipe = FluxInfuseNetPipeline.from_pretrained( 165 | base_model_path, 166 | transformer=transformer, 167 | text_encoder_2=text_encoder_2, 168 | controlnet=self.infusenet, 169 | torch_dtype=torch.bfloat16, 170 | ) 171 | except Exception as e: 172 | print(e) 173 | print('\nIf you are using `black-forest-labs/FLUX.1-dev` and have not downloaded it into a local directory, ' 174 | 'please accept the agreement and obtain access at https://huggingface.co/black-forest-labs/FLUX.1-dev. ' 175 | 'Then, use `huggingface-cli login` and your access tokens at https://huggingface.co/settings/tokens to authenticate. ' 176 | 'After that, run the code again. If you have downloaded it, please use `base_model_path` to specify the correct path.') 177 | print('\nIf you are using other models, please download them to a local directory and use `base_model_path` to specify the correct path.') 178 | exit() 179 | if not cpu_offload: 180 | pipe.to('cuda') 181 | self.pipe = pipe 182 | 183 | # Load image proj model 184 | num_tokens = image_proj_num_tokens 185 | image_emb_dim = 512 186 | image_proj_model = Resampler( 187 | dim=1280, 188 | depth=4, 189 | dim_head=64, 190 | heads=20, 191 | num_queries=num_tokens, 192 | embedding_dim=image_emb_dim, 193 | output_dim=4096, 194 | ff_mult=4, 195 | ) 196 | image_proj_model_path = os.path.join(infu_model_path, 'image_proj_model.bin') 197 | ipm_state_dict = torch.load(image_proj_model_path, map_location="cpu") 198 | image_proj_model.load_state_dict(ipm_state_dict['image_proj']) 199 | del ipm_state_dict 200 | image_proj_model.to('cuda', torch.bfloat16) 201 | image_proj_model.eval() 202 | 203 | self.image_proj_model = image_proj_model 204 | 205 | # Load face encoder 206 | self.app_640 = FaceAnalysis(name='antelopev2', 207 | root=insightface_root_path, providers=['CUDAExecutionProvider', 'CPUExecutionProvider']) 208 | self.app_640.prepare(ctx_id=0, det_size=(640, 640)) 209 | 210 | self.app_320 = FaceAnalysis(name='antelopev2', 211 | root=insightface_root_path, providers=['CUDAExecutionProvider', 'CPUExecutionProvider']) 212 | self.app_320.prepare(ctx_id=0, det_size=(320, 320)) 213 | 214 | self.app_160 = FaceAnalysis(name='antelopev2', 215 | root=insightface_root_path, providers=['CUDAExecutionProvider', 'CPUExecutionProvider']) 216 | self.app_160.prepare(ctx_id=0, det_size=(160, 160)) 217 | 218 | self.arcface_model = init_recognition_model('arcface', device='cuda') 219 | 220 | def load_loras(self, 
loras): 221 | names, scales = [],[] 222 | for lora_path, lora_name, lora_scale in loras: 223 | if lora_path != "": 224 | print(f"Loading lora {lora_path}") 225 | self.pipe.load_lora_weights(lora_path, adapter_name = lora_name) 226 | names.append(lora_name) 227 | scales.append(lora_scale) 228 | 229 | if len(names) > 0: 230 | self.pipe.set_adapters(names, adapter_weights=scales) 231 | 232 | def _detect_face(self, id_image_cv2): 233 | face_info = self.app_640.get(id_image_cv2) 234 | if len(face_info) > 0: 235 | return face_info 236 | 237 | face_info = self.app_320.get(id_image_cv2) 238 | if len(face_info) > 0: 239 | return face_info 240 | 241 | face_info = self.app_160.get(id_image_cv2) 242 | return face_info 243 | 244 | def __call__( 245 | self, 246 | id_image: Image.Image, # PIL.Image.Image (RGB) 247 | prompt: str, 248 | control_image: Optional[Image.Image] = None, # PIL.Image.Image (RGB) or None 249 | width = 864, 250 | height = 1152, 251 | seed = 42, 252 | guidance_scale = 3.5, 253 | num_steps = 30, 254 | infusenet_conditioning_scale = 1.0, 255 | infusenet_guidance_start = 0.0, 256 | infusenet_guidance_end = 1.0, 257 | cpu_offload = False, 258 | ): 259 | # Extract ID embeddings 260 | print('Preparing ID embeddings') 261 | id_image_cv2 = cv2.cvtColor(np.array(id_image), cv2.COLOR_RGB2BGR) 262 | face_info = self._detect_face(id_image_cv2) 263 | if len(face_info) == 0: 264 | raise ValueError('No face detected in the input ID image') 265 | 266 | face_info = sorted(face_info, key=lambda x:(x['bbox'][2]-x['bbox'][0])*(x['bbox'][3]-x['bbox'][1]))[-1] # only use the maximum face 267 | landmark = face_info['kps'] 268 | self.arcface_model.to('cuda') 269 | id_embed = extract_arcface_bgr_embedding(id_image_cv2, landmark, self.arcface_model) 270 | self.arcface_model.cpu() 271 | torch.cuda.empty_cache() 272 | id_embed = id_embed.clone().unsqueeze(0).float().cuda() 273 | id_embed = id_embed.reshape([1, -1, 512]) 274 | id_embed = id_embed.to(device='cuda', dtype=torch.bfloat16) 275 | self.image_proj_model.to('cuda', torch.bfloat16) 276 | with torch.no_grad(): 277 | id_embed = self.image_proj_model(id_embed) 278 | bs_embed, seq_len, _ = id_embed.shape 279 | id_embed = id_embed.repeat(1, 1, 1) 280 | id_embed = id_embed.view(bs_embed * 1, seq_len, -1) 281 | id_embed = id_embed.to(device='cuda', dtype=torch.bfloat16) 282 | self.image_proj_model.cpu() 283 | torch.cuda.empty_cache() 284 | 285 | # Load control image 286 | print('Preparing the control image') 287 | if control_image is not None: 288 | control_image = control_image.convert("RGB") 289 | control_image = resize_and_pad_image(control_image, (width, height)) 290 | face_info = self._detect_face(cv2.cvtColor(np.array(control_image), cv2.COLOR_RGB2BGR)) 291 | if len(face_info) == 0: 292 | raise ValueError('No face detected in the control image') 293 | face_info = sorted(face_info, key=lambda x:(x['bbox'][2]-x['bbox'][0])*(x['bbox'][3]-x['bbox'][1]))[-1] # only use the maximum face 294 | control_image = draw_kps(control_image, face_info['kps']) 295 | else: 296 | out_img = np.zeros([height, width, 3]) 297 | control_image = Image.fromarray(out_img.astype(np.uint8)) 298 | 299 | # Perform inference 300 | print('Generating image') 301 | seed_everything(seed) 302 | image = self.pipe( 303 | prompt=prompt, 304 | controlnet_prompt_embeds=id_embed, 305 | control_image=control_image, 306 | guidance_scale=guidance_scale, 307 | num_inference_steps=num_steps, 308 | controlnet_guidance_scale=1.0, 309 | controlnet_conditioning_scale=infusenet_conditioning_scale, 310 | 
control_guidance_start=infusenet_guidance_start, 311 | control_guidance_end=infusenet_guidance_end, 312 | height=height, 313 | width=width, 314 | cpu_offload=cpu_offload, 315 | ).images[0] 316 | 317 | return image 318 | -------------------------------------------------------------------------------- /pipelines/resampler.py: -------------------------------------------------------------------------------- 1 | # Modified from https://github.com/mlfoundations/open_flamingo/blob/main/open_flamingo/src/helpers.py 2 | 3 | import math 4 | 5 | import torch 6 | import torch.nn as nn 7 | 8 | 9 | # FFN 10 | def FeedForward(dim, mult=4): 11 | inner_dim = int(dim * mult) 12 | return nn.Sequential( 13 | nn.LayerNorm(dim), 14 | nn.Linear(dim, inner_dim, bias=False), 15 | nn.GELU(), 16 | nn.Linear(inner_dim, dim, bias=False), 17 | ) 18 | 19 | 20 | def reshape_tensor(x, heads): 21 | bs, length, width = x.shape 22 | #(bs, length, width) --> (bs, length, n_heads, dim_per_head) 23 | x = x.view(bs, length, heads, -1) 24 | # (bs, length, n_heads, dim_per_head) --> (bs, n_heads, length, dim_per_head) 25 | x = x.transpose(1, 2) 26 | # (bs, n_heads, length, dim_per_head) --> (bs*n_heads, length, dim_per_head) 27 | x = x.reshape(bs, heads, length, -1) 28 | return x 29 | 30 | 31 | class PerceiverAttention(nn.Module): 32 | def __init__(self, *, dim, dim_head=64, heads=8): 33 | super().__init__() 34 | self.scale = dim_head**-0.5 35 | self.dim_head = dim_head 36 | self.heads = heads 37 | inner_dim = dim_head * heads 38 | 39 | self.norm1 = nn.LayerNorm(dim) 40 | self.norm2 = nn.LayerNorm(dim) 41 | 42 | self.to_q = nn.Linear(dim, inner_dim, bias=False) 43 | self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False) 44 | self.to_out = nn.Linear(inner_dim, dim, bias=False) 45 | 46 | def forward(self, x, latents): 47 | """ 48 | Args: 49 | x (torch.Tensor): image features 50 | shape (b, n1, D) 51 | latent (torch.Tensor): latent features 52 | shape (b, n2, D) 53 | """ 54 | x = self.norm1(x) 55 | latents = self.norm2(latents) 56 | 57 | b, l, _ = latents.shape 58 | 59 | q = self.to_q(latents) 60 | kv_input = torch.cat((x, latents), dim=-2) 61 | k, v = self.to_kv(kv_input).chunk(2, dim=-1) 62 | 63 | q = reshape_tensor(q, self.heads) 64 | k = reshape_tensor(k, self.heads) 65 | v = reshape_tensor(v, self.heads) 66 | 67 | # attention 68 | scale = 1 / math.sqrt(math.sqrt(self.dim_head)) 69 | weight = (q * scale) @ (k * scale).transpose(-2, -1) # More stable with f16 than dividing afterwards 70 | weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype) 71 | out = weight @ v 72 | 73 | out = out.permute(0, 2, 1, 3).reshape(b, l, -1) 74 | 75 | return self.to_out(out) 76 | 77 | 78 | class Resampler(nn.Module): 79 | def __init__( 80 | self, 81 | dim=1024, 82 | depth=8, 83 | dim_head=64, 84 | heads=16, 85 | num_queries=8, 86 | embedding_dim=768, 87 | output_dim=1024, 88 | ff_mult=4, 89 | ): 90 | super().__init__() 91 | 92 | self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim**0.5) 93 | 94 | self.proj_in = nn.Linear(embedding_dim, dim) 95 | 96 | self.proj_out = nn.Linear(dim, output_dim) 97 | self.norm_out = nn.LayerNorm(output_dim) 98 | 99 | self.layers = nn.ModuleList([]) 100 | for _ in range(depth): 101 | self.layers.append( 102 | nn.ModuleList( 103 | [ 104 | PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads), 105 | FeedForward(dim=dim, mult=ff_mult), 106 | ] 107 | ) 108 | ) 109 | 110 | def forward(self, x): 111 | 112 | latents = self.latents.repeat(x.size(0), 1, 1) 113 | 114 | x = self.proj_in(x) 115 | 116 | 
for attn, ff in self.layers: 117 | latents = attn(x, latents) + latents 118 | latents = ff(latents) + latents 119 | 120 | latents = self.proj_out(latents) 121 | return self.norm_out(latents) 122 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | accelerate==1.6.0 2 | diffusers==0.31.0 3 | facexlib==0.3.0 4 | gradio==5.23.1 5 | httpcore==1.0.7 6 | httpx==0.28.1 7 | huggingface-hub==0.28.1 8 | insightface==0.7.3 9 | numpy==1.26.4 10 | onnxruntime==1.19.2 11 | opencv-python==4.11.0.86 12 | optimum-quanto==0.2.7 13 | peft==0.14.0 14 | pillow==10.4.0 15 | pillow-avif-plugin==1.5.0 16 | pillow-heif==0.21.0 17 | sentencepiece==0.2.0 18 | torch==2.6.0 19 | torchvision==0.21.0 20 | transformers==4.48.0 -------------------------------------------------------------------------------- /test.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2025 Bytedance Ltd. and/or its affiliates. All rights reserved. 2 | 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | import argparse 16 | import os 17 | 18 | import torch 19 | from PIL import Image 20 | 21 | from pipelines.pipeline_infu_flux import InfUFluxPipeline 22 | 23 | 24 | def main(): 25 | parser = argparse.ArgumentParser() 26 | parser.add_argument('--id_image', default='./assets/examples/man.jpg', help="""input ID image""") 27 | parser.add_argument('--control_image', default=None, help="""control image [optional]""") 28 | parser.add_argument('--out_results_dir', default='./results', help="""output folder""") 29 | parser.add_argument('--prompt', default='A man, portrait, cinematic') 30 | parser.add_argument('--base_model_path', default='black-forest-labs/FLUX.1-dev') 31 | parser.add_argument('--model_dir', default='ByteDance/InfiniteYou') 32 | parser.add_argument('--infu_flux_version', default='v1.0', help="""InfiniteYou-FLUX version: currently only v1.0""") 33 | parser.add_argument('--model_version', default='aes_stage2', help="""model version: aes_stage2 | sim_stage1""") 34 | parser.add_argument('--cuda_device', default=0, type=int) 35 | parser.add_argument('--seed', default=0, type=int, help="""seed (0 for random)""") 36 | parser.add_argument('--guidance_scale', default=3.5, type=float) 37 | parser.add_argument('--num_steps', default=30, type=int) 38 | parser.add_argument('--infusenet_conditioning_scale', default=1.0, type=float) 39 | parser.add_argument('--infusenet_guidance_start', default=0.0, type=float) 40 | parser.add_argument('--infusenet_guidance_end', default=1.0, type=float) 41 | # The LoRA options below are entirely optional. Here we provide two examples to facilitate users to try, but they are NOT used in our paper. 
42 | parser.add_argument('--enable_realism_lora', action='store_true') 43 | parser.add_argument('--enable_anti_blur_lora', action='store_true') 44 | # Memory reduction options 45 | parser.add_argument('--quantize_8bit', action='store_true') 46 | parser.add_argument('--cpu_offload', action='store_true') 47 | args = parser.parse_args() 48 | 49 | # Check arguments 50 | assert args.infu_flux_version == 'v1.0', 'Currently only supports InfiniteYou-FLUX v1.0' 51 | assert args.model_version in ['aes_stage2', 'sim_stage1'], 'Currently only supports model versions: aes_stage2 | sim_stage1' 52 | 53 | # Set cuda device 54 | torch.cuda.set_device(args.cuda_device) 55 | 56 | # Load pipeline 57 | infu_model_path = os.path.join(args.model_dir, f'infu_flux_{args.infu_flux_version}', args.model_version) 58 | insightface_root_path = os.path.join(args.model_dir, 'supports', 'insightface') 59 | pipe = InfUFluxPipeline( 60 | base_model_path=args.base_model_path, 61 | infu_model_path=infu_model_path, 62 | insightface_root_path=insightface_root_path, 63 | infu_flux_version=args.infu_flux_version, 64 | model_version=args.model_version, 65 | quantize_8bit=args.quantize_8bit, 66 | cpu_offload=args.cpu_offload, 67 | ) 68 | # Load LoRAs (optional) 69 | lora_dir = os.path.join(args.model_dir, 'supports', 'optional_loras') 70 | if not os.path.exists(lora_dir): lora_dir = './models/InfiniteYou/supports/optional_loras' 71 | loras = [] 72 | if args.enable_realism_lora: 73 | loras.append([os.path.join(lora_dir, 'flux_realism_lora.safetensors'), 'realism', 1.0]) 74 | if args.enable_anti_blur_lora: 75 | loras.append([os.path.join(lora_dir, 'flux_anti_blur_lora.safetensors'), 'anti_blur', 1.0]) 76 | pipe.load_loras(loras) 77 | 78 | # Perform inference 79 | if args.seed == 0: 80 | args.seed = torch.seed() & 0xFFFFFFFF 81 | image = pipe( 82 | id_image=Image.open(args.id_image).convert('RGB'), 83 | prompt=args.prompt, 84 | control_image=Image.open(args.control_image).convert('RGB') if args.control_image is not None else None, 85 | seed=args.seed, 86 | guidance_scale=args.guidance_scale, 87 | num_steps=args.num_steps, 88 | infusenet_conditioning_scale=args.infusenet_conditioning_scale, 89 | infusenet_guidance_start=args.infusenet_guidance_start, 90 | infusenet_guidance_end=args.infusenet_guidance_end, 91 | cpu_offload=args.cpu_offload, 92 | ) 93 | 94 | # Save results 95 | os.makedirs(args.out_results_dir, exist_ok=True) 96 | index = len(os.listdir(args.out_results_dir)) 97 | id_name = os.path.splitext(os.path.basename(args.id_image))[0] 98 | prompt_name = args.prompt[:150] + '*' if len(args.prompt) > 150 else args.prompt 99 | prompt_name = prompt_name.replace('/', '|') 100 | out_name = f'{index:05d}_{id_name}_{prompt_name}_seed{args.seed}.png' 101 | out_result_path = os.path.join(args.out_results_dir, out_name) 102 | image.save(out_result_path) 103 | 104 | 105 | if __name__ == "__main__": 106 | main() 107 | --------------------------------------------------------------------------------
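For quick reference, a minimal programmatic invocation of `InfUFluxPipeline` (equivalent to running `test.py` with its default arguments) might look like the sketch below. The checkpoint locations and the example image path are illustrative assumptions taken from the defaults in this repository; substitute your own local paths or Hugging Face IDs as needed.

# Minimal usage sketch; the paths and model IDs below are illustrative assumptions.
import os

from PIL import Image

from pipelines.pipeline_infu_flux import InfUFluxPipeline

model_dir = 'ByteDance/InfiniteYou'  # or a local download of the InfiniteYou checkpoints

# Build the pipeline; this loads FLUX.1-dev, the InfuseNet ControlNet, and the image projection model.
pipe = InfUFluxPipeline(
    base_model_path='black-forest-labs/FLUX.1-dev',
    infu_model_path=os.path.join(model_dir, 'infu_flux_v1.0', 'aes_stage2'),
    insightface_root_path=os.path.join(model_dir, 'supports', 'insightface'),
    infu_flux_version='v1.0',
    model_version='aes_stage2',
)

# Generate an identity-preserved portrait from an ID image and a text prompt.
image = pipe(
    id_image=Image.open('./assets/examples/man.jpg').convert('RGB'),
    prompt='A man, portrait, cinematic',
    seed=42,
    guidance_scale=3.5,
    num_steps=30,
    infusenet_conditioning_scale=1.0,
    infusenet_guidance_start=0.0,
    infusenet_guidance_end=1.0,
)

os.makedirs('./results', exist_ok=True)
image.save('./results/example_output.png')

For lower GPU memory, `quantize_8bit=True` can be passed to the constructor and `cpu_offload=True` to both the constructor and the call, mirroring the `--quantize_8bit` and `--cpu_offload` flags in `test.py`.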