├── LICENSE ├── README.md ├── data ├── flowerwoman │ ├── ref │ │ ├── elesban-landero-berriozabal-5ZXn8NGbqB4-unsplash.jpg │ │ ├── elesban-landero-berriozabal-D0IF_oTQzyI-unsplash.jpg │ │ └── elesban-landero-berriozabal-T9g2grKsu4Y-unsplash.jpg │ └── target │ │ ├── mask.png │ │ └── target.png └── pumpkinlady2 │ ├── ref │ ├── ivana-cajina-VpjXU5ovAPk-unsplash.jpg │ ├── ivana-cajina-g8dF7P94Vwg-unsplash.jpg │ └── ivana-cajina-t9K4K2iD5yg-unsplash.jpg │ └── target │ ├── mask.png │ └── target.png ├── infer.py ├── requirements.txt ├── train_realfill.ipynb └── train_realfill.py /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2023 Thuan Hoang Nguyen CreativeML Open RAIL++-M License dated October 3, 2023 2 | 3 | Section I: PREAMBLE Multimodal generative models are being widely adopted and used, and have the potential to transform the way artists, among other individuals, conceive and benefit from AI or ML technologies as a tool for content creation. Notwithstanding the current and potential benefits that these artifacts can bring to society at large, there are also concerns about potential misuses of them, either due to their technical limitations or ethical considerations. In short, this license strives for both the open and responsible downstream use of the accompanying model. When it comes to the open character, we took inspiration from open source permissive licenses regarding the grant of IP rights. Referring to the downstream responsible use, we added use-based restrictions not permitting the use of the model in very specific scenarios, in order for the licensor to be able to enforce the license in case potential misuses of the Model may occur. At the same time, we strive to promote open and responsible research on generative models for art and content generation. Even though downstream derivative versions of the model could be released under different licensing terms, the latter will always have to include - at minimum - the same use-based restrictions as the ones in the original license (this license). We believe in the intersection between open and responsible AI development; thus, this agreement aims to strike a balance between both in order to enable responsible open-science in the field of AI. This CreativeML Open RAIL++-M License governs the use of the model (and its derivatives) and is informed by the model card associated with the model. NOW THEREFORE, You and Licensor agree as follows: Definitions "License" means the terms and conditions for use, reproduction, and Distribution as defined in this document. "Data" means a collection of information and/or content extracted from the dataset used with the Model, including to train, pretrain, or otherwise evaluate the Model. The Data is not licensed under this License. "Output" means the results of operating a Model as embodied in informational content resulting therefrom. "Model" means any accompanying machine-learning based assemblies (including checkpoints), consisting of learnt weights, parameters (including optimizer states), corresponding to the model architecture as embodied in the Complementary Material, that have been trained or tuned, in whole or in part on the Data, using the Complementary Material. 
"Derivatives of the Model" means all modifications to the Model, works based on the Model, or any other model which is created or initialized by transfer of patterns of the weights, parameters, activations or output of the Model, to the other model, in order to cause the other model to perform similarly to the Model, including - but not limited to - distillation methods entailing the use of intermediate data representations or methods based on the generation of synthetic data by the Model for training the other model. "Complementary Material" means the accompanying source code and scripts used to define, run, load, benchmark or evaluate the Model, and used to prepare data for training or evaluation, if any. This includes any accompanying documentation, tutorials, examples, etc, if any. "Distribution" means any transmission, reproduction, publication or other sharing of the Model or Derivatives of the Model to a third party, including providing the Model as a hosted service made available by electronic or other remote means - e.g. API-based or web access. "Licensor" means the copyright owner or entity authorized by the copyright owner that is granting the License, including the persons or entities that may have rights in the Model and/or distributing the Model. "You" (or "Your") means an individual or Legal Entity exercising permissions granted by this License and/or making use of the Model for whichever purpose and in any field of use, including usage of the Model in an end-use application - e.g. chatbot, translator, image generator. "Third Parties" means individuals or legal entities that are not under common control with Licensor or You. "Contribution" means any work of authorship, including the original version of the Model and any modifications or additions to that Model or Derivatives of the Model thereof, that is intentionally submitted to Licensor for inclusion in the Model by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Model, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" means Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Model. 4 | 5 | Section II: INTELLECTUAL PROPERTY RIGHTS Both copyright and patent grants apply to the Model, Derivatives of the Model and Complementary Material. The Model and Derivatives of the Model are subject to additional terms as described in 6 | 7 | Section III. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare, publicly display, publicly perform, sublicense, and distribute the Complementary Material, the Model, and Derivatives of the Model. Grant of Patent License. 
Subject to the terms and conditions of this License and where and as applicable, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this paragraph) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Model and the Complementary Material, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Model to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Model and/or Complementary Material or a Contribution incorporated within the Model and/or Complementary Material constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for the Model and/or Work shall terminate as of the date such litigation is asserted or filed. Section III: CONDITIONS OF USAGE, DISTRIBUTION AND REDISTRIBUTION Distribution and Redistribution. You may host for Third Party remote access purposes (e.g. software-as-a-service), reproduce and distribute copies of the Model or Derivatives of the Model thereof in any medium, with or without modifications, provided that You meet the following conditions: Use-based restrictions as referenced in paragraph 5 MUST be included as an enforceable provision by You in any type of legal agreement (e.g. a license) governing the use and/or distribution of the Model or Derivatives of the Model, and You shall give notice to subsequent users You Distribute to, that the Model or Derivatives of the Model are subject to paragraph 5. This provision does not apply to the use of Complementary Material. You must give any Third Party recipients of the Model or Derivatives of the Model a copy of this License; You must cause any modified files to carry prominent notices stating that You changed the files; You must retain all copyright, patent, trademark, and attribution notices excluding those notices that do not pertain to any part of the Model, Derivatives of the Model. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions - respecting paragraph 4.a. - for use, reproduction, or Distribution of Your modifications, or for any such Derivatives of the Model as a whole, provided Your use, reproduction, and Distribution of the Model otherwise complies with the conditions stated in this License. Use-based restrictions. The restrictions set forth in Attachment A are considered Use-based restrictions. Therefore You cannot use the Model and the Derivatives of the Model for the specified restricted uses. You may use the Model subject to this License, including only for lawful purposes and in accordance with the License. Use may include creating any content with, finetuning, updating, running, training, evaluating and/or reparametrizing the Model. You shall require all of Your users who use the Model or a Derivative of the Model to comply with the terms of this paragraph (paragraph 5). The Output You Generate. Except as set forth herein, Licensor claims no rights in the Output You generate using the Model. You are accountable for the Output you generate and its subsequent uses. No use of the output can contravene any provision as stated in the License. 
8 | 9 | Section IV: OTHER PROVISIONS Updates and Runtime Restrictions. To the maximum extent permitted by law, Licensor reserves the right to restrict (remotely or otherwise) usage of the Model in violation of this License. Trademarks and related. Nothing in this License permits You to make use of Licensors’ trademarks, trade names, logos or to otherwise suggest endorsement or misrepresent the relationship between the parties; and any rights not expressly granted herein are reserved by the Licensors. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Model and the Complementary Material (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Model, Derivatives of the Model, and the Complementary Material and assume any risks associated with Your exercise of permissions under this License. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Model and the Complementary Material (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. Accepting Warranty or Additional Liability. While redistributing the Model, Derivatives of the Model and the Complementary Material thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. If any provision of this License is held to be invalid, illegal or unenforceable, the remaining provisions shall be unaffected thereby and remain valid as if such provision had not been set forth herein. 
10 | 11 | END OF TERMS AND CONDITIONS 12 | 13 | Attachment A Use Restrictions You agree not to use the Model or Derivatives of the Model: In any way that violates any applicable national, federal, state, local or international law or regulation; For the purpose of exploiting, harming or attempting to exploit or harm minors in any way; To generate or disseminate verifiably false information and/or content with the purpose of harming others; To generate or disseminate personal identifiable information that can be used to harm an individual; To defame, disparage or otherwise harass others; For fully automated decision making that adversely impacts an individual’s legal rights or otherwise creates or modifies a binding, enforceable obligation; For any use intended to or which has the effect of discriminating against or harming individuals or groups based on online or offline social behavior or known or predicted personal or personality characteristics; To exploit any of the vulnerabilities of a specific group of persons based on their age, social, physical or mental characteristics, in order to materially distort the behavior of a person pertaining to that group in a manner that causes or is likely to cause that person or another person physical or psychological harm; For any use intended to or which has the effect of discriminating against individuals or groups based on legally protected characteristics or categories; To provide medical advice and medical results interpretation; To generate or disseminate information for the purpose to be used for administration of justice, law enforcement, immigration or asylum processes, such as predicting an individual will commit fraud/crime commitment (e.g. by text profiling, drawing causal relationships between assertions made in documents, indiscriminate and arbitrarily-targeted use). 14 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # RealFill 2 | 3 | [RealFill](https://arxiv.org/abs/2309.16668) is a method to personalize text2image inpainting models like stable diffusion inpainting given just a few (1~5) images of a scene. 4 | The `train_realfill.py` script shows how to implement the training procedure for stable diffusion inpainting. 5 | 6 | 7 | ## Running locally with PyTorch 8 | 9 | ### Installing the dependencies 10 | 11 | Before running the scripts, make sure to install the library's training dependencies: 12 | 13 | cd to the realfill folder and run 14 | ```bash 15 | cd realfill 16 | pip install -r requirements.txt 17 | ``` 18 | 19 | And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with: 20 | 21 | ```bash 22 | accelerate config 23 | ``` 24 | 25 | Or for a default accelerate configuration without answering questions about your environment 26 | 27 | ```bash 28 | accelerate config default 29 | ``` 30 | 31 | Or if your environment doesn't support an interactive shell e.g. a notebook 32 | 33 | ```python 34 | from accelerate.utils import write_basic_config 35 | write_basic_config() 36 | ``` 37 | 38 | When running `accelerate config`, if we specify torch compile mode to True there can be dramatic speedups. 39 | 40 | ### Toy example 41 | 42 | Now let's fill the real. For this example, we will use some images of the flower girl example from the paper. 
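To train on a scene of your own, mirror the layout of the bundled examples (also visible in the directory tree at the top of this repo): a `ref` folder with a few reference photos of the scene, and a `target` folder containing the image to complete (`target.png`) and its binary mask (`mask.png`). A sketch of the expected `--train_data_dir` layout — the reference file names below are just placeholders:

```
data/flowerwoman
├── ref
│   ├── reference-1.jpg
│   ├── reference-2.jpg
│   └── reference-3.jpg
└── target
    ├── mask.png
    └── target.png
```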
43 | 44 | We already provide some images for testing in the `data` folder. 45 | 46 | You only have to launch the training using: 47 | 48 | ```bash 49 | export MODEL_NAME="stabilityai/stable-diffusion-2-inpainting" 50 | export TRAIN_DIR="data/flowerwoman" 51 | export OUTPUT_DIR="flowerwoman-model" 52 | 53 | accelerate launch train_realfill.py \ 54 | --pretrained_model_name_or_path=$MODEL_NAME \ 55 | --train_data_dir=$TRAIN_DIR \ 56 | --output_dir=$OUTPUT_DIR \ 57 | --resolution=512 \ 58 | --train_batch_size=16 \ 59 | --gradient_accumulation_steps=1 \ 60 | --unet_learning_rate=2e-4 \ 61 | --text_encoder_learning_rate=4e-5 \ 62 | --lr_scheduler="constant" \ 63 | --lr_warmup_steps=100 \ 64 | --max_train_steps=2000 \ 65 | --lora_rank=8 \ 66 | --lora_dropout=0.1 \ 67 | --lora_alpha=16 68 | ``` 69 | 70 | ### Training on a low-memory GPU: 71 | 72 | It is possible to run RealFill on a low-memory GPU by using the following optimizations: 73 | - [gradient checkpointing and the 8-bit optimizer](#training-with-gradient-checkpointing-and-8-bit-optimizers) 74 | - [xformers](#training-with-xformers) 75 | - [setting grads to none](#set-grads-to-none) 76 | 77 | ```bash 78 | export MODEL_NAME="stabilityai/stable-diffusion-2-inpainting" 79 | export TRAIN_DIR="data/flowerwoman" 80 | export OUTPUT_DIR="flowerwoman-model" 81 | 82 | accelerate launch train_realfill.py \ 83 | --pretrained_model_name_or_path=$MODEL_NAME \ 84 | --train_data_dir=$TRAIN_DIR \ 85 | --output_dir=$OUTPUT_DIR \ 86 | --resolution=512 \ 87 | --train_batch_size=16 \ 88 | --gradient_accumulation_steps=1 --gradient_checkpointing \ 89 | --use_8bit_adam \ 90 | --enable_xformers_memory_efficient_attention \ 91 | --set_grads_to_none \ 92 | --unet_learning_rate=2e-4 \ 93 | --text_encoder_learning_rate=4e-5 \ 94 | --lr_scheduler="constant" \ 95 | --lr_warmup_steps=100 \ 96 | --max_train_steps=2000 \ 97 | --lora_rank=8 \ 98 | --lora_dropout=0.1 \ 99 | --lora_alpha=16 100 | ``` 101 | 102 | ### Training with gradient checkpointing and 8-bit optimizers: 103 | 104 | With the help of gradient checkpointing and the 8-bit optimizer from bitsandbytes, it's possible to train RealFill on a 16GB GPU. 105 | 106 | To install `bitsandbytes`, please refer to this [readme](https://github.com/TimDettmers/bitsandbytes#requirements--installation). 107 | 108 | ### Training with xformers: 109 | You can enable memory-efficient attention by [installing xFormers](https://github.com/facebookresearch/xformers#installing-xformers) and passing the `--enable_xformers_memory_efficient_attention` argument to the script. 110 | 111 | ### Set grads to none 112 | 113 | To save even more memory, pass the `--set_grads_to_none` argument to the script. This sets gradients to `None` instead of zero. However, be aware that it changes certain behaviors, so if you start experiencing any problems, remove this argument. 114 | 115 | More info: https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html 116 | 117 | ## Acknowledgements 118 | This repo is built upon the DreamBooth code from diffusers, and we thank the developers for their great work and effort in releasing the source code. Furthermore, a special "thank you" to the RealFill authors for publishing such great work.
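## Inference

Once training has finished, you can inpaint the target image with the `infer.py` script bundled in this repo (its full source is listed further below). A minimal sketch, reusing the environment variables exported above; the output folder name is arbitrary and `--seed` is optional:

```bash
python infer.py \
  --model_path=$OUTPUT_DIR \
  --validation_image=$TRAIN_DIR/target/target.png \
  --validation_mask=$TRAIN_DIR/target/mask.png \
  --output_dir=flowerwoman-results \
  --seed=0
```

The script loads the fine-tuned UNet and text encoder from `--model_path`, applies a small max-filter and blur to the mask, and writes 16 candidate completions (`0.png` through `15.png`) to `--output_dir`, from which you can pick the best result.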
119 | -------------------------------------------------------------------------------- /data/flowerwoman/ref/elesban-landero-berriozabal-5ZXn8NGbqB4-unsplash.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thuanz123/realfill/9d8d335324a4cbdce6c635268a87666092ecc74f/data/flowerwoman/ref/elesban-landero-berriozabal-5ZXn8NGbqB4-unsplash.jpg -------------------------------------------------------------------------------- /data/flowerwoman/ref/elesban-landero-berriozabal-D0IF_oTQzyI-unsplash.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thuanz123/realfill/9d8d335324a4cbdce6c635268a87666092ecc74f/data/flowerwoman/ref/elesban-landero-berriozabal-D0IF_oTQzyI-unsplash.jpg -------------------------------------------------------------------------------- /data/flowerwoman/ref/elesban-landero-berriozabal-T9g2grKsu4Y-unsplash.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thuanz123/realfill/9d8d335324a4cbdce6c635268a87666092ecc74f/data/flowerwoman/ref/elesban-landero-berriozabal-T9g2grKsu4Y-unsplash.jpg -------------------------------------------------------------------------------- /data/flowerwoman/target/mask.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thuanz123/realfill/9d8d335324a4cbdce6c635268a87666092ecc74f/data/flowerwoman/target/mask.png -------------------------------------------------------------------------------- /data/flowerwoman/target/target.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thuanz123/realfill/9d8d335324a4cbdce6c635268a87666092ecc74f/data/flowerwoman/target/target.png -------------------------------------------------------------------------------- /data/pumpkinlady2/ref/ivana-cajina-VpjXU5ovAPk-unsplash.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thuanz123/realfill/9d8d335324a4cbdce6c635268a87666092ecc74f/data/pumpkinlady2/ref/ivana-cajina-VpjXU5ovAPk-unsplash.jpg -------------------------------------------------------------------------------- /data/pumpkinlady2/ref/ivana-cajina-g8dF7P94Vwg-unsplash.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thuanz123/realfill/9d8d335324a4cbdce6c635268a87666092ecc74f/data/pumpkinlady2/ref/ivana-cajina-g8dF7P94Vwg-unsplash.jpg -------------------------------------------------------------------------------- /data/pumpkinlady2/ref/ivana-cajina-t9K4K2iD5yg-unsplash.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thuanz123/realfill/9d8d335324a4cbdce6c635268a87666092ecc74f/data/pumpkinlady2/ref/ivana-cajina-t9K4K2iD5yg-unsplash.jpg -------------------------------------------------------------------------------- /data/pumpkinlady2/target/mask.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thuanz123/realfill/9d8d335324a4cbdce6c635268a87666092ecc74f/data/pumpkinlady2/target/mask.png -------------------------------------------------------------------------------- /data/pumpkinlady2/target/target.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/thuanz123/realfill/9d8d335324a4cbdce6c635268a87666092ecc74f/data/pumpkinlady2/target/target.png -------------------------------------------------------------------------------- /infer.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | 4 | import torch 5 | from PIL import Image, ImageFilter 6 | from diffusers import ( 7 | StableDiffusionInpaintPipeline, 8 | UNet2DConditionModel, 9 | DDPMScheduler 10 | ) 11 | from transformers import CLIPTextModel 12 | 13 | parser = argparse.ArgumentParser(description="Inference") 14 | parser.add_argument( 15 | "--model_path", 16 | type=str, 17 | default=None, 18 | required=True, 19 | help="Path to pretrained model or model identifier from huggingface.co/models.", 20 | ) 21 | parser.add_argument( 22 | "--validation_image", 23 | type=str, 24 | default=None, 25 | required=True, 26 | help="The directory of the validation image", 27 | ) 28 | parser.add_argument( 29 | "--validation_mask", 30 | type=str, 31 | default=None, 32 | required=True, 33 | help="The directory of the validation mask", 34 | ) 35 | parser.add_argument( 36 | "--output_dir", 37 | type=str, 38 | default="./test-infer/", 39 | help="The output directory where predictions are saved", 40 | ) 41 | parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible inference.") 42 | 43 | args = parser.parse_args() 44 | 45 | if __name__ == "__main__": 46 | os.makedirs(args.output_dir, exist_ok=True) 47 | generator = None 48 | 49 | # create & load model 50 | pipe = StableDiffusionInpaintPipeline.from_pretrained( 51 | "stabilityai/stable-diffusion-2-inpainting", 52 | torch_dtype=torch.float32, 53 | revision=None 54 | ) 55 | 56 | pipe.unet = UNet2DConditionModel.from_pretrained( 57 | args.model_path, subfolder="unet", revision=None, 58 | ) 59 | pipe.text_encoder = CLIPTextModel.from_pretrained( 60 | args.model_path, subfolder="text_encoder", revision=None, 61 | ) 62 | pipe.scheduler = DDPMScheduler.from_config(pipe.scheduler.config) 63 | pipe = pipe.to("cuda") 64 | 65 | if args.seed is not None: 66 | generator = torch.Generator(device="cuda").manual_seed(args.seed) 67 | 68 | image = Image.open(args.validation_image) 69 | mask_image = Image.open(args.validation_mask) 70 | 71 | erode_kernel = ImageFilter.MaxFilter(3) 72 | mask_image = mask_image.filter(erode_kernel) 73 | 74 | blur_kernel = ImageFilter.BoxBlur(1) 75 | mask_image = mask_image.filter(blur_kernel) 76 | 77 | for idx in range(16): 78 | result = pipe( 79 | prompt="a photo of sks", image=image, mask_image=mask_image, 80 | num_inference_steps=200, guidance_scale=1, generator=generator, 81 | ).images[0] 82 | 83 | result = Image.composite(result, image, mask_image) 84 | result.save(f"{args.output_dir}/{idx}.png") 85 | 86 | del pipe 87 | torch.cuda.empty_cache() 88 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | diffusers==0.20.1 2 | accelerate==0.23.0 3 | transformers==4.36.0 4 | peft==0.5.0 5 | huggingface-hub==0.25.2 6 | torch==2.0.1 7 | torchvision==0.15.2 8 | ftfy==6.1.1 9 | tensorboard==2.14.0 10 | Jinja2==3.1.3 11 | -------------------------------------------------------------------------------- /train_realfill.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | 
"metadata": { 7 | "colab": { 8 | "base_uri": "https://localhost:8080/" 9 | }, 10 | "id": "7M37t-cRkNDf", 11 | "outputId": "deba5e20-1f49-45db-b53b-096217ceafd9" 12 | }, 13 | "outputs": [ 14 | { 15 | "output_type": "stream", 16 | "name": "stdout", 17 | "text": [ 18 | "Cloning into 'realfill'...\n", 19 | "remote: Enumerating objects: 192, done.\u001b[K\n", 20 | "remote: Counting objects: 100% (55/55), done.\u001b[K\n", 21 | "remote: Compressing objects: 100% (55/55), done.\u001b[K\n", 22 | "remote: Total 192 (delta 29), reused 0 (delta 0), pack-reused 137\u001b[K\n", 23 | "Receiving objects: 100% (192/192), 1.22 MiB | 9.36 MiB/s, done.\n", 24 | "Resolving deltas: 100% (81/81), done.\n" 25 | ] 26 | } 27 | ], 28 | "source": [ 29 | "!git clone https://github.com/thuanz123/realfill.git" 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "execution_count": null, 35 | "metadata": { 36 | "colab": { 37 | "base_uri": "https://localhost:8080/" 38 | }, 39 | "id": "wTXUPQrrtMZF", 40 | "outputId": "ff54182e-d63f-4289-f084-4481067b874c" 41 | }, 42 | "outputs": [ 43 | { 44 | "output_type": "stream", 45 | "name": "stdout", 46 | "text": [ 47 | "/content/realfill\n", 48 | "\u001b[0m\u001b[01;34mdata\u001b[0m/ infer.py LICENSE README.md requirements.txt train_realfill.py\n" 49 | ] 50 | } 51 | ], 52 | "source": [ 53 | "%cd realfill\n", 54 | "%ls" 55 | ] 56 | }, 57 | { 58 | "cell_type": "code", 59 | "execution_count": null, 60 | "metadata": { 61 | "colab": { 62 | "base_uri": "https://localhost:8080/" 63 | }, 64 | "id": "8hcWLJk3kq3i", 65 | "outputId": "b741d90d-7448-4647-f4c1-f7518431d8de" 66 | }, 67 | "outputs": [ 68 | { 69 | "output_type": "stream", 70 | "name": "stdout", 71 | "text": [ 72 | "Collecting diffusers==0.20.1 (from -r /content/realfill/requirements.txt (line 1))\n", 73 | " Downloading diffusers-0.20.1-py3-none-any.whl (1.3 MB)\n", 74 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.3/1.3 MB\u001b[0m \u001b[31m6.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 75 | "\u001b[?25hCollecting accelerate==0.23.0 (from -r /content/realfill/requirements.txt (line 2))\n", 76 | " Downloading accelerate-0.23.0-py3-none-any.whl (258 kB)\n", 77 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m258.1/258.1 kB\u001b[0m \u001b[31m30.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 78 | "\u001b[?25hCollecting transformers==4.34.0 (from -r /content/realfill/requirements.txt (line 3))\n", 79 | " Downloading transformers-4.34.0-py3-none-any.whl (7.7 MB)\n", 80 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m7.7/7.7 MB\u001b[0m \u001b[31m52.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 81 | "\u001b[?25hCollecting peft==0.5.0 (from -r /content/realfill/requirements.txt (line 4))\n", 82 | " Downloading peft-0.5.0-py3-none-any.whl (85 kB)\n", 83 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m85.6/85.6 kB\u001b[0m \u001b[31m12.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 84 | "\u001b[?25hCollecting torch==2.0.1 (from -r /content/realfill/requirements.txt (line 5))\n", 85 | " Downloading torch-2.0.1-cp310-cp310-manylinux1_x86_64.whl (619.9 MB)\n", 86 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m619.9/619.9 MB\u001b[0m \u001b[31m1.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 87 | "\u001b[?25hCollecting torchvision==0.15.2 (from -r /content/realfill/requirements.txt (line 6))\n", 88 | " Downloading 
torchvision-0.15.2-cp310-cp310-manylinux1_x86_64.whl (6.0 MB)\n", 89 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m6.0/6.0 MB\u001b[0m \u001b[31m68.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 90 | "\u001b[?25hCollecting ftfy==6.1.1 (from -r /content/realfill/requirements.txt (line 7))\n", 91 | " Downloading ftfy-6.1.1-py3-none-any.whl (53 kB)\n", 92 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m53.1/53.1 kB\u001b[0m \u001b[31m7.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 93 | "\u001b[?25hCollecting tensorboard==2.14.0 (from -r /content/realfill/requirements.txt (line 8))\n", 94 | " Downloading tensorboard-2.14.0-py3-none-any.whl (5.5 MB)\n", 95 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m5.5/5.5 MB\u001b[0m \u001b[31m67.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 96 | "\u001b[?25hCollecting Jinja2==3.1.2 (from -r /content/realfill/requirements.txt (line 9))\n", 97 | " Downloading Jinja2-3.1.2-py3-none-any.whl (133 kB)\n", 98 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m133.1/133.1 kB\u001b[0m \u001b[31m19.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 99 | "\u001b[?25hRequirement already satisfied: importlib-metadata in /usr/local/lib/python3.10/dist-packages (from diffusers==0.20.1->-r /content/realfill/requirements.txt (line 1)) (7.0.1)\n", 100 | "Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from diffusers==0.20.1->-r /content/realfill/requirements.txt (line 1)) (3.13.1)\n", 101 | "Requirement already satisfied: huggingface-hub>=0.13.2 in /usr/local/lib/python3.10/dist-packages (from diffusers==0.20.1->-r /content/realfill/requirements.txt (line 1)) (0.20.3)\n", 102 | "Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from diffusers==0.20.1->-r /content/realfill/requirements.txt (line 1)) (1.25.2)\n", 103 | "Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.10/dist-packages (from diffusers==0.20.1->-r /content/realfill/requirements.txt (line 1)) (2023.12.25)\n", 104 | "Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from diffusers==0.20.1->-r /content/realfill/requirements.txt (line 1)) (2.31.0)\n", 105 | "Requirement already satisfied: safetensors>=0.3.1 in /usr/local/lib/python3.10/dist-packages (from diffusers==0.20.1->-r /content/realfill/requirements.txt (line 1)) (0.4.2)\n", 106 | "Requirement already satisfied: Pillow in /usr/local/lib/python3.10/dist-packages (from diffusers==0.20.1->-r /content/realfill/requirements.txt (line 1)) (9.4.0)\n", 107 | "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from accelerate==0.23.0->-r /content/realfill/requirements.txt (line 2)) (23.2)\n", 108 | "Requirement already satisfied: psutil in /usr/local/lib/python3.10/dist-packages (from accelerate==0.23.0->-r /content/realfill/requirements.txt (line 2)) (5.9.5)\n", 109 | "Requirement already satisfied: pyyaml in /usr/local/lib/python3.10/dist-packages (from accelerate==0.23.0->-r /content/realfill/requirements.txt (line 2)) (6.0.1)\n", 110 | "Collecting tokenizers<0.15,>=0.14 (from transformers==4.34.0->-r /content/realfill/requirements.txt (line 3))\n", 111 | " Downloading tokenizers-0.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (3.8 MB)\n", 112 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m 
\u001b[32m3.8/3.8 MB\u001b[0m \u001b[31m66.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 113 | "\u001b[?25hRequirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.10/dist-packages (from transformers==4.34.0->-r /content/realfill/requirements.txt (line 3)) (4.66.1)\n", 114 | "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.10/dist-packages (from torch==2.0.1->-r /content/realfill/requirements.txt (line 5)) (4.9.0)\n", 115 | "Requirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from torch==2.0.1->-r /content/realfill/requirements.txt (line 5)) (1.12)\n", 116 | "Requirement already satisfied: networkx in /usr/local/lib/python3.10/dist-packages (from torch==2.0.1->-r /content/realfill/requirements.txt (line 5)) (3.2.1)\n", 117 | "Collecting nvidia-cuda-nvrtc-cu11==11.7.99 (from torch==2.0.1->-r /content/realfill/requirements.txt (line 5))\n", 118 | " Downloading nvidia_cuda_nvrtc_cu11-11.7.99-2-py3-none-manylinux1_x86_64.whl (21.0 MB)\n", 119 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m21.0/21.0 MB\u001b[0m \u001b[31m41.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 120 | "\u001b[?25hCollecting nvidia-cuda-runtime-cu11==11.7.99 (from torch==2.0.1->-r /content/realfill/requirements.txt (line 5))\n", 121 | " Downloading nvidia_cuda_runtime_cu11-11.7.99-py3-none-manylinux1_x86_64.whl (849 kB)\n", 122 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m849.3/849.3 kB\u001b[0m \u001b[31m67.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 123 | "\u001b[?25hCollecting nvidia-cuda-cupti-cu11==11.7.101 (from torch==2.0.1->-r /content/realfill/requirements.txt (line 5))\n", 124 | " Downloading nvidia_cuda_cupti_cu11-11.7.101-py3-none-manylinux1_x86_64.whl (11.8 MB)\n", 125 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m11.8/11.8 MB\u001b[0m \u001b[31m52.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 126 | "\u001b[?25hCollecting nvidia-cudnn-cu11==8.5.0.96 (from torch==2.0.1->-r /content/realfill/requirements.txt (line 5))\n", 127 | " Downloading nvidia_cudnn_cu11-8.5.0.96-2-py3-none-manylinux1_x86_64.whl (557.1 MB)\n", 128 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m557.1/557.1 MB\u001b[0m \u001b[31m1.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 129 | "\u001b[?25hCollecting nvidia-cublas-cu11==11.10.3.66 (from torch==2.0.1->-r /content/realfill/requirements.txt (line 5))\n", 130 | " Downloading nvidia_cublas_cu11-11.10.3.66-py3-none-manylinux1_x86_64.whl (317.1 MB)\n", 131 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m317.1/317.1 MB\u001b[0m \u001b[31m3.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 132 | "\u001b[?25hCollecting nvidia-cufft-cu11==10.9.0.58 (from torch==2.0.1->-r /content/realfill/requirements.txt (line 5))\n", 133 | " Downloading nvidia_cufft_cu11-10.9.0.58-py3-none-manylinux1_x86_64.whl (168.4 MB)\n", 134 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m168.4/168.4 MB\u001b[0m \u001b[31m8.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 135 | "\u001b[?25hCollecting nvidia-curand-cu11==10.2.10.91 (from torch==2.0.1->-r /content/realfill/requirements.txt (line 5))\n", 136 | " Downloading nvidia_curand_cu11-10.2.10.91-py3-none-manylinux1_x86_64.whl (54.6 MB)\n", 137 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m54.6/54.6 MB\u001b[0m \u001b[31m11.8 
MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 138 | "\u001b[?25hCollecting nvidia-cusolver-cu11==11.4.0.1 (from torch==2.0.1->-r /content/realfill/requirements.txt (line 5))\n", 139 | " Downloading nvidia_cusolver_cu11-11.4.0.1-2-py3-none-manylinux1_x86_64.whl (102.6 MB)\n", 140 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m102.6/102.6 MB\u001b[0m \u001b[31m14.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 141 | "\u001b[?25hCollecting nvidia-cusparse-cu11==11.7.4.91 (from torch==2.0.1->-r /content/realfill/requirements.txt (line 5))\n", 142 | " Downloading nvidia_cusparse_cu11-11.7.4.91-py3-none-manylinux1_x86_64.whl (173.2 MB)\n", 143 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m173.2/173.2 MB\u001b[0m \u001b[31m6.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 144 | "\u001b[?25hCollecting nvidia-nccl-cu11==2.14.3 (from torch==2.0.1->-r /content/realfill/requirements.txt (line 5))\n", 145 | " Downloading nvidia_nccl_cu11-2.14.3-py3-none-manylinux1_x86_64.whl (177.1 MB)\n", 146 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m177.1/177.1 MB\u001b[0m \u001b[31m5.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 147 | "\u001b[?25hCollecting nvidia-nvtx-cu11==11.7.91 (from torch==2.0.1->-r /content/realfill/requirements.txt (line 5))\n", 148 | " Downloading nvidia_nvtx_cu11-11.7.91-py3-none-manylinux1_x86_64.whl (98 kB)\n", 149 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m98.6/98.6 kB\u001b[0m \u001b[31m14.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 150 | "\u001b[?25hCollecting triton==2.0.0 (from torch==2.0.1->-r /content/realfill/requirements.txt (line 5))\n", 151 | " Downloading triton-2.0.0-1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (63.3 MB)\n", 152 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m63.3/63.3 MB\u001b[0m \u001b[31m20.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 153 | "\u001b[?25hRequirement already satisfied: wcwidth>=0.2.5 in /usr/local/lib/python3.10/dist-packages (from ftfy==6.1.1->-r /content/realfill/requirements.txt (line 7)) (0.2.13)\n", 154 | "Requirement already satisfied: absl-py>=0.4 in /usr/local/lib/python3.10/dist-packages (from tensorboard==2.14.0->-r /content/realfill/requirements.txt (line 8)) (1.4.0)\n", 155 | "Requirement already satisfied: grpcio>=1.48.2 in /usr/local/lib/python3.10/dist-packages (from tensorboard==2.14.0->-r /content/realfill/requirements.txt (line 8)) (1.60.1)\n", 156 | "Requirement already satisfied: google-auth<3,>=1.6.3 in /usr/local/lib/python3.10/dist-packages (from tensorboard==2.14.0->-r /content/realfill/requirements.txt (line 8)) (2.27.0)\n", 157 | "Collecting google-auth-oauthlib<1.1,>=0.5 (from tensorboard==2.14.0->-r /content/realfill/requirements.txt (line 8))\n", 158 | " Downloading google_auth_oauthlib-1.0.0-py2.py3-none-any.whl (18 kB)\n", 159 | "Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.10/dist-packages (from tensorboard==2.14.0->-r /content/realfill/requirements.txt (line 8)) (3.5.2)\n", 160 | "Requirement already satisfied: protobuf>=3.19.6 in /usr/local/lib/python3.10/dist-packages (from tensorboard==2.14.0->-r /content/realfill/requirements.txt (line 8)) (3.20.3)\n", 161 | "Requirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.10/dist-packages (from tensorboard==2.14.0->-r /content/realfill/requirements.txt (line 8)) (67.7.2)\n", 162 | 
"Requirement already satisfied: tensorboard-data-server<0.8.0,>=0.7.0 in /usr/local/lib/python3.10/dist-packages (from tensorboard==2.14.0->-r /content/realfill/requirements.txt (line 8)) (0.7.2)\n", 163 | "Requirement already satisfied: werkzeug>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from tensorboard==2.14.0->-r /content/realfill/requirements.txt (line 8)) (3.0.1)\n", 164 | "Requirement already satisfied: wheel>=0.26 in /usr/local/lib/python3.10/dist-packages (from tensorboard==2.14.0->-r /content/realfill/requirements.txt (line 8)) (0.42.0)\n", 165 | "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from Jinja2==3.1.2->-r /content/realfill/requirements.txt (line 9)) (2.1.5)\n", 166 | "Requirement already satisfied: cmake in /usr/local/lib/python3.10/dist-packages (from triton==2.0.0->torch==2.0.1->-r /content/realfill/requirements.txt (line 5)) (3.27.9)\n", 167 | "Collecting lit (from triton==2.0.0->torch==2.0.1->-r /content/realfill/requirements.txt (line 5))\n", 168 | " Downloading lit-17.0.6.tar.gz (153 kB)\n", 169 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m153.0/153.0 kB\u001b[0m \u001b[31m19.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 170 | "\u001b[?25h Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n", 171 | " Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n", 172 | " Installing backend dependencies ... \u001b[?25l\u001b[?25hdone\n", 173 | " Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n", 174 | "Requirement already satisfied: cachetools<6.0,>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from google-auth<3,>=1.6.3->tensorboard==2.14.0->-r /content/realfill/requirements.txt (line 8)) (5.3.2)\n", 175 | "Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.10/dist-packages (from google-auth<3,>=1.6.3->tensorboard==2.14.0->-r /content/realfill/requirements.txt (line 8)) (0.3.0)\n", 176 | "Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.10/dist-packages (from google-auth<3,>=1.6.3->tensorboard==2.14.0->-r /content/realfill/requirements.txt (line 8)) (4.9)\n", 177 | "Requirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.10/dist-packages (from google-auth-oauthlib<1.1,>=0.5->tensorboard==2.14.0->-r /content/realfill/requirements.txt (line 8)) (1.3.1)\n", 178 | "Requirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub>=0.13.2->diffusers==0.20.1->-r /content/realfill/requirements.txt (line 1)) (2023.6.0)\n", 179 | "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests->diffusers==0.20.1->-r /content/realfill/requirements.txt (line 1)) (3.3.2)\n", 180 | "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests->diffusers==0.20.1->-r /content/realfill/requirements.txt (line 1)) (3.6)\n", 181 | "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->diffusers==0.20.1->-r /content/realfill/requirements.txt (line 1)) (2.0.7)\n", 182 | "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests->diffusers==0.20.1->-r /content/realfill/requirements.txt (line 1)) (2024.2.2)\n", 183 | "Collecting huggingface-hub>=0.13.2 (from diffusers==0.20.1->-r /content/realfill/requirements.txt (line 1))\n", 184 | " 
Downloading huggingface_hub-0.17.3-py3-none-any.whl (295 kB)\n", 185 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m295.0/295.0 kB\u001b[0m \u001b[31m35.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 186 | "\u001b[?25hRequirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.10/dist-packages (from importlib-metadata->diffusers==0.20.1->-r /content/realfill/requirements.txt (line 1)) (3.17.0)\n", 187 | "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->torch==2.0.1->-r /content/realfill/requirements.txt (line 5)) (1.3.0)\n", 188 | "Requirement already satisfied: pyasn1<0.6.0,>=0.4.6 in /usr/local/lib/python3.10/dist-packages (from pyasn1-modules>=0.2.1->google-auth<3,>=1.6.3->tensorboard==2.14.0->-r /content/realfill/requirements.txt (line 8)) (0.5.1)\n", 189 | "Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.10/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<1.1,>=0.5->tensorboard==2.14.0->-r /content/realfill/requirements.txt (line 8)) (3.2.2)\n", 190 | "Building wheels for collected packages: lit\n", 191 | " Building wheel for lit (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n", 192 | " Created wheel for lit: filename=lit-17.0.6-py3-none-any.whl size=93255 sha256=da4cb2f8852a00c11b75e7f9a44519eb31da2b1a9ea1c3c87c66066c6588624b\n", 193 | " Stored in directory: /root/.cache/pip/wheels/30/dd/04/47d42976a6a86dc2ab66d7518621ae96f43452c8841d74758a\n", 194 | "Successfully built lit\n", 195 | "Installing collected packages: lit, nvidia-nvtx-cu11, nvidia-nccl-cu11, nvidia-cusparse-cu11, nvidia-curand-cu11, nvidia-cufft-cu11, nvidia-cuda-runtime-cu11, nvidia-cuda-nvrtc-cu11, nvidia-cuda-cupti-cu11, nvidia-cublas-cu11, Jinja2, ftfy, nvidia-cusolver-cu11, nvidia-cudnn-cu11, huggingface-hub, tokenizers, google-auth-oauthlib, diffusers, transformers, tensorboard, triton, torch, accelerate, torchvision, peft\n", 196 | " Attempting uninstall: Jinja2\n", 197 | " Found existing installation: Jinja2 3.1.3\n", 198 | " Uninstalling Jinja2-3.1.3:\n", 199 | " Successfully uninstalled Jinja2-3.1.3\n", 200 | " Attempting uninstall: huggingface-hub\n", 201 | " Found existing installation: huggingface-hub 0.20.3\n", 202 | " Uninstalling huggingface-hub-0.20.3:\n", 203 | " Successfully uninstalled huggingface-hub-0.20.3\n", 204 | " Attempting uninstall: tokenizers\n", 205 | " Found existing installation: tokenizers 0.15.1\n", 206 | " Uninstalling tokenizers-0.15.1:\n", 207 | " Successfully uninstalled tokenizers-0.15.1\n", 208 | " Attempting uninstall: google-auth-oauthlib\n", 209 | " Found existing installation: google-auth-oauthlib 1.2.0\n", 210 | " Uninstalling google-auth-oauthlib-1.2.0:\n", 211 | " Successfully uninstalled google-auth-oauthlib-1.2.0\n", 212 | " Attempting uninstall: transformers\n", 213 | " Found existing installation: transformers 4.35.2\n", 214 | " Uninstalling transformers-4.35.2:\n", 215 | " Successfully uninstalled transformers-4.35.2\n", 216 | " Attempting uninstall: tensorboard\n", 217 | " Found existing installation: tensorboard 2.15.2\n", 218 | " Uninstalling tensorboard-2.15.2:\n", 219 | " Successfully uninstalled tensorboard-2.15.2\n", 220 | " Attempting uninstall: triton\n", 221 | " Found existing installation: triton 2.1.0\n", 222 | " Uninstalling triton-2.1.0:\n", 223 | " Successfully uninstalled triton-2.1.0\n", 224 | " Attempting uninstall: torch\n", 225 | " Found existing installation: torch 2.1.0+cu121\n", 226 | " Uninstalling 
torch-2.1.0+cu121:\n", 227 | " Successfully uninstalled torch-2.1.0+cu121\n", 228 | " Attempting uninstall: torchvision\n", 229 | " Found existing installation: torchvision 0.16.0+cu121\n", 230 | " Uninstalling torchvision-0.16.0+cu121:\n", 231 | " Successfully uninstalled torchvision-0.16.0+cu121\n", 232 | "\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n", 233 | "lida 0.0.10 requires fastapi, which is not installed.\n", 234 | "lida 0.0.10 requires kaleido, which is not installed.\n", 235 | "lida 0.0.10 requires python-multipart, which is not installed.\n", 236 | "lida 0.0.10 requires uvicorn, which is not installed.\n", 237 | "tensorflow 2.15.0 requires tensorboard<2.16,>=2.15, but you have tensorboard 2.14.0 which is incompatible.\n", 238 | "torchaudio 2.1.0+cu121 requires torch==2.1.0, but you have torch 2.0.1 which is incompatible.\n", 239 | "torchdata 0.7.0 requires torch==2.1.0, but you have torch 2.0.1 which is incompatible.\n", 240 | "torchtext 0.16.0 requires torch==2.1.0, but you have torch 2.0.1 which is incompatible.\u001b[0m\u001b[31m\n", 241 | "\u001b[0mSuccessfully installed Jinja2-3.1.2 accelerate-0.23.0 diffusers-0.20.1 ftfy-6.1.1 google-auth-oauthlib-1.0.0 huggingface-hub-0.17.3 lit-17.0.6 nvidia-cublas-cu11-11.10.3.66 nvidia-cuda-cupti-cu11-11.7.101 nvidia-cuda-nvrtc-cu11-11.7.99 nvidia-cuda-runtime-cu11-11.7.99 nvidia-cudnn-cu11-8.5.0.96 nvidia-cufft-cu11-10.9.0.58 nvidia-curand-cu11-10.2.10.91 nvidia-cusolver-cu11-11.4.0.1 nvidia-cusparse-cu11-11.7.4.91 nvidia-nccl-cu11-2.14.3 nvidia-nvtx-cu11-11.7.91 peft-0.5.0 tensorboard-2.14.0 tokenizers-0.14.1 torch-2.0.1 torchvision-0.15.2 transformers-4.34.0 triton-2.0.0\n" 242 | ] 243 | } 244 | ], 245 | "source": [ 246 | "!pip install -r /content/realfill/requirements.txt" 247 | ] 248 | }, 249 | { 250 | "cell_type": "code", 251 | "execution_count": null, 252 | "metadata": { 253 | "colab": { 254 | "base_uri": "https://localhost:8080/" 255 | }, 256 | "id": "XYgiy8a9k5yA", 257 | "outputId": "bc0c6abf-a9fc-4c12-c51e-db4c4b55ab6c" 258 | }, 259 | "outputs": [ 260 | { 261 | "output_type": "execute_result", 262 | "data": { 263 | "text/plain": [ 264 | "PosixPath('/root/.cache/huggingface/accelerate/default_config.yaml')" 265 | ] 266 | }, 267 | "metadata": {}, 268 | "execution_count": 4 269 | } 270 | ], 271 | "source": [ 272 | "from accelerate.utils import write_basic_config\n", 273 | "write_basic_config()" 274 | ] 275 | }, 276 | { 277 | "cell_type": "code", 278 | "execution_count": null, 279 | "metadata": { 280 | "colab": { 281 | "base_uri": "https://localhost:8080/" 282 | }, 283 | "id": "rQexbUktpzdp", 284 | "outputId": "779ff88a-02c5-4c6f-d681-fbbcb7e7b9e1" 285 | }, 286 | "outputs": [ 287 | { 288 | "output_type": "stream", 289 | "name": "stdout", 290 | "text": [ 291 | "env: MODEL_NAME=stabilityai/stable-diffusion-2-inpainting\n", 292 | "env: TRAIN_DIR=data/flowerwoman\n", 293 | "env: OUTPUT_DIR=flowerwoman-model\n", 294 | "/usr/local/lib/python3.10/dist-packages/torchvision/datapoints/__init__.py:12: UserWarning: The torchvision.datapoints and torchvision.transforms.v2 namespaces are still Beta. While we do not expect major breaking changes, some APIs may still change according to user feedback. 
Please submit any feedback you may have in this issue: https://github.com/pytorch/vision/issues/6753, and you can also check out https://github.com/pytorch/vision/issues/7319 to learn more about the APIs that we suspect might involve future changes. You can silence this warning by calling torchvision.disable_beta_transforms_warning().\n", 295 | " warnings.warn(_BETA_TRANSFORMS_WARNING)\n", 296 | "/usr/local/lib/python3.10/dist-packages/torchvision/transforms/v2/__init__.py:54: UserWarning: The torchvision.datapoints and torchvision.transforms.v2 namespaces are still Beta. While we do not expect major breaking changes, some APIs may still change according to user feedback. Please submit any feedback you may have in this issue: https://github.com/pytorch/vision/issues/6753, and you can also check out https://github.com/pytorch/vision/issues/7319 to learn more about the APIs that we suspect might involve future changes. You can silence this warning by calling torchvision.disable_beta_transforms_warning().\n", 297 | " warnings.warn(_BETA_TRANSFORMS_WARNING)\n", 298 | "2024-02-14 14:40:15.650526: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n", 299 | "2024-02-14 14:40:15.650587: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n", 300 | "2024-02-14 14:40:15.652350: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", 301 | "2024-02-14 14:40:16.931433: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n", 302 | "02/14/2024 14:40:18 - INFO - __main__ - Distributed environment: NO\n", 303 | "Num processes: 1\n", 304 | "Process index: 0\n", 305 | "Local process index: 0\n", 306 | "Device: cuda\n", 307 | "\n", 308 | "Mixed precision type: no\n", 309 | "\n", 310 | "Downloading (…)okenizer_config.json: 100% 829/829 [00:00<00:00, 4.60MB/s]\n", 311 | "Downloading tokenizer/vocab.json: 100% 1.06M/1.06M [00:00<00:00, 15.9MB/s]\n", 312 | "Downloading tokenizer/merges.txt: 100% 525k/525k [00:00<00:00, 118MB/s]\n", 313 | "Downloading (…)cial_tokens_map.json: 100% 460/460 [00:00<00:00, 2.23MB/s]\n", 314 | "Downloading (…)cheduler_config.json: 100% 308/308 [00:00<00:00, 1.95MB/s]\n", 315 | "{'timestep_spacing', 'variance_type', 'prediction_type', 'thresholding', 'clip_sample_range', 'dynamic_thresholding_ratio', 'sample_max_value'} was not found in config. Values will be initialized to default values.\n", 316 | "Downloading (…)_encoder/config.json: 100% 638/638 [00:00<00:00, 3.90MB/s]\n", 317 | "Downloading model.safetensors: 100% 1.36G/1.36G [00:06<00:00, 202MB/s]\n", 318 | "Downloading vae/config.json: 100% 616/616 [00:00<00:00, 3.50MB/s]\n", 319 | "Downloading (…)ch_model.safetensors: 100% 335M/335M [00:01<00:00, 202MB/s]\n", 320 | "{'force_upcast', 'scaling_factor'} was not found in config. 
Values will be initialized to default values.\n", 321 | "Downloading unet/config.json: 100% 914/914 [00:00<00:00, 3.86MB/s]\n", 322 | "Downloading (…)ch_model.safetensors: 100% 3.46G/3.46G [00:18<00:00, 191MB/s]\n", 323 | "{'encoder_hid_dim', 'time_cond_proj_dim', 'resnet_out_scale_factor', 'mid_block_type', 'conv_in_kernel', 'upcast_attention', 'projection_class_embeddings_input_dim', 'addition_embed_type_num_heads', 'class_embed_type', 'time_embedding_type', 'timestep_post_act', 'transformer_layers_per_block', 'time_embedding_act_fn', 'conv_out_kernel', 'cross_attention_norm', 'attention_type', 'time_embedding_dim', 'encoder_hid_dim_type', 'resnet_time_scale_shift', 'addition_embed_type', 'only_cross_attention', 'num_attention_heads', 'resnet_skip_time_act', 'addition_time_embed_dim', 'class_embeddings_concat', 'mid_block_only_cross_attention', 'num_class_embeds'} was not found in config. Values will be initialized to default values.\n", 324 | "02/14/2024 14:40:56 - INFO - __main__ - ***** Running training *****\n", 325 | "02/14/2024 14:40:56 - INFO - __main__ - Num examples = 4\n", 326 | "02/14/2024 14:40:56 - INFO - __main__ - Num batches each epoch = 1\n", 327 | "02/14/2024 14:40:56 - INFO - __main__ - Num Epochs = 2000\n", 328 | "02/14/2024 14:40:56 - INFO - __main__ - Instantaneous batch size per device = 16\n", 329 | "02/14/2024 14:40:56 - INFO - __main__ - Total train batch size (w. parallel, distributed & accumulation) = 16\n", 330 | "02/14/2024 14:40:56 - INFO - __main__ - Gradient Accumulation steps = 1\n", 331 | "02/14/2024 14:40:56 - INFO - __main__ - Total optimization steps = 2000\n", 332 | "Steps: 5% 100/2000 [02:06<39:25, 1.24s/it, loss=0.136]02/14/2024 14:43:02 - INFO - __main__ - Running validation... \n", 333 | "Generating 4 images\n", 334 | "\n", 335 | "Downloading model_index.json: 100% 544/544 [00:00<00:00, 2.95MB/s]\n", 336 | "\n", 337 | "Fetching 9 files: 0% 0/9 [00:00\n", 748 | " image = Image.open(args.validation_image)\n", 749 | " File \"/usr/local/lib/python3.10/dist-packages/PIL/Image.py\", line 3227, in open\n", 750 | " fp = builtins.open(filename, \"rb\")\n", 751 | "FileNotFoundError: [Errno 2] No such file or directory: '$TRAIN_DIR/target/target.png'\n", 752 | "Traceback (most recent call last):\n", 753 | " File \"/usr/local/bin/accelerate\", line 8, in \n", 754 | " sys.exit(main())\n", 755 | " File \"/usr/local/lib/python3.10/dist-packages/accelerate/commands/accelerate_cli.py\", line 47, in main\n", 756 | " args.func(args)\n", 757 | " File \"/usr/local/lib/python3.10/dist-packages/accelerate/commands/launch.py\", line 986, in launch_command\n", 758 | " simple_launcher(args)\n", 759 | " File \"/usr/local/lib/python3.10/dist-packages/accelerate/commands/launch.py\", line 628, in simple_launcher\n", 760 | " raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)\n", 761 | "subprocess.CalledProcessError: Command '['/usr/bin/python3', 'infer.py', '--model_path=flowerwoman-model', '--validation_image=$TRAIN_DIR/target/target.png', '--validation_mask=$TRAIN_DIR/target/mask.png', '--output_dir=flowerwoman-results']' returned non-zero exit status 1.\n" 762 | ] 763 | } 764 | ] 765 | }, 766 | { 767 | "cell_type": "code", 768 | "source": [ 769 | "!zip -r realfill.zip \\flowerwoman-results\n" 770 | ], 771 | "metadata": { 772 | "colab": { 773 | "base_uri": "https://localhost:8080/" 774 | }, 775 | "id": "Nm40ryltdVNF", 776 | "outputId": "38321fd4-b0f5-4c7f-8ec6-b4550fd6f02f" 777 | }, 778 | "execution_count": null, 779 | "outputs": [ 780 | { 781 | 
"output_type": "stream", 782 | "name": "stdout", 783 | "text": [ 784 | "updating: flowerwoman-results/ (stored 0%)\n", 785 | "updating: flowerwoman-results/0.png (deflated 0%)\n", 786 | "updating: flowerwoman-results/2.png (deflated 0%)\n", 787 | "updating: flowerwoman-results/14.png (deflated 0%)\n", 788 | "updating: flowerwoman-results/15.png (deflated 0%)\n", 789 | "updating: flowerwoman-results/12.png (deflated 0%)\n", 790 | "updating: flowerwoman-results/4.png (deflated 0%)\n", 791 | "updating: flowerwoman-results/10.png (deflated 0%)\n", 792 | "updating: flowerwoman-results/11.png (deflated 0%)\n", 793 | "updating: flowerwoman-results/8.png (deflated 0%)\n", 794 | "updating: flowerwoman-results/3.png (deflated 0%)\n", 795 | "updating: flowerwoman-results/6.png (deflated 0%)\n", 796 | "updating: flowerwoman-results/7.png (deflated 0%)\n", 797 | "updating: flowerwoman-results/9.png (deflated 0%)\n", 798 | "updating: flowerwoman-results/5.png (deflated 0%)\n", 799 | "updating: flowerwoman-results/13.png (deflated 0%)\n", 800 | "updating: flowerwoman-results/1.png (deflated 0%)\n" 801 | ] 802 | } 803 | ] 804 | } 805 | ], 806 | "metadata": { 807 | "accelerator": "GPU", 808 | "colab": { 809 | "gpuType": "A100", 810 | "machine_shape": "hm", 811 | "provenance": [] 812 | }, 813 | "kernelspec": { 814 | "display_name": "Python 3", 815 | "name": "python3" 816 | }, 817 | "language_info": { 818 | "name": "python" 819 | } 820 | }, 821 | "nbformat": 4, 822 | "nbformat_minor": 0 823 | } -------------------------------------------------------------------------------- /train_realfill.py: -------------------------------------------------------------------------------- 1 | import random 2 | import argparse 3 | import copy 4 | import itertools 5 | import logging 6 | import math 7 | import os 8 | import shutil 9 | from pathlib import Path 10 | 11 | import numpy as np 12 | import torch 13 | import torch.nn.functional as F 14 | import torch.utils.checkpoint 15 | import transformers 16 | from accelerate import Accelerator 17 | from accelerate.logging import get_logger 18 | from accelerate.utils import set_seed 19 | from huggingface_hub import create_repo, upload_folder 20 | from packaging import version 21 | from PIL import Image 22 | from PIL.ImageOps import exif_transpose 23 | from torch.utils.data import Dataset 24 | import torchvision.transforms.v2 as transforms_v2 25 | from tqdm.auto import tqdm 26 | from transformers import AutoTokenizer, CLIPTextModel 27 | 28 | import diffusers 29 | from diffusers import ( 30 | AutoencoderKL, 31 | DDPMScheduler, 32 | StableDiffusionInpaintPipeline, 33 | UNet2DConditionModel, 34 | ) 35 | from diffusers.optimization import get_scheduler 36 | from diffusers.utils import check_min_version, is_wandb_available 37 | from diffusers.utils.import_utils import is_xformers_available 38 | 39 | from peft import PeftModel, LoraConfig, get_peft_model 40 | 41 | # Will error if the minimal version of diffusers is not installed. Remove at your own risks. 
42 | check_min_version("0.20.1") 43 | 44 | logger = get_logger(__name__) 45 | 46 | def make_mask(images, resolution, times=30): 47 | mask, times = torch.ones_like(images[0:1, :, :]), np.random.randint(1, times) 48 | min_size, max_size, margin = np.array([0.03, 0.25, 0.01]) * resolution 49 | max_size = min(max_size, resolution - margin * 2) 50 | 51 | for _ in range(times): 52 | width = np.random.randint(int(min_size), int(max_size)) 53 | height = np.random.randint(int(min_size), int(max_size)) 54 | 55 | x_start = np.random.randint(int(margin), resolution - int(margin) - width + 1) 56 | y_start = np.random.randint(int(margin), resolution - int(margin) - height + 1) 57 | mask[:, y_start:y_start + height, x_start:x_start + width] = 0 58 | 59 | mask = 1 - mask if random.random() < 0.5 else mask 60 | return mask 61 | 62 | def save_model_card( 63 | repo_id: str, 64 | images=None, 65 | base_model=str, 66 | repo_folder=None, 67 | ): 68 | img_str = "" 69 | for i, image in enumerate(images): 70 | image.save(os.path.join(repo_folder, f"image_{i}.png")) 71 | img_str += f"![img_{i}](./image_{i}.png)\n" 72 | 73 | yaml = f""" 74 | --- 75 | license: creativeml-openrail-m 76 | base_model: {base_model} 77 | prompt: "a photo of sks" 78 | tags: 79 | - stable-diffusion-inpainting 80 | - stable-diffusion-inpainting-diffusers 81 | - text-to-image 82 | - diffusers 83 | - realfill 84 | inference: true 85 | --- 86 | """ 87 | model_card = f""" 88 | # RealFill - {repo_id} 89 | 90 | This is a realfill model derived from {base_model}. The weights were trained using [RealFill](https://realfill.github.io/). 91 | You can find some example images in the following. \n 92 | {img_str} 93 | """ 94 | with open(os.path.join(repo_folder, "README.md"), "w") as f: 95 | f.write(yaml + model_card) 96 | 97 | @torch.no_grad() 98 | def log_validation( 99 | text_encoder, 100 | tokenizer, 101 | unet, 102 | args, 103 | accelerator, 104 | weight_dtype, 105 | epoch, 106 | ): 107 | logger.info( 108 | f"Running validation... 
\nGenerating {args.num_validation_images} images" 109 | ) 110 | 111 | # create pipeline (note: unet and vae are loaded again in float32) 112 | pipeline = StableDiffusionInpaintPipeline.from_pretrained( 113 | args.pretrained_model_name_or_path, 114 | tokenizer=tokenizer, 115 | revision=args.revision, 116 | ) 117 | 118 | # set `keep_fp32_wrapper` to True because we do not want to remove 119 | # mixed precision hooks while we are still training 120 | pipeline.unet = accelerator.unwrap_model(unet, keep_fp32_wrapper=True) 121 | pipeline.text_encoder = accelerator.unwrap_model(text_encoder, keep_fp32_wrapper=True) 122 | pipeline.scheduler = DDPMScheduler.from_config(pipeline.scheduler.config) 123 | 124 | pipeline = pipeline.to(accelerator.device) 125 | pipeline.set_progress_bar_config(disable=True) 126 | 127 | # run inference 128 | generator = None if args.seed is None else torch.Generator(device=accelerator.device).manual_seed(args.seed) 129 | 130 | target_dir = Path(args.train_data_dir) / "target" 131 | target_image, target_mask = target_dir / "target.png", target_dir / "mask.png" 132 | image, mask_image = Image.open(target_image), Image.open(target_mask) 133 | 134 | if image.mode != "RGB": 135 | image = image.convert("RGB") 136 | 137 | images = [] 138 | for _ in range(args.num_validation_images): 139 | image = pipeline( 140 | prompt="a photo of sks", image=image, mask_image=mask_image, 141 | num_inference_steps=200, guidance_scale=1, generator=generator 142 | ).images[0] 143 | images.append(image) 144 | 145 | for tracker in accelerator.trackers: 146 | if tracker.name == "tensorboard": 147 | np_images = np.stack([np.asarray(img) for img in images]) 148 | tracker.writer.add_images(f"validation", np_images, epoch, dataformats="NHWC") 149 | if tracker.name == "wandb": 150 | tracker.log( 151 | { 152 | f"validation": [ 153 | wandb.Image(image, caption=str(i)) for i, image in enumerate(images) 154 | ] 155 | } 156 | ) 157 | 158 | del pipeline 159 | torch.cuda.empty_cache() 160 | 161 | return images 162 | 163 | def parse_args(input_args=None): 164 | parser = argparse.ArgumentParser(description="Simple example of a training script.") 165 | parser.add_argument( 166 | "--pretrained_model_name_or_path", 167 | type=str, 168 | default=None, 169 | required=True, 170 | help="Path to pretrained model or model identifier from huggingface.co/models.", 171 | ) 172 | parser.add_argument( 173 | "--revision", 174 | type=str, 175 | default=None, 176 | required=False, 177 | help="Revision of pretrained model identifier from huggingface.co/models.", 178 | ) 179 | parser.add_argument( 180 | "--tokenizer_name", 181 | type=str, 182 | default=None, 183 | help="Pretrained tokenizer name or path if not the same as model_name", 184 | ) 185 | parser.add_argument( 186 | "--train_data_dir", 187 | type=str, 188 | default=None, 189 | required=True, 190 | help="A folder containing the training data of images.", 191 | ) 192 | parser.add_argument( 193 | "--num_validation_images", 194 | type=int, 195 | default=4, 196 | help="Number of images that should be generated during validation with `validation_conditioning`.", 197 | ) 198 | parser.add_argument( 199 | "--validation_steps", 200 | type=int, 201 | default=100, 202 | help=( 203 | "Run realfill validation every X steps. RealFill validation consists of running the conditioning" 204 | " `args.validation_conditioning` multiple times: `args.num_validation_images`." 
205 | ), 206 | ) 207 | parser.add_argument( 208 | "--output_dir", 209 | type=str, 210 | default="realfill-model", 211 | help="The output directory where the model predictions and checkpoints will be written.", 212 | ) 213 | parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") 214 | parser.add_argument( 215 | "--resolution", 216 | type=int, 217 | default=512, 218 | help=( 219 | "The resolution for input images, all the images in the train/validation dataset will be resized to this" 220 | " resolution" 221 | ), 222 | ) 223 | parser.add_argument( 224 | "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." 225 | ) 226 | parser.add_argument("--num_train_epochs", type=int, default=1) 227 | parser.add_argument( 228 | "--max_train_steps", 229 | type=int, 230 | default=None, 231 | help="Total number of training steps to perform. If provided, overrides num_train_epochs.", 232 | ) 233 | parser.add_argument( 234 | "--checkpointing_steps", 235 | type=int, 236 | default=500, 237 | help=( 238 | "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final" 239 | " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming" 240 | " training using `--resume_from_checkpoint`." 241 | ), 242 | ) 243 | parser.add_argument( 244 | "--checkpoints_total_limit", 245 | type=int, 246 | default=None, 247 | help=("Max number of checkpoints to store."), 248 | ) 249 | parser.add_argument( 250 | "--resume_from_checkpoint", 251 | type=str, 252 | default=None, 253 | help=( 254 | "Whether training should be resumed from a previous checkpoint. Use a path saved by" 255 | ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' 256 | ), 257 | ) 258 | parser.add_argument( 259 | "--gradient_accumulation_steps", 260 | type=int, 261 | default=1, 262 | help="Number of updates steps to accumulate before performing a backward/update pass.", 263 | ) 264 | parser.add_argument( 265 | "--gradient_checkpointing", 266 | action="store_true", 267 | help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", 268 | ) 269 | parser.add_argument( 270 | "--unet_learning_rate", 271 | type=float, 272 | default=2e-4, 273 | help="Learning rate to use for unet.", 274 | ) 275 | parser.add_argument( 276 | "--text_encoder_learning_rate", 277 | type=float, 278 | default=4e-5, 279 | help="Learning rate to use for text encoder.", 280 | ) 281 | parser.add_argument( 282 | "--scale_lr", 283 | action="store_true", 284 | default=False, 285 | help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", 286 | ) 287 | parser.add_argument( 288 | "--lr_scheduler", 289 | type=str, 290 | default="constant", 291 | help=( 292 | 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' 293 | ' "constant", "constant_with_warmup"]' 294 | ), 295 | ) 296 | parser.add_argument( 297 | "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." 
298 | ) 299 | parser.add_argument( 300 | "--lr_num_cycles", 301 | type=int, 302 | default=1, 303 | help="Number of hard resets of the lr in cosine_with_restarts scheduler.", 304 | ) 305 | parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.") 306 | parser.add_argument( 307 | "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." 308 | ) 309 | parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") 310 | parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") 311 | parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") 312 | parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") 313 | parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") 314 | parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") 315 | parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") 316 | parser.add_argument( 317 | "--hub_model_id", 318 | type=str, 319 | default=None, 320 | help="The name of the repository to keep in sync with the local `output_dir`.", 321 | ) 322 | parser.add_argument( 323 | "--logging_dir", 324 | type=str, 325 | default="logs", 326 | help=( 327 | "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" 328 | " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." 329 | ), 330 | ) 331 | parser.add_argument( 332 | "--allow_tf32", 333 | action="store_true", 334 | help=( 335 | "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" 336 | " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" 337 | ), 338 | ) 339 | parser.add_argument( 340 | "--report_to", 341 | type=str, 342 | default="tensorboard", 343 | help=( 344 | 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' 345 | ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' 346 | ), 347 | ) 348 | parser.add_argument( 349 | "--wandb_key", 350 | type=str, 351 | default=None, 352 | help=("If report to option is set to wandb, api-key for wandb used for login to wandb "), 353 | ) 354 | parser.add_argument( 355 | "--wandb_project_name", 356 | type=str, 357 | default=None, 358 | help=("If report to option is set to wandb, project name in wandb for log tracking "), 359 | ) 360 | parser.add_argument( 361 | "--mixed_precision", 362 | type=str, 363 | default=None, 364 | choices=["no", "fp16", "bf16"], 365 | help=( 366 | "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" 367 | " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" 368 | " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." 369 | ), 370 | ) 371 | parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") 372 | parser.add_argument( 373 | "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." 
374 | ) 375 | parser.add_argument( 376 | "--set_grads_to_none", 377 | action="store_true", 378 | help=( 379 | "Save more memory by using setting grads to None instead of zero. Be aware, that this changes certain" 380 | " behaviors, so disable this argument if it causes any problems. More info:" 381 | " https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html" 382 | ), 383 | ) 384 | parser.add_argument( 385 | "--lora_rank", 386 | type=int, 387 | default=16, 388 | help=("The dimension of the LoRA update matrices."), 389 | ) 390 | parser.add_argument( 391 | "--lora_alpha", 392 | type=int, 393 | default=27, 394 | help=("The alpha constant of the LoRA update matrices."), 395 | ) 396 | parser.add_argument( 397 | "--lora_dropout", 398 | type=float, 399 | default=0.0, 400 | help="The dropout rate of the LoRA update matrices.", 401 | ) 402 | parser.add_argument( 403 | "--lora_bias", 404 | type=str, 405 | default="none", 406 | help="The bias type of the Lora update matrices. Must be 'none', 'all' or 'lora_only'.", 407 | ) 408 | 409 | if input_args is not None: 410 | args = parser.parse_args(input_args) 411 | else: 412 | args = parser.parse_args() 413 | 414 | env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) 415 | if env_local_rank != -1 and env_local_rank != args.local_rank: 416 | args.local_rank = env_local_rank 417 | 418 | return args 419 | 420 | class RealFillDataset(Dataset): 421 | """ 422 | A dataset to prepare the training and conditioning images and 423 | the masks with the dummy prompt for fine-tuning the model. 424 | It pre-processes the images, masks and tokenizes the prompts. 425 | """ 426 | 427 | def __init__( 428 | self, 429 | train_data_root, 430 | tokenizer, 431 | size=512, 432 | ): 433 | self.size = size 434 | self.tokenizer = tokenizer 435 | 436 | self.ref_data_root = Path(train_data_root) / "ref" 437 | self.target_image = Path(train_data_root) / "target" / "target.png" 438 | self.target_mask = Path(train_data_root) / "target" / "mask.png" 439 | if not (self.ref_data_root.exists() and self.target_image.exists() and self.target_mask.exists()): 440 | raise ValueError("Train images root doesn't exist.") 441 | 442 | self.train_images_path = list(self.ref_data_root.iterdir()) + [self.target_image] 443 | self.num_train_images = len(self.train_images_path) 444 | self.train_prompt = "a photo of sks" 445 | 446 | self.transform = transforms_v2.Compose( 447 | [ 448 | transforms_v2.RandomResize(size, int(1.125 * size)), 449 | transforms_v2.RandomCrop(size), 450 | transforms_v2.ToImageTensor(), 451 | transforms_v2.ConvertImageDtype(), 452 | transforms_v2.Normalize([0.5], [0.5]), 453 | ] 454 | ) 455 | 456 | def __len__(self): 457 | return self.num_train_images 458 | 459 | def __getitem__(self, index): 460 | example = {} 461 | 462 | image = Image.open(self.train_images_path[index]) 463 | image = exif_transpose(image) 464 | 465 | if not image.mode == "RGB": 466 | image = image.convert("RGB") 467 | 468 | if index < len(self) - 1: 469 | weighting = Image.new("L", image.size) 470 | else: 471 | weighting = Image.open(self.target_mask) 472 | weighting = exif_transpose(weighting) 473 | 474 | image, weighting = self.transform(image, weighting) # The range of weighting becomes [-1, 1] after self.transform 475 | example["images"], example["weightings"] = image, weighting[0:1] < 0 476 | 477 | if index == len(self) - 1: 478 | example["masks"] = 1 - (example["weightings"]).float() 479 | elif random.random() < 0.1: 480 | example["masks"] = torch.ones_like(example["images"][0:1]) 481 | 
else: 482 | example["masks"] = make_mask(example["images"], self.size) 483 | 484 | example["conditioning_images"] = example["images"] * (example["masks"] < 0.5) 485 | 486 | train_prompt = "" if random.random() < 0.1 else self.train_prompt 487 | example["prompt_ids"] = self.tokenizer( 488 | train_prompt, 489 | truncation=True, 490 | padding="max_length", 491 | max_length=self.tokenizer.model_max_length, 492 | return_tensors="pt", 493 | ).input_ids 494 | 495 | return example 496 | 497 | def collate_fn(examples): 498 | input_ids = [example["prompt_ids"] for example in examples] 499 | images = [example["images"] for example in examples] 500 | 501 | masks = [example["masks"] for example in examples] 502 | weightings = [example["weightings"] for example in examples] 503 | conditioning_images = [example["conditioning_images"] for example in examples] 504 | 505 | images = torch.stack(images) 506 | images = images.to(memory_format=torch.contiguous_format).float() 507 | 508 | masks = torch.stack(masks) 509 | masks = masks.to(memory_format=torch.contiguous_format).float() 510 | 511 | weightings = torch.stack(weightings) 512 | weightings = weightings.to(memory_format=torch.contiguous_format).float() 513 | 514 | conditioning_images = torch.stack(conditioning_images) 515 | conditioning_images = conditioning_images.to(memory_format=torch.contiguous_format).float() 516 | 517 | input_ids = torch.cat(input_ids, dim=0) 518 | 519 | batch = { 520 | "input_ids": input_ids, 521 | "images": images, 522 | "masks": masks, 523 | "weightings": weightings, 524 | "conditioning_images": conditioning_images, 525 | } 526 | return batch 527 | 528 | def main(args): 529 | logging_dir = Path(args.output_dir, args.logging_dir) 530 | 531 | accelerator = Accelerator( 532 | gradient_accumulation_steps=args.gradient_accumulation_steps, 533 | mixed_precision=args.mixed_precision, 534 | log_with=args.report_to, 535 | project_dir=logging_dir, 536 | ) 537 | 538 | if args.report_to == "wandb": 539 | if not is_wandb_available(): 540 | raise ImportError("Make sure to install wandb if you want to use it for logging during training.") 541 | import wandb 542 | 543 | wandb.login(key=args.wandb_key) 544 | wandb.init(project=args.wandb_project_name) 545 | 546 | # Make one log on every process with the configuration for debugging. 547 | logging.basicConfig( 548 | format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", 549 | datefmt="%m/%d/%Y %H:%M:%S", 550 | level=logging.INFO, 551 | ) 552 | logger.info(accelerator.state, main_process_only=False) 553 | if accelerator.is_local_main_process: 554 | transformers.utils.logging.set_verbosity_warning() 555 | diffusers.utils.logging.set_verbosity_info() 556 | else: 557 | transformers.utils.logging.set_verbosity_error() 558 | diffusers.utils.logging.set_verbosity_error() 559 | 560 | # If passed along, set the training seed now. 
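    # set_seed from accelerate seeds Python's `random`, NumPy and torch in one call, so the
    # random crops in RealFillDataset and the synthetic masks produced by make_mask become
    # reproducible across runs whenever --seed is supplied.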
561 | if args.seed is not None: 562 | set_seed(args.seed) 563 | 564 | # Handle the repository creation 565 | if accelerator.is_main_process: 566 | if args.output_dir is not None: 567 | os.makedirs(args.output_dir, exist_ok=True) 568 | 569 | if args.push_to_hub: 570 | repo_id = create_repo( 571 | repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token 572 | ).repo_id 573 | 574 | # Load the tokenizer 575 | if args.tokenizer_name: 576 | tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False) 577 | elif args.pretrained_model_name_or_path: 578 | tokenizer = AutoTokenizer.from_pretrained( 579 | args.pretrained_model_name_or_path, 580 | subfolder="tokenizer", 581 | revision=args.revision, 582 | use_fast=False, 583 | ) 584 | 585 | # Load scheduler and models 586 | noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") 587 | text_encoder = CLIPTextModel.from_pretrained( 588 | args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision 589 | ) 590 | vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision) 591 | unet = UNet2DConditionModel.from_pretrained( 592 | args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision 593 | ) 594 | 595 | unet_config = LoraConfig( 596 | r=args.lora_rank, 597 | lora_alpha=args.lora_alpha, 598 | target_modules=["to_k", "to_q", "to_v", "to_out.0"], 599 | lora_dropout=args.lora_dropout, 600 | bias=args.lora_bias, 601 | ) 602 | unet = get_peft_model(unet, unet_config) 603 | 604 | text_encoder_config = LoraConfig( 605 | r=args.lora_rank, 606 | lora_alpha=args.lora_alpha, 607 | target_modules=["k_proj", "q_proj", "v_proj", "out_proj"], 608 | lora_dropout=args.lora_dropout, 609 | bias=args.lora_bias, 610 | ) 611 | text_encoder = get_peft_model(text_encoder, text_encoder_config) 612 | 613 | vae.requires_grad_(False) 614 | 615 | if args.enable_xformers_memory_efficient_attention: 616 | if is_xformers_available(): 617 | import xformers 618 | 619 | xformers_version = version.parse(xformers.__version__) 620 | if xformers_version == version.parse("0.0.16"): 621 | logger.warn( 622 | "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." 623 | ) 624 | unet.enable_xformers_memory_efficient_attention() 625 | else: 626 | raise ValueError("xformers is not available. 
Make sure it is installed correctly") 627 | 628 | if args.gradient_checkpointing: 629 | unet.enable_gradient_checkpointing() 630 | text_encoder.gradient_checkpointing_enable() 631 | 632 | # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format 633 | def save_model_hook(models, weights, output_dir): 634 | if accelerator.is_main_process: 635 | for model in models: 636 | sub_dir = "unet" if isinstance(model.base_model.model, type(accelerator.unwrap_model(unet).base_model.model)) else "text_encoder" 637 | model.save_pretrained(os.path.join(output_dir, sub_dir)) 638 | 639 | # make sure to pop weight so that corresponding model is not saved again 640 | weights.pop() 641 | 642 | def load_model_hook(models, input_dir): 643 | while len(models) > 0: 644 | # pop models so that they are not loaded again 645 | model = models.pop() 646 | 647 | sub_dir = "unet" if isinstance(model.base_model.model, type(accelerator.unwrap_model(unet).base_model.model)) else "text_encoder" 648 | model_cls = UNet2DConditionModel if isinstance(model.base_model.model, type(accelerator.unwrap_model(unet).base_model.model)) else CLIPTextModel 649 | 650 | load_model = model_cls.from_pretrained(args.pretrained_model_name_or_path, subfolder=sub_dir) 651 | load_model = PeftModel.from_pretrained(load_model, input_dir, subfolder=sub_dir) 652 | 653 | model.load_state_dict(load_model.state_dict()) 654 | del load_model 655 | 656 | accelerator.register_save_state_pre_hook(save_model_hook) 657 | accelerator.register_load_state_pre_hook(load_model_hook) 658 | 659 | # Enable TF32 for faster training on Ampere GPUs, 660 | # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices 661 | if args.allow_tf32: 662 | torch.backends.cuda.matmul.allow_tf32 = True 663 | 664 | if args.scale_lr: 665 | args.unet_learning_rate = ( 666 | args.unet_learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes 667 | ) 668 | 669 | args.text_encoder_learning_rate = ( 670 | args.text_encoder_learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes 671 | ) 672 | 673 | # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs 674 | if args.use_8bit_adam: 675 | try: 676 | import bitsandbytes as bnb 677 | except ImportError: 678 | raise ImportError( 679 | "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." 680 | ) 681 | 682 | optimizer_class = bnb.optim.AdamW8bit 683 | else: 684 | optimizer_class = torch.optim.AdamW 685 | 686 | # Optimizer creation 687 | optimizer = optimizer_class( 688 | [ 689 | {"params": unet.parameters(), "lr": args.unet_learning_rate}, 690 | {"params": text_encoder.parameters(), "lr": args.text_encoder_learning_rate} 691 | ], 692 | betas=(args.adam_beta1, args.adam_beta2), 693 | weight_decay=args.adam_weight_decay, 694 | eps=args.adam_epsilon, 695 | ) 696 | 697 | # Dataset and DataLoaders creation: 698 | train_dataset = RealFillDataset( 699 | train_data_root=args.train_data_dir, 700 | tokenizer=tokenizer, 701 | size=args.resolution, 702 | ) 703 | 704 | train_dataloader = torch.utils.data.DataLoader( 705 | train_dataset, 706 | batch_size=args.train_batch_size, 707 | shuffle=True, 708 | collate_fn=collate_fn, 709 | num_workers=1, 710 | ) 711 | 712 | # Scheduler and math around the number of training steps. 
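    # Worked example, taken from the run logged in train_realfill.ipynb: 4 training images
    # with train_batch_size=16 give len(train_dataloader) == 1, so with
    # gradient_accumulation_steps=1 there is ceil(1 / 1) == 1 update step per epoch and the
    # 2000 optimization steps reported there amount to 2000 epochs.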
713 | overrode_max_train_steps = False 714 | num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) 715 | if args.max_train_steps is None: 716 | args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch 717 | overrode_max_train_steps = True 718 | 719 | lr_scheduler = get_scheduler( 720 | args.lr_scheduler, 721 | optimizer=optimizer, 722 | num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, 723 | num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, 724 | num_cycles=args.lr_num_cycles, 725 | power=args.lr_power, 726 | ) 727 | 728 | # Prepare everything with our `accelerator`. 729 | unet, text_encoder, optimizer, train_dataloader = accelerator.prepare( 730 | unet, text_encoder, optimizer, train_dataloader 731 | ) 732 | 733 | # For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision 734 | # as these weights are only used for inference, keeping weights in full precision is not required. 735 | weight_dtype = torch.float32 736 | if accelerator.mixed_precision == "fp16": 737 | weight_dtype = torch.float16 738 | elif accelerator.mixed_precision == "bf16": 739 | weight_dtype = torch.bfloat16 740 | 741 | # Move vae to device and cast to weight_dtype 742 | vae.to(accelerator.device, dtype=weight_dtype) 743 | 744 | # We need to recalculate our total training steps as the size of the training dataloader may have changed. 745 | num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) 746 | if overrode_max_train_steps: 747 | args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch 748 | # Afterwards we recalculate our number of training epochs 749 | args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) 750 | 751 | # We need to initialize the trackers we use, and also store our configuration. 752 | # The trackers initialize automatically on the main process. 753 | if accelerator.is_main_process: 754 | tracker_config = vars(copy.deepcopy(args)) 755 | accelerator.init_trackers("realfill", config=tracker_config) 756 | 757 | # Train! 758 | total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps 759 | 760 | logger.info("***** Running training *****") 761 | logger.info(f" Num examples = {len(train_dataset)}") 762 | logger.info(f" Num batches each epoch = {len(train_dataloader)}") 763 | logger.info(f" Num Epochs = {args.num_train_epochs}") 764 | logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") 765 | logger.info(f" Total train batch size (w.
parallel, distributed & accumulation) = {total_batch_size}") 766 | logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") 767 | logger.info(f" Total optimization steps = {args.max_train_steps}") 768 | global_step = 0 769 | first_epoch = 0 770 | 771 | # Potentially load in the weights and states from a previous save 772 | if args.resume_from_checkpoint: 773 | if args.resume_from_checkpoint != "latest": 774 | path = os.path.basename(args.resume_from_checkpoint) 775 | else: 776 | # Get the mos recent checkpoint 777 | dirs = os.listdir(args.output_dir) 778 | dirs = [d for d in dirs if d.startswith("checkpoint")] 779 | dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) 780 | path = dirs[-1] if len(dirs) > 0 else None 781 | 782 | if path is None: 783 | accelerator.print( 784 | f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." 785 | ) 786 | args.resume_from_checkpoint = None 787 | initial_global_step = 0 788 | else: 789 | accelerator.print(f"Resuming from checkpoint {path}") 790 | accelerator.load_state(os.path.join(args.output_dir, path)) 791 | global_step = int(path.split("-")[1]) 792 | 793 | initial_global_step = global_step 794 | first_epoch = global_step // num_update_steps_per_epoch 795 | else: 796 | initial_global_step = 0 797 | 798 | progress_bar = tqdm( 799 | range(0, args.max_train_steps), 800 | initial=initial_global_step, 801 | desc="Steps", 802 | # Only show the progress bar once on each machine. 803 | disable=not accelerator.is_local_main_process, 804 | ) 805 | 806 | for epoch in range(first_epoch, args.num_train_epochs): 807 | unet.train() 808 | text_encoder.train() 809 | 810 | for step, batch in enumerate(train_dataloader): 811 | with accelerator.accumulate(unet, text_encoder): 812 | # Convert images to latent space 813 | latents = vae.encode(batch["images"].to(dtype=weight_dtype)).latent_dist.sample() 814 | latents = latents * 0.18215 815 | 816 | # Convert masked images to latent space 817 | conditionings = vae.encode(batch["conditioning_images"].to(dtype=weight_dtype)).latent_dist.sample() 818 | conditionings = conditionings * 0.18215 819 | 820 | # Downsample mask and weighting so that they match with the latents 821 | masks, size = batch["masks"].to(dtype=weight_dtype), latents.shape[2:] 822 | masks = F.interpolate(masks, size=size) 823 | 824 | weightings = batch["weightings"].to(dtype=weight_dtype) 825 | weightings = F.interpolate(weightings, size=size) 826 | 827 | # Sample noise that we'll add to the latents 828 | noise = torch.randn_like(latents) 829 | bsz = latents.shape[0] 830 | 831 | # Sample a random timestep for each image 832 | timesteps = torch.randint( 833 | 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device 834 | ) 835 | timesteps = timesteps.long() 836 | 837 | # Add noise to the latents according to the noise magnitude at each timestep 838 | # (this is the forward diffusion process) 839 | noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) 840 | 841 | # Concatenate noisy latents, masks and conditionings to get inputs to unet 842 | inputs = torch.cat([noisy_latents, masks, conditionings], dim=1) 843 | 844 | # Get the text embedding for conditioning 845 | encoder_hidden_states = text_encoder(batch["input_ids"])[0] 846 | 847 | # Predict the noise residual 848 | model_pred = unet(inputs, timesteps, encoder_hidden_states).sample 849 | 850 | # Compute the diffusion loss 851 | assert noise_scheduler.config.prediction_type == "epsilon" 852 | loss = 
(weightings * F.mse_loss(model_pred.float(), noise.float(), reduction="none")).mean() 853 | 854 | # Backpropagate 855 | accelerator.backward(loss) 856 | if accelerator.sync_gradients: 857 | params_to_clip = itertools.chain( 858 | unet.parameters(), text_encoder.parameters() 859 | ) 860 | accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) 861 | 862 | optimizer.step() 863 | lr_scheduler.step() 864 | optimizer.zero_grad(set_to_none=args.set_grads_to_none) 865 | 866 | # Checks if the accelerator has performed an optimization step behind the scenes 867 | if accelerator.sync_gradients: 868 | progress_bar.update(1) 869 | if args.report_to == "wandb": 870 | accelerator.print(progress_bar) 871 | global_step += 1 872 | 873 | if accelerator.is_main_process: 874 | if global_step % args.checkpointing_steps == 0: 875 | # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` 876 | if args.checkpoints_total_limit is not None: 877 | checkpoints = os.listdir(args.output_dir) 878 | checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] 879 | checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) 880 | 881 | # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints 882 | if len(checkpoints) >= args.checkpoints_total_limit: 883 | num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 884 | removing_checkpoints = checkpoints[0:num_to_remove] 885 | 886 | logger.info( 887 | f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" 888 | ) 889 | logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") 890 | 891 | for removing_checkpoint in removing_checkpoints: 892 | removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) 893 | shutil.rmtree(removing_checkpoint) 894 | 895 | save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") 896 | accelerator.save_state(save_path) 897 | logger.info(f"Saved state to {save_path}") 898 | 899 | if global_step % args.validation_steps == 0: 900 | unet.eval() 901 | text_encoder.eval() 902 | 903 | log_validation( 904 | text_encoder, 905 | tokenizer, 906 | unet, 907 | args, 908 | accelerator, 909 | weight_dtype, 910 | global_step, 911 | ) 912 | 913 | logs = {"loss": loss.detach().item()} 914 | progress_bar.set_postfix(**logs) 915 | accelerator.log(logs, step=global_step) 916 | 917 | if global_step >= args.max_train_steps: 918 | break 919 | 920 | # Save the lora layers 921 | accelerator.wait_for_everyone() 922 | if accelerator.is_main_process: 923 | pipeline = StableDiffusionInpaintPipeline.from_pretrained( 924 | args.pretrained_model_name_or_path, 925 | unet=accelerator.unwrap_model(unet, keep_fp32_wrapper=True).merge_and_unload(), 926 | text_encoder=accelerator.unwrap_model(text_encoder, keep_fp32_wrapper=True).merge_and_unload(), 927 | revision=args.revision, 928 | ) 929 | 930 | pipeline.save_pretrained(args.output_dir) 931 | 932 | # Final inference 933 | images = log_validation( 934 | text_encoder, 935 | tokenizer, 936 | unet, 937 | args, 938 | accelerator, 939 | weight_dtype, 940 | global_step, 941 | ) 942 | 943 | if args.push_to_hub: 944 | save_model_card( 945 | repo_id, 946 | images=images, 947 | base_model=args.pretrained_model_name_or_path, 948 | repo_folder=args.output_dir, 949 | ) 950 | upload_folder( 951 | repo_id=repo_id, 952 | folder_path=args.output_dir, 953 | commit_message="End of training", 954 | ignore_patterns=["step_*", "epoch_*"], 955 | ) 956 | 
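    # The merged pipeline saved to args.output_dir above can be reloaded for inference with
    # the standard inpainting call, roughly what infer.py is invoked to do in
    # train_realfill.ipynb; a minimal sketch (paths and dtype are illustrative, not the exact
    # infer.py interface):
    #
    #   pipe = StableDiffusionInpaintPipeline.from_pretrained(args.output_dir, torch_dtype=torch.float16).to("cuda")
    #   result = pipe(
    #       prompt="a photo of sks",
    #       image=Image.open("<train_data_dir>/target/target.png").convert("RGB"),
    #       mask_image=Image.open("<train_data_dir>/target/mask.png"),
    #       num_inference_steps=200, guidance_scale=1,
    #   ).images[0]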
957 | accelerator.end_training() 958 | 959 | if __name__ == "__main__": 960 | args = parse_args() 961 | main(args) 962 | --------------------------------------------------------------------------------