├── validation
│   ├── __init__.py
│   ├── requirements.txt
│   ├── data_utils.py
│   ├── README.md
│   └── compare_models.py
├── data_preparation
│   ├── __init__.py
│   ├── requirements.txt
│   ├── model_utils.py
│   ├── README.md
│   ├── image_utils.py
│   ├── export_to_hub.py
│   ├── generate_dataset.py
│   └── instructions.txt
├── Makefile
├── requirements.txt
├── LICENSE
├── README.md
├── train_instruct_pix2pix.py
└── finetune_instruct_pix2pix.py
/validation/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/data_preparation/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/data_preparation/requirements.txt:
--------------------------------------------------------------------------------
1 | tensorflow
2 | tensorflow_datasets==4.6.0
3 | datasets
4 | huggingface_hub
5 | numpy
6 | Pillow
7 | opencv-python
8 | protobuf==3.20.*
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | check_dirs := .
2 |
3 | quality:
4 | black --check $(check_dirs)
5 | ruff $(check_dirs)
6 |
7 | style:
8 | black $(check_dirs)
9 | ruff $(check_dirs) --fix
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | torchvision
2 | accelerate
3 | diffusers
4 | transformers
5 | numpy
6 | datasets
7 | wandb
8 | black~=23.1
9 | isort>=5.5.4
10 | ruff>=0.0.241,<=0.0.259
--------------------------------------------------------------------------------
/validation/requirements.txt:
--------------------------------------------------------------------------------
1 | tensorflow
2 | tensorflow_datasets==4.6.0
3 | datasets
4 | huggingface_hub
5 | numpy
6 | Pillow
7 | opencv-python
8 | torch==1.13.1
9 | torchvision==0.14.1
--------------------------------------------------------------------------------
/validation/data_utils.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | import tensorflow as tf
18 | import tensorflow_datasets as tfds
19 |
20 | tf.keras.utils.set_random_seed(0)
21 |
22 |
23 | def load_dataset(dataset_id: str, max_num_samples: int) -> tf.data.Dataset:
24 | dataset = tfds.load(dataset_id, split="validation")
25 | dataset = dataset.shuffle(max_num_samples if max_num_samples is not None else 128)
26 | if max_num_samples is not None:
27 | print(f"Dataset will be restricted to {max_num_samples} samples.")
28 | dataset = dataset.take(max_num_samples)
29 | return dataset
30 |
--------------------------------------------------------------------------------
/data_preparation/model_utils.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | import os
18 | import sys
19 |
20 | SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
21 | sys.path.append(os.path.dirname(SCRIPT_DIR))
22 |
23 | from typing import Callable
24 |
25 | import numpy as np
26 | import tensorflow as tf
27 | from huggingface_hub import snapshot_download
28 | from PIL import Image
29 |
30 | import image_utils
31 |
32 |
33 | def load_model(model_id="sayakpaul/whitebox-cartoonizer"):
34 | model_path = snapshot_download(model_id)
35 | loaded_model = tf.saved_model.load(model_path)
36 | concrete_func = loaded_model.signatures["serving_default"]
37 | return concrete_func
38 |
39 |
40 | def perform_inference(concrete_fn: Callable) -> Callable:
41 | def fn(image: np.ndarray) -> Image.Image:
42 | preprocessed_image = image_utils.preprocess_image(image)
43 | result = concrete_fn(preprocessed_image)["final_output:0"]
44 | output_image = image_utils.postprocess_image(result)
45 | return output_image
46 |
47 | return fn
48 |
--------------------------------------------------------------------------------
/data_preparation/README.md:
--------------------------------------------------------------------------------
1 | This directory provides utilities to create a Cartoonizer dataset for [InstructPix2Pix](https://arxiv.org/abs/2211.09800)-style training.
2 |
3 | ## Steps
4 |
5 | We used 5000 images randomly sampled from the `train` set of [ImageNette](https://www.tensorflow.org/datasets/catalog/imagenette) as the original images. To derive their
6 | cartoonized renditions, we used the [Whitebox Cartoonizer model](https://huggingface.co/sayakpaul/whitebox-cartoonizer). For deriving the `instructions.txt` file, we used [ChatGPT](https://chat.openai.com/). In particular, we used the following prompt:
7 |
8 | > Provide at least 50 synonymous sentences for the following instruction: "Cartoonize the following image."
9 |
10 | Dataset preparation is divided into three steps:
11 |
12 | ### Step 0: Install dependencies
13 |
14 | ```bash
15 | pip install -q -r requirements.txt
16 | ```
17 |
18 | ### Step 1: Obtain the image-cartoon pairs
19 |
20 | ```bash
21 | python generate_dataset.py
22 | ```
23 |
24 | If you want to use more than 5000 samples, specify the `--max_num_samples` option. Once the image-cartoon pairs are generated, you should see a directory called `cartoonizer-dataset` (unless you specified a different one via `--data_root`):
25 |
26 |
27 |
28 |
29 |
30 | ### Step 2: Export the dataset to 🤗 Hub
31 |
32 | For this step, you need to be authorized to access your Hugging Face account. Run the following command to do so:
33 |
34 | ```bash
35 | huggingface-cli login
36 | ```
37 |
38 | Then run:
39 |
40 | ```bash
41 | python export_to_hub.py
42 | ```
43 |
44 | > [!WARNING]
45 | > Please ensure that an empty [`DS_NAME` dataset](https://github.com/huggingface/instruction-tuned-sd/blob/0193a90d6932a2eac7a231ef5760fb427e44274d/data_preparation/export_to_hub.py#L26) was created on the Hub first. Instructions on how to do that are [here](https://huggingface.co/docs/datasets/upload_dataset#upload-with-the-hub-ui).
46 |
47 | You can find a mini dataset [here](https://huggingface.co/datasets/instruction-tuning-vision/cartoonizer-dataset):
48 |
49 |
50 |
51 |
52 |
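Once exported, the dataset can be loaded back with 🤗 Datasets for inspection or training. A minimal sketch (assuming the dataset repository linked above; swap in your own `DS_NAME` if you changed it):

```python
from datasets import load_dataset

# Replace with your own repository if you pushed under a different DS_NAME.
dataset = load_dataset("instruction-tuning-vision/cartoonizer-dataset", split="train")
print(dataset)
print(dataset[0]["edit_prompt"])
```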
--------------------------------------------------------------------------------
/data_preparation/image_utils.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | import cv2
18 | import numpy as np
19 | import requests
20 | import tensorflow as tf
21 | from PIL import Image
22 |
23 |
24 | # Taken from
25 | # https://github.com/SystemErrorWang/White-box-Cartoonization/blob/master/test_code/cartoonize.py#L11
26 | def resize_crop(image: np.ndarray) -> np.ndarray:
27 | h, w, c = np.shape(image)
28 | if min(h, w) > 720:
29 | if h > w:
30 | h, w = int(720 * h / w), 720
31 | else:
32 | h, w = 720, int(720 * w / h)
33 | image = cv2.resize(image, (w, h), interpolation=cv2.INTER_AREA)
34 | h, w = (h // 8) * 8, (w // 8) * 8
35 | image = image[:h, :w, :]
36 | return image
37 |
38 |
39 | def download_image(url: str) -> np.ndarray:
40 | image = Image.open(requests.get(url, stream=True).raw)
41 | image = image.convert("RGB")
42 | image = np.array(image)
43 | return image
44 |
45 |
46 | def preprocess_image(image: np.ndarray) -> tf.Tensor:
47 | image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
48 | image = resize_crop(image)
49 | image = image.astype(np.float32) / 127.5 - 1
50 | image = np.expand_dims(image, axis=0)
51 | image = tf.constant(image)
52 | return image
53 |
54 |
55 | def postprocess_image(image: tf.Tensor) -> Image.Image:
56 | output = (image[0].numpy() + 1.0) * 127.5
57 | output = np.clip(output, 0, 255).astype(np.uint8)
58 | output = cv2.cvtColor(output, cv2.COLOR_BGR2RGB)
59 | output_image = Image.fromarray(output)
60 | return output_image
61 |
--------------------------------------------------------------------------------
/data_preparation/export_to_hub.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | import argparse
18 | import os
19 | from typing import List
20 |
21 | import numpy as np
22 | from datasets import Dataset, Features
23 | from datasets import Image as ImageFeature
24 | from datasets import Value
25 |
26 | DS_NAME = "cartoonizer-dataset"
27 |
28 |
29 | def parse_args():
30 | parser = argparse.ArgumentParser()
31 | parser.add_argument("--data_root", type=str, default="cartoonizer-dataset")
32 | parser.add_argument("--instructions_path", type=str, default="instructions.txt")
33 | args = parser.parse_args()
34 | return args
35 |
36 |
37 | def load_instructions(instructions_path: str) -> List[str]:
38 | with open(instructions_path, "r") as f:
39 | instructions = f.readlines()
40 | instructions = [i.strip() for i in instructions]
41 | return instructions
42 |
43 |
44 | def generate_examples(data_paths: List[str], instructions: List[str]):
45 | def fn():
46 | for data_path in data_paths:
47 | yield {
48 | "original_image": {"path": data_path[0]},
49 | "edit_prompt": np.random.choice(instructions),
50 | "cartoonized_image": {"path": data_path[1]},
51 | }
52 |
53 | return fn
54 |
55 |
56 | def main(args):
57 | instructions = load_instructions(args.instructions_path)
58 |
59 | data_paths = os.listdir(args.data_root)
60 | data_paths = [os.path.join(args.data_root, d) for d in data_paths]
61 | new_data_paths = []
62 | for data_path in data_paths:
63 | original_image = os.path.join(data_path, "original_image.png")
64 | cartoonized_image = os.path.join(data_path, "cartoonized_image.png")
65 | new_data_paths.append((original_image, cartoonized_image))
66 |
67 | generation_fn = generate_examples(new_data_paths, instructions)
68 | print("Creating dataset...")
69 | ds = Dataset.from_generator(
70 | generation_fn,
71 | features=Features(
72 | original_image=ImageFeature(),
73 | edit_prompt=Value("string"),
74 | cartoonized_image=ImageFeature(),
75 | ),
76 | )
77 |
78 | print("Pushing to the Hub...")
79 | ds.push_to_hub(DS_NAME)
80 |
81 |
82 | if __name__ == "__main__":
83 | args = parse_args()
84 | main(args)
85 |
--------------------------------------------------------------------------------
/data_preparation/generate_dataset.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | import argparse
18 | import hashlib
19 | import os
20 |
21 | import model_utils
22 | import tensorflow as tf
23 | import tensorflow_datasets as tfds
24 | from PIL import Image
25 | from tqdm import tqdm
26 |
27 |
28 | def parse_args():
29 | parser = argparse.ArgumentParser(
30 | description="Prepare a dataset for InstructPix2Pix style training."
31 | )
32 | parser.add_argument(
33 | "--model_id", type=str, default="sayakpaul/whitebox-cartoonizer"
34 | )
35 | parser.add_argument("--dataset_id", type=str, default="imagenette")
36 | parser.add_argument("--max_num_samples", type=int, default=5000)
37 | parser.add_argument("--data_root", type=str, default="cartoonizer-dataset")
38 | args = parser.parse_args()
39 | return args
40 |
41 |
42 | def load_dataset(dataset_id: str, max_num_samples: int) -> tf.data.Dataset:
43 | dataset = tfds.load(dataset_id, split="train")
44 | dataset = dataset.shuffle(max_num_samples if max_num_samples is not None else 128)
45 | if max_num_samples is not None:
46 | print(f"Dataset will be restricted to {max_num_samples} samples.")
47 | dataset = dataset.take(max_num_samples)
48 | return dataset
49 |
50 |
51 | def main(args):
52 | print("Loading initial dataset and the Cartoonizer model...")
53 | dataset = load_dataset(args.dataset_id, args.max_num_samples)
54 | concrete_fn = model_utils.load_model(args.model_id)
55 | inference_fn = model_utils.perform_inference(concrete_fn)
56 |
57 | print("Preparing the image pairs...")
58 | os.makedirs(args.data_root, exist_ok=True)
59 | for sample in tqdm(dataset.as_numpy_iterator()):
60 | original_image = sample["image"]
61 | cartoonized_image = inference_fn(original_image)
62 |
63 | hash_image = hashlib.sha1(original_image.tobytes()).hexdigest()
64 | sample_dir = os.path.join(args.data_root, hash_image)
65 | os.makedirs(sample_dir)
66 |
67 | original_image = Image.fromarray(original_image).convert("RGB")
68 | original_image.save(os.path.join(sample_dir, "original_image.png"))
69 | cartoonized_image.save(os.path.join(sample_dir, "cartoonized_image.png"))
70 |
71 | print(f"Total generated image-pairs: {len(os.listdir(args.data_root))}.")
72 |
73 |
74 | if __name__ == "__main__":
75 | args = parse_args()
76 | main(args)
77 |
--------------------------------------------------------------------------------
/data_preparation/instructions.txt:
--------------------------------------------------------------------------------
1 | Transform the natural image into a cartoon.
2 | Create a cartoon-style image from the natural image.
3 | Apply a cartoon filter to the natural image.
4 | Turn the natural image into a cartoon-style drawing.
5 | Give the natural image a cartoon effect.
6 | Convert the natural image to a cartoon-like illustration.
7 | Make the natural image look like a cartoon.
8 | Render the natural image in a cartoon style.
9 | Generate a cartoonized version of the natural image.
10 | Apply a cartoon-like effect to the natural image.
11 | Produce a cartoon version of the natural image.
12 | Turn the natural image into a cartoon-style picture.
13 | Use a cartoon filter to create a cartoon-like effect on the natural image.
14 | Transform the natural image into a cartoonish version.
15 | Apply a cartoon effect to the natural image to create a cartoonized version.
16 | Give the natural image a cartoonish look.
17 | Use a cartoon conversion software to turn the natural image into a cartoon.
18 | Use a cartoon-making app to cartoonize the natural image.
19 | Use a digital drawing software to create a cartoon version of the natural image.
20 | Edit the natural image to make it look like a cartoon.
21 | Apply a cartooning effect to the natural image.
22 | Create a cartoon-style illustration from the natural image.
23 | Turn the natural image into a cartoonish drawing.
24 | Use a cartoon filter to give the natural image a cartoon-like appearance.
25 | Use a software to convert the natural image to a cartoon.
26 | Change the natural image to a cartoon-style image.
27 | Use a graphic design software to create a cartoonized version of the natural image.
28 | Use a cartoonizing tool to transform the natural image into a cartoon.
29 | Alter the natural image to give it a cartoonish effect.
30 | Give the natural image a comic book-style look.
31 | Cartoonify the natural image.
32 | Use an image editing tool to turn the natural image into a cartoon.
33 | Use a digital art program to create a cartoonized version of the natural image.
34 | Create a cartoon-style graphic from the natural image.
35 | Give the natural image a hand-drawn, cartoon-like appearance.
36 | Transform the natural image into a sketch-like cartoon.
37 | Change the natural image to a hand-drawn cartoon.
38 | Edit the natural image to give it a toon-like effect.
39 | Use an art software to create a cartoonized version of the natural image.
40 | Apply a cartoon-like filter to the natural image to give it a toon-like appearance.
41 | Apply a filter to the natural image to create a comic book-style effect.
42 | Use a cartoonization program to create a cartoon version of the natural image.
43 | Give the natural image a graphic novel-style look.
44 | Transform the natural image into a caricature.
45 | Use a photo editing software to create a cartoonized version of the natural image.
46 | Turn the natural image into a cartoon character.
47 | Use a cartoon effect to create a cartoon-style illustration of the natural image.
48 | Give the natural image a hand-drawn, animated look.
49 | Use a tool to create a cartoonized version of the natural image.
50 | Change the natural image to a cartoon-like graphic.
51 |
--------------------------------------------------------------------------------
/validation/README.md:
--------------------------------------------------------------------------------
1 | This directory provides utilities to visually compare the results of different models:
2 |
3 | * [sayakpaul/whitebox-cartoonizer](https://hf.co/sayakpaul/whitebox-cartoonizer) (TensorFlow)
4 | * [instruction-tuning-vision/instruction-tuned-cartoonizer](https://hf.co/instruction-tuning-vision/instruction-tuned-cartoonizer) (Diffusers)
5 | * [timbrooks/instruct-pix2pix](https://hf.co/timbrooks/instruct-pix2pix) (Diffusers)
6 |
7 | We use the `validation` split of ImageNette for validation. Launch the following script to cartoonize 10 different samples with a specific model:
8 |
9 | ```bash
10 | python compare_models.py --model_id sayakpaul/whitebox-cartoonizer --max_num_samples 10
11 | ```
12 |
13 | For the Diffusers-compatible models, you can additionally specify the following options (see the example after the list):
14 |
15 | * `--prompt`
16 | * `--num_inference_steps`
17 | * `--image_guidance_scale`
18 | * `--guidance_scale`
19 |
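For example, the following invocation (the values shown are the script defaults) runs one of the Diffusers-compatible models:

```bash
python compare_models.py \
  --model_id instruction-tuning-vision/instruction-tuned-cartoonizer \
  --max_num_samples 10 \
  --prompt "Generate a cartoonized version of the image" \
  --num_inference_steps 20 \
  --image_guidance_scale 1.5 \
  --guidance_scale 7.0
```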
20 | After the samples have been generated, they should be serialized in the following structure:
21 |
22 | ```bash
23 | ├── comparison-sayakpaul
24 | │ └── whitebox-cartoonizer
25 | │ ├── 0 -- class label
26 | │ │ └── 55f8f5846192691faa2f603b0c92f27fd8599fc7 -- original image hash
27 | │ │ └── tf_image.png -- cartoonized image
28 | │ ├── 1
29 | │ │ ├── b8bfb2ec1a9af348ade8f467ac99e0af0fa0e937
30 | │ │ │ └── tf_image.png
31 | │ │ └── d23da1e9d9c39b17dacb66ddb52f290049a774a5
32 | │ │ └── tf_image.png
33 | │ ├── 2
34 | │ │ └── 7e25076bd693e10ad04e3c41aa29a3258e3d0ecd
35 | │ │ └── tf_image.png
36 | │ ├── 3
37 | │ │ ├── 1c43c5c5f7350b59d0c0607fd9357ed9e1b55e46
38 | │ │ │ └── tf_image.png
39 | │ │ └── cd4ca63c3d7913b1473937618c157c1919465930
40 | │ │ └── tf_image.png
41 | │ ├── 6
42 | │ │ ├── 220b6c136d47e81b186d337e0bdd064c67532e4e
43 | │ │ │ └── tf_image.png
44 | │ │ └── f80589219ae2b913677ea9417962d4ab75f08c2f
45 | │ │ └── tf_image.png
46 | │ └── 7
47 | │ ├── 4f33183189589bb171ba9489b898e5edbac25dfe
48 | │ │ └── tf_image.png
49 | │ └── 519863ade478d26b467e08dc5fb4353a6316833c
50 | │ └── tf_image.png
51 | ```
52 |
53 | If you use a Diffusers-compatible model, the structure would look like so:
54 |
55 | ```bash
56 | ├── comparison-instruction-tuning-vision
57 | │ └── instruction-tuned-cartoonizer
58 | │ ├── 0
59 | │ │ └── 55f8f5846192691faa2f603b0c92f27fd8599fc7
60 | │ │ └── steps@20-igs@1.5-gs@7.0.png
61 | │ ├── 1
62 | │ │ ├── b8bfb2ec1a9af348ade8f467ac99e0af0fa0e937
63 | │ │ │ └── steps@20-igs@1.5-gs@7.0.png
64 | │ │ └── d23da1e9d9c39b17dacb66ddb52f290049a774a5
65 | │ │ └── steps@20-igs@1.5-gs@7.0.png
66 | │ ├── 2
67 | │ │ └── 7e25076bd693e10ad04e3c41aa29a3258e3d0ecd
68 | │ │ └── steps@20-igs@1.5-gs@7.0.png
69 | │ ├── 3
70 | │ │ ├── 1c43c5c5f7350b59d0c0607fd9357ed9e1b55e46
71 | │ │ │ └── steps@20-igs@1.5-gs@7.0.png
72 | │ │ └── cd4ca63c3d7913b1473937618c157c1919465930
73 | │ │ └── steps@20-igs@1.5-gs@7.0.png
74 | │ ├── 6
75 | │ │ ├── 220b6c136d47e81b186d337e0bdd064c67532e4e
76 | │ │ │ └── steps@20-igs@1.5-gs@7.0.png
77 | │ │ └── f80589219ae2b913677ea9417962d4ab75f08c2f
78 | │ │ └── steps@20-igs@1.5-gs@7.0.png
79 | │ └── 7
80 | │ ├── 4f33183189589bb171ba9489b898e5edbac25dfe
81 | │ │ └── steps@20-igs@1.5-gs@7.0.png
82 | │ └── 519863ade478d26b467e08dc5fb4353a6316833c
83 | │ └── steps@20-igs@1.5-gs@7.0.png
84 | ```
85 |
--------------------------------------------------------------------------------
/validation/compare_models.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | import os
18 | import sys
19 |
20 | SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
21 | sys.path.append(os.path.dirname(SCRIPT_DIR))
22 |
23 | import argparse
24 | import hashlib
25 | import os
26 |
27 | import data_utils
28 | import torch
29 | from diffusers import StableDiffusionInstructPix2PixPipeline
30 | from PIL import Image
31 |
32 | from data_preparation import model_utils
33 |
34 | GEN = torch.manual_seed(0)
35 |
36 |
37 | def parse_args():
38 | parser = argparse.ArgumentParser()
39 | parser.add_argument(
40 | "--model_id",
41 | type=str,
42 | default="sayakpaul/whitebox-cartoonizer",
43 | choices=[
44 | "sayakpaul/whitebox-cartoonizer",
45 | "instruction-tuning-vision/instruction-tuned-cartoonizer",
46 | "timbrooks/instruct-pix2pix",
47 | ],
48 | )
49 | parser.add_argument("--dataset_id", type=str, default="imagenette")
50 | parser.add_argument("--max_num_samples", type=int, default=10)
51 | parser.add_argument(
52 | "--prompt", type=str, default="Generate a cartoonized version of the image"
53 | )
54 | parser.add_argument("--num_inference_steps", type=int, default=20)
55 | parser.add_argument("--image_guidance_scale", type=float, default=1.5)
56 | parser.add_argument("--guidance_scale", type=float, default=7.0)
57 | args = parser.parse_args()
58 | return args
59 |
60 |
61 | def load_pipeline(model_id: str):
62 | pipeline = StableDiffusionInstructPix2PixPipeline.from_pretrained(
63 | model_id, torch_dtype=torch.float16, use_auth_token=True
64 | ).to("cuda")
65 | pipeline.enable_xformers_memory_efficient_attention()
66 | pipeline.set_progress_bar_config(disable=True)
67 | return pipeline
68 |
69 |
70 | def main(args):
71 | data_root = os.path.join(f"comparison-{args.model_id}")
72 |
73 | print("Loading validation dataset and inference model...")
74 | dataset = data_utils.load_dataset(args.dataset_id, args.max_num_samples)
75 | using_tf = False
76 | if "sayakpaul" in args.model_id:
77 | inference = model_utils.load_model(args.model_id)
78 | using_tf = True
79 | print(
80 | "TensorFlow model detected for inference, Diffusion-specifc parameters won't be used."
81 | )
82 | else:
83 | inference = load_pipeline(args.model_id)
84 |
85 | num_samples_to_generate = (
86 | args.max_num_samples
87 | if args.max_num_samples is not None
88 | else dataset.cardinality()
89 | )
90 | print(f"Generating {num_samples_to_generate} images...")
91 | for sample in dataset.as_numpy_iterator():
92 | # Result dir creation.
93 | concept_path = os.path.join(data_root, str(sample["label"]))
94 | hash_image = hashlib.sha1(sample["image"].tobytes()).hexdigest()
95 | image_path = os.path.join(concept_path, hash_image)
96 | os.makedirs(image_path, exist_ok=True)
97 |
98 | # Perform inference and serialize the result.
99 | if using_tf:
100 | image = model_utils.perform_inference(inference)(sample["image"])
101 | Image.fromarray(sample["image"]).save(os.path.join(image_path, "original.png"))
102 | image.save(os.path.join(image_path, "tf_image.png"))
103 | else:
104 | image = inference(
105 | args.prompt,
106 | image=Image.fromarray(sample["image"]).convert("RGB"),
107 | num_inference_steps=args.num_inference_steps,
108 | image_guidance_scale=args.image_guidance_scale,
109 | guidance_scale=args.guidance_scale,
110 | generator=GEN,
111 | ).images[0]
112 | image_prefix = f"steps@{args.num_inference_steps}-igs@{args.image_guidance_scale}-gs@{args.guidance_scale}"
113 | Image.fromarray(sample["image"]).save(os.path.join(image_path, "original.png"))
114 | image.save(os.path.join(image_path, f"{image_prefix}.png"))
115 |
116 |
117 | if __name__ == "__main__":
118 | args = parse_args()
119 | main(args)
120 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright 2023- The HuggingFace Inc. team and The InstructPix2Pix Authors. All rights reserved.
2 |
3 | Apache License
4 | Version 2.0, January 2004
5 | http://www.apache.org/licenses/
6 |
7 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
8 |
9 | 1. Definitions.
10 |
11 | "License" shall mean the terms and conditions for use, reproduction,
12 | and distribution as defined by Sections 1 through 9 of this document.
13 |
14 | "Licensor" shall mean the copyright owner or entity authorized by
15 | the copyright owner that is granting the License.
16 |
17 | "Legal Entity" shall mean the union of the acting entity and all
18 | other entities that control, are controlled by, or are under common
19 | control with that entity. For the purposes of this definition,
20 | "control" means (i) the power, direct or indirect, to cause the
21 | direction or management of such entity, whether by contract or
22 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
23 | outstanding shares, or (iii) beneficial ownership of such entity.
24 |
25 | "You" (or "Your") shall mean an individual or Legal Entity
26 | exercising permissions granted by this License.
27 |
28 | "Source" form shall mean the preferred form for making modifications,
29 | including but not limited to software source code, documentation
30 | source, and configuration files.
31 |
32 | "Object" form shall mean any form resulting from mechanical
33 | transformation or translation of a Source form, including but
34 | not limited to compiled object code, generated documentation,
35 | and conversions to other media types.
36 |
37 | "Work" shall mean the work of authorship, whether in Source or
38 | Object form, made available under the License, as indicated by a
39 | copyright notice that is included in or attached to the work
40 | (an example is provided in the Appendix below).
41 |
42 | "Derivative Works" shall mean any work, whether in Source or Object
43 | form, that is based on (or derived from) the Work and for which the
44 | editorial revisions, annotations, elaborations, or other modifications
45 | represent, as a whole, an original work of authorship. For the purposes
46 | of this License, Derivative Works shall not include works that remain
47 | separable from, or merely link (or bind by name) to the interfaces of,
48 | the Work and Derivative Works thereof.
49 |
50 | "Contribution" shall mean any work of authorship, including
51 | the original version of the Work and any modifications or additions
52 | to that Work or Derivative Works thereof, that is intentionally
53 | submitted to Licensor for inclusion in the Work by the copyright owner
54 | or by an individual or Legal Entity authorized to submit on behalf of
55 | the copyright owner. For the purposes of this definition, "submitted"
56 | means any form of electronic, verbal, or written communication sent
57 | to the Licensor or its representatives, including but not limited to
58 | communication on electronic mailing lists, source code control systems,
59 | and issue tracking systems that are managed by, or on behalf of, the
60 | Licensor for the purpose of discussing and improving the Work, but
61 | excluding communication that is conspicuously marked or otherwise
62 | designated in writing by the copyright owner as "Not a Contribution."
63 |
64 | "Contributor" shall mean Licensor and any individual or Legal Entity
65 | on behalf of whom a Contribution has been received by Licensor and
66 | subsequently incorporated within the Work.
67 |
68 | 2. Grant of Copyright License. Subject to the terms and conditions of
69 | this License, each Contributor hereby grants to You a perpetual,
70 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
71 | copyright license to reproduce, prepare Derivative Works of,
72 | publicly display, publicly perform, sublicense, and distribute the
73 | Work and such Derivative Works in Source or Object form.
74 |
75 | 3. Grant of Patent License. Subject to the terms and conditions of
76 | this License, each Contributor hereby grants to You a perpetual,
77 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
78 | (except as stated in this section) patent license to make, have made,
79 | use, offer to sell, sell, import, and otherwise transfer the Work,
80 | where such license applies only to those patent claims licensable
81 | by such Contributor that are necessarily infringed by their
82 | Contribution(s) alone or by combination of their Contribution(s)
83 | with the Work to which such Contribution(s) was submitted. If You
84 | institute patent litigation against any entity (including a
85 | cross-claim or counterclaim in a lawsuit) alleging that the Work
86 | or a Contribution incorporated within the Work constitutes direct
87 | or contributory patent infringement, then any patent licenses
88 | granted to You under this License for that Work shall terminate
89 | as of the date such litigation is filed.
90 |
91 | 4. Redistribution. You may reproduce and distribute copies of the
92 | Work or Derivative Works thereof in any medium, with or without
93 | modifications, and in Source or Object form, provided that You
94 | meet the following conditions:
95 |
96 | (a) You must give any other recipients of the Work or
97 | Derivative Works a copy of this License; and
98 |
99 | (b) You must cause any modified files to carry prominent notices
100 | stating that You changed the files; and
101 |
102 | (c) You must retain, in the Source form of any Derivative Works
103 | that You distribute, all copyright, patent, trademark, and
104 | attribution notices from the Source form of the Work,
105 | excluding those notices that do not pertain to any part of
106 | the Derivative Works; and
107 |
108 | (d) If the Work includes a "NOTICE" text file as part of its
109 | distribution, then any Derivative Works that You distribute must
110 | include a readable copy of the attribution notices contained
111 | within such NOTICE file, excluding those notices that do not
112 | pertain to any part of the Derivative Works, in at least one
113 | of the following places: within a NOTICE text file distributed
114 | as part of the Derivative Works; within the Source form or
115 | documentation, if provided along with the Derivative Works; or,
116 | within a display generated by the Derivative Works, if and
117 | wherever such third-party notices normally appear. The contents
118 | of the NOTICE file are for informational purposes only and
119 | do not modify the License. You may add Your own attribution
120 | notices within Derivative Works that You distribute, alongside
121 | or as an addendum to the NOTICE text from the Work, provided
122 | that such additional attribution notices cannot be construed
123 | as modifying the License.
124 |
125 | You may add Your own copyright statement to Your modifications and
126 | may provide additional or different license terms and conditions
127 | for use, reproduction, or distribution of Your modifications, or
128 | for any such Derivative Works as a whole, provided Your use,
129 | reproduction, and distribution of the Work otherwise complies with
130 | the conditions stated in this License.
131 |
132 | 5. Submission of Contributions. Unless You explicitly state otherwise,
133 | any Contribution intentionally submitted for inclusion in the Work
134 | by You to the Licensor shall be under the terms and conditions of
135 | this License, without any additional terms or conditions.
136 | Notwithstanding the above, nothing herein shall supersede or modify
137 | the terms of any separate license agreement you may have executed
138 | with Licensor regarding such Contributions.
139 |
140 | 6. Trademarks. This License does not grant permission to use the trade
141 | names, trademarks, service marks, or product names of the Licensor,
142 | except as required for reasonable and customary use in describing the
143 | origin of the Work and reproducing the content of the NOTICE file.
144 |
145 | 7. Disclaimer of Warranty. Unless required by applicable law or
146 | agreed to in writing, Licensor provides the Work (and each
147 | Contributor provides its Contributions) on an "AS IS" BASIS,
148 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
149 | implied, including, without limitation, any warranties or conditions
150 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
151 | PARTICULAR PURPOSE. You are solely responsible for determining the
152 | appropriateness of using or redistributing the Work and assume any
153 | risks associated with Your exercise of permissions under this License.
154 |
155 | 8. Limitation of Liability. In no event and under no legal theory,
156 | whether in tort (including negligence), contract, or otherwise,
157 | unless required by applicable law (such as deliberate and grossly
158 | negligent acts) or agreed to in writing, shall any Contributor be
159 | liable to You for damages, including any direct, indirect, special,
160 | incidental, or consequential damages of any character arising as a
161 | result of this License or out of the use or inability to use the
162 | Work (including but not limited to damages for loss of goodwill,
163 | work stoppage, computer failure or malfunction, or any and all
164 | other commercial damages or losses), even if such Contributor
165 | has been advised of the possibility of such damages.
166 |
167 | 9. Accepting Warranty or Additional Liability. While redistributing
168 | the Work or Derivative Works thereof, You may choose to offer,
169 | and charge a fee for, acceptance of support, warranty, indemnity,
170 | or other liability obligations and/or rights consistent with this
171 | License. However, in accepting such obligations, You may act only
172 | on Your own behalf and on Your sole responsibility, not on behalf
173 | of any other Contributor, and only if You agree to indemnify,
174 | defend, and hold each Contributor harmless for any liability
175 | incurred by, or claims asserted against, such Contributor by reason
176 | of your accepting any such warranty or additional liability.
177 |
178 | END OF TERMS AND CONDITIONS
179 |
180 | APPENDIX: How to apply the Apache License to your work.
181 |
182 | To apply the Apache License to your work, attach the following
183 | boilerplate notice, with the fields enclosed by brackets "[]"
184 | replaced with your own identifying information. (Don't include
185 | the brackets!) The text should be enclosed in the appropriate
186 | comment syntax for the file format. We also recommend that a
187 | file or class name and description of purpose be included on the
188 | same "printed page" as the copyright notice for easier
189 | identification within third-party archives.
190 |
191 | Copyright [yyyy] [name of copyright owner]
192 |
193 | Licensed under the Apache License, Version 2.0 (the "License");
194 | you may not use this file except in compliance with the License.
195 | You may obtain a copy of the License at
196 |
197 | http://www.apache.org/licenses/LICENSE-2.0
198 |
199 | Unless required by applicable law or agreed to in writing, software
200 | distributed under the License is distributed on an "AS IS" BASIS,
201 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
202 | See the License for the specific language governing permissions and
203 | limitations under the License.
204 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Instruction-tuning Stable Diffusion
2 |
3 | **TL;DR**: Motivated partly by [FLAN](https://arxiv.org/abs/2109.01652) and partly by [InstructPix2Pix](https://arxiv.org/abs/2211.09800), we explore a way to instruction-tune [Stable Diffusion](https://stability.ai/blog/stable-diffusion-public-release). This allows us to prompt our model with an input image and an “instruction”, such as *Apply a cartoon filter to the natural image*.
4 |
5 | You can read [our blog post](https://hf.co/blog/instruction-tuning-sd) for more details.
6 |
7 | ## Table of contents
8 |
9 | 🐶 [Motivation](#motivation)
10 | 📷 [Data preparation](#data-preparation)
11 | 💺 [Training](#training)
12 | 🎛 [Models, datasets, demo](#models-datasets-demo)
13 | ⭐️ [Inference](#inference)
14 | 🧭 [Results](#results)
15 | 🤝 [Acknowledgements](#acknowledgements)
16 |
17 | ## Motivation
18 |
19 | Instruction-tuning is a supervised way of teaching language models to follow instructions to solve a task. It was introduced in [Fine-tuned Language Models Are Zero-Shot Learners](https://arxiv.org/abs/2109.01652) (FLAN) by Google. More recently, works like [Alpaca](https://crfm.stanford.edu/2023/03/13/alpaca.html) and [FLAN V2](https://arxiv.org/abs/2210.11416) have shown how beneficial instruction-tuning can be for various tasks.
20 |
21 | On the other hand, the idea of teaching Stable Diffusion to follow user instructions to perform edits on input images was introduced in [InstructPix2Pix: Learning to Follow Image Editing Instructions](https://arxiv.org/abs/2211.09800).
22 |
23 | Our motivation behind this work comes partly from the FLAN line of works and partly from InstructPix2Pix. We wanted to explore whether it’s possible to prompt Stable Diffusion with specific instructions and input images so that it processes the images as per our needs.
24 |
25 |
26 |
27 |
28 |
29 | Our main idea is to first create an instruction-prompted dataset (as described in [our blog](https://hf.co/blog/instruction-tuning-sd)) and then conduct InstructPix2Pix-style training. The end objective is to make Stable Diffusion better at following specific instructions that entail image transformation-related operations.
30 |
31 |
32 | ## Data preparation
33 |
34 | Our data preparation process is inspired by FLAN. Refer to the sections below for more details.
35 |
36 | * **Cartoonization**: Refer to the `data_preparation` directory.
37 | * **Low-level image processing**: Refer to the [dataset card](https://huggingface.co/datasets/instruction-tuning-sd/low-level-image-proc).
38 |
39 | ## Training
40 |
41 | > [!TIP]
42 | > If you use a custom dataset, you can configure it however you like as long as you maintain the format presented here (a minimal sketch is shown below). If you don't want to make use of the `datasets` library, you might have to write your own dataloader and dataset class and adjust the training scripts accordingly.
43 |
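As a reference, here is a minimal, hypothetical sketch of building such a dataset with 🤗 Datasets. The file names and the Hub repository are placeholders, and the column names match the training scripts' defaults (`input_image`, `edit_prompt`, `edited_image`); if you use different names, pass them via `--original_image_column`, `--edit_prompt_column`, and `--edited_image_column`.

```python
from datasets import Dataset, Image

# Hypothetical local image pairs and instructions; replace with your own data.
examples = {
    "input_image": ["original_0.png", "original_1.png"],
    "edit_prompt": ["Cartoonize the following image", "Cartoonize the following image"],
    "edited_image": ["cartoonized_0.png", "cartoonized_1.png"],
}

dataset = Dataset.from_dict(examples)
# Treat the path columns as images so they are stored (and decoded) as image data.
dataset = dataset.cast_column("input_image", Image())
dataset = dataset.cast_column("edited_image", Image())

# Push to a (hypothetical) dataset repository and pass it via --dataset_name.
dataset.push_to_hub("your-username/your-edit-dataset")
```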
44 | ### Dev env setup
45 |
46 | We recommend using a Python virtual environment for this. Feel free to use your favorite one here.
47 |
48 | We conducted our experiments with PyTorch 1.13.1 (CUDA 11.6) and a single A100 GPU. Since PyTorch installation can be hardware-dependent, we refer you to the [official docs](https://pytorch.org/) for installing PyTorch.
49 |
50 | Once PyTorch is installed, we can install the rest of the dependencies:
51 |
52 | ```bash
53 | pip install -r requirements.txt
54 | ```
55 |
56 | Additionally, we recommend installing [xformers](https://github.com/facebookresearch/xformers) as well for enabling memory-efficient training.
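One way to install it (the exact command can vary with your CUDA and PyTorch versions) is:

```bash
pip install xformers
```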
57 |
58 | > 💡 **Note**: If you're using PyTorch 2.0 then you don't need to additionally install xformers. This is because we default to a memory-efficient attention processor in Diffusers when PyTorch 2.0 is being used.
59 |
60 | ### Launching training
61 |
62 | Our training code leverages [🧨 diffusers](https://github.com/huggingface/diffusers), [🤗 accelerate](https://github.com/huggingface/accelerate), and [🤗 transformers](https://github.com/huggingface/transformers). In particular, we extend [this training example](https://github.com/huggingface/diffusers/blob/main/examples/instruct_pix2pix/train_instruct_pix2pix.py) to fit our needs.
63 |
64 | ### Cartoonization
65 |
66 | #### Training from scratch using the InstructPix2Pix methodology
67 |
68 | ```bash
69 | export MODEL_ID="runwayml/stable-diffusion-v1-5"
70 | export DATASET_ID="instruction-tuning-sd/cartoonization"
71 | export OUTPUT_DIR="cartoonization-scratch"
72 |
73 | accelerate launch --mixed_precision="fp16" train_instruct_pix2pix.py \
74 | --pretrained_model_name_or_path=$MODEL_ID \
75 | --dataset_name=$DATASET_ID \
76 | --use_ema \
77 | --enable_xformers_memory_efficient_attention \
78 | --resolution=256 --random_flip \
79 | --train_batch_size=2 --gradient_accumulation_steps=4 --gradient_checkpointing \
80 | --max_train_steps=15000 \
81 | --checkpointing_steps=5000 --checkpoints_total_limit=1 \
82 | --learning_rate=5e-05 --lr_warmup_steps=0 \
83 | --mixed_precision=fp16 \
84 | --val_image_url="https://hf.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png" \
85 | --validation_prompt="Generate a cartoonized version of the natural image" \
86 | --seed=42 \
87 | --output_dir=$OUTPUT_DIR \
88 | --report_to=wandb \
89 | --push_to_hub
90 | ```
91 |
92 | > 💡 **Note**: Following InstructPix2Pix, we train at the 256x256 resolution, and that doesn't seem to affect the end quality too much when we perform inference at the 512x512 resolution.
93 |
94 | Once training is successfully launched, the logs will be automatically tracked using Weights and Biases. Depending on how you specified `checkpointing_steps` and `max_train_steps`, there will be intermediate checkpoints too. At the end of training, you can expect a directory (namely `OUTPUT_DIR`) that contains the intermediate checkpoints and the final pipeline artifacts.
95 |
96 | If `--push_to_hub` is specified, the contents of `OUTPUT_DIR` will be pushed to a repository on the Hugging Face Hub.
97 |
98 | [Here](https://wandb.ai/sayakpaul/instruction-tuning-sd/runs/wszjpb1b) is an example run page on Weights and Biases. [Here](https://huggingface.co/instruction-tuning-sd/scratch-cartoonizer) is an example of what the pipeline repository looks like on the Hugging Face Hub.
99 |
100 | #### Fine-tuning from InstructPix2Pix
101 |
102 | ```bash
103 | export MODEL_ID="timbrooks/instruct-pix2pix"
104 | export DATASET_ID="instruction-tuning-sd/cartoonization"
105 | export OUTPUT_DIR="cartoonization-finetuned"
106 |
107 | accelerate launch --mixed_precision="fp16" finetune_instruct_pix2pix.py \
108 | --pretrained_model_name_or_path=$MODEL_ID \
109 | --dataset_name=$DATASET_ID \
110 | --use_ema \
111 | --enable_xformers_memory_efficient_attention \
112 | --resolution=256 --random_flip \
113 | --train_batch_size=2 --gradient_accumulation_steps=4 --gradient_checkpointing \
114 | --max_train_steps=15000 \
115 | --checkpointing_steps=5000 --checkpoints_total_limit=1 \
116 | --learning_rate=5e-05 --lr_warmup_steps=0 \
117 | --mixed_precision=fp16 \
118 | --val_image_url="https://hf.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png" \
119 | --validation_prompt="Generate a cartoonized version of the natural image" \
120 | --seed=42 \
121 | --output_dir=$OUTPUT_DIR \
122 | --report_to=wandb \
123 | --push_to_hub
124 | ```
125 |
126 | ### Low-level image processing
127 |
128 | #### Training from scratch using the InstructPix2Pix methodology
129 |
130 | ```bash
131 | export MODEL_ID="runwayml/stable-diffusion-v1-5"
132 | export DATASET_ID="instruction-tuning-sd/low-level-image-proc"
133 | export OUTPUT_DIR="low-level-img-proc-scratch"
134 |
135 | accelerate launch --mixed_precision="fp16" train_instruct_pix2pix.py \
136 | --pretrained_model_name_or_path=$MODEL_ID \
137 | --dataset_name=$DATASET_ID \
138 | --original_image_column="input_image" \
139 | --edit_prompt_column="instruction" \
140 | --edited_image_column="ground_truth_image" \
141 | --use_ema \
142 | --enable_xformers_memory_efficient_attention \
143 | --resolution=256 --random_flip \
144 | --train_batch_size=2 --gradient_accumulation_steps=4 --gradient_checkpointing \
145 | --max_train_steps=15000 \
146 | --checkpointing_steps=5000 --checkpoints_total_limit=1 \
147 | --learning_rate=5e-05 --lr_warmup_steps=0 \
148 | --mixed_precision=fp16 \
149 | --val_image_url="https://hf.co/datasets/sayakpaul/sample-datasets/resolve/main/derain_the_image_1.png" \
150 | --validation_prompt="Derain the image" \
151 | --seed=42 \
152 | --output_dir=$OUTPUT_DIR \
153 | --report_to=wandb \
154 | --push_to_hub
155 | ```
156 |
157 | #### Fine-tuning from InstructPix2Pix
158 |
159 | ```bash
160 | export MODEL_ID="timbrooks/instruct-pix2pix"
161 | export DATASET_ID="instruction-tuning-sd/low-level-image-proc"
162 | export OUTPUT_DIR="low-level-img-proc-finetuned"
163 |
164 | accelerate launch --mixed_precision="fp16" finetune_instruct_pix2pix.py \
165 | --pretrained_model_name_or_path=$MODEL_ID \
166 | --dataset_name=$DATASET_ID \
167 | --original_image_column="input_image" \
168 | --edit_prompt_column="instruction" \
169 | --edited_image_column="ground_truth_image" \
170 | --use_ema \
171 | --enable_xformers_memory_efficient_attention \
172 | --resolution=256 --random_flip \
173 | --train_batch_size=2 --gradient_accumulation_steps=4 --gradient_checkpointing \
174 | --max_train_steps=15000 \
175 | --checkpointing_steps=5000 --checkpoints_total_limit=1 \
176 | --learning_rate=5e-05 --lr_warmup_steps=0 \
177 | --mixed_precision=fp16 \
178 | --val_image_url="https://hf.co/datasets/sayakpaul/sample-datasets/resolve/main/derain_the_image_1.png" \
179 | --validation_prompt="Derain the image" \
180 | --seed=42 \
181 | --output_dir=$OUTPUT_DIR \
182 | --report_to=wandb \
183 | --push_to_hub
184 | ```
185 |
186 | ## Models, datasets, demo
187 |
188 | ### **Models**:
189 | * [instruction-tuning-sd/scratch-low-level-img-proc](https://huggingface.co/instruction-tuning-sd/scratch-low-level-img-proc)
190 | * [instruction-tuning-sd/scratch-cartoonizer](https://huggingface.co/instruction-tuning-sd/scratch-cartoonizer)
191 | * [instruction-tuning-sd/cartoonizer](https://huggingface.co/instruction-tuning-sd/cartoonizer)
192 | * [instruction-tuning-sd/low-level-img-proc](https://huggingface.co/instruction-tuning-sd/low-level-img-proc)
193 |
194 | ### **Datasets**:
195 | * [Instruction-prompted cartoonization](https://huggingface.co/datasets/instruction-tuning-sd/cartoonization)
196 | * [Instruction-prompted low-level image processing](https://huggingface.co/datasets/instruction-tuning-sd/low-level-image-proc)
197 |
198 | ### Demo on 🤗 Spaces
199 |
200 | Try out the models interactively WITHOUT any setup: [Demo](https://huggingface.co/spaces/instruction-tuning-sd/instruction-tuned-sd)
201 |
202 | ## Inference
203 |
204 | ### Cartoonization
205 |
206 | ```python
207 | import torch
208 | from diffusers import StableDiffusionInstructPix2PixPipeline
209 | from diffusers.utils import load_image
210 |
211 | model_id = "instruction-tuning-sd/cartoonizer"
212 | pipeline = StableDiffusionInstructPix2PixPipeline.from_pretrained(
213 | model_id, torch_dtype=torch.float16, use_auth_token=True
214 | ).to("cuda")
215 |
216 | image_path = "https://hf.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png"
217 | image = load_image(image_path)
218 |
219 | image = pipeline("Cartoonize the following image", image=image).images[0]
220 | image.save("image.png")
221 | ```
222 |
223 | ### Low-level image processing
224 |
225 | ```python
226 | import torch
227 | from diffusers import StableDiffusionInstructPix2PixPipeline
228 | from diffusers.utils import load_image
229 |
230 | model_id = "instruction-tuning-sd/low-level-img-proc"
231 | pipeline = StableDiffusionInstructPix2PixPipeline.from_pretrained(
232 | model_id, torch_dtype=torch.float16, use_auth_token=True
233 | ).to("cuda")
234 |
235 | image_path = "https://hf.co/datasets/sayakpaul/sample-datasets/resolve/main/derain%20the%20image_1.png"
236 | image = load_image(image_path)
237 |
238 | image = pipeline("derain the image", image=image).images[0]
239 | image.save("image.png")
240 | ```
241 |
242 |
243 | > 💡 **Note**: Since the above pipelines are essentially of type `StableDiffusionInstructPix2PixPipeline`, you can customize several arguments that
244 | the pipeline exposes. Refer to the [official docs](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/pix2pix) for more details.
245 |
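For instance, the number of inference steps and the guidance scales can be tweaked directly in the call. A minimal sketch, reusing the `pipeline` and `image` objects from the cartoonization snippet above:

```python
import torch

# Fixing a generator makes the edit reproducible.
generator = torch.manual_seed(42)
edited_image = pipeline(
    "Cartoonize the following image",
    image=image,
    num_inference_steps=20,
    image_guidance_scale=1.5,
    guidance_scale=7.0,
    generator=generator,
).images[0]
edited_image.save("edited_image.png")
```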
246 | ## Results
247 |
248 | ### Cartoonization
249 |
250 |
251 |
252 |
253 |
254 | ---
255 |
256 |
257 |
258 |
259 |
260 | ### Low-level image processing
261 |
262 |
263 |
264 |
265 |
266 | ---
267 |
268 |
269 |
270 |
271 |
272 | Refer to our [blog post](https://hf.co/blog/instruction-tuning-sd) for more discussions on results and open questions.
273 |
274 |
275 | ## Acknowledgements
276 |
277 | Thanks to [Alara Dirik](https://www.linkedin.com/in/alaradirik/) and [Zhengzhong Tu](https://www.linkedin.com/in/zhengzhongtu) for the helpful discussions.
278 |
279 | ## Citation
280 |
281 | ```bibtex
282 | @article{
283 | Paul2023instruction-tuning-sd,
284 | author = {Paul, Sayak},
285 | title = {Instruction-tuning Stable Diffusion with InstructPix2Pix},
286 | journal = {Hugging Face Blog},
287 | year = {2023},
288 | note = {https://huggingface.co/blog/instruction-tuning-sd},
289 | }
290 | ```
291 |
292 |
--------------------------------------------------------------------------------
/train_instruct_pix2pix.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | """Script to fine-tune Stable Diffusion for InstructPix2Pix."""
18 |
19 | import argparse
20 | import logging
21 | import math
22 | import os
23 | from pathlib import Path
24 | from typing import Optional
25 |
26 | import accelerate
27 | import datasets
28 | import numpy as np
29 | import PIL
30 | import requests
31 | import torch
32 | import torch.nn as nn
33 | import torch.nn.functional as F
34 | import torch.utils.checkpoint
35 | import transformers
36 | from accelerate import Accelerator
37 | from accelerate.logging import get_logger
38 | from accelerate.utils import ProjectConfiguration, set_seed
39 | from datasets import load_dataset
40 | from huggingface_hub import HfFolder, Repository, create_repo, whoami
41 | from packaging import version
42 | from torchvision import transforms
43 | from tqdm.auto import tqdm
44 | from transformers import CLIPTextModel, CLIPTokenizer
45 |
46 | import diffusers
47 | from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionInstructPix2PixPipeline, UNet2DConditionModel
48 | from diffusers.optimization import get_scheduler
49 | from diffusers.training_utils import EMAModel
50 | from diffusers.utils import check_min_version, deprecate, is_wandb_available
51 | from diffusers.utils.import_utils import is_xformers_available
52 |
53 |
54 | # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
55 | check_min_version("0.15.0.dev0")
56 |
57 | logger = get_logger(__name__, log_level="INFO")
58 |
59 | DATASET_NAME_MAPPING = {
60 | "fusing/instructpix2pix-1000-samples": ("input_image", "edit_prompt", "edited_image"),
61 | }
62 | WANDB_TABLE_COL_NAMES = ["original_image", "edited_image", "edit_prompt"]
63 |
64 |
65 | def parse_args():
66 | parser = argparse.ArgumentParser(description="Simple example of a training script for InstructPix2Pix.")
67 | parser.add_argument(
68 | "--pretrained_model_name_or_path",
69 | type=str,
70 | default=None,
71 | required=True,
72 | help="Path to pretrained model or model identifier from huggingface.co/models.",
73 | )
74 | parser.add_argument(
75 | "--revision",
76 | type=str,
77 | default=None,
78 | required=False,
79 | help="Revision of pretrained model identifier from huggingface.co/models.",
80 | )
81 | parser.add_argument(
82 | "--dataset_name",
83 | type=str,
84 | default=None,
85 | help=(
86 | "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,"
87 | " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
88 | " or to a folder containing files that 🤗 Datasets can understand."
89 | ),
90 | )
91 | parser.add_argument(
92 | "--dataset_config_name",
93 | type=str,
94 | default=None,
95 | help="The config of the Dataset, leave as None if there's only one config.",
96 | )
97 | parser.add_argument(
98 | "--train_data_dir",
99 | type=str,
100 | default=None,
101 | help=(
102 | "A folder containing the training data. Folder contents must follow the structure described in"
103 | " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file"
104 | " must exist to provide the captions for the images. Ignored if `dataset_name` is specified."
105 | ),
106 | )
107 | parser.add_argument(
108 | "--original_image_column",
109 | type=str,
110 | default="input_image",
111 |         help="The column of the dataset containing the original image on which edits were made.",
112 | )
113 | parser.add_argument(
114 | "--edited_image_column",
115 | type=str,
116 | default="edited_image",
117 | help="The column of the dataset containing the edited image.",
118 | )
119 | parser.add_argument(
120 | "--edit_prompt_column",
121 | type=str,
122 | default="edit_prompt",
123 | help="The column of the dataset containing the edit instruction.",
124 | )
125 | parser.add_argument(
126 | "--val_image_url",
127 | type=str,
128 | default=None,
129 | help="URL to the original image that you would like to edit (used during inference for debugging purposes).",
130 | )
131 | parser.add_argument(
132 | "--validation_prompt", type=str, default=None, help="A prompt that is sampled during training for inference."
133 | )
134 | parser.add_argument(
135 | "--num_validation_images",
136 | type=int,
137 | default=4,
138 | help="Number of images that should be generated during validation with `validation_prompt`.",
139 | )
140 | parser.add_argument(
141 | "--validation_epochs",
142 | type=int,
143 | default=1,
144 | help=(
145 | "Run fine-tuning validation every X epochs. The validation process consists of running the prompt"
146 |             " `args.validation_prompt` `args.num_validation_images` times."
147 | ),
148 | )
149 | parser.add_argument(
150 | "--max_train_samples",
151 | type=int,
152 | default=None,
153 | help=(
154 | "For debugging purposes or quicker training, truncate the number of training examples to this "
155 | "value if set."
156 | ),
157 | )
158 | parser.add_argument(
159 | "--output_dir",
160 | type=str,
161 | default="instruct-pix2pix-model",
162 | help="The output directory where the model predictions and checkpoints will be written.",
163 | )
164 | parser.add_argument(
165 | "--cache_dir",
166 | type=str,
167 | default=None,
168 | help="The directory where the downloaded models and datasets will be stored.",
169 | )
170 | parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
171 | parser.add_argument(
172 | "--resolution",
173 | type=int,
174 | default=256,
175 | help=(
176 |             "The resolution for input images. All images in the train/validation dataset will be resized to this"
177 |             " resolution."
178 | ),
179 | )
180 | parser.add_argument(
181 | "--center_crop",
182 | default=False,
183 | action="store_true",
184 | help=(
185 | "Whether to center crop the input images to the resolution. If not set, the images will be randomly"
186 | " cropped. The images will be resized to the resolution first before cropping."
187 | ),
188 | )
189 | parser.add_argument(
190 | "--random_flip",
191 | action="store_true",
192 |         help="Whether to randomly flip images horizontally.",
193 | )
194 | parser.add_argument(
195 | "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
196 | )
197 | parser.add_argument("--num_train_epochs", type=int, default=100)
198 | parser.add_argument(
199 | "--max_train_steps",
200 | type=int,
201 | default=None,
202 | help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
203 | )
204 | parser.add_argument(
205 | "--gradient_accumulation_steps",
206 | type=int,
207 | default=1,
208 |         help="Number of update steps to accumulate before performing a backward/update pass.",
209 | )
210 | parser.add_argument(
211 | "--gradient_checkpointing",
212 | action="store_true",
213 | help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
214 | )
215 | parser.add_argument(
216 | "--learning_rate",
217 | type=float,
218 | default=1e-4,
219 | help="Initial learning rate (after the potential warmup period) to use.",
220 | )
221 | parser.add_argument(
222 | "--scale_lr",
223 | action="store_true",
224 | default=False,
225 | help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
226 | )
227 | parser.add_argument(
228 | "--lr_scheduler",
229 | type=str,
230 | default="constant",
231 | help=(
232 | 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
233 | ' "constant", "constant_with_warmup"]'
234 | ),
235 | )
236 | parser.add_argument(
237 | "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
238 | )
239 | parser.add_argument(
240 | "--conditioning_dropout_prob",
241 | type=float,
242 | default=None,
243 | help="Conditioning dropout probability. Drops out the conditionings (image and edit prompt) used in training InstructPix2Pix. See section 3.2.1 in the paper: https://arxiv.org/abs/2211.09800.",
244 | )
245 | parser.add_argument(
246 | "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
247 | )
248 | parser.add_argument(
249 | "--allow_tf32",
250 | action="store_true",
251 | help=(
252 | "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
253 | " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
254 | ),
255 | )
256 | parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.")
257 | parser.add_argument(
258 | "--non_ema_revision",
259 | type=str,
260 | default=None,
261 | required=False,
262 | help=(
263 | "Revision of pretrained non-ema model identifier. Must be a branch, tag or git identifier of the local or"
264 | " remote repository specified with --pretrained_model_name_or_path."
265 | ),
266 | )
267 | parser.add_argument(
268 | "--dataloader_num_workers",
269 | type=int,
270 | default=0,
271 | help=(
272 | "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
273 | ),
274 | )
275 | parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
276 | parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
277 | parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
278 |     parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer.")
279 | parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
280 | parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
281 | parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
282 | parser.add_argument(
283 | "--hub_model_id",
284 | type=str,
285 | default=None,
286 | help="The name of the repository to keep in sync with the local `output_dir`.",
287 | )
288 | parser.add_argument(
289 | "--logging_dir",
290 | type=str,
291 | default="logs",
292 | help=(
293 | "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
294 | " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
295 | ),
296 | )
297 | parser.add_argument(
298 | "--mixed_precision",
299 | type=str,
300 | default=None,
301 | choices=["no", "fp16", "bf16"],
302 | help=(
303 | "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
304 |             " 1.10 and an Nvidia Ampere GPU. Defaults to the value of the accelerate config of the current system or the"
305 | " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
306 | ),
307 | )
308 | parser.add_argument(
309 | "--report_to",
310 | type=str,
311 | default="tensorboard",
312 | help=(
313 | 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
314 | ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
315 | ),
316 | )
317 | parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
318 | parser.add_argument(
319 | "--checkpointing_steps",
320 | type=int,
321 | default=500,
322 | help=(
323 | "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming"
324 | " training using `--resume_from_checkpoint`."
325 | ),
326 | )
327 | parser.add_argument(
328 | "--checkpoints_total_limit",
329 | type=int,
330 | default=None,
331 | help=(
332 | "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`."
333 | " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state"
334 |             " for more details."
335 | ),
336 | )
337 | parser.add_argument(
338 | "--resume_from_checkpoint",
339 | type=str,
340 | default=None,
341 | help=(
342 | "Whether training should be resumed from a previous checkpoint. Use a path saved by"
343 | ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
344 | ),
345 | )
346 | parser.add_argument(
347 | "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
348 | )
349 |
350 | args = parser.parse_args()
351 | env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
352 | if env_local_rank != -1 and env_local_rank != args.local_rank:
353 | args.local_rank = env_local_rank
354 |
355 | # Sanity checks
356 | if args.dataset_name is None and args.train_data_dir is None:
357 | raise ValueError("Need either a dataset name or a training folder.")
358 |
359 | # default to using the same revision for the non-ema model if not specified
360 | if args.non_ema_revision is None:
361 | args.non_ema_revision = args.revision
362 |
363 | return args
364 |
365 |
366 | def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
367 | if token is None:
368 | token = HfFolder.get_token()
369 | if organization is None:
370 | username = whoami(token)["name"]
371 | return f"{username}/{model_id}"
372 | else:
373 | return f"{organization}/{model_id}"
374 |
375 |
376 | def convert_to_np(image, resolution):
377 | image = image.convert("RGB").resize((resolution, resolution))
378 | return np.array(image).transpose(2, 0, 1)
379 |
380 |
381 | def download_image(url):
382 | image = PIL.Image.open(requests.get(url, stream=True).raw)
383 | image = PIL.ImageOps.exif_transpose(image)
384 | image = image.convert("RGB")
385 | return image
386 |
387 |
388 | def main():
389 | args = parse_args()
390 |
391 | if args.non_ema_revision is not None:
392 | deprecate(
393 | "non_ema_revision!=None",
394 | "0.15.0",
395 | message=(
396 | "Downloading 'non_ema' weights from revision branches of the Hub is deprecated. Please make sure to"
397 | " use `--variant=non_ema` instead."
398 | ),
399 | )
400 | logging_dir = os.path.join(args.output_dir, args.logging_dir)
401 | accelerator_project_config = ProjectConfiguration(
402 | total_limit=args.checkpoints_total_limit, logging_dir=logging_dir
403 | )
404 | accelerator = Accelerator(
405 | gradient_accumulation_steps=args.gradient_accumulation_steps,
406 | mixed_precision=args.mixed_precision,
407 | log_with=args.report_to,
408 | project_config=accelerator_project_config,
409 | )
410 |
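    # Note: `torch.Generator.manual_seed` expects an integer, so `--seed` must be passed; with the
    # default of `None`, the line below raises a TypeError before training starts.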
411 | generator = torch.Generator(device=accelerator.device).manual_seed(args.seed)
412 |
413 | if args.report_to == "wandb":
414 | if not is_wandb_available():
415 | raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
416 | import wandb
417 |
418 | # Make one log on every process with the configuration for debugging.
419 | logging.basicConfig(
420 | format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
421 | datefmt="%m/%d/%Y %H:%M:%S",
422 | level=logging.INFO,
423 | )
424 | logger.info(accelerator.state, main_process_only=False)
425 | if accelerator.is_local_main_process:
426 | datasets.utils.logging.set_verbosity_warning()
427 | transformers.utils.logging.set_verbosity_warning()
428 | diffusers.utils.logging.set_verbosity_info()
429 | else:
430 | datasets.utils.logging.set_verbosity_error()
431 | transformers.utils.logging.set_verbosity_error()
432 | diffusers.utils.logging.set_verbosity_error()
433 |
434 | # If passed along, set the training seed now.
435 | if args.seed is not None:
436 | set_seed(args.seed)
437 |
438 | # Handle the repository creation
439 | if accelerator.is_main_process:
440 | if args.push_to_hub:
441 | if args.hub_model_id is None:
442 | repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
443 | else:
444 | repo_name = args.hub_model_id
445 | create_repo(repo_name, exist_ok=True, token=args.hub_token)
446 | repo = Repository(args.output_dir, clone_from=repo_name, token=args.hub_token)
447 |
448 | with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
449 | if "step_*" not in gitignore:
450 | gitignore.write("step_*\n")
451 | if "epoch_*" not in gitignore:
452 | gitignore.write("epoch_*\n")
453 | elif args.output_dir is not None:
454 | os.makedirs(args.output_dir, exist_ok=True)
455 |
456 | # Load scheduler, tokenizer and models.
457 | noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
458 | tokenizer = CLIPTokenizer.from_pretrained(
459 | args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision
460 | )
461 | text_encoder = CLIPTextModel.from_pretrained(
462 | args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
463 | )
464 | vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision)
465 | unet = UNet2DConditionModel.from_pretrained(
466 | args.pretrained_model_name_or_path, subfolder="unet", revision=args.non_ema_revision
467 | )
468 |
469 | # InstructPix2Pix uses an additional image for conditioning. To accommodate that,
470 | # it uses 8 channels (instead of 4) in the first (conv) layer of the UNet. This UNet is
471 | # then fine-tuned on the custom InstructPix2Pix dataset. This modified UNet is initialized
472 |     # from the pre-trained checkpoints. The extra channels added to the first layer are
473 |     # initialized to zero.
474 | if accelerator.is_main_process:
475 | logger.info("Initializing the InstructPix2Pix UNet from the pretrained UNet.")
476 | in_channels = 8
477 | out_channels = unet.conv_in.out_channels
478 | unet.register_to_config(in_channels=in_channels)
479 |
480 | with torch.no_grad():
481 | new_conv_in = nn.Conv2d(
482 | in_channels, out_channels, unet.conv_in.kernel_size, unet.conv_in.stride, unet.conv_in.padding
483 | )
484 | new_conv_in.weight.zero_()
485 | new_conv_in.weight[:, :4, :, :].copy_(unet.conv_in.weight)
486 | unet.conv_in = new_conv_in
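    # The first 4 input channels keep the pretrained weights (they receive the noisy latents), while the
    # 4 zero-initialized channels receive the conditioning image latents, matching the
    # `torch.cat([noisy_latents, original_image_embeds], dim=1)` call in the training loop below.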
487 |
488 | # Freeze vae and text_encoder
489 | vae.requires_grad_(False)
490 | text_encoder.requires_grad_(False)
491 |
492 | # Create EMA for the unet.
493 | if args.use_ema:
494 | ema_unet = EMAModel(unet.parameters(), model_cls=UNet2DConditionModel, model_config=unet.config)
495 |
496 | if args.enable_xformers_memory_efficient_attention:
497 | if is_xformers_available():
498 | import xformers
499 |
500 | xformers_version = version.parse(xformers.__version__)
501 | if xformers_version == version.parse("0.0.16"):
502 | logger.warn(
503 | "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
504 | )
505 | unet.enable_xformers_memory_efficient_attention()
506 | else:
507 | raise ValueError("xformers is not available. Make sure it is installed correctly")
508 |
509 | # `accelerate` 0.16.0 will have better support for customized saving
510 | if version.parse(accelerate.__version__) >= version.parse("0.16.0"):
511 | # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
512 | def save_model_hook(models, weights, output_dir):
513 | if args.use_ema:
514 | ema_unet.save_pretrained(os.path.join(output_dir, "unet_ema"))
515 |
516 | for i, model in enumerate(models):
517 | model.save_pretrained(os.path.join(output_dir, "unet"))
518 |
519 | # make sure to pop weight so that corresponding model is not saved again
520 | weights.pop()
521 |
522 | def load_model_hook(models, input_dir):
523 | if args.use_ema:
524 | load_model = EMAModel.from_pretrained(os.path.join(input_dir, "unet_ema"), UNet2DConditionModel)
525 | ema_unet.load_state_dict(load_model.state_dict())
526 | ema_unet.to(accelerator.device)
527 | del load_model
528 |
529 | for i in range(len(models)):
530 | # pop models so that they are not loaded again
531 | model = models.pop()
532 |
533 | # load diffusers style into model
534 | load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet")
535 | model.register_to_config(**load_model.config)
536 |
537 | model.load_state_dict(load_model.state_dict())
538 | del load_model
539 |
540 | accelerator.register_save_state_pre_hook(save_model_hook)
541 | accelerator.register_load_state_pre_hook(load_model_hook)
542 |
543 | if args.gradient_checkpointing:
544 | unet.enable_gradient_checkpointing()
545 |
546 | # Enable TF32 for faster training on Ampere GPUs,
547 | # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
548 | if args.allow_tf32:
549 | torch.backends.cuda.matmul.allow_tf32 = True
550 |
551 | if args.scale_lr:
552 | args.learning_rate = (
553 | args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
554 | )
555 |
556 | # Initialize the optimizer
557 | if args.use_8bit_adam:
558 | try:
559 | import bitsandbytes as bnb
560 | except ImportError:
561 | raise ImportError(
562 | "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`"
563 | )
564 |
565 | optimizer_cls = bnb.optim.AdamW8bit
566 | else:
567 | optimizer_cls = torch.optim.AdamW
568 |
569 | optimizer = optimizer_cls(
570 | unet.parameters(),
571 | lr=args.learning_rate,
572 | betas=(args.adam_beta1, args.adam_beta2),
573 | weight_decay=args.adam_weight_decay,
574 | eps=args.adam_epsilon,
575 | )
576 |
577 | # Get the datasets: you can either provide your own training and evaluation files (see below)
578 | # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub).
579 |
580 | # In distributed training, the load_dataset function guarantees that only one local process can concurrently
581 | # download the dataset.
582 | if args.dataset_name is not None:
583 | # Downloading and loading a dataset from the hub.
584 | dataset = load_dataset(
585 | args.dataset_name,
586 | args.dataset_config_name,
587 | cache_dir=args.cache_dir,
588 | )
589 | else:
590 | data_files = {}
591 | if args.train_data_dir is not None:
592 | data_files["train"] = os.path.join(args.train_data_dir, "**")
593 | dataset = load_dataset(
594 | "imagefolder",
595 | data_files=data_files,
596 | cache_dir=args.cache_dir,
597 | )
598 | # See more about loading custom images at
599 | # https://huggingface.co/docs/datasets/main/en/image_load#imagefolder
600 |
601 | # Preprocessing the datasets.
602 | # We need to tokenize inputs and targets.
603 | column_names = dataset["train"].column_names
604 |
605 | # 6. Get the column names for input/target.
606 | dataset_columns = DATASET_NAME_MAPPING.get(args.dataset_name, None)
607 | if args.original_image_column is None:
608 | original_image_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
609 | else:
610 | original_image_column = args.original_image_column
611 | if original_image_column not in column_names:
612 | raise ValueError(
613 |             f"'--original_image_column' value '{args.original_image_column}' needs to be one of: {', '.join(column_names)}"
614 | )
615 | if args.edit_prompt_column is None:
616 | edit_prompt_column = dataset_columns[1] if dataset_columns is not None else column_names[1]
617 | else:
618 | edit_prompt_column = args.edit_prompt_column
619 | if edit_prompt_column not in column_names:
620 | raise ValueError(
621 |             f"'--edit_prompt_column' value '{args.edit_prompt_column}' needs to be one of: {', '.join(column_names)}"
622 | )
623 | if args.edited_image_column is None:
624 | edited_image_column = dataset_columns[2] if dataset_columns is not None else column_names[2]
625 | else:
626 | edited_image_column = args.edited_image_column
627 | if edited_image_column not in column_names:
628 | raise ValueError(
629 |             f"'--edited_image_column' value '{args.edited_image_column}' needs to be one of: {', '.join(column_names)}"
630 | )
631 |
632 | # Preprocessing the datasets.
633 | # We need to tokenize input captions and transform the images.
634 | def tokenize_captions(captions):
635 | inputs = tokenizer(
636 | captions, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt"
637 | )
638 | return inputs.input_ids
639 |
640 | # Preprocessing the datasets.
641 | train_transforms = transforms.Compose(
642 | [
643 | transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution),
644 | transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x),
645 | ]
646 | )
647 |
648 | def preprocess_images(examples):
649 | original_images = np.concatenate(
650 | [convert_to_np(image, args.resolution) for image in examples[original_image_column]]
651 | )
652 | edited_images = np.concatenate(
653 | [convert_to_np(image, args.resolution) for image in examples[edited_image_column]]
654 | )
655 | # We need to ensure that the original and the edited images undergo the same
656 | # augmentation transforms.
657 | images = np.concatenate([original_images, edited_images])
658 | images = torch.tensor(images)
659 | images = 2 * (images / 255) - 1
660 | return train_transforms(images)
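    # Because original and edited images are stacked along the leading dimension, torchvision sees a single
    # multi-channel tensor, so one crop/flip is sampled and applied identically to every original/edited pair.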
661 |
662 | def preprocess_train(examples):
663 | # Preprocess images.
664 | preprocessed_images = preprocess_images(examples)
665 | # Since the original and edited images were concatenated before
666 | # applying the transformations, we need to separate them and reshape
667 | # them accordingly.
668 | original_images, edited_images = preprocessed_images.chunk(2)
669 | original_images = original_images.reshape(-1, 3, args.resolution, args.resolution)
670 | edited_images = edited_images.reshape(-1, 3, args.resolution, args.resolution)
671 |
672 | # Collate the preprocessed images into the `examples`.
673 | examples["original_pixel_values"] = original_images
674 | examples["edited_pixel_values"] = edited_images
675 |
676 | # Preprocess the captions.
677 | captions = [caption for caption in examples[edit_prompt_column]]
678 | examples["input_ids"] = tokenize_captions(captions)
679 | return examples
680 |
681 | with accelerator.main_process_first():
682 | if args.max_train_samples is not None:
683 | dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples))
684 | # Set the training transforms
685 | train_dataset = dataset["train"].with_transform(preprocess_train)
686 |
687 | def collate_fn(examples):
688 | original_pixel_values = torch.stack([example["original_pixel_values"] for example in examples])
689 | original_pixel_values = original_pixel_values.to(memory_format=torch.contiguous_format).float()
690 | edited_pixel_values = torch.stack([example["edited_pixel_values"] for example in examples])
691 | edited_pixel_values = edited_pixel_values.to(memory_format=torch.contiguous_format).float()
692 | input_ids = torch.stack([example["input_ids"] for example in examples])
693 | return {
694 | "original_pixel_values": original_pixel_values,
695 | "edited_pixel_values": edited_pixel_values,
696 | "input_ids": input_ids,
697 | }
698 |
699 | # DataLoaders creation:
700 | train_dataloader = torch.utils.data.DataLoader(
701 | train_dataset,
702 | shuffle=True,
703 | collate_fn=collate_fn,
704 | batch_size=args.train_batch_size,
705 | num_workers=args.dataloader_num_workers,
706 | )
707 |
708 | # Scheduler and math around the number of training steps.
709 | overrode_max_train_steps = False
710 | num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
711 | if args.max_train_steps is None:
712 | args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
713 | overrode_max_train_steps = True
714 |
715 | lr_scheduler = get_scheduler(
716 | args.lr_scheduler,
717 | optimizer=optimizer,
718 | num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps,
719 | num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
720 | )
721 |
722 | # Prepare everything with our `accelerator`.
723 | unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
724 | unet, optimizer, train_dataloader, lr_scheduler
725 | )
726 |
727 | if args.use_ema:
728 | ema_unet.to(accelerator.device)
729 |
730 | # For mixed precision training we cast the text_encoder and vae weights to half-precision
731 | # as these models are only used for inference, keeping weights in full precision is not required.
732 | weight_dtype = torch.float32
733 | if accelerator.mixed_precision == "fp16":
734 | weight_dtype = torch.float16
735 | elif accelerator.mixed_precision == "bf16":
736 | weight_dtype = torch.bfloat16
737 |
738 |     # Move text_encoder and vae to GPU and cast to weight_dtype
739 | text_encoder.to(accelerator.device, dtype=weight_dtype)
740 | vae.to(accelerator.device, dtype=weight_dtype)
741 |
742 | # We need to recalculate our total training steps as the size of the training dataloader may have changed.
743 | num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
744 | if overrode_max_train_steps:
745 | args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
746 | # Afterwards we recalculate our number of training epochs
747 | args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
748 |
749 | # We need to initialize the trackers we use, and also store our configuration.
750 |     # The trackers initialize automatically on the main process.
751 | if accelerator.is_main_process:
752 | accelerator.init_trackers("instruct-pix2pix", config=vars(args))
753 |
754 | # Train!
755 | total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
756 |
757 | logger.info("***** Running training *****")
758 | logger.info(f" Num examples = {len(train_dataset)}")
759 | logger.info(f" Num Epochs = {args.num_train_epochs}")
760 | logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
761 | logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
762 | logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
763 | logger.info(f" Total optimization steps = {args.max_train_steps}")
764 | global_step = 0
765 | first_epoch = 0
766 |
767 | # Potentially load in the weights and states from a previous save
768 | if args.resume_from_checkpoint:
769 | if args.resume_from_checkpoint != "latest":
770 | path = os.path.basename(args.resume_from_checkpoint)
771 | else:
772 | # Get the most recent checkpoint
773 | dirs = os.listdir(args.output_dir)
774 | dirs = [d for d in dirs if d.startswith("checkpoint")]
775 | dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
776 | path = dirs[-1] if len(dirs) > 0 else None
777 |
778 | if path is None:
779 | accelerator.print(
780 | f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
781 | )
782 | args.resume_from_checkpoint = None
783 | else:
784 | accelerator.print(f"Resuming from checkpoint {path}")
785 | accelerator.load_state(os.path.join(args.output_dir, path))
786 | global_step = int(path.split("-")[1])
787 |
788 | resume_global_step = global_step * args.gradient_accumulation_steps
789 | first_epoch = global_step // num_update_steps_per_epoch
790 | resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps)
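            # `resume_step` is counted in dataloader batches (micro-batches), not optimizer updates, which is
            # why the skip loop below advances the progress bar only once per `gradient_accumulation_steps` batches.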
791 |
792 | # Only show the progress bar once on each machine.
793 | progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process)
794 | progress_bar.set_description("Steps")
795 |
796 | for epoch in range(first_epoch, args.num_train_epochs):
797 | unet.train()
798 | train_loss = 0.0
799 | for step, batch in enumerate(train_dataloader):
800 | # Skip steps until we reach the resumed step
801 | if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step:
802 | if step % args.gradient_accumulation_steps == 0:
803 | progress_bar.update(1)
804 | continue
805 |
806 | with accelerator.accumulate(unet):
807 |                 # We want to learn the denoising process w.r.t. the edited images, which
808 | # are conditioned on the original image (which was edited) and the edit instruction.
809 | # So, first, convert images to latent space.
810 | latents = vae.encode(batch["edited_pixel_values"].to(weight_dtype)).latent_dist.sample()
811 | latents = latents * vae.config.scaling_factor
812 |
813 | # Sample noise that we'll add to the latents
814 | noise = torch.randn_like(latents)
815 | bsz = latents.shape[0]
816 | # Sample a random timestep for each image
817 | timesteps = torch.randint(0, noise_scheduler.num_train_timesteps, (bsz,), device=latents.device)
818 | timesteps = timesteps.long()
819 |
820 | # Add noise to the latents according to the noise magnitude at each timestep
821 | # (this is the forward diffusion process)
822 | noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
823 |
824 | # Get the text embedding for conditioning.
825 | encoder_hidden_states = text_encoder(batch["input_ids"])[0]
826 |
827 | # Get the additional image embedding for conditioning.
828 | # Instead of getting a diagonal Gaussian here, we simply take the mode.
829 | original_image_embeds = vae.encode(batch["original_pixel_values"].to(weight_dtype)).latent_dist.mode()
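                # Taking the mode keeps the image conditioning deterministic, so no sampling noise is
                # injected into the conditioning latents.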
830 |
831 | # Conditioning dropout to support classifier-free guidance during inference. For more details
832 | # check out the section 3.2.1 of the original paper https://arxiv.org/abs/2211.09800.
833 | if args.conditioning_dropout_prob is not None:
834 | random_p = torch.rand(bsz, device=latents.device, generator=generator)
835 | # Sample masks for the edit prompts.
836 | prompt_mask = random_p < 2 * args.conditioning_dropout_prob
837 | prompt_mask = prompt_mask.reshape(bsz, 1, 1)
838 | # Final text conditioning.
839 | null_conditioning = text_encoder(tokenize_captions([""]).to(accelerator.device))[0]
840 | encoder_hidden_states = torch.where(prompt_mask, null_conditioning, encoder_hidden_states)
841 |
842 | # Sample masks for the original images.
843 | image_mask_dtype = original_image_embeds.dtype
844 | image_mask = 1 - (
845 | (random_p >= args.conditioning_dropout_prob).to(image_mask_dtype)
846 | * (random_p < 3 * args.conditioning_dropout_prob).to(image_mask_dtype)
847 | )
848 | image_mask = image_mask.reshape(bsz, 1, 1, 1)
849 | # Final image conditioning.
850 | original_image_embeds = image_mask * original_image_embeds
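                # With p = conditioning_dropout_prob, the prompt alone is dropped for random_p in [0, p),
                # both conditionings for [p, 2p), and the image alone for [2p, 3p), so each dropout event
                # occurs with probability p, as described in section 3.2.1 of the paper.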
851 |
852 | # Concatenate the `original_image_embeds` with the `noisy_latents`.
853 | concatenated_noisy_latents = torch.cat([noisy_latents, original_image_embeds], dim=1)
854 |
855 | # Get the target for loss depending on the prediction type
856 | if noise_scheduler.config.prediction_type == "epsilon":
857 | target = noise
858 | elif noise_scheduler.config.prediction_type == "v_prediction":
859 | target = noise_scheduler.get_velocity(latents, noise, timesteps)
860 | else:
861 | raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
862 |
863 | # Predict the noise residual and compute loss
864 | model_pred = unet(concatenated_noisy_latents, timesteps, encoder_hidden_states).sample
865 | loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
866 |
867 | # Gather the losses across all processes for logging (if we use distributed training).
868 | avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean()
869 | train_loss += avg_loss.item() / args.gradient_accumulation_steps
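                # `train_loss` accumulates the process-averaged loss over one accumulation window and is
                # logged (then reset) once per optimizer step below.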
870 |
871 | # Backpropagate
872 | accelerator.backward(loss)
873 | if accelerator.sync_gradients:
874 | accelerator.clip_grad_norm_(unet.parameters(), args.max_grad_norm)
875 | optimizer.step()
876 | lr_scheduler.step()
877 | optimizer.zero_grad()
878 |
879 | # Checks if the accelerator has performed an optimization step behind the scenes
880 | if accelerator.sync_gradients:
881 | if args.use_ema:
882 | ema_unet.step(unet.parameters())
883 | progress_bar.update(1)
884 | global_step += 1
885 | accelerator.log({"train_loss": train_loss}, step=global_step)
886 | train_loss = 0.0
887 |
888 | if global_step % args.checkpointing_steps == 0:
889 | if accelerator.is_main_process:
890 | save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
891 | accelerator.save_state(save_path)
892 | logger.info(f"Saved state to {save_path}")
893 |
894 | logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
895 | progress_bar.set_postfix(**logs)
896 |
897 | if global_step >= args.max_train_steps:
898 | break
899 |
900 | if accelerator.is_main_process:
901 | if (
902 | (args.val_image_url is not None)
903 | and (args.validation_prompt is not None)
904 | and (epoch % args.validation_epochs == 0)
905 | ):
906 | logger.info(
907 | f"Running validation... \n Generating {args.num_validation_images} images with prompt:"
908 | f" {args.validation_prompt}."
909 | )
910 | # create pipeline
911 | if args.use_ema:
912 | # Store the UNet parameters temporarily and load the EMA parameters to perform inference.
913 | ema_unet.store(unet.parameters())
914 | ema_unet.copy_to(unet.parameters())
915 | pipeline = StableDiffusionInstructPix2PixPipeline.from_pretrained(
916 | args.pretrained_model_name_or_path,
917 | unet=unet,
918 | revision=args.revision,
919 | torch_dtype=weight_dtype,
920 | )
921 | pipeline = pipeline.to(accelerator.device)
922 | pipeline.set_progress_bar_config(disable=True)
923 |
924 | # run inference
925 | original_image = download_image(args.val_image_url)
926 | edited_images = []
927 | with torch.autocast(str(accelerator.device), enabled=accelerator.mixed_precision == "fp16"):
928 | for _ in range(args.num_validation_images):
929 | edited_images.append(
930 | pipeline(
931 | args.validation_prompt,
932 | image=original_image,
933 | num_inference_steps=20,
934 | image_guidance_scale=1.5,
935 | guidance_scale=7,
936 | generator=generator,
937 | ).images[0]
938 | )
939 |
940 | for tracker in accelerator.trackers:
941 | if tracker.name == "wandb":
942 | wandb_table = wandb.Table(columns=WANDB_TABLE_COL_NAMES)
943 | for edited_image in edited_images:
944 | wandb_table.add_data(
945 | wandb.Image(original_image), wandb.Image(edited_image), args.validation_prompt
946 | )
947 | tracker.log({"validation": wandb_table})
948 | if args.use_ema:
949 | # Switch back to the original UNet parameters.
950 | ema_unet.restore(unet.parameters())
951 |
952 | del pipeline
953 | torch.cuda.empty_cache()
954 |
955 | # Create the pipeline using the trained modules and save it.
956 | accelerator.wait_for_everyone()
957 | if accelerator.is_main_process:
958 | unet = accelerator.unwrap_model(unet)
959 | if args.use_ema:
960 | ema_unet.copy_to(unet.parameters())
961 |
962 | pipeline = StableDiffusionInstructPix2PixPipeline.from_pretrained(
963 | args.pretrained_model_name_or_path,
964 | text_encoder=accelerator.unwrap_model(text_encoder),
965 | vae=accelerator.unwrap_model(vae),
966 | unet=unet,
967 | revision=args.revision,
968 | )
969 | pipeline.save_pretrained(args.output_dir)
970 |
971 | if args.push_to_hub:
972 | repo.push_to_hub(commit_message="End of training", blocking=False, auto_lfs_prune=True)
973 |
974 | accelerator.end_training()
975 |
976 |
977 | if __name__ == "__main__":
978 | main()
979 |
--------------------------------------------------------------------------------
/finetune_instruct_pix2pix.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | """Script to fine-tune InstructPix2Pix."""
18 |
19 | import argparse
20 | import logging
21 | import math
22 | import os
23 | from pathlib import Path
24 | from typing import Optional
25 |
26 | import accelerate
27 | import datasets
28 | import diffusers
29 | import numpy as np
30 | import PIL
31 | import requests
32 | import torch
33 | import torch.nn.functional as F
34 | import torch.utils.checkpoint
35 | import transformers
36 | from accelerate import Accelerator
37 | from accelerate.logging import get_logger
38 | from accelerate.utils import ProjectConfiguration, set_seed
39 | from datasets import load_dataset
40 | from diffusers import (AutoencoderKL, DDPMScheduler,
41 | StableDiffusionInstructPix2PixPipeline,
42 | UNet2DConditionModel)
43 | from diffusers.optimization import get_scheduler
44 | from diffusers.training_utils import EMAModel
45 | from diffusers.utils import check_min_version, deprecate, is_wandb_available
46 | from diffusers.utils.import_utils import is_xformers_available
47 | from huggingface_hub import HfFolder, Repository, create_repo, whoami
48 | from packaging import version
49 | from torchvision import transforms
50 | from tqdm.auto import tqdm
51 | from transformers import CLIPTextModel, CLIPTokenizer
52 |
53 | # Will error if the minimal version of diffusers is not installed. Remove at your own risk.
54 | check_min_version("0.15.0.dev0")
55 |
56 | logger = get_logger(__name__, log_level="INFO")
57 |
58 | DATASET_NAME_MAPPING = {
59 | "sayakpaul/cartoonizer-dataset": (
60 | "original_image",
61 | "edit_prompt",
62 | "cartoonized_image",
63 | ),
64 | }
65 | WANDB_TABLE_COL_NAMES = ["original_image", "edited_image", "edit_prompt"]
66 |
67 |
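# Example invocation (values are illustrative; the InstructPix2Pix starting checkpoint is an assumption,
# while the dataset matches the column defaults declared above):
#
#   accelerate launch finetune_instruct_pix2pix.py \
#     --pretrained_model_name_or_path="timbrooks/instruct-pix2pix" \
#     --dataset_name="sayakpaul/cartoonizer-dataset" \
#     --resolution=256 --train_batch_size=4 --gradient_accumulation_steps=4 \
#     --max_train_steps=15000 --learning_rate=5e-05 \
#     --conditioning_dropout_prob=0.05 --seed=42 \
#     --output_dir="cartoonizer-model"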
68 | def parse_args():
69 | parser = argparse.ArgumentParser(
70 | description="Simple example of a training script for InstructPix2Pix."
71 | )
72 | parser.add_argument(
73 | "--pretrained_model_name_or_path",
74 | type=str,
75 | default=None,
76 | required=True,
77 | help="Path to pretrained model or model identifier from huggingface.co/models.",
78 | )
79 | parser.add_argument(
80 | "--revision",
81 | type=str,
82 | default=None,
83 | required=False,
84 | help="Revision of pretrained model identifier from huggingface.co/models.",
85 | )
86 | parser.add_argument(
87 | "--dataset_name",
88 | type=str,
89 | default=None,
90 | help=(
91 | "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,"
92 | " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
93 | " or to a folder containing files that 🤗 Datasets can understand."
94 | ),
95 | )
96 | parser.add_argument(
97 | "--dataset_config_name",
98 | type=str,
99 | default=None,
100 | help="The config of the Dataset, leave as None if there's only one config.",
101 | )
102 | parser.add_argument(
103 | "--train_data_dir",
104 | type=str,
105 | default=None,
106 | help=(
107 | "A folder containing the training data. Folder contents must follow the structure described in"
108 | " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file"
109 | " must exist to provide the captions for the images. Ignored if `dataset_name` is specified."
110 | ),
111 | )
112 | parser.add_argument(
113 | "--original_image_column",
114 | type=str,
115 | default="original_image",
116 |         help="The column of the dataset containing the original image on which edits were made.",
117 | )
118 | parser.add_argument(
119 | "--edited_image_column",
120 | type=str,
121 | default="cartoonized_image",
122 | help="The column of the dataset containing the edited image.",
123 | )
124 | parser.add_argument(
125 | "--edit_prompt_column",
126 | type=str,
127 | default="edit_prompt",
128 | help="The column of the dataset containing the edit instruction.",
129 | )
130 | parser.add_argument(
131 | "--val_image_url",
132 | type=str,
133 | default=None,
134 | help="URL to the original image that you would like to edit (used during inference for debugging purposes).",
135 | )
136 | parser.add_argument(
137 | "--validation_prompt",
138 | type=str,
139 | default=None,
140 | help="A prompt that is sampled during training for inference.",
141 | )
142 | parser.add_argument(
143 | "--num_validation_images",
144 | type=int,
145 | default=4,
146 | help="Number of images that should be generated during validation with `validation_prompt`.",
147 | )
148 | parser.add_argument(
149 | "--validation_epochs",
150 | type=int,
151 | default=1,
152 | help=(
153 | "Run fine-tuning validation every X epochs. The validation process consists of running the prompt"
154 |             " `args.validation_prompt` `args.num_validation_images` times."
155 | ),
156 | )
157 | parser.add_argument(
158 | "--max_train_samples",
159 | type=int,
160 | default=None,
161 | help=(
162 | "For debugging purposes or quicker training, truncate the number of training examples to this "
163 | "value if set."
164 | ),
165 | )
166 | parser.add_argument(
167 | "--output_dir",
168 | type=str,
169 | default="instruct-pix2pix-model",
170 | help="The output directory where the model predictions and checkpoints will be written.",
171 | )
172 | parser.add_argument(
173 | "--cache_dir",
174 | type=str,
175 | default=None,
176 | help="The directory where the downloaded models and datasets will be stored.",
177 | )
178 | parser.add_argument(
179 | "--seed", type=int, default=None, help="A seed for reproducible training."
180 | )
181 | parser.add_argument(
182 | "--resolution",
183 | type=int,
184 | default=256,
185 | help=(
186 |             "The resolution for input images. All images in the train/validation dataset will be resized to this"
187 |             " resolution."
188 | ),
189 | )
190 | parser.add_argument(
191 | "--center_crop",
192 | default=False,
193 | action="store_true",
194 | help=(
195 | "Whether to center crop the input images to the resolution. If not set, the images will be randomly"
196 | " cropped. The images will be resized to the resolution first before cropping."
197 | ),
198 | )
199 | parser.add_argument(
200 | "--random_flip",
201 | action="store_true",
202 |         help="Whether to randomly flip images horizontally.",
203 | )
204 | parser.add_argument(
205 | "--train_batch_size",
206 | type=int,
207 | default=16,
208 | help="Batch size (per device) for the training dataloader.",
209 | )
210 | parser.add_argument("--num_train_epochs", type=int, default=100)
211 | parser.add_argument(
212 | "--max_train_steps",
213 | type=int,
214 | default=None,
215 | help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
216 | )
217 | parser.add_argument(
218 | "--gradient_accumulation_steps",
219 | type=int,
220 | default=1,
221 |         help="Number of update steps to accumulate before performing a backward/update pass.",
222 | )
223 | parser.add_argument(
224 | "--gradient_checkpointing",
225 | action="store_true",
226 | help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
227 | )
228 | parser.add_argument(
229 | "--learning_rate",
230 | type=float,
231 | default=1e-4,
232 | help="Initial learning rate (after the potential warmup period) to use.",
233 | )
234 | parser.add_argument(
235 | "--scale_lr",
236 | action="store_true",
237 | default=False,
238 | help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
239 | )
240 | parser.add_argument(
241 | "--lr_scheduler",
242 | type=str,
243 | default="constant",
244 | help=(
245 | 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
246 | ' "constant", "constant_with_warmup"]'
247 | ),
248 | )
249 | parser.add_argument(
250 | "--lr_warmup_steps",
251 | type=int,
252 | default=500,
253 | help="Number of steps for the warmup in the lr scheduler.",
254 | )
255 | parser.add_argument(
256 | "--conditioning_dropout_prob",
257 | type=float,
258 | default=None,
259 | help="Conditioning dropout probability. Drops out the conditionings (image and edit prompt) used in training InstructPix2Pix. See section 3.2.1 in the paper: https://arxiv.org/abs/2211.09800.",
260 | )
261 | parser.add_argument(
262 | "--use_8bit_adam",
263 | action="store_true",
264 | help="Whether or not to use 8-bit Adam from bitsandbytes.",
265 | )
266 | parser.add_argument(
267 | "--allow_tf32",
268 | action="store_true",
269 | help=(
270 | "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
271 | " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
272 | ),
273 | )
274 | parser.add_argument(
275 | "--use_ema", action="store_true", help="Whether to use EMA model."
276 | )
277 | parser.add_argument(
278 | "--non_ema_revision",
279 | type=str,
280 | default=None,
281 | required=False,
282 | help=(
283 | "Revision of pretrained non-ema model identifier. Must be a branch, tag or git identifier of the local or"
284 | " remote repository specified with --pretrained_model_name_or_path."
285 | ),
286 | )
287 | parser.add_argument(
288 | "--dataloader_num_workers",
289 | type=int,
290 | default=0,
291 | help=(
292 | "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
293 | ),
294 | )
295 | parser.add_argument(
296 | "--adam_beta1",
297 | type=float,
298 | default=0.9,
299 | help="The beta1 parameter for the Adam optimizer.",
300 | )
301 | parser.add_argument(
302 | "--adam_beta2",
303 | type=float,
304 | default=0.999,
305 | help="The beta2 parameter for the Adam optimizer.",
306 | )
307 | parser.add_argument(
308 | "--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use."
309 | )
310 | parser.add_argument(
311 | "--adam_epsilon",
312 | type=float,
313 | default=1e-08,
314 |         help="Epsilon value for the Adam optimizer.",
315 | )
316 | parser.add_argument(
317 | "--max_grad_norm", default=1.0, type=float, help="Max gradient norm."
318 | )
319 | parser.add_argument(
320 | "--push_to_hub",
321 | action="store_true",
322 | help="Whether or not to push the model to the Hub.",
323 | )
324 | parser.add_argument(
325 | "--hub_token",
326 | type=str,
327 | default=None,
328 | help="The token to use to push to the Model Hub.",
329 | )
330 | parser.add_argument(
331 | "--hub_model_id",
332 | type=str,
333 | default=None,
334 | help="The name of the repository to keep in sync with the local `output_dir`.",
335 | )
336 | parser.add_argument(
337 | "--logging_dir",
338 | type=str,
339 | default="logs",
340 | help=(
341 | "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
342 | " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
343 | ),
344 | )
345 | parser.add_argument(
346 | "--mixed_precision",
347 | type=str,
348 | default=None,
349 | choices=["no", "fp16", "bf16"],
350 | help=(
351 | "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
352 |             " 1.10 and an Nvidia Ampere GPU. Defaults to the value of the accelerate config of the current system or the"
353 | " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
354 | ),
355 | )
356 | parser.add_argument(
357 | "--report_to",
358 | type=str,
359 | default="tensorboard",
360 | help=(
361 | 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
362 | ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
363 | ),
364 | )
365 | parser.add_argument(
366 | "--local_rank",
367 | type=int,
368 | default=-1,
369 | help="For distributed training: local_rank",
370 | )
371 | parser.add_argument(
372 | "--checkpointing_steps",
373 | type=int,
374 | default=500,
375 | help=(
376 | "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming"
377 | " training using `--resume_from_checkpoint`."
378 | ),
379 | )
380 | parser.add_argument(
381 | "--checkpoints_total_limit",
382 | type=int,
383 | default=None,
384 | help=(
385 | "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`."
386 | " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state"
387 |             " for more details."
388 | ),
389 | )
390 | parser.add_argument(
391 | "--resume_from_checkpoint",
392 | type=str,
393 | default=None,
394 | help=(
395 | "Whether training should be resumed from a previous checkpoint. Use a path saved by"
396 | ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
397 | ),
398 | )
399 | parser.add_argument(
400 | "--enable_xformers_memory_efficient_attention",
401 | action="store_true",
402 | help="Whether or not to use xformers.",
403 | )
404 |
405 | args = parser.parse_args()
406 | env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
407 | if env_local_rank != -1 and env_local_rank != args.local_rank:
408 | args.local_rank = env_local_rank
409 |
410 | # Sanity checks
411 | if args.dataset_name is None and args.train_data_dir is None:
412 | raise ValueError("Need either a dataset name or a training folder.")
413 |
414 | # default to using the same revision for the non-ema model if not specified
415 | if args.non_ema_revision is None:
416 | args.non_ema_revision = args.revision
417 |
418 | return args
419 |
420 |
421 | def get_full_repo_name(
422 | model_id: str, organization: Optional[str] = None, token: Optional[str] = None
423 | ):
424 | if token is None:
425 | token = HfFolder.get_token()
426 | if organization is None:
427 | username = whoami(token)["name"]
428 | return f"{username}/{model_id}"
429 | else:
430 | return f"{organization}/{model_id}"
431 |
432 |
433 | def convert_to_np(image, resolution):
434 | image = image.convert("RGB").resize((resolution, resolution))
435 | return np.array(image).transpose(2, 0, 1)
436 |
437 |
438 | def download_image(url):
439 | image = PIL.Image.open(requests.get(url, stream=True).raw)
440 | image = PIL.ImageOps.exif_transpose(image)
441 | image = image.convert("RGB")
442 | return image
443 |
444 |
445 | def main():
446 | args = parse_args()
447 |
448 | if args.non_ema_revision is not None:
449 | deprecate(
450 | "non_ema_revision!=None",
451 | "0.15.0",
452 | message=(
453 | "Downloading 'non_ema' weights from revision branches of the Hub is deprecated. Please make sure to"
454 | " use `--variant=non_ema` instead."
455 | ),
456 | )
457 | logging_dir = os.path.join(args.output_dir, args.logging_dir)
458 | accelerator_project_config = ProjectConfiguration(
459 | total_limit=args.checkpoints_total_limit, logging_dir=logging_dir
460 | )
461 | accelerator = Accelerator(
462 | gradient_accumulation_steps=args.gradient_accumulation_steps,
463 | mixed_precision=args.mixed_precision,
464 | log_with=args.report_to,
465 | project_config=accelerator_project_config,
466 | )
467 |
468 | generator = torch.Generator(device=accelerator.device).manual_seed(args.seed)
469 |
470 | if args.report_to == "wandb":
471 | if not is_wandb_available():
472 | raise ImportError(
473 | "Make sure to install wandb if you want to use it for logging during training."
474 | )
475 | import wandb
476 |
477 | # Make one log on every process with the configuration for debugging.
478 | logging.basicConfig(
479 | format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
480 | datefmt="%m/%d/%Y %H:%M:%S",
481 | level=logging.INFO,
482 | )
483 | logger.info(accelerator.state, main_process_only=False)
484 | if accelerator.is_local_main_process:
485 | datasets.utils.logging.set_verbosity_warning()
486 | transformers.utils.logging.set_verbosity_warning()
487 | diffusers.utils.logging.set_verbosity_info()
488 | else:
489 | datasets.utils.logging.set_verbosity_error()
490 | transformers.utils.logging.set_verbosity_error()
491 | diffusers.utils.logging.set_verbosity_error()
492 |
493 | # If passed along, set the training seed now.
494 | if args.seed is not None:
495 | set_seed(args.seed)
496 |
497 | # Handle the repository creation
498 | if accelerator.is_main_process:
499 | if args.push_to_hub:
500 | if args.hub_model_id is None:
501 | repo_name = get_full_repo_name(
502 | Path(args.output_dir).name, token=args.hub_token
503 | )
504 | else:
505 | repo_name = args.hub_model_id
506 | create_repo(repo_name, exist_ok=True, token=args.hub_token)
507 | repo = Repository(
508 | args.output_dir, clone_from=repo_name, token=args.hub_token
509 | )
510 |
511 | with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
512 | if "step_*" not in gitignore:
513 | gitignore.write("step_*\n")
514 | if "checkpoint-*" not in gitignore:
515 | gitignore.write("checkpoint-*\n")
516 |                 if "epoch_*" not in gitignore:
517 |                     gitignore.write("epoch_*\n")
518 |
519 | elif args.output_dir is not None:
520 | os.makedirs(args.output_dir, exist_ok=True)
521 |
522 | # Load scheduler, tokenizer and models.
523 | noise_scheduler = DDPMScheduler.from_pretrained(
524 | args.pretrained_model_name_or_path, subfolder="scheduler"
525 | )
526 | tokenizer = CLIPTokenizer.from_pretrained(
527 | args.pretrained_model_name_or_path,
528 | subfolder="tokenizer",
529 | revision=args.revision,
530 | )
531 | text_encoder = CLIPTextModel.from_pretrained(
532 | args.pretrained_model_name_or_path,
533 | subfolder="text_encoder",
534 | revision=args.revision,
535 | )
536 | vae = AutoencoderKL.from_pretrained(
537 | args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision
538 | )
539 | unet = UNet2DConditionModel.from_pretrained(
540 | args.pretrained_model_name_or_path,
541 | subfolder="unet",
542 | revision=args.non_ema_revision,
543 | )
544 |
545 | # Freeze vae and text_encoder
546 | vae.requires_grad_(False)
547 | text_encoder.requires_grad_(False)
548 |
549 | # Create EMA for the unet.
550 | if args.use_ema:
551 | ema_unet = EMAModel(
552 | unet.parameters(), model_cls=UNet2DConditionModel, model_config=unet.config
553 | )
554 |
555 | if args.enable_xformers_memory_efficient_attention:
556 | if is_xformers_available():
557 | import xformers
558 |
559 | xformers_version = version.parse(xformers.__version__)
560 | if xformers_version == version.parse("0.0.16"):
561 | logger.warn(
562 | "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
563 | )
564 | unet.enable_xformers_memory_efficient_attention()
565 | else:
566 | raise ValueError(
567 | "xformers is not available. Make sure it is installed correctly"
568 | )
569 |
570 | # `accelerate` 0.16.0 will have better support for customized saving
571 | if version.parse(accelerate.__version__) >= version.parse("0.16.0"):
572 | # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
573 | def save_model_hook(models, weights, output_dir):
574 | if args.use_ema:
575 | ema_unet.save_pretrained(os.path.join(output_dir, "unet_ema"))
576 |
577 | for i, model in enumerate(models):
578 | model.save_pretrained(os.path.join(output_dir, "unet"))
579 |
580 | # make sure to pop weight so that corresponding model is not saved again
581 | weights.pop()
582 |
583 | def load_model_hook(models, input_dir):
584 | if args.use_ema:
585 | load_model = EMAModel.from_pretrained(
586 | os.path.join(input_dir, "unet_ema"), UNet2DConditionModel
587 | )
588 | ema_unet.load_state_dict(load_model.state_dict())
589 | ema_unet.to(accelerator.device)
590 | del load_model
591 |
592 | for i in range(len(models)):
593 | # pop models so that they are not loaded again
594 | model = models.pop()
595 |
596 |                 # load the diffusers-style weights into the model
597 | load_model = UNet2DConditionModel.from_pretrained(
598 | input_dir, subfolder="unet"
599 | )
600 | model.register_to_config(**load_model.config)
601 |
602 | model.load_state_dict(load_model.state_dict())
603 | del load_model
604 |
605 | accelerator.register_save_state_pre_hook(save_model_hook)
606 | accelerator.register_load_state_pre_hook(load_model_hook)
607 |
608 | if args.gradient_checkpointing:
609 | unet.enable_gradient_checkpointing()
610 |
611 | # Enable TF32 for faster training on Ampere GPUs,
612 | # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
613 | if args.allow_tf32:
614 | torch.backends.cuda.matmul.allow_tf32 = True
615 |
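    |     # When --scale_lr is passed, the base learning rate is multiplied by the
    |     # effective batch size (per-device batch size x gradient accumulation steps
    |     # x number of processes), i.e. the usual linear scaling rule.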
616 | if args.scale_lr:
617 | args.learning_rate = (
618 | args.learning_rate
619 | * args.gradient_accumulation_steps
620 | * args.train_batch_size
621 | * accelerator.num_processes
622 | )
623 |
624 | # Initialize the optimizer
625 | if args.use_8bit_adam:
626 | try:
627 | import bitsandbytes as bnb
628 | except ImportError:
629 | raise ImportError(
630 | "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`"
631 | )
632 |
633 | optimizer_cls = bnb.optim.AdamW8bit
634 | else:
635 | optimizer_cls = torch.optim.AdamW
636 |
637 | optimizer = optimizer_cls(
638 | unet.parameters(),
639 | lr=args.learning_rate,
640 | betas=(args.adam_beta1, args.adam_beta2),
641 | weight_decay=args.adam_weight_decay,
642 | eps=args.adam_epsilon,
643 | )
644 |
645 | # Get the datasets: you can either provide your own training and evaluation files (see below)
646 | # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub).
647 |
648 | # In distributed training, the load_dataset function guarantees that only one local process can concurrently
649 | # download the dataset.
650 | if args.dataset_name is not None:
651 | # Downloading and loading a dataset from the hub.
652 | dataset = load_dataset(
653 | args.dataset_name,
654 | args.dataset_config_name,
655 | cache_dir=args.cache_dir,
656 | use_auth_token=True,
657 | )
658 | else:
659 | data_files = {}
660 | if args.train_data_dir is not None:
661 | data_files["train"] = os.path.join(args.train_data_dir, "**")
662 | dataset = load_dataset(
663 | "imagefolder",
664 | data_files=data_files,
665 | cache_dir=args.cache_dir,
666 | )
667 | # See more about loading custom images at
668 | # https://huggingface.co/docs/datasets/main/en/image_load#imagefolder
669 |
670 | # Preprocessing the datasets.
671 |     # First, resolve which columns hold the original images, edit prompts, and edited images.
672 | column_names = dataset["train"].column_names
673 |
674 | # 6. Get the column names for input/target.
675 | dataset_columns = DATASET_NAME_MAPPING.get(args.dataset_name, None)
676 | if args.original_image_column is None:
677 | original_image_column = (
678 | dataset_columns[0] if dataset_columns is not None else column_names[0]
679 | )
680 | else:
681 | original_image_column = args.original_image_column
682 | if original_image_column not in column_names:
683 | raise ValueError(
684 |                 f"'--original_image_column' value '{args.original_image_column}' needs to be one of: {', '.join(column_names)}"
685 | )
686 | if args.edit_prompt_column is None:
687 | edit_prompt_column = (
688 | dataset_columns[1] if dataset_columns is not None else column_names[1]
689 | )
690 | else:
691 | edit_prompt_column = args.edit_prompt_column
692 | if edit_prompt_column not in column_names:
693 | raise ValueError(
694 |                 f"'--edit_prompt_column' value '{args.edit_prompt_column}' needs to be one of: {', '.join(column_names)}"
695 | )
696 | if args.edited_image_column is None:
697 | edited_image_column = (
698 | dataset_columns[2] if dataset_columns is not None else column_names[2]
699 | )
700 | else:
701 | edited_image_column = args.edited_image_column
702 | if edited_image_column not in column_names:
703 | raise ValueError(
704 |                 f"'--edited_image_column' value '{args.edited_image_column}' needs to be one of: {', '.join(column_names)}"
705 | )
706 |
707 | # Preprocessing the datasets.
708 | # We need to tokenize input captions and transform the images.
709 | def tokenize_captions(captions):
710 | inputs = tokenizer(
711 | captions,
712 | max_length=tokenizer.model_max_length,
713 | padding="max_length",
714 | truncation=True,
715 | return_tensors="pt",
716 | )
717 | return inputs.input_ids
718 |
719 | # Preprocessing the datasets.
720 | train_transforms = transforms.Compose(
721 | [
722 | transforms.CenterCrop(args.resolution)
723 | if args.center_crop
724 | else transforms.RandomCrop(args.resolution),
725 | transforms.RandomHorizontalFlip()
726 | if args.random_flip
727 | else transforms.Lambda(lambda x: x),
728 | ]
729 | )
730 |
731 | def preprocess_images(examples):
732 | original_images = np.concatenate(
733 | [
734 | convert_to_np(image, args.resolution)
735 | for image in examples[original_image_column]
736 | ]
737 | )
738 | edited_images = np.concatenate(
739 | [
740 | convert_to_np(image, args.resolution)
741 | for image in examples[edited_image_column]
742 | ]
743 | )
744 | # We need to ensure that the original and the edited images undergo the same
745 | # augmentation transforms.
746 | images = np.concatenate([original_images, edited_images])
747 | images = torch.tensor(images)
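    |         # Rescale pixel values from [0, 255] to [-1, 1], the input range the VAE expects.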
748 | images = 2 * (images / 255) - 1
749 | return train_transforms(images)
750 |
751 | def preprocess_train(examples):
752 | # Preprocess images.
753 | preprocessed_images = preprocess_images(examples)
754 | # Since the original and edited images were concatenated before
755 | # applying the transformations, we need to separate them and reshape
756 | # them accordingly.
757 | original_images, edited_images = preprocessed_images.chunk(2)
758 | original_images = original_images.reshape(
759 | -1, 3, args.resolution, args.resolution
760 | )
761 | edited_images = edited_images.reshape(-1, 3, args.resolution, args.resolution)
762 |
763 | # Collate the preprocessed images into the `examples`.
764 | examples["original_pixel_values"] = original_images
765 | examples["edited_pixel_values"] = edited_images
766 |
767 | # Preprocess the captions.
768 | captions = [caption for caption in examples[edit_prompt_column]]
769 | examples["input_ids"] = tokenize_captions(captions)
770 | return examples
771 |
772 | with accelerator.main_process_first():
773 | if args.max_train_samples is not None:
774 | dataset["train"] = (
775 | dataset["train"]
776 | .shuffle(seed=args.seed)
777 | .select(range(args.max_train_samples))
778 | )
779 | # Set the training transforms
780 | train_dataset = dataset["train"].with_transform(preprocess_train)
781 |
782 | def collate_fn(examples):
783 | original_pixel_values = torch.stack(
784 | [example["original_pixel_values"] for example in examples]
785 | )
786 | original_pixel_values = original_pixel_values.to(
787 | memory_format=torch.contiguous_format
788 | ).float()
789 | edited_pixel_values = torch.stack(
790 | [example["edited_pixel_values"] for example in examples]
791 | )
792 | edited_pixel_values = edited_pixel_values.to(
793 | memory_format=torch.contiguous_format
794 | ).float()
795 | input_ids = torch.stack([example["input_ids"] for example in examples])
796 | return {
797 | "original_pixel_values": original_pixel_values,
798 | "edited_pixel_values": edited_pixel_values,
799 | "input_ids": input_ids,
800 | }
801 |
802 | # DataLoaders creation:
803 | train_dataloader = torch.utils.data.DataLoader(
804 | train_dataset,
805 | shuffle=True,
806 | collate_fn=collate_fn,
807 | batch_size=args.train_batch_size,
808 | num_workers=args.dataloader_num_workers,
809 | )
810 |
811 | # Scheduler and math around the number of training steps.
812 | overrode_max_train_steps = False
813 | num_update_steps_per_epoch = math.ceil(
814 | len(train_dataloader) / args.gradient_accumulation_steps
815 | )
816 | if args.max_train_steps is None:
817 | args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
818 | overrode_max_train_steps = True
819 |
820 | lr_scheduler = get_scheduler(
821 | args.lr_scheduler,
822 | optimizer=optimizer,
823 | num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps,
824 | num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
825 | )
826 |
827 | # Prepare everything with our `accelerator`.
828 | unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
829 | unet, optimizer, train_dataloader, lr_scheduler
830 | )
831 |
832 | if args.use_ema:
833 | ema_unet.to(accelerator.device)
834 |
835 | # For mixed precision training we cast the text_encoder and vae weights to half-precision
836 | # as these models are only used for inference, keeping weights in full precision is not required.
837 | weight_dtype = torch.float32
838 | if accelerator.mixed_precision == "fp16":
839 | weight_dtype = torch.float16
840 | elif accelerator.mixed_precision == "bf16":
841 | weight_dtype = torch.bfloat16
842 |
843 |     # Move the text_encoder and vae to the GPU and cast them to weight_dtype
844 | text_encoder.to(accelerator.device, dtype=weight_dtype)
845 | vae.to(accelerator.device, dtype=weight_dtype)
846 |
847 | # We need to recalculate our total training steps as the size of the training dataloader may have changed.
848 | num_update_steps_per_epoch = math.ceil(
849 | len(train_dataloader) / args.gradient_accumulation_steps
850 | )
851 | if overrode_max_train_steps:
852 | args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
853 | # Afterwards we recalculate our number of training epochs
854 | args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
855 |
856 | # We need to initialize the trackers we use, and also store our configuration.
857 |     # The trackers are initialized automatically on the main process.
858 | if accelerator.is_main_process:
859 | accelerator.init_trackers("instruct-pix2pix-cartoonizer", config=vars(args))
860 |
861 | # Train!
862 | total_batch_size = (
863 | args.train_batch_size
864 | * accelerator.num_processes
865 | * args.gradient_accumulation_steps
866 | )
867 |
868 | logger.info("***** Running training *****")
869 | logger.info(f" Num examples = {len(train_dataset)}")
870 | logger.info(f" Num Epochs = {args.num_train_epochs}")
871 | logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
872 | logger.info(
873 | f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}"
874 | )
875 | logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
876 | logger.info(f" Total optimization steps = {args.max_train_steps}")
877 | global_step = 0
878 | first_epoch = 0
879 |
880 | # Potentially load in the weights and states from a previous save
881 | if args.resume_from_checkpoint:
882 | if args.resume_from_checkpoint != "latest":
883 | path = os.path.basename(args.resume_from_checkpoint)
884 | else:
885 | # Get the most recent checkpoint
886 | dirs = os.listdir(args.output_dir)
887 | dirs = [d for d in dirs if d.startswith("checkpoint")]
888 | dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
889 | path = dirs[-1] if len(dirs) > 0 else None
890 |
891 | if path is None:
892 | accelerator.print(
893 | f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
894 | )
895 | args.resume_from_checkpoint = None
896 | else:
897 | accelerator.print(f"Resuming from checkpoint {path}")
898 | accelerator.load_state(os.path.join(args.output_dir, path))
899 | global_step = int(path.split("-")[1])
900 |
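    |             # Translate the optimizer-step count back into dataloader steps so that
    |             # already-seen batches can be skipped when resuming mid-epoch;
    |             # resume_step counts micro-batches, not optimizer updates.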
901 | resume_global_step = global_step * args.gradient_accumulation_steps
902 | first_epoch = global_step // num_update_steps_per_epoch
903 | resume_step = resume_global_step % (
904 | num_update_steps_per_epoch * args.gradient_accumulation_steps
905 | )
906 |
907 | # Only show the progress bar once on each machine.
908 | progress_bar = tqdm(
909 | range(global_step, args.max_train_steps),
910 | disable=not accelerator.is_local_main_process,
911 | )
912 | progress_bar.set_description("Steps")
913 |
914 | for epoch in range(first_epoch, args.num_train_epochs):
915 | unet.train()
916 | train_loss = 0.0
917 | for step, batch in enumerate(train_dataloader):
918 | # Skip steps until we reach the resumed step
919 | if (
920 | args.resume_from_checkpoint
921 | and epoch == first_epoch
922 | and step < resume_step
923 | ):
924 | if step % args.gradient_accumulation_steps == 0:
925 | progress_bar.update(1)
926 | continue
927 |
928 | with accelerator.accumulate(unet):
929 |                 # We want to learn the denoising process w.r.t. the edited images,
930 |                 # conditioned on the original image being edited and the edit instruction.
931 | # So, first, convert images to latent space.
932 | latents = vae.encode(
933 | batch["edited_pixel_values"].to(weight_dtype)
934 | ).latent_dist.sample()
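    |                 # Scale the latents by the VAE scaling factor (typically 0.18215 for
    |                 # Stable Diffusion v1 checkpoints) so they match the distribution the
    |                 # diffusion model was trained on.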
935 | latents = latents * vae.config.scaling_factor
936 |
937 | # Sample noise that we'll add to the latents
938 | noise = torch.randn_like(latents)
939 | bsz = latents.shape[0]
940 | # Sample a random timestep for each image
941 | timesteps = torch.randint(
942 | 0,
943 |                     noise_scheduler.config.num_train_timesteps,
944 | (bsz,),
945 | device=latents.device,
946 | )
947 | timesteps = timesteps.long()
948 |
949 | # Add noise to the latents according to the noise magnitude at each timestep
950 | # (this is the forward diffusion process)
951 | noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
952 |
953 | # Get the text embedding for conditioning.
954 | encoder_hidden_states = text_encoder(batch["input_ids"])[0]
955 |
956 | # Get the additional image embedding for conditioning.
957 | # Instead of getting a diagonal Gaussian here, we simply take the mode.
958 | original_image_embeds = vae.encode(
959 | batch["original_pixel_values"].to(weight_dtype)
960 | ).latent_dist.mode()
961 |
962 | # Conditioning dropout to support classifier-free guidance during inference. For more details
963 | # check out the section 3.2.1 of the original paper https://arxiv.org/abs/2211.09800.
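    |                 # With dropout probability p, the masks below realize three regimes:
    |                 # random_p < p drops only the text prompt, p <= random_p < 2p drops
    |                 # both the prompt and the image, and 2p <= random_p < 3p drops only
    |                 # the image; all remaining samples keep both conditionings.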
964 | if args.conditioning_dropout_prob is not None:
965 | random_p = torch.rand(
966 | bsz, device=latents.device, generator=generator
967 | )
968 | # Sample masks for the edit prompts.
969 | prompt_mask = random_p < 2 * args.conditioning_dropout_prob
970 | prompt_mask = prompt_mask.reshape(bsz, 1, 1)
971 | # Final text conditioning.
972 | null_conditioning = text_encoder(
973 | tokenize_captions([""]).to(accelerator.device)
974 | )[0]
975 | encoder_hidden_states = torch.where(
976 | prompt_mask, null_conditioning, encoder_hidden_states
977 | )
978 |
979 | # Sample masks for the original images.
980 | image_mask_dtype = original_image_embeds.dtype
981 | image_mask = 1 - (
982 | (random_p >= args.conditioning_dropout_prob).to(
983 | image_mask_dtype
984 | )
985 | * (random_p < 3 * args.conditioning_dropout_prob).to(
986 | image_mask_dtype
987 | )
988 | )
989 | image_mask = image_mask.reshape(bsz, 1, 1, 1)
990 | # Final image conditioning.
991 | original_image_embeds = image_mask * original_image_embeds
992 |
993 | # Concatenate the `original_image_embeds` with the `noisy_latents`.
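    |                 # The result has 8 channels (4 noisy latent channels plus 4
    |                 # image-conditioning channels), matching the expanded conv_in of an
    |                 # InstructPix2Pix-style UNet.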
994 | concatenated_noisy_latents = torch.cat(
995 | [noisy_latents, original_image_embeds], dim=1
996 | )
997 |
998 | # Get the target for loss depending on the prediction type
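    |                 # "epsilon" regresses the sampled noise itself, while "v_prediction"
    |                 # regresses the velocity sqrt(alpha_bar_t) * noise - sqrt(1 - alpha_bar_t) * x_0,
    |                 # which is what noise_scheduler.get_velocity() computes.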
999 | if noise_scheduler.config.prediction_type == "epsilon":
1000 | target = noise
1001 | elif noise_scheduler.config.prediction_type == "v_prediction":
1002 | target = noise_scheduler.get_velocity(latents, noise, timesteps)
1003 | else:
1004 | raise ValueError(
1005 | f"Unknown prediction type {noise_scheduler.config.prediction_type}"
1006 | )
1007 |
1008 | # Predict the noise residual and compute loss
1009 | model_pred = unet(
1010 | concatenated_noisy_latents, timesteps, encoder_hidden_states
1011 | ).sample
1012 | loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
1013 |
1014 | # Gather the losses across all processes for logging (if we use distributed training).
1015 | avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean()
1016 | train_loss += avg_loss.item() / args.gradient_accumulation_steps
1017 |
1018 | # Backpropagate
1019 | accelerator.backward(loss)
1020 | if accelerator.sync_gradients:
1021 | accelerator.clip_grad_norm_(unet.parameters(), args.max_grad_norm)
1022 | optimizer.step()
1023 | lr_scheduler.step()
1024 | optimizer.zero_grad()
1025 |
1026 | # Checks if the accelerator has performed an optimization step behind the scenes
1027 | if accelerator.sync_gradients:
1028 | if args.use_ema:
1029 | ema_unet.step(unet.parameters())
1030 | progress_bar.update(1)
1031 | global_step += 1
1032 | accelerator.log({"train_loss": train_loss}, step=global_step)
1033 | train_loss = 0.0
1034 |
1035 | if global_step % args.checkpointing_steps == 0:
1036 | if accelerator.is_main_process:
1037 | save_path = os.path.join(
1038 | args.output_dir, f"checkpoint-{global_step}"
1039 | )
1040 | accelerator.save_state(save_path)
1041 | logger.info(f"Saved state to {save_path}")
1042 |
1043 | logs = {
1044 | "step_loss": loss.detach().item(),
1045 | "lr": lr_scheduler.get_last_lr()[0],
1046 | }
1047 | progress_bar.set_postfix(**logs)
1048 |
1049 | if global_step >= args.max_train_steps:
1050 | break
1051 |
1052 | if accelerator.is_main_process:
1053 | if (
1054 | (args.val_image_url is not None)
1055 | and (args.validation_prompt is not None)
1056 | and (epoch % args.validation_epochs == 0)
1057 | ):
1058 | logger.info(
1059 | f"Running validation... \n Generating {args.num_validation_images} images with prompt:"
1060 | f" {args.validation_prompt}."
1061 | )
1062 | # create pipeline
1063 | if args.use_ema:
1064 | # Store the UNet parameters temporarily and load the EMA parameters to perform inference.
1065 | ema_unet.store(unet.parameters())
1066 | ema_unet.copy_to(unet.parameters())
1067 | pipeline = StableDiffusionInstructPix2PixPipeline.from_pretrained(
1068 | args.pretrained_model_name_or_path,
1069 | unet=unet,
1070 | revision=args.revision,
1071 | torch_dtype=weight_dtype,
1072 | )
1073 | pipeline = pipeline.to(accelerator.device)
1074 | pipeline.set_progress_bar_config(disable=True)
1075 |
1076 | # run inference
1077 | original_image = download_image(args.val_image_url)
1078 | edited_images = []
1079 | with torch.autocast(
1080 | str(accelerator.device),
1081 | enabled=accelerator.mixed_precision == "fp16",
1082 | ):
1083 | for _ in range(args.num_validation_images):
1084 | edited_images.append(
1085 | pipeline(
1086 | args.validation_prompt,
1087 | image=original_image,
1088 | num_inference_steps=20,
1089 | image_guidance_scale=1.5,
1090 | guidance_scale=7,
1091 | generator=generator,
1092 | ).images[0]
1093 | )
1094 |
1095 | for tracker in accelerator.trackers:
1096 | if tracker.name == "wandb":
1097 | wandb_table = wandb.Table(columns=WANDB_TABLE_COL_NAMES)
1098 | for edited_image in edited_images:
1099 | wandb_table.add_data(
1100 | wandb.Image(original_image),
1101 | wandb.Image(edited_image),
1102 | args.validation_prompt,
1103 | )
1104 | tracker.log({"validation": wandb_table})
1105 | if args.use_ema:
1106 | # Switch back to the original UNet parameters.
1107 | ema_unet.restore(unet.parameters())
1108 |
1109 | del pipeline
1110 | torch.cuda.empty_cache()
1111 |
1112 | # Create the pipeline using the trained modules and save it.
1113 | accelerator.wait_for_everyone()
1114 | if accelerator.is_main_process:
1115 | unet = accelerator.unwrap_model(unet)
1116 | if args.use_ema:
1117 | ema_unet.copy_to(unet.parameters())
1118 |
1119 | pipeline = StableDiffusionInstructPix2PixPipeline.from_pretrained(
1120 | args.pretrained_model_name_or_path,
1121 | text_encoder=text_encoder,
1122 | vae=vae,
1123 | unet=unet,
1124 | revision=args.revision,
1125 | )
1126 | pipeline.save_pretrained(args.output_dir)
1127 |
1128 | if args.push_to_hub:
1129 | repo.push_to_hub(
1130 | commit_message="End of training", blocking=False, auto_lfs_prune=True
1131 | )
1132 |
1133 | accelerator.end_training()
1134 |
1135 |
1136 | if __name__ == "__main__":
1137 | main()
1138 |
--------------------------------------------------------------------------------